[fix](Nereids) fixed the limit offset error (#39316) #41936

Merged
4 commits merged on Oct 17, 2024
@@ -1560,10 +1560,35 @@ public PlanFragment visitPhysicalNestedLoopJoin(
public PlanFragment visitPhysicalLimit(PhysicalLimit<? extends Plan> physicalLimit, PlanTranslatorContext context) {
PlanFragment inputFragment = physicalLimit.child(0).accept(this, context);
PlanNode child = inputFragment.getPlanRoot();
child.setLimit(MergeLimits.mergeLimit(physicalLimit.getLimit(), physicalLimit.getOffset(), child.getLimit()));
// TODO: plan node don't support limit
// child.setOffset(MergeLimits.mergeOffset(physicalLimit.getOffset(), child.getOffset()));
updateLegacyPlanIdToPhysicalPlan(child, physicalLimit);

if (physicalLimit.getPhase().isLocal()) {
child.setLimit(MergeLimits.mergeLimit(physicalLimit.getLimit(), physicalLimit.getOffset(),
child.getLimit()));
} else if (physicalLimit.getPhase().isGlobal()) {
if (!(child instanceof ExchangeNode)) {
ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), child);
exchangeNode.setLimit(physicalLimit.getLimit());
exchangeNode.setOffset(physicalLimit.getOffset());
exchangeNode.setPartitionType(TPartitionType.UNPARTITIONED);
exchangeNode.setNumInstances(1);
PlanFragment fragment = new PlanFragment(context.nextFragmentId(), exchangeNode,
DataPartition.UNPARTITIONED);
inputFragment.setDestination(exchangeNode);
inputFragment.setOutputPartition(DataPartition.UNPARTITIONED);
DataStreamSink sink = new DataStreamSink(exchangeNode.getId());
sink.setOutputPartition(DataPartition.UNPARTITIONED);
inputFragment.setSink(sink);
context.addPlanFragment(fragment);
inputFragment = fragment;
} else {
ExchangeNode exchangeNode = (ExchangeNode) child;
exchangeNode.setLimit(MergeLimits.mergeLimit(physicalLimit.getLimit(), physicalLimit.getOffset(),
exchangeNode.getLimit()));
exchangeNode.setOffset(MergeLimits.mergeOffset(physicalLimit.getOffset(), exchangeNode.getOffset()));
}
}

updateLegacyPlanIdToPhysicalPlan(inputFragment.getPlanRoot(), physicalLimit);
return inputFragment;
}

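The core of the fix is visible in this hunk: a global-phase limit is no longer handled by merging limit and offset into the child's limit (previously the offset was left to the now-removed post-processor, judging by the AddOffsetIntoDistribute name below); instead the translator wraps the child in a single-instance, UNPARTITIONED ExchangeNode, or reuses an existing exchange, and sets both the limit and the offset on it. The snippet below is an editor's illustration, not code from this PR: a minimal, runnable sketch of why an OFFSET only has well-defined semantics once the rows of all instances are gathered into one stream, which is what the added exchange provides.

// Editor's illustration, not code from this PR: why OFFSET must be applied after
// all instances are gathered into one stream. The two inner lists are hypothetical
// outputs of two plan-fragment instances.
import java.util.List;
import java.util.stream.Collectors;

public class OffsetPlacementDemo {
    public static void main(String[] args) {
        List<List<Integer>> instances = List.of(List.of(1, 2, 3), List.of(4, 5, 6));
        long offset = 2;
        long limit = 2;

        // Wrong placement: every instance skips `offset` rows on its own before the
        // exchange, so rows 1, 2, 4 and 5 are all discarded.
        List<Integer> perInstance = instances.stream()
                .flatMap(rows -> rows.stream().skip(offset).limit(limit))
                .collect(Collectors.toList());

        // What the translator now arranges conceptually: gather everything through a
        // single-instance, UNPARTITIONED exchange, then skip `offset` rows exactly once.
        List<Integer> afterGather = instances.stream()
                .flatMap(List::stream)
                .skip(offset)
                .limit(limit)
                .collect(Collectors.toList());

        System.out.println("offset per instance: " + perInstance); // [3, 6]
        System.out.println("offset after gather: " + afterGather); // [3, 4]
    }
}

Only the gathered variant matches SQL LIMIT/OFFSET semantics; the hunk enforces it by calling setNumInstances(1) and setPartitionType(TPartitionType.UNPARTITIONED) on the exchange that owns the offset.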

This file was deleted.

@@ -62,7 +62,6 @@ public List<PlanPostProcessor> getProcessors() {
builder.add(new ColumnPruningPostProcessor());
builder.add(new MergeProjectPostProcessor());
builder.add(new RecomputeLogicalPropertiesProcessor());
builder.add(new AddOffsetIntoDistribute());
builder.add(new TopNScanOpt());
// after generate rf, DO NOT replace PLAN NODE
builder.add(new FragmentProcessor());
@@ -29,6 +29,7 @@
import org.apache.doris.statistics.StatsRecursiveDerive;
import org.apache.doris.thrift.TExchangeNode;
import org.apache.doris.thrift.TExplainLevel;
import org.apache.doris.thrift.TPartitionType;
import org.apache.doris.thrift.TPlanNode;
import org.apache.doris.thrift.TPlanNodeType;

@@ -64,6 +65,7 @@ public class ExchangeNode extends PlanNode {
private SortInfo mergeInfo;

private boolean isRightChildOfBroadcastHashJoin = false;
private TPartitionType partitionType;

/**
* use for Nereids only.
@@ -77,6 +79,10 @@ public ExchangeNode(PlanNodeId id, PlanNode inputNode) {
computeTupleIds();
}

public void setPartitionType(TPartitionType partitionType) {
this.partitionType = partitionType;
}

/**
* Create ExchangeNode that consumes output of inputNode.
* An ExchangeNode doesn't have an input node as a child, which is why we
@@ -187,15 +187,6 @@
-- !exist_corr_limit0 --

-- !exist_unCorrelated_limit1_offset1 --
1 2
1 3
2 4
2 5
3 3
3 4
20 2
22 3
24 4

-- !exist_unCorrelated_limit0_offset1 --

7 changes: 7 additions & 0 deletions regression-test/data/nereids_syntax_p0/test_limit.out
@@ -0,0 +1,7 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !limit1 --
2 7844 TURNER SALESMAN 7698 1981-09-08 1500.0 0.0 30

-- !lmit2 --
3 7934 MILLER CLERK 7782 1982-01-23 1300.0 0.0 10

39 changes: 35 additions & 4 deletions regression-test/suites/nereids_syntax_p0/test_limit.groovy
@@ -16,10 +16,6 @@
// under the License.

suite("test_limit") {
sql 'set enable_nereids_planner=true'
sql 'set enable_fallback_to_original_planner=false'


sql """
drop table if exists test1
"""
@@ -36,4 +32,39 @@ suite("test_limit") {
sql "select * from test1 limit 2 offset 1"
result([[1]])
}

sql """
drop table if exists row_number_limit_tbl;
"""
sql """
CREATE TABLE row_number_limit_tbl (
k1 INT NULL,
k2 VARCHAR(255) NULL,
k3 VARCHAR(255) NULL,
k4 INT NULL,
k5 VARCHAR(255) NULL,
k6 FLOAT NULL,
k7 FLOAT NULL,
k8 INT NULL
) ENGINE=OLAP
DUPLICATE KEY(k1, k2)
DISTRIBUTED BY HASH(k1) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
);
"""
sql """ INSERT INTO row_number_limit_tbl VALUES (7788, 'SCOTT', 'ANALYST', 7566, '1987-04-19', 3000, 0, 20); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """
qt_limit1 """
select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t order by k6s limit 1 offset 1;
"""

sql """ truncate table row_number_limit_tbl; """
sql """ INSERT INTO row_number_limit_tbl VALUES (7788, 'SCOTT', 'ANALYST', 7566, '1987-04-19', 3000, 0, 20); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """
sql """ INSERT INTO row_number_limit_tbl VALUES (7934, 'MILLER', 'CLERK', 7782, '1982-01-23', 1300, 0, 10); """

qt_lmit2 """
select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2;
"""
}
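As a reading aid for the new qt_limit1 case (an editor's sketch, not part of the PR; row values copied from the INSERT statements above): with only SCOTT (k6 = 3000) and TURNER (k6 = 1500) in the table, row_number() over (order by k6 desc) assigns 1 to SCOTT and 2 to TURNER, so ORDER BY k6s LIMIT 1 OFFSET 1 must return exactly the TURNER row recorded in test_limit.out.

// Editor's sketch, not part of the PR: deriving the expected qt_limit1 result by hand.
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class RowNumberLimitCheck {
    public static void main(String[] args) {
        // k1, k2, k6 of the two rows inserted before qt_limit1.
        List<Object[]> rows = Arrays.asList(
                new Object[] {7788, "SCOTT", 3000.0},
                new Object[] {7844, "TURNER", 1500.0});

        // row_number() over (order by k6 desc): SCOTT -> 1, TURNER -> 2.
        List<Object[]> byK6Desc = rows.stream()
                .sorted(Comparator.comparingDouble((Object[] r) -> (double) r[2]).reversed())
                .collect(Collectors.toList());

        // order by k6s limit 1 offset 1: skip one numbered row, keep the next one.
        List<String> result = IntStream.range(0, byK6Desc.size())
                .mapToObj(i -> (i + 1) + " " + byK6Desc.get(i)[0] + " " + byK6Desc.get(i)[1])
                .skip(1)
                .limit(1)
                .collect(Collectors.toList());

        System.out.println(result); // [2 7844 TURNER] -- first columns of the qt_limit1 row in test_limit.out
    }
}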