align node id in explain with nereids node id (apache#25068)
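
The change is mechanical: every call site in the plan translator that previously drew a fresh ID from the translator context now reuses the nereids plan's own ID. A minimal before/after sketch of the pattern (both lines appear in the diff below, at the `OlapScanNode` call site):

    // Before: IDs came from a translator-local counter, unrelated to nereids node IDs.
    OlapScanNode olapScanNode = new OlapScanNode(context.nextPlanNodeId(), tupleDescriptor, "OlapScanNode");

    // After: the legacy node reuses the nereids plan's ObjectId, converted to a
    // PlanNodeId, so `explain` and `explain physical plan` share one numbering.
    OlapScanNode olapScanNode = new OlapScanNode(olapScan.translatePlanNodeId(), tupleDescriptor, "OlapScanNode");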
englefly authored Oct 9, 2023
1 parent aa1704c commit ffaa145
Showing 5 changed files with 47 additions and 35 deletions.
@@ -273,7 +273,7 @@ public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? extends Plan> d
}
}

-ExchangeNode exchangeNode = new ExchangeNode(context.nextPlanNodeId(), inputFragment.getPlanRoot());
+ExchangeNode exchangeNode = new ExchangeNode(distribute.translatePlanNodeId(), inputFragment.getPlanRoot());
updateLegacyPlanIdToPhysicalPlan(exchangeNode, distribute);
List<ExprId> validOutputIds = distribute.getOutputExprIds();
if (distribute.child() instanceof PhysicalHashAggregate) {
@@ -472,24 +472,24 @@ public PlanFragment visitPhysicalFileScan(PhysicalFileScan fileScan, PlanTransla
if (table instanceof HMSExternalTable) {
switch (((HMSExternalTable) table).getDlaType()) {
case HUDI:
-scanNode = new HudiScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new HudiScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
break;
case ICEBERG:
-scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new IcebergScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
break;
case HIVE:
-scanNode = new HiveScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new HiveScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
((HiveScanNode) scanNode).setSelectedPartitions(fileScan.getSelectedPartitions());
break;
default:
throw new RuntimeException("do not support DLA type " + ((HMSExternalTable) table).getDlaType());
}
} else if (table instanceof IcebergExternalTable) {
-scanNode = new IcebergScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new IcebergScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
} else if (table instanceof PaimonExternalTable) {
-scanNode = new PaimonScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new PaimonScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
} else if (table instanceof MaxComputeExternalTable) {
-scanNode = new MaxComputeScanNode(context.nextPlanNodeId(), tupleDescriptor, false);
+scanNode = new MaxComputeScanNode(fileScan.translatePlanNodeId(), tupleDescriptor, false);
} else {
throw new RuntimeException("do not support table type " + table.getType());
}
@@ -532,7 +532,7 @@ public PlanFragment visitPhysicalEmptyRelation(PhysicalEmptyRelation emptyRelati

ArrayList<TupleId> tupleIds = new ArrayList<>();
tupleIds.add(tupleDescriptor.getId());
-EmptySetNode emptySetNode = new EmptySetNode(context.nextPlanNodeId(), tupleIds);
+EmptySetNode emptySetNode = new EmptySetNode(emptyRelation.translatePlanNodeId(), tupleIds);

PlanFragment planFragment = createPlanFragment(emptySetNode,
DataPartition.UNPARTITIONED, emptyRelation);
@@ -546,7 +546,7 @@ public PlanFragment visitPhysicalEsScan(PhysicalEsScan esScan, PlanTranslatorCon
List<Slot> slots = esScan.getOutput();
ExternalTable table = esScan.getTable();
TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context);
-EsScanNode esScanNode = new EsScanNode(context.nextPlanNodeId(), tupleDescriptor, true);
+EsScanNode esScanNode = new EsScanNode(esScan.translatePlanNodeId(), tupleDescriptor, true);
esScanNode.addConjuncts(translateToLegacyConjuncts(esScan.getConjuncts()));
Utils.execWithUncheckedException(esScanNode::init);
context.addScanNode(esScanNode);
@@ -568,7 +568,7 @@ public PlanFragment visitPhysicalJdbcScan(PhysicalJdbcScan jdbcScan, PlanTransla
List<Slot> slots = jdbcScan.getOutput();
TableIf table = jdbcScan.getTable();
TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context);
-JdbcScanNode jdbcScanNode = new JdbcScanNode(context.nextPlanNodeId(), tupleDescriptor,
+JdbcScanNode jdbcScanNode = new JdbcScanNode(jdbcScan.translatePlanNodeId(), tupleDescriptor,
table instanceof JdbcExternalTable);
jdbcScanNode.addConjuncts(translateToLegacyConjuncts(jdbcScan.getConjuncts()));
Utils.execWithUncheckedException(jdbcScanNode::init);
@@ -596,8 +596,7 @@ public PlanFragment visitPhysicalOlapScan(PhysicalOlapScan olapScan, PlanTransla
if (olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) {
generateTupleDesc(olapScan.getBaseOutputs(), olapTable, context);
}

-OlapScanNode olapScanNode = new OlapScanNode(context.nextPlanNodeId(), tupleDescriptor, "OlapScanNode");
+OlapScanNode olapScanNode = new OlapScanNode(olapScan.translatePlanNodeId(), tupleDescriptor, "OlapScanNode");
// TODO: move all node set cardinality into one place
if (olapScan.getStats() != null) {
olapScanNode.setCardinality((long) olapScan.getStats().getRowCount());
@@ -702,7 +701,7 @@ public PlanFragment visitPhysicalOneRowRelation(PhysicalOneRowRelation oneRowRel
slotDescriptor.setIsNullable(slots.get(i).nullable());
}

-UnionNode unionNode = new UnionNode(context.nextPlanNodeId(), oneRowTuple.getId());
+UnionNode unionNode = new UnionNode(oneRowRelation.translatePlanNodeId(), oneRowTuple.getId());
unionNode.setCardinality(1L);
unionNode.addConstExprList(legacyExprs);
unionNode.finalizeForNereids(oneRowTuple.getSlots(), new ArrayList<>());
@@ -718,7 +717,7 @@ public PlanFragment visitPhysicalSchemaScan(PhysicalSchemaScan schemaScan, PlanT
Table table = schemaScan.getTable();
List<Slot> slots = ImmutableList.copyOf(schemaScan.getOutput());
TupleDescriptor tupleDescriptor = generateTupleDesc(slots, table, context);
-SchemaScanNode scanNode = new SchemaScanNode(context.nextPlanNodeId(), tupleDescriptor);
+SchemaScanNode scanNode = new SchemaScanNode(schemaScan.translatePlanNodeId(), tupleDescriptor);
context.getRuntimeTranslator().ifPresent(
runtimeFilterGenerator -> runtimeFilterGenerator.getTargetOnScanNode(schemaScan.getRelationId())
.forEach(expr -> runtimeFilterGenerator.translateRuntimeFilterTarget(expr, scanNode, context)
@@ -738,7 +737,7 @@ public PlanFragment visitPhysicalTVFRelation(PhysicalTVFRelation tvfRelation, Pl
TupleDescriptor tupleDescriptor = generateTupleDesc(slots, tvfRelation.getFunction().getTable(), context);

TableValuedFunctionIf catalogFunction = tvfRelation.getFunction().getCatalogFunction();
-ScanNode scanNode = catalogFunction.getScanNode(context.nextPlanNodeId(), tupleDescriptor);
+ScanNode scanNode = catalogFunction.getScanNode(tvfRelation.translatePlanNodeId(), tupleDescriptor);
Utils.execWithUncheckedException(scanNode::init);
context.getRuntimeTranslator().ifPresent(
runtimeFilterGenerator -> runtimeFilterGenerator.getTargetOnScanNode(tvfRelation.getRelationId())
@@ -816,7 +815,7 @@ public PlanFragment visitPhysicalHashAggregate(
boolean isPartial = aggregate.getAggregateParam().aggMode.productAggregateBuffer;
AggregateInfo aggInfo = AggregateInfo.create(execGroupingExpressions, execAggregateFunctions,
aggFunOutputIds, isPartial, outputTupleDesc, outputTupleDesc, aggregate.getAggPhase().toExec());
-AggregationNode aggregationNode = new AggregationNode(context.nextPlanNodeId(),
+AggregationNode aggregationNode = new AggregationNode(aggregate.translatePlanNodeId(),
inputPlanFragment.getPlanRoot(), aggInfo);
if (!aggregate.getAggMode().isFinalPhase) {
aggregationNode.unsetNeedsFinalize();
@@ -908,7 +907,7 @@ public PlanFragment visitPhysicalAssertNumRows(PhysicalAssertNumRows<? extends P
PlanTranslatorContext context) {
PlanFragment currentFragment = assertNumRows.child().accept(this, context);
// create assertNode
-AssertNumRowsNode assertNumRowsNode = new AssertNumRowsNode(context.nextPlanNodeId(),
+AssertNumRowsNode assertNumRowsNode = new AssertNumRowsNode(assertNumRows.translatePlanNodeId(),
currentFragment.getPlanRoot(),
ExpressionTranslator.translateAssert(assertNumRows.getAssertNumRowsElement()));
addPlanRoot(currentFragment, assertNumRowsNode, assertNumRows);
@@ -1012,7 +1011,7 @@ public PlanFragment visitPhysicalFilter(PhysicalFilter<? extends Plan> filter, P
PlanNode planNode = inputFragment.getPlanRoot();
if (planNode instanceof ExchangeNode || planNode instanceof SortNode || planNode instanceof UnionNode) {
// the three nodes don't support conjuncts, need create a SelectNode to filter data
-SelectNode selectNode = new SelectNode(context.nextPlanNodeId(), planNode);
+SelectNode selectNode = new SelectNode(filter.translatePlanNodeId(), planNode);
addConjunctsToPlanNode(filter, selectNode, context);
addPlanRoot(inputFragment, selectNode, filter);
} else {
@@ -1046,7 +1045,7 @@ public PlanFragment visitPhysicalGenerate(PhysicalGenerate<? extends Plan> gener
.flatMap(List::stream)
.map(SlotDescriptor::getId)
.collect(Collectors.toList());
-TableFunctionNode tableFunctionNode = new TableFunctionNode(context.nextPlanNodeId(),
+TableFunctionNode tableFunctionNode = new TableFunctionNode(generate.translatePlanNodeId(),
currentFragment.getPlanRoot(), tupleDescriptor.getId(), functionCalls, outputSlotIds);
addPlanRoot(currentFragment, tableFunctionNode, generate);
return currentFragment;
@@ -1124,7 +1123,7 @@ public PlanFragment visitPhysicalHashJoin(
.map(e -> ExpressionTranslator.translate(e, context))
.collect(Collectors.toList());

-HashJoinNode hashJoinNode = new HashJoinNode(context.nextPlanNodeId(), leftPlanRoot,
+HashJoinNode hashJoinNode = new HashJoinNode(hashJoin.translatePlanNodeId(), leftPlanRoot,
rightPlanRoot, JoinType.toJoinOperator(joinType), execEqConjuncts, Lists.newArrayList(),
null, null, null, hashJoin.isMarkJoin());

@@ -1366,8 +1365,7 @@ public PlanFragment visitPhysicalNestedLoopJoin(
.collect(Collectors.toList());

JoinType joinType = nestedLoopJoin.getJoinType();

-NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(context.nextPlanNodeId(),
+NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(nestedLoopJoin.translatePlanNodeId(),
leftFragmentPlanRoot, rightFragmentPlanRoot, tupleIds, JoinType.toJoinOperator(joinType),
null, null, null, nestedLoopJoin.isMarkJoin());
if (nestedLoopJoin.getStats() != null) {
@@ -1695,11 +1693,11 @@ public PlanFragment visitPhysicalSetOperation(
SetOperationNode setOperationNode;
// create setOperationNode
if (setOperation instanceof PhysicalUnion) {
-setOperationNode = new UnionNode(context.nextPlanNodeId(), setTuple.getId());
+setOperationNode = new UnionNode(setOperation.translatePlanNodeId(), setTuple.getId());
} else if (setOperation instanceof PhysicalExcept) {
-setOperationNode = new ExceptNode(context.nextPlanNodeId(), setTuple.getId());
+setOperationNode = new ExceptNode(setOperation.translatePlanNodeId(), setTuple.getId());
} else if (setOperation instanceof PhysicalIntersect) {
-setOperationNode = new IntersectNode(context.nextPlanNodeId(), setTuple.getId());
+setOperationNode = new IntersectNode(setOperation.translatePlanNodeId(), setTuple.getId());
} else {
throw new RuntimeException("not support set operation type " + setOperation);
}
@@ -1908,7 +1906,7 @@ public PlanFragment visitPhysicalRepeat(PhysicalRepeat<? extends Plan> repeat, P
.flatMap(Set::stream)
.collect(ImmutableSet.toImmutableSet());

-RepeatNode repeatNode = new RepeatNode(context.nextPlanNodeId(),
+RepeatNode repeatNode = new RepeatNode(repeat.translatePlanNodeId(),
inputPlanFragment.getPlanRoot(), groupingInfo, repeatSlotIdList,
allSlotId, repeat.computeVirtualSlotValues(sortedVirtualSlots));
addPlanRoot(inputPlanFragment, repeatNode, repeat);
@@ -1983,7 +1981,7 @@ public PlanFragment visitPhysicalWindow(PhysicalWindow<? extends Plan> physicalW

// 4. generate AnalyticEvalNode
AnalyticEvalNode analyticEvalNode = new AnalyticEvalNode(
-context.nextPlanNodeId(),
+physicalWindow.translatePlanNodeId(),
inputPlanFragment.getPlanRoot(),
analyticFnCalls,
partitionExprs,
@@ -2020,7 +2018,7 @@ private PartitionSortNode translatePartitionSortNode(PhysicalPartitionTopN<? ext
nullsFirstParams.add(k.isNullFirst());
});
SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple);
-PartitionSortNode partitionSortNode = new PartitionSortNode(context.nextPlanNodeId(), childNode,
+PartitionSortNode partitionSortNode = new PartitionSortNode(partitionTopN.translatePlanNodeId(), childNode,
partitionTopN.getFunction(), partitionExprs, sortInfo, partitionTopN.hasGlobalLimit(),
partitionTopN.getPartitionLimit(), partitionTopN.getPhase());
if (partitionTopN.getStats() != null) {
@@ -2043,7 +2041,7 @@ private SortNode translateSortNode(AbstractPhysicalSort<? extends Plan> sort, Pl
nullsFirstParams.add(k.isNullFirst());
});
SortInfo sortInfo = new SortInfo(orderingExprs, ascOrders, nullsFirstParams, sortTuple);
-SortNode sortNode = new SortNode(context.nextPlanNodeId(), childNode, sortInfo, sort instanceof PhysicalTopN);
+SortNode sortNode = new SortNode(sort.translatePlanNodeId(), childNode, sortInfo, sort instanceof PhysicalTopN);
if (sort.getStats() != null) {
sortNode.setCardinality((long) sort.getStats().getRowCount());
}
@@ -19,6 +19,7 @@

import org.apache.doris.nereids.trees.expressions.StatementScopeIdGenerator;
import org.apache.doris.nereids.trees.plans.ObjectId;
+import org.apache.doris.planner.PlanNodeId;

import com.google.common.collect.ImmutableList;

@@ -58,4 +59,12 @@ public List<NODE_TYPE> children() {
public int arity() {
return children.size();
}

+/**
+ * Used by PhysicalPlanTranslator only: exposes this plan's ObjectId as a
+ * legacy PlanNodeId, so the node numbering in `explain` matches
+ * `explain physical plan`.
+ * @return PlanNodeId derived from this plan's ObjectId
+ */
+public PlanNodeId translatePlanNodeId() {
+return id.toPlanNodeId();
+}
}
@@ -20,6 +20,7 @@
import org.apache.doris.common.Id;
import org.apache.doris.common.IdGenerator;
import org.apache.doris.nereids.trees.expressions.StatementScopeIdGenerator;
+import org.apache.doris.planner.PlanNodeId;

/**
* relation id
@@ -46,4 +47,8 @@ public ObjectId getNextId() {
public String toString() {
return "ObjectId#" + id;
}

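+/**
+ * Bridges this nereids ObjectId into the legacy planner's PlanNodeId space.
+ */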
+public PlanNodeId toPlanNodeId() {
+return new PlanNodeId(id);
+}
}
@@ -120,7 +120,7 @@ public List<Slot> getBaseOutputs() {

@Override
public String toString() {
-return Utils.toSqlString("PhysicalOlapScan[" + relationId.asInt() + "]" + getGroupIdWithPrefix(),
+return Utils.toSqlString("PhysicalOlapScan[" + id.asInt() + "]" + getGroupIdWithPrefix(),
"qualified", Utils.qualifiedName(qualifier, table.getName()),
"stats", statistics, "fr", getMutableState(AbstractPlan.FRAGMENT_ID)
);
10 changes: 5 additions & 5 deletions regression-test/suites/nereids_syntax_p0/agg_4_phase.groovy
@@ -49,11 +49,11 @@ suite("agg_4_phase") {
"""
explain{
sql(test_sql)
contains "5:VAGGREGATE (merge finalize)"
contains "4:VEXCHANGE"
contains "3:VAGGREGATE (update serialize)"
contains "2:VAGGREGATE (merge serialize)"
contains "1:VAGGREGATE (update serialize)"
contains ":VAGGREGATE (merge finalize)"
contains ":VEXCHANGE"
contains ":VAGGREGATE (update serialize)"
contains ":VAGGREGATE (merge serialize)"
contains ":VAGGREGATE (update serialize)"
}
qt_4phase (test_sql)
}
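
Note on the test change above: since node IDs in `explain` now come from nereids ObjectIds rather than a translator-local counter, the exact numeric prefixes (`5:`, `4:`, ...) are no longer guaranteed, so the regression test matches on the operator names only.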
