diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java
index 48b3c6b251f891..0053861361ed9d 100644
--- a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java
+++ b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java
@@ -14,23 +14,32 @@ package com.starrocks.alter;
 
-import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.starrocks.analysis.ParseNode;
 import com.starrocks.analysis.TableName;
+import com.starrocks.analysis.TableRef;
 import com.starrocks.catalog.Database;
 import com.starrocks.catalog.MaterializedView;
 import com.starrocks.catalog.OlapTable;
+import com.starrocks.catalog.Partition;
 import com.starrocks.catalog.Table;
+import com.starrocks.common.AnalysisException;
 import com.starrocks.common.DdlException;
+import com.starrocks.common.ErrorReport;
+import com.starrocks.common.InvalidOlapTableStateException;
 import com.starrocks.common.MaterializedViewExceptions;
+import com.starrocks.common.util.DynamicPartitionUtil;
+import com.starrocks.common.util.PropertyAnalyzer;
 import com.starrocks.common.util.concurrent.lock.LockType;
 import com.starrocks.common.util.concurrent.lock.Locker;
+import com.starrocks.connector.ConnectorMetadata;
 import com.starrocks.persist.AlterViewInfo;
 import com.starrocks.persist.SwapTableOperationLog;
 import com.starrocks.qe.ConnectContext;
 import com.starrocks.scheduler.mv.MaterializedViewMgr;
 import com.starrocks.server.GlobalStateMgr;
 import com.starrocks.server.LocalMetastore;
+import com.starrocks.server.MetadataMgr;
 import com.starrocks.sql.analyzer.SemanticException;
 import com.starrocks.sql.ast.AddColumnClause;
 import com.starrocks.sql.ast.AddColumnsClause;
@@ -64,14 +73,21 @@
 import com.starrocks.sql.ast.SwapTableClause;
 import com.starrocks.sql.ast.TableRenameClause;
 import com.starrocks.sql.ast.TruncatePartitionClause;
+import com.starrocks.sql.ast.TruncateTableStmt;
 import com.starrocks.sql.common.MetaUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
 import static com.starrocks.sql.common.UnsupportedException.unsupportedException;
 
 public class AlterJobExecutor implements AstVisitor<Void, ConnectContext> {
     protected static final Logger LOG = LogManager.getLogger(AlterJobExecutor.class);
+    protected TableName tableName;
+    protected ConnectorMetadata connectorMetadata;
     protected Database db;
     protected Table table;
 
@@ -83,13 +99,96 @@ public void process(StatementBase statement, ConnectContext context) {
         visit(statement, context);
     }
 
-    //Alter system clause
-
     @Override
     public Void visitNode(ParseNode node, ConnectContext context) {
         throw new AlterJobException("Not support alter table operation : " + node.getClass().getName());
     }
 
+    @Override
+    public Void visitAlterTableStatement(AlterTableStmt statement, ConnectContext context) {
+        TableName tableName = statement.getTbl();
+        this.tableName = tableName;
+
+        String catalogName = statement.getCatalogName();
+        Optional<ConnectorMetadata> optionalMetadata = GlobalStateMgr.getCurrentState()
+                .getMetadataMgr().getOptionalMetadata(catalogName);
+        if (optionalMetadata.isEmpty()) {
+            throw new SemanticException("Invalid catalog " + catalogName + " , ConnectorMetadata doesn't exist");
+        }
+        this.connectorMetadata = optionalMetadata.get();
+        Database db = MetaUtils.getDatabase(context, tableName);
+        Table table = MetaUtils.getTable(tableName);
+
+        if (table.getType() == Table.TableType.VIEW || table.getType() == Table.TableType.MATERIALIZED_VIEW) {
+            throw new SemanticException("The specified table [" + tableName + "] is not a table");
+        }
+
+        if (table instanceof OlapTable && ((OlapTable) table).getState() != OlapTable.OlapTableState.NORMAL) {
+            OlapTable olapTable = (OlapTable) table;
+            throw new AlterJobException("", InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()));
+        }
+
+        this.db = db;
+        this.table = table;
+
+        for (AlterClause alterClause : statement.getAlterClauseList()) {
+            visit(alterClause, context);
+        }
+        return null;
+    }
+
+    @Override
+    public Void visitAlterViewStatement(AlterViewStmt statement, ConnectContext context) {
+        TableName tableName = statement.getTableName();
+        Database db = MetaUtils.getDatabase(context, tableName);
+        Table table = MetaUtils.getTable(tableName);
+
+        if (table.getType() != Table.TableType.VIEW) {
+            throw new SemanticException("The specified table [" + tableName + "] is not a view");
+        }
+
+        this.db = db;
+        this.table = table;
+        AlterViewClause alterViewClause = (AlterViewClause) statement.getAlterClause();
+        visit(alterViewClause, context);
+        return null;
+    }
+
+    @Override
+    public Void visitAlterMaterializedViewStatement(AlterMaterializedViewStmt stmt, ConnectContext context) {
+        // check db
+        final TableName mvName = stmt.getMvName();
+        Database db = MetaUtils.getDatabase(context, mvName);
+
+        Locker locker = new Locker();
+        if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) {
+            throw new AlterJobException("alter materialized failed. database:" + db.getFullName() + " not exist");
+        }
+
+        try {
+            Table table = MetaUtils.getTable(mvName);
+            if (!table.isMaterializedView()) {
+                throw new SemanticException("The specified table [" + mvName + "] is not a view");
+            }
+            this.db = db;
+            this.table = table;
+
+            MaterializedView materializedView = (MaterializedView) table;
+            // check materialized view state
+            if (materializedView.getState() != OlapTable.OlapTableState.NORMAL) {
+                throw new AlterJobException("Materialized view [" + materializedView.getName() + "]'s state is not NORMAL. 
" + + "Do not allow to do ALTER ops"); + } + + MaterializedViewMgr.getInstance().stopMaintainMV(materializedView); + visit(stmt.getAlterTableClause()); + MaterializedViewMgr.getInstance().rebuildMaintainMV(materializedView); + return null; + } finally { + locker.unLockDatabase(db, LockType.WRITE); + } + } + //Alter table clause @Override @@ -120,44 +219,46 @@ public Void visitAlterTableCommentClause(AlterTableCommentClause clause, Connect public Void visitSwapTableClause(SwapTableClause clause, ConnectContext context) { // must hold db write lock Locker locker = new Locker(); - Preconditions.checkState(locker.isDbWriteLockHeldByCurrentThread(db)); - - OlapTable origTable = (OlapTable) table; - - String origTblName = origTable.getName(); - String newTblName = clause.getTblName(); - Table newTbl = db.getTable(newTblName); - if (newTbl == null || !(newTbl.isOlapOrCloudNativeTable() || newTbl.isMaterializedView())) { - throw new AlterJobException("Table " + newTblName + " does not exist or is not OLAP/LAKE table"); - } - OlapTable olapNewTbl = (OlapTable) newTbl; - - // First, we need to check whether the table to be operated on can be renamed + locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(table.getId()), LockType.WRITE); try { - olapNewTbl.checkAndSetName(origTblName, true); - origTable.checkAndSetName(newTblName, true); - - if (origTable.isMaterializedView() || newTbl.isMaterializedView()) { - if (!(origTable.isMaterializedView() && newTbl.isMaterializedView())) { - throw new AlterJobException("Materialized view can only SWAP WITH materialized view"); - } + OlapTable origTable = (OlapTable) table; + String origTblName = origTable.getName(); + String newTblName = clause.getTblName(); + Table newTbl = db.getTable(newTblName); + if (newTbl == null || !(newTbl.isOlapOrCloudNativeTable() || newTbl.isMaterializedView())) { + throw new AlterJobException("Table " + newTblName + " does not exist or is not OLAP/LAKE table"); } + OlapTable olapNewTbl = (OlapTable) newTbl; - // inactive the related MVs - LocalMetastore.inactiveRelatedMaterializedView(db, origTable, - MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(origTblName)); - LocalMetastore.inactiveRelatedMaterializedView(db, olapNewTbl, - MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(newTblName)); + // First, we need to check whether the table to be operated on can be renamed + try { + olapNewTbl.checkAndSetName(origTblName, true); + origTable.checkAndSetName(newTblName, true); - SwapTableOperationLog log = new SwapTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId()); - GlobalStateMgr.getCurrentState().getAlterJobMgr().swapTableInternal(log); - GlobalStateMgr.getCurrentState().getEditLog().logSwapTable(log); + if (origTable.isMaterializedView() || newTbl.isMaterializedView()) { + if (!(origTable.isMaterializedView() && newTbl.isMaterializedView())) { + throw new AlterJobException("Materialized view can only SWAP WITH materialized view"); + } + } - LOG.info("finish swap table {}-{} with table {}-{}", origTable.getId(), origTblName, newTbl.getId(), - newTblName); - return null; - } catch (DdlException e) { - throw new AlterJobException(e.getMessage(), e); + // inactive the related MVs + LocalMetastore.inactiveRelatedMaterializedView(db, origTable, + MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(origTblName)); + LocalMetastore.inactiveRelatedMaterializedView(db, olapNewTbl, + MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(newTblName)); + + SwapTableOperationLog 
log = new SwapTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId()); + GlobalStateMgr.getCurrentState().getAlterJobMgr().swapTableInternal(log); + GlobalStateMgr.getCurrentState().getEditLog().logSwapTable(log); + + LOG.info("finish swap table {}-{} with table {}-{}", origTable.getId(), origTblName, newTbl.getId(), + newTblName); + return null; + } catch (DdlException e) { + throw new AlterJobException(e.getMessage(), e); + } + } finally { + locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(table.getId()), LockType.WRITE); } } @@ -229,7 +330,8 @@ public Void visitRollupRenameClause(RollupRenameClause clause, ConnectContext co @Override public Void visitCompactionClause(CompactionClause clause, ConnectContext context) { - unsupportedException("Not support"); + ErrorReport.wrapWithRuntimeException(() -> + CompactionHandler.process(Lists.newArrayList(clause), db, (OlapTable) table)); return null; } @@ -247,33 +349,58 @@ public Void visitDropFieldClause(DropFieldClause clause, ConnectContext context) //Alter partition clause - @Override - public Void visitModifyPartitionClause(ModifyPartitionClause clause, ConnectContext context) { - unsupportedException("Not support"); - return null; - } - @Override public Void visitAddPartitionClause(AddPartitionClause clause, ConnectContext context) { - unsupportedException("Not support"); + /* + * This check is not appropriate here. However, because the dynamically generated Partition Clause needs to be analyzed, + * this check cannot be placed in analyze. + * For the same reason, it cannot be placed in LocalMetastore.addPartition. + * If there is a subsequent refactoring, this check logic should be placed in a more appropriate location. + */ + if (!clause.isTempPartition() && table instanceof OlapTable) { + DynamicPartitionUtil.checkAlterAllowed((OlapTable) table); + } + ErrorReport.wrapWithRuntimeException(() -> connectorMetadata.addPartitions(context, db, table.getName(), clause)); return null; } @Override public Void visitDropPartitionClause(DropPartitionClause clause, ConnectContext context) { - unsupportedException("Not support"); + /* + * This check is not appropriate here. However, because the dynamically generated Partition Clause needs to be analyzed, + * this check cannot be placed in analyze. + * For the same reason, it cannot be placed in LocalMetastore.dropPartition. + * If there is a subsequent refactoring, this check logic should be placed in a more appropriate location. + */ + if (!clause.isTempPartition() && table instanceof OlapTable) { + DynamicPartitionUtil.checkAlterAllowed((OlapTable) table); + } + + MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr(); + Optional connectorMetadata = metadataMgr.getOptionalMetadata(""); + connectorMetadata.ifPresent(metadata -> + ErrorReport.wrapWithRuntimeException(() -> metadata.dropPartition(db, table, clause))); return null; } @Override public Void visitTruncatePartitionClause(TruncatePartitionClause clause, ConnectContext context) { - unsupportedException("Not support"); + // This logic is used to adapt mysql syntax. 
+ // ALTER TABLE test TRUNCATE PARTITION p1; + TableRef tableRef = new TableRef(tableName, null, clause.getPartitionNames()); + TruncateTableStmt tStmt = new TruncateTableStmt(tableRef); + ConnectContext ctx = new ConnectContext(); + ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); + + ErrorReport.wrapWithRuntimeException(() -> + GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(tStmt, ctx)); return null; } @Override public Void visitReplacePartitionClause(ReplacePartitionClause clause, ConnectContext context) { - unsupportedException("Not support"); + ErrorReport.wrapWithRuntimeException(() -> + GlobalStateMgr.getCurrentState().getLocalMetastore().replaceTempPartition(db, table.getName(), clause)); return null; } @@ -283,6 +410,42 @@ public Void visitPartitionRenameClause(PartitionRenameClause clause, ConnectCont return null; } + @Override + public Void visitModifyPartitionClause(ModifyPartitionClause clause, ConnectContext context) { + try { + // expand the partition names if it is 'Modify Partition(*)' + List partitionNames = clause.getPartitionNames(); + if (clause.isNeedExpand()) { + partitionNames.clear(); + for (Partition partition : table.getPartitions()) { + partitionNames.add(partition.getName()); + } + } + Map properties = clause.getProperties(); + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) { + if (table.isCloudNativeTable()) { + throw new SemanticException("Lake table not support alter in_memory"); + } + + SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); + schemaChangeHandler.updatePartitionsInMemoryMeta( + db, table.getName(), partitionNames, properties); + } + + Locker locker = new Locker(); + locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(table.getId()), LockType.WRITE); + try { + AlterJobMgr alterJobMgr = GlobalStateMgr.getCurrentState().getAlterJobMgr(); + alterJobMgr.modifyPartitionsProperty(db, (OlapTable) table, partitionNames, properties); + } finally { + locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(table.getId()), LockType.WRITE); + } + } catch (DdlException | AnalysisException e) { + throw new AlterJobException(e.getMessage()); + } + return null; + } + // Alter View @Override public Void visitAlterViewClause(AlterViewClause alterViewClause, ConnectContext ctx) { @@ -295,74 +458,4 @@ public Void visitAlterViewClause(AlterViewClause alterViewClause, ConnectContext GlobalStateMgr.getCurrentState().getEditLog().logModifyViewDef(alterViewInfo); return null; } - - @Override - public Void visitAlterTableStatement(AlterTableStmt statement, ConnectContext context) { - TableName tableName = statement.getTbl(); - Database db = MetaUtils.getDatabase(context, tableName); - Table table = MetaUtils.getTable(tableName); - - if (table.getType() == Table.TableType.VIEW || table.getType() == Table.TableType.MATERIALIZED_VIEW) { - throw new SemanticException("The specified table [" + tableName + "] is not a table"); - } - - this.db = db; - this.table = table; - for (AlterClause alterClause : statement.getAlterClauseList()) { - visit(alterClause, context); - } - return null; - } - - @Override - public Void visitAlterViewStatement(AlterViewStmt statement, ConnectContext context) { - TableName tableName = statement.getTableName(); - Database db = MetaUtils.getDatabase(context, tableName); - Table table = MetaUtils.getTable(tableName); - - if (table.getType() != Table.TableType.VIEW) { - throw new SemanticException("The specified table [" + tableName + "] is not 
a view"); - } - - this.db = db; - this.table = table; - AlterViewClause alterViewClause = (AlterViewClause) statement.getAlterClause(); - visit(alterViewClause, context); - return null; - } - - @Override - public Void visitAlterMaterializedViewStatement(AlterMaterializedViewStmt stmt, ConnectContext context) { - // check db - final TableName mvName = stmt.getMvName(); - Database db = MetaUtils.getDatabase(context, mvName); - - Locker locker = new Locker(); - if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { - throw new AlterJobException("alter materialized failed. database:" + db.getFullName() + " not exist"); - } - - try { - Table table = MetaUtils.getTable(mvName); - if (!table.isMaterializedView()) { - throw new SemanticException("The specified table [" + mvName + "] is not a view"); - } - this.db = db; - this.table = table; - - MaterializedView materializedView = (MaterializedView) table; - // check materialized view state - if (materializedView.getState() != OlapTable.OlapTableState.NORMAL) { - throw new AlterJobException("Materialized view [" + materializedView.getName() + "]'s state is not NORMAL. " - + "Do not allow to do ALTER ops"); - } - - MaterializedViewMgr.getInstance().stopMaintainMV(materializedView); - visit(stmt.getAlterTableClause()); - MaterializedViewMgr.getInstance().rebuildMaintainMV(materializedView); - return null; - } finally { - locker.unLockDatabase(db, LockType.WRITE); - } - } } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java index f78f1774a195ed..76a63e78dbdf53 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java @@ -40,7 +40,6 @@ import com.google.common.collect.Sets; import com.starrocks.analysis.DateLiteral; import com.starrocks.analysis.TableName; -import com.starrocks.analysis.TableRef; import com.starrocks.authentication.AuthenticationMgr; import com.starrocks.catalog.BaseTableInfo; import com.starrocks.catalog.ColocateTableIndex; @@ -72,10 +71,8 @@ import com.starrocks.common.MetaNotFoundException; import com.starrocks.common.UserException; import com.starrocks.common.util.DateUtils; -import com.starrocks.common.util.DynamicPartitionUtil; import com.starrocks.common.util.PropertyAnalyzer; import com.starrocks.common.util.TimeUtils; -import com.starrocks.common.util.Util; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; @@ -102,35 +99,27 @@ import com.starrocks.sql.analyzer.Analyzer; import com.starrocks.sql.analyzer.MaterializedViewAnalyzer; import com.starrocks.sql.analyzer.SemanticException; -import com.starrocks.sql.ast.AddPartitionClause; import com.starrocks.sql.ast.AlterClause; import com.starrocks.sql.ast.AlterMaterializedViewStatusClause; import com.starrocks.sql.ast.AlterSystemStmt; import com.starrocks.sql.ast.AlterTableCommentClause; import com.starrocks.sql.ast.AlterTableStmt; import com.starrocks.sql.ast.ColumnRenameClause; -import com.starrocks.sql.ast.CompactionClause; import com.starrocks.sql.ast.CreateMaterializedViewStatement; import com.starrocks.sql.ast.CreateMaterializedViewStmt; import com.starrocks.sql.ast.DropMaterializedViewStmt; -import com.starrocks.sql.ast.DropPartitionClause; -import com.starrocks.sql.ast.ModifyPartitionClause; import com.starrocks.sql.ast.ModifyTablePropertiesClause; import 
com.starrocks.sql.ast.PartitionRenameClause; import com.starrocks.sql.ast.QueryStatement; -import com.starrocks.sql.ast.ReplacePartitionClause; import com.starrocks.sql.ast.RollupRenameClause; import com.starrocks.sql.ast.StatementBase; import com.starrocks.sql.ast.TableRenameClause; -import com.starrocks.sql.ast.TruncatePartitionClause; -import com.starrocks.sql.ast.TruncateTableStmt; import com.starrocks.sql.ast.UserIdentity; import com.starrocks.sql.optimizer.rule.transformation.materialization.MvUtils; import com.starrocks.sql.parser.SqlParser; import com.starrocks.thrift.TStorageMedium; import com.starrocks.thrift.TTabletMetaType; import com.starrocks.thrift.TTabletType; -import org.apache.commons.collections4.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; @@ -156,7 +145,6 @@ public class AlterJobMgr { private final SchemaChangeHandler schemaChangeHandler; private final MaterializedViewHandler materializedViewHandler; private final SystemHandler clusterHandler; - private final CompactionHandler compactionHandler; public AlterJobMgr(SchemaChangeHandler schemaChangeHandler, MaterializedViewHandler materializedViewHandler, @@ -165,7 +153,6 @@ public AlterJobMgr(SchemaChangeHandler schemaChangeHandler, this.schemaChangeHandler = schemaChangeHandler; this.materializedViewHandler = materializedViewHandler; this.clusterHandler = systemHandler; - this.compactionHandler = compactionHandler; } public void start() { @@ -535,11 +522,7 @@ public void processAlterTable(AlterTableStmt stmt) throws UserException { if (table == null) { ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); } - OlapTable olapTable = (OlapTable) table; - // some operations will take long time to process, need to be done outside the databse lock - boolean needProcessOutsideDatabaseLock = false; - boolean isSynchronous = true; // some operations will take long time to process, need to be done outside the databse lock boolean needProcessOutsideDatabaseLock = false; @@ -548,6 +531,7 @@ public void processAlterTable(AlterTableStmt stmt) throws UserException { if (!(table.isOlapOrCloudNativeTable() || table.isMaterializedView())) { throw new DdlException("Do not support alter non-native table/materialized-view[" + tableName + "]"); } + OlapTable olapTable = (OlapTable) table; List alterClauses = stmt.getAlterClauseList(); Locker locker = new Locker(); @@ -567,66 +551,8 @@ public void processAlterTable(AlterTableStmt stmt) throws UserException { } else if (stmt.hasRollupOp()) { materializedViewHandler.process(alterClauses, db, olapTable); isSynchronous = false; - } else if (stmt.hasPartitionOp()) { - Preconditions.checkState(alterClauses.size() == 1); - AlterClause alterClause = alterClauses.get(0); - if (alterClause instanceof DropPartitionClause) { - DropPartitionClause dropPartitionClause = (DropPartitionClause) alterClause; - if (!dropPartitionClause.isTempPartition()) { - DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTable(tableName)); - } - if (dropPartitionClause.getPartitionName() != null && dropPartitionClause.getPartitionName() - .startsWith(ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)) { - throw new DdlException("Deletion of shadow partitions is not allowed"); - } - List partitionNames = dropPartitionClause.getPartitionNames(); - if (CollectionUtils.isNotEmpty(partitionNames)) { - boolean hasShadowPartition = partitionNames.stream() - .anyMatch(partitionName -> 
partitionName.startsWith( - ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)); - if (hasShadowPartition) { - throw new DdlException("Deletion of shadow partitions is not allowed"); - } - } - GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, olapTable, dropPartitionClause); - } else if (alterClause instanceof ReplacePartitionClause) { - ReplacePartitionClause replacePartitionClause = (ReplacePartitionClause) alterClause; - List partitionNames = replacePartitionClause.getPartitionNames(); - for (String partitionName : partitionNames) { - if (partitionName.startsWith(ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)) { - throw new DdlException("Replace shadow partitions is not allowed"); - } - } - GlobalStateMgr.getCurrentState().getLocalMetastore() - .replaceTempPartition(db, tableName, replacePartitionClause); - } else if (alterClause instanceof ModifyPartitionClause) { - ModifyPartitionClause clause = ((ModifyPartitionClause) alterClause); - // expand the partition names if it is 'Modify Partition(*)' - if (clause.isNeedExpand()) { - List partitionNames = clause.getPartitionNames(); - partitionNames.clear(); - for (Partition partition : olapTable.getPartitions()) { - partitionNames.add(partition.getName()); - } - } - Map properties = clause.getProperties(); - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) { - needProcessOutsideDatabaseLock = true; - } else { - List partitionNames = clause.getPartitionNames(); - modifyPartitionsProperty(db, olapTable, partitionNames, properties); - } - } else if (alterClause instanceof AddPartitionClause) { - needProcessOutsideDatabaseLock = true; - } else { - throw new DdlException("Invalid alter operation: " + alterClause.getOpType()); - } - } else if (stmt.contains(AlterOpType.TRUNCATE_PARTITION)) { - needProcessOutsideDatabaseLock = true; } else if (stmt.contains(AlterOpType.RENAME)) { processRename(db, olapTable, alterClauses); - } else if (stmt.contains(AlterOpType.SWAP)) { - new AlterJobExecutor().process(stmt, ConnectContext.get()); } else if (stmt.contains(AlterOpType.ALTER_COMMENT)) { processAlterComment(db, olapTable, alterClauses); } else if (stmt.contains(AlterOpType.MODIFY_TABLE_PROPERTY_SYNC)) { @@ -644,42 +570,7 @@ public void processAlterTable(AlterTableStmt stmt) throws UserException { if (needProcessOutsideDatabaseLock) { Preconditions.checkState(alterClauses.size() == 1); AlterClause alterClause = alterClauses.get(0); - if (alterClause instanceof AddPartitionClause) { - if (!((AddPartitionClause) alterClause).isTempPartition()) { - DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTable(tableName)); - } - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, tableName, (AddPartitionClause) alterClause); - } else if (alterClause instanceof TruncatePartitionClause) { - // This logic is used to adapt mysql syntax. 
- // ALTER TABLE test TRUNCATE PARTITION p1; - TruncatePartitionClause clause = (TruncatePartitionClause) alterClause; - TableRef tableRef = new TableRef(stmt.getTbl(), null, clause.getPartitionNames()); - TruncateTableStmt tStmt = new TruncateTableStmt(tableRef); - ConnectContext ctx = new ConnectContext(); - ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(tStmt, ctx); - } else if (alterClause instanceof ModifyPartitionClause) { - ModifyPartitionClause clause = ((ModifyPartitionClause) alterClause); - Map properties = clause.getProperties(); - List partitionNames = clause.getPartitionNames(); - // currently, only in memory property could reach here - Preconditions.checkState(properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)); - olapTable = (OlapTable) db.getTable(tableName); - if (olapTable.isCloudNativeTable()) { - throw new DdlException("Lake table not support alter in_memory"); - } - - schemaChangeHandler.updatePartitionsInMemoryMeta( - db, tableName, partitionNames, properties); - - locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.WRITE); - try { - modifyPartitionsProperty(db, olapTable, partitionNames, properties); - } finally { - locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.WRITE); - } - } else if (alterClause instanceof ModifyTablePropertiesClause) { + if (alterClause instanceof ModifyTablePropertiesClause) { Map properties = alterClause.getProperties(); Preconditions.checkState(properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY) || properties.containsKey(PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX) || @@ -726,10 +617,6 @@ public void processAlterTable(AlterTableStmt stmt) throws UserException { } else { throw new DdlException("Invalid alter operation: " + alterClause.getOpType()); } - } else if (alterClause instanceof CompactionClause) { - String s = (((CompactionClause) alterClause).isBaseCompaction() ? 
"base" : "cumulative") - + " compact " + tableName + " partitions: " + ((CompactionClause) alterClause).getPartitionNames(); - compactionHandler.process(alterClauses, db, olapTable); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java b/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java index ca39ce5422947e..7e27d400b15fbc 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java @@ -23,7 +23,6 @@ import com.starrocks.catalog.Partition; import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Tablet; -import com.starrocks.common.DdlException; import com.starrocks.common.UserException; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; @@ -33,14 +32,12 @@ import com.starrocks.server.GlobalStateMgr; import com.starrocks.server.RunMode; import com.starrocks.sql.ast.AlterClause; -import com.starrocks.sql.ast.CancelStmt; import com.starrocks.sql.ast.CompactionClause; import com.starrocks.task.AgentBatchTask; import com.starrocks.task.AgentTask; import com.starrocks.task.AgentTaskExecutor; import com.starrocks.task.AgentTaskQueue; import com.starrocks.task.CompactionTask; -import org.apache.commons.lang.NotImplementedException; import org.apache.hadoop.util.Lists; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -50,28 +47,12 @@ import java.util.ArrayList; import java.util.List; -public class CompactionHandler extends AlterHandler { +public class CompactionHandler { private static final Logger LOG = LogManager.getLogger(CompactionHandler.class); - public CompactionHandler() { - super("compaction"); - } - - - @Override - protected void runAfterCatalogReady() { - super.runAfterCatalogReady(); - } - - @Override - public List> getAlterJobInfosByDb(Database db) { - throw new NotImplementedException(); - } - - @Override // add synchronized to avoid process 2 or more stmts at same time - public synchronized ShowResultSet process(List alterClauses, Database db, - OlapTable olapTable) throws UserException { + public static synchronized ShowResultSet process(List alterClauses, Database db, + OlapTable olapTable) throws UserException { Preconditions.checkArgument(alterClauses.size() == 1); AlterClause alterClause = alterClauses.get(0); Preconditions.checkState(alterClause instanceof CompactionClause); @@ -142,7 +123,7 @@ public synchronized ShowResultSet process(List alterClauses, Databa } @NotNull - private List findAllPartitions(OlapTable olapTable, CompactionClause compactionClause) { + private static List findAllPartitions(OlapTable olapTable, CompactionClause compactionClause) { List allPartitions = new ArrayList<>(); if (compactionClause.getPartitionNames().isEmpty()) { allPartitions.addAll(olapTable.getPartitions()); @@ -159,10 +140,4 @@ private List findAllPartitions(OlapTable olapTable, CompactionClause } return allPartitions; } - - @Override - public synchronized void cancel(CancelStmt stmt) throws DdlException { - throw new NotImplementedException(); - } - } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/ListPartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/catalog/ListPartitionInfo.java index dcffd567ed53dc..bd69e0873821a1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/ListPartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/ListPartitionInfo.java @@ -386,7 +386,6 @@ public 
void handleNewListPartitionDescs(Map idToColumn, long partitionId = partition.getId(); PartitionDesc partitionDesc = entry.second; Preconditions.checkArgument(partitionDesc instanceof SinglePartitionDesc); - Preconditions.checkArgument(((SinglePartitionDesc) partitionDesc).isAnalyzed()); this.idToDataProperty.put(partitionId, partitionDesc.getPartitionDataProperty()); this.idToReplicationNum.put(partitionId, partitionDesc.getReplicationNum()); this.idToInMemory.put(partitionId, partitionDesc.isInMemory()); diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/RangePartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/catalog/RangePartitionInfo.java index a90b9771f1a36e..327dd631ed1b06 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/RangePartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/RangePartitionInfo.java @@ -231,7 +231,6 @@ private Range checkNewRange(List partitionColumns, Partiti public Range handleNewSinglePartitionDesc(Map schema, SingleRangePartitionDesc desc, long partitionId, boolean isTemp) throws DdlException { - Preconditions.checkArgument(desc.isAnalyzed()); Range range; try { range = checkAndCreateRange(schema, desc, isTemp); @@ -277,7 +276,6 @@ public void handleNewRangePartitionDescs(Map schema, if (!existPartitionNameSet.contains(partition.getName())) { long partitionId = partition.getId(); SingleRangePartitionDesc desc = (SingleRangePartitionDesc) entry.second; - Preconditions.checkArgument(desc.isAnalyzed()); Range range; try { range = checkAndCreateRange(schema, (SingleRangePartitionDesc) entry.second, isTemp); diff --git a/fe/fe-core/src/main/java/com/starrocks/common/util/DynamicPartitionUtil.java b/fe/fe-core/src/main/java/com/starrocks/common/util/DynamicPartitionUtil.java index 1a5f48dca36a68..0b0aa0d2e7b803 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/util/DynamicPartitionUtil.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/util/DynamicPartitionUtil.java @@ -405,12 +405,12 @@ public static Map analyzeDynamicPartition(Map pr return analyzedProperties; } - public static void checkAlterAllowed(OlapTable olapTable) throws DdlException { + public static void checkAlterAllowed(OlapTable olapTable) { TableProperty tableProperty = olapTable.getTableProperty(); if (tableProperty != null && tableProperty.getDynamicPartitionProperty() != null && tableProperty.getDynamicPartitionProperty().isExists() && tableProperty.getDynamicPartitionProperty().isEnabled()) { - throw new DdlException("Cannot add/drop partition on a Dynamic Partition Table, " + + throw new SemanticException("Cannot add/drop partition on a Dynamic Partition Table, " + "Use command `ALTER TABLE tbl_name SET (\"dynamic_partition.enable\" = \"false\")` firstly."); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java b/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java index 5de7bec0e6dd81..756db65c30d59c 100644 --- a/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java @@ -16,6 +16,8 @@ import com.google.common.base.Strings; import com.google.common.collect.Lists; +import com.starrocks.alter.AlterJobExecutor; +import com.starrocks.alter.AlterOpType; import com.starrocks.analysis.FunctionName; import com.starrocks.analysis.ParseNode; import com.starrocks.catalog.Database; @@ -392,9 +394,20 @@ public ShowResultSet visitCancelRefreshMaterializedViewStatement(CancelRefreshMa @Override public ShowResultSet 
visitAlterTableStatement(AlterTableStmt stmt, ConnectContext context) {
-            ErrorReport.wrapWithRuntimeException(() -> {
-                context.getGlobalStateMgr().getMetadataMgr().alterTable(stmt);
-            });
+            /*
+             * Due to the Table Level Lock transformation plan, we need to sort out the lock layering logic,
+             * so we plan to gradually migrate the ALTER TABLE logic scattered across the code base into AlterJobExecutor.
+             * This is compatibility logic: operations that have not yet been migrated still use the original path.
+             */
+            if (stmt.hasPartitionOp() || stmt.contains(AlterOpType.SWAP)) {
+                AlterJobExecutor alterJobExecutor = new AlterJobExecutor();
+                alterJobExecutor.process(stmt, context);
+            } else {
+                ErrorReport.wrapWithRuntimeException(() -> {
+                    context.getGlobalStateMgr().getMetadataMgr().alterTable(stmt);
+                });
+            }
             return null;
         }
 
@@ -1097,7 +1110,7 @@ public ShowResultSet visitDataCacheSelectStatement(DataCacheSelectStatement stat
         public ShowResultSet visitCreateDictionaryStatement(CreateDictionaryStmt stmt, ConnectContext context) {
             ErrorReport.wrapWithRuntimeException(() -> {
                 context.getGlobalStateMgr().getDictionaryMgr().createDictionary(stmt,
-                    context.getCurrentCatalog(), context.getDatabase());
+                        context.getCurrentCatalog(), context.getDatabase());
             });
             return null;
         }
diff --git a/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java
index ef1486d84fafad..66c81c16a1149d 100644
--- a/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java
+++ b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java
@@ -862,7 +862,10 @@ public void addPartitions(ConnectContext ctx, Database db, String tableName, Add
         } finally {
             locker.unLockDatabase(db, LockType.READ);
         }
-        addPartitions(ctx, db, tableName, addPartitionClause.getResolvedPartitionDescList(), addPartitionClause);
+        addPartitions(ctx, db, tableName,
+                addPartitionClause.getResolvedPartitionDescList(),
+                addPartitionClause.isTempPartition(),
+                addPartitionClause.getDistributionDesc());
     }
 
     private OlapTable checkTable(Database db, String tableName) throws DdlException {
@@ -892,12 +895,11 @@ private void checkPartitionType(PartitionInfo partitionInfo) throws DdlException
         }
     }
 
-    private DistributionInfo getDistributionInfo(OlapTable olapTable, AddPartitionClause addPartitionClause)
+    private DistributionInfo getDistributionInfo(OlapTable olapTable, DistributionDesc distributionDesc)
             throws DdlException {
         DistributionInfo distributionInfo;
         List<Column> baseSchema = olapTable.getBaseSchema();
         DistributionInfo defaultDistributionInfo = olapTable.getDefaultDistributionInfo();
-        DistributionDesc distributionDesc = addPartitionClause.getDistributionDesc();
         if (distributionDesc != null) {
             distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
             // for now. 
we only support modify distribution's bucket num @@ -1020,10 +1022,9 @@ private void checkIfMetaChange(OlapTable olapTable, OlapTable copiedTable, Strin } private void updatePartitionInfo(PartitionInfo partitionInfo, List> partitionList, - Set existPartitionNameSet, AddPartitionClause addPartitionClause, + Set existPartitionNameSet, boolean isTempPartition, OlapTable olapTable) throws DdlException { - boolean isTempPartition = addPartitionClause.isTempPartition(); if (partitionInfo instanceof RangePartitionInfo) { RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; rangePartitionInfo.handleNewRangePartitionDescs(olapTable.getIdToColumn(), @@ -1054,9 +1055,8 @@ private void updatePartitionInfo(PartitionInfo partitionInfo, List partitionDescs, - AddPartitionClause addPartitionClause, PartitionInfo partitionInfo, + boolean isTempPartition, PartitionInfo partitionInfo, List partitionList, Set existPartitionNameSet) { - boolean isTempPartition = addPartitionClause.isTempPartition(); int partitionLen = partitionList.size(); List partitionInfoV2List = Lists.newArrayListWithCapacity(partitionLen); if (partitionLen == 1) { @@ -1104,7 +1104,7 @@ private void addRangePartitionLog(Database db, OlapTable olapTable, List partitionDescs, - AddPartitionClause addPartitionClause, PartitionInfo partitionInfo, + boolean isTempPartition, PartitionInfo partitionInfo, List partitionList, Set existPartitionNameSet) throws DdlException { if (partitionList == null) { @@ -1116,7 +1116,6 @@ public void addListPartitionLog(Database db, OlapTable olapTable, List partitionDescs, - AddPartitionClause addPartitionClause, PartitionInfo partitionInfo, + boolean isTempPartition, PartitionInfo partitionInfo, List partitionList, Set existPartitionNameSet) throws DdlException { PartitionType partitionType = partitionInfo.getType(); if (partitionInfo.isRangePartition()) { - addRangePartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList, + addRangePartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, existPartitionNameSet); } else if (partitionType == PartitionType.LIST) { - addListPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList, + addListPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, existPartitionNameSet); } else { throw new DdlException("Only support adding partition log to range/list partitioned table"); @@ -1197,7 +1196,7 @@ private void cleanTabletIdSetForAll(Set tabletIdSetForAll) { } private void addPartitions(ConnectContext ctx, Database db, String tableName, List partitionDescs, - AddPartitionClause addPartitionClause) throws DdlException { + boolean isTempPartition, DistributionDesc distributionDesc) throws DdlException { DistributionInfo distributionInfo; OlapTable olapTable; OlapTable copiedTable; @@ -1215,7 +1214,7 @@ private void addPartitions(ConnectContext ctx, Database db, String tableName, Li checkPartitionType(partitionInfo); // get distributionInfo - distributionInfo = getDistributionInfo(olapTable, addPartitionClause).copy(); + distributionInfo = getDistributionInfo(olapTable, distributionDesc).copy(); olapTable.inferDistribution(distributionInfo); // check colocation @@ -1273,7 +1272,7 @@ private void addPartitions(ConnectContext ctx, Database db, String tableName, Li checkPartitionType(partitionInfo); // update partition info - updatePartitionInfo(partitionInfo, newPartitions, existPartitionNameSet, addPartitionClause, 
olapTable); + updatePartitionInfo(partitionInfo, newPartitions, existPartitionNameSet, isTempPartition, olapTable); try { colocateTableIndex.updateLakeTableColocationInfo(olapTable, true /* isJoin */, @@ -1283,7 +1282,7 @@ private void addPartitions(ConnectContext ctx, Database db, String tableName, Li } // add partition log - addPartitionLog(db, olapTable, partitionDescs, addPartitionClause, partitionInfo, partitionList, + addPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, existPartitionNameSet); } finally { cleanExistPartitionNameSet(existPartitionNameSet, partitionNameToTabletSet); @@ -1615,12 +1614,12 @@ private PhysicalPartition createPhysicalPartition(String name, Database db, Olap } public void addSubPartitions(Database db, OlapTable table, Partition partition, - int numSubPartition, long warehouseId) throws DdlException { + int numSubPartition, long warehouseId) throws DdlException { addSubPartitions(db, table, partition, numSubPartition, null, warehouseId); } public void addSubPartitions(Database db, OlapTable table, Partition partition, - int numSubPartition, String[] subPartitionNames, long warehouseId) throws DdlException { + int numSubPartition, String[] subPartitionNames, long warehouseId) throws DdlException { OlapTable olapTable; OlapTable copiedTable; @@ -3417,7 +3416,7 @@ public String refreshMaterializedView(RefreshMaterializedViewStatement refreshMa boolean force = refreshMaterializedViewStatement.isForceRefresh(); PartitionRangeDesc range = refreshMaterializedViewStatement.getPartitionRangeDesc(); return refreshMaterializedView(dbName, mvName, force, range, Constants.TaskRunPriority.HIGH.value(), - Config.enable_mv_refresh_sync_refresh_mergeable, true, refreshMaterializedViewStatement.isSync()); + Config.enable_mv_refresh_sync_refresh_mergeable, true, refreshMaterializedViewStatement.isSync()); } @Override diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableClauseAnalyzer.java b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableClauseAnalyzer.java index 5e3e99aa17777b..f7af108e60f8b6 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableClauseAnalyzer.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableClauseAnalyzer.java @@ -32,6 +32,7 @@ import com.starrocks.catalog.Column; import com.starrocks.catalog.DataProperty; import com.starrocks.catalog.DynamicPartitionProperty; +import com.starrocks.catalog.ExpressionRangePartitionInfo; import com.starrocks.catalog.HashDistributionInfo; import com.starrocks.catalog.Index; import com.starrocks.catalog.KeysType; @@ -99,6 +100,7 @@ import com.starrocks.sql.ast.StructFieldDesc; import com.starrocks.sql.ast.TableRenameClause; import com.starrocks.sql.common.MetaUtils; +import org.apache.commons.collections4.CollectionUtils; import java.time.format.DateTimeParseException; import java.util.Arrays; @@ -937,6 +939,17 @@ public Void visitReplacePartitionClause(ReplacePartitionClause clause, ConnectCo if (clause.getProperties() != null && !clause.getProperties().isEmpty()) { throw new SemanticException("Unknown properties: " + clause.getProperties()); } + + + if (table instanceof OlapTable) { + List partitionNames = clause.getPartitionNames(); + for (String partitionName : partitionNames) { + if (partitionName.startsWith(ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)) { + throw new SemanticException("Replace shadow partitions is not allowed"); + } + } + } + return null; } @@ -1200,6 +1213,21 @@ public Void 
visitDropPartitionClause(DropPartitionClause clause, ConnectContext clause.setResolvedPartitionNames(clause.getPartitionNames()); } + if (table instanceof OlapTable) { + if (clause.getPartitionName() != null && clause.getPartitionName().startsWith( + ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)) { + throw new SemanticException("Deletion of shadow partitions is not allowed"); + } + List partitionNames = clause.getPartitionNames(); + if (CollectionUtils.isNotEmpty(partitionNames)) { + boolean hasShadowPartition = partitionNames.stream().anyMatch(partitionName -> + partitionName.startsWith(ExpressionRangePartitionInfo.SHADOW_PARTITION_PREFIX)); + if (hasShadowPartition) { + throw new SemanticException("Deletion of shadow partitions is not allowed"); + } + } + } + return null; } } diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableStatementAnalyzer.java b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableStatementAnalyzer.java index 4fce7a6988c7df..09e533d7a03d17 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableStatementAnalyzer.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/AlterTableStatementAnalyzer.java @@ -41,6 +41,7 @@ public static void analyze(AlterTableStmt statement, ConnectContext context) { TableName tbl = statement.getTbl(); MetaUtils.normalizationTableName(context, tbl); MetaUtils.checkNotSupportCatalog(tbl.getCatalog(), "ALTER"); + List alterClauseList = statement.getAlterClauseList(); if (alterClauseList == null || alterClauseList.isEmpty()) { ErrorReport.reportSemanticException(ErrorCode.ERR_NO_ALTER_OPERATION); diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/AlterTableStmt.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/AlterTableStmt.java index a67dcc2546bfb3..ba312f22136b2a 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/AlterTableStmt.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/AlterTableStmt.java @@ -67,9 +67,11 @@ public boolean contains(AlterOpType op) { public boolean hasPartitionOp() { List currentOps = alterClauseList.stream().map(AlterClause::getOpType).collect(Collectors.toList()); - return currentOps.contains(AlterOpType.ADD_PARTITION) || currentOps.contains(AlterOpType.DROP_PARTITION) - || currentOps.contains(AlterOpType.REPLACE_PARTITION) || - currentOps.contains(AlterOpType.MODIFY_PARTITION); + return currentOps.contains(AlterOpType.ADD_PARTITION) + || currentOps.contains(AlterOpType.DROP_PARTITION) + || currentOps.contains(AlterOpType.REPLACE_PARTITION) + || currentOps.contains(AlterOpType.MODIFY_PARTITION) + || currentOps.contains(AlterOpType.TRUNCATE_PARTITION); } public boolean hasSchemaChangeOp() { diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/MultiItemListPartitionDesc.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/MultiItemListPartitionDesc.java index 43414db9afa968..9935ff8920a188 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/MultiItemListPartitionDesc.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/MultiItemListPartitionDesc.java @@ -70,16 +70,10 @@ public List> getMultiLiteralExprValues() throws AnalysisExcept } public void analyze(List columnDefList, Map tableProperties) throws AnalysisException { - if (isAnalyzed) { - return; - } - FeNameFormat.checkPartitionName(getPartitionName()); analyzeValues(columnDefList.size()); analyzeProperties(tableProperties, null); this.columnDefList = columnDefList; - - isAnalyzed = true; } private void analyzeValues(int partitionColSize) throws 
AnalysisException { diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleItemListPartitionDesc.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleItemListPartitionDesc.java index d00f148186ec2c..27b89335e89408 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleItemListPartitionDesc.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleItemListPartitionDesc.java @@ -59,10 +59,6 @@ public List getLiteralExprValues() throws AnalysisException { } public void analyze(List columnDefList, Map tableProperties) throws AnalysisException { - if (isAnalyzed) { - return; - } - FeNameFormat.checkPartitionName(this.getPartitionName()); analyzeProperties(tableProperties, null); @@ -70,8 +66,6 @@ public void analyze(List columnDefList, Map tableProp throw new AnalysisException("Partition column size should be one when use single list partition "); } this.columnDefList = columnDefList; - - isAnalyzed = true; } @Override diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SinglePartitionDesc.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SinglePartitionDesc.java index 7dc1d95969c37c..f82b9fb0ae3752 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SinglePartitionDesc.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SinglePartitionDesc.java @@ -47,8 +47,6 @@ public abstract class SinglePartitionDesc extends PartitionDesc { private boolean isInMemory; private DataCacheInfo dataCacheInfo; - protected boolean isAnalyzed; - public SinglePartitionDesc(boolean ifNotExists, String partName, Map properties, NodePosition pos) { super(pos); this.partName = partName; @@ -60,7 +58,6 @@ public SinglePartitionDesc(boolean ifNotExists, String partName, Map tableProperties, PartitionKeyDesc partitionKeyDesc) throws AnalysisException { Map partitionAndTableProperties = Maps.newHashMap(); diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleRangePartitionDesc.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleRangePartitionDesc.java index 8d4a13e38c73db..14b83f1831ee35 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleRangePartitionDesc.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/SingleRangePartitionDesc.java @@ -42,10 +42,6 @@ public PartitionKeyDesc getPartitionKeyDesc() { } public void analyze(int partColNum, Map tableProperties) throws AnalysisException { - if (isAnalyzed) { - return; - } - FeNameFormat.checkPartitionName(getPartitionName()); partitionKeyDesc.analyze(partColNum); @@ -54,7 +50,6 @@ public void analyze(int partColNum, Map tableProperties) throws } else { analyzeProperties(tableProperties, null); } - isAnalyzed = true; } @Override diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java index 87e77c7c193a62..3eec24ec185331 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java @@ -198,7 +198,8 @@ public void testAlterTableStorageCoolDownTTLPartition() throws Exception { String sql = "ALTER TABLE test_alter_cool_down_ttl_partition\n" + "MODIFY PARTITION (*) SET(\"storage_cooldown_ttl\" = \"2 day\", \"storage_medium\" = \"SSD\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(alterTableStmt); + AlterJobExecutor alterJobExecutor = new AlterJobExecutor(); + 
alterJobExecutor.process(alterTableStmt, ctx); p20200321 = rangePartitionInfo.getDataProperty(olapTable.getPartition("p20200321").getId()); p20200322 = rangePartitionInfo.getDataProperty(olapTable.getPartition("p20200322").getId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java index 195730171f2cc2..822a2e5826bf84 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java @@ -335,7 +335,7 @@ private static void cancelRefreshMaterializedView(String sql, boolean expectedEx private static void alterTableWithNewParser(String sql, boolean expectedException) throws Exception { try { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(alterTableStmt); + DDLStmtExecutor.execute(alterTableStmt, connectContext); if (expectedException) { Assert.fail(); } @@ -2425,7 +2425,7 @@ public void testDropListPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE t_recharge_detail DROP PARTITION p2 force;"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(alterTableStmt); + DDLStmtExecutor.execute(alterTableStmt, ctx); } @Test(expected = AnalysisException.class) diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java index 7b91113f2e356a..a7a4f0e06ca26e 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java @@ -21,6 +21,7 @@ import com.starrocks.proto.DropTableResponse; import com.starrocks.proto.StatusPB; import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.DDLStmtExecutor; import com.starrocks.rpc.BrpcProxy; import com.starrocks.rpc.LakeService; import com.starrocks.rpc.RpcException; @@ -67,7 +68,7 @@ private static void dropTable(ConnectContext connectContext, String sql) throws private static void alterTable(ConnectContext connectContext, String sql) throws Exception { AlterTableStmt stmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(stmt); + DDLStmtExecutor.execute(stmt, connectContext); } private static void recoverDatabase(ConnectContext connectContext, String sql) throws Exception { diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java index e3a8c05dac5cfd..85084573217677 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java @@ -37,6 +37,7 @@ import com.starrocks.common.DdlException; import com.starrocks.common.ExceptionChecker; import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.DDLStmtExecutor; import com.starrocks.server.GlobalStateMgr; import com.starrocks.sql.ast.AlterTableStmt; import com.starrocks.sql.ast.CreateDbStmt; @@ -91,7 +92,7 @@ public void dropTable() throws Exception { private void dropPartition(String sql) throws Exception { AlterTableStmt alterTableStmt = 
(AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(alterTableStmt); + DDLStmtExecutor.execute(alterTableStmt, connectContext); } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/MaterializedViewTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/MaterializedViewTest.java index 0627c01212b53e..2c64c7c6c01ec8 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/MaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/MaterializedViewTest.java @@ -27,6 +27,7 @@ import com.starrocks.common.UserException; import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.DDLStmtExecutor; import com.starrocks.qe.QueryState; import com.starrocks.qe.ShowExecutor; import com.starrocks.qe.ShowResultSet; @@ -615,11 +616,11 @@ public void testAlterMVWithIndex() throws Exception { String bloomfilterSql = "alter table test.index_mv_to_check set (\"bloom_filter_columns\"=\"k2\")"; AlterTableStmt alterMVStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(bitmapSql, connectContext); - GlobalStateMgr.getCurrentState().getAlterJobMgr().processAlterTable(alterMVStmt); + DDLStmtExecutor.execute(alterMVStmt, connectContext); waitForSchemaChangeAlterJobFinish(); alterMVStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(bloomfilterSql, connectContext); - GlobalStateMgr.getCurrentState().getAlterJobMgr().processAlterTable(alterMVStmt); + DDLStmtExecutor.execute(alterMVStmt, connectContext); waitForSchemaChangeAlterJobFinish(); Assert.assertEquals(QueryState.MysqlStateType.OK, connectContext.getState().getStateType()); @@ -658,7 +659,7 @@ public void testAlterViewWithIndex() throws Exception { AlterTableStmt alterViewStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(bitmapSql, connectContext); Assert.assertThrows("Do not support alter non-native table/materialized-view[index_view_to_check]", DdlException.class, - () -> GlobalStateMgr.getCurrentState().getAlterJobMgr().processAlterTable(alterViewStmt)); + () -> DDLStmtExecutor.execute(alterViewStmt, connectContext)); } public void testCreateMV(String mvSql) throws Exception { diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java index 397772ceb91a1e..9fd815f8416f86 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java @@ -37,6 +37,7 @@ import com.google.common.collect.Lists; import com.starrocks.common.Config; import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.DDLStmtExecutor; import com.starrocks.server.GlobalStateMgr; import com.starrocks.sql.ast.AlterTableStmt; import com.starrocks.sql.ast.CreateDbStmt; @@ -79,7 +80,7 @@ private static void createTable(String sql) throws Exception { private static void alterTableWithNewParser(String sql) throws Exception { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(alterTableStmt); + DDLStmtExecutor.execute(alterTableStmt, connectContext); } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java index 
669a9d8cd3673b..cb4a0244d79d8c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java @@ -44,6 +44,7 @@ import com.starrocks.common.FeConstants; import com.starrocks.common.jmockit.Deencapsulation; import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.DDLStmtExecutor; import com.starrocks.qe.ShowExecutor; import com.starrocks.qe.ShowResultSet; import com.starrocks.server.GlobalStateMgr; @@ -111,7 +112,7 @@ private List> checkShowPartitionsResultNum(String tbl, boolean isTe private void alterTableWithNewAnalyzer(String sql, boolean expectedException) throws Exception { try { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getAlterJobMgr().processAlterTable(alterTableStmt); + DDLStmtExecutor.execute(alterTableStmt, ctx); if (expectedException) { Assert.fail("expected exception not thrown"); } diff --git a/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java b/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java index 2345ee9759cd8c..4ff2972b275134 100644 --- a/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java @@ -1226,7 +1226,7 @@ public TransactionState getTransactionState(long dbId, long transactionId) { partitionList.add(p19910425); currentState.getLocalMetastore().addListPartitionLog(testDb, olapTable, partitionDescs, - addPartitionClause, partitionInfo, partitionList, Sets.newSet("p19900425")); + addPartitionClause.isTempPartition(), partitionInfo, partitionList, Sets.newSet("p19900425")); } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java index a2f046308ad577..cfc616929b16aa 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java @@ -1161,9 +1161,9 @@ public void testTableCreateDrop() throws Exception { // check alter table: ALTER verifyGrantRevoke( - "alter table db1.tbl1 drop partition p1", - "grant ALTER on db1.tbl1 to test", - "revoke ALTER on db1.tbl1 from test", + "alter table db2.tbl1 drop partition p1", + "grant ALTER on db2.tbl1 to test", + "revoke ALTER on db2.tbl1 from test", "Access denied; you need (at least one of) the ALTER privilege(s) on TABLE tbl1 for this operation"); // check cancel alter table: ALTER