diff --git a/be/src/olap/base_tablet.cpp b/be/src/olap/base_tablet.cpp
index a5df20d63835df..8cad5b87f5b134 100644
--- a/be/src/olap/base_tablet.cpp
+++ b/be/src/olap/base_tablet.cpp
@@ -931,7 +931,7 @@ Status BaseTablet::generate_new_block_for_partial_update(
     // read current rowset first, if a row in the current rowset has delete sign mark
     // we don't need to read values from old block
     RETURN_IF_ERROR(read_plan_update.read_columns_by_plan(
-            *rowset_schema, update_cids, rsid_to_rowset, update_block, &read_index_update));
+            *rowset_schema, update_cids, rsid_to_rowset, update_block, &read_index_update, false));
     size_t update_rows = read_index_update.size();
     for (auto i = 0; i < update_cids.size(); ++i) {
         for (auto idx = 0; idx < update_rows; ++idx) {
@@ -951,19 +951,17 @@
     // rowid in the final block(start from 0, increase, may not continuous becasue we skip to read some rows) -> rowid to read in old_block
     std::map<uint32_t, uint32_t> read_index_old;
     RETURN_IF_ERROR(read_plan_ori.read_columns_by_plan(*rowset_schema, missing_cids, rsid_to_rowset,
-                                                       old_block, &read_index_old,
+                                                       old_block, &read_index_old, true,
                                                        new_block_delete_signs));
     size_t old_rows = read_index_old.size();
     const auto* __restrict old_block_delete_signs =
             get_delete_sign_column_data(old_block, old_rows);
-
+    DCHECK(old_block_delete_signs != nullptr);
     // build default value block
     auto default_value_block = old_block.clone_empty();
-    if (old_block_delete_signs != nullptr || new_block_delete_signs != nullptr) {
-        RETURN_IF_ERROR(BaseTablet::generate_default_value_block(
-                *rowset_schema, missing_cids, partial_update_info->default_values, old_block,
-                default_value_block));
-    }
+    RETURN_IF_ERROR(BaseTablet::generate_default_value_block(*rowset_schema, missing_cids,
+                                                             partial_update_info->default_values,
+                                                             old_block, default_value_block));
     auto mutable_default_value_columns = default_value_block.mutate_columns();
 
     CHECK(update_rows >= old_rows);
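
Because the old rows' delete signs are now always read (hence the new DCHECK), generate_new_block_for_partial_update builds the default value block unconditionally: a historical row that carries a delete sign has to be treated as if it did not exist, so the columns missing from the partial update are filled from column defaults or null rather than from the stale old values. A minimal sketch of that per-row decision follows; the helper name and signature are stand-ins for illustration, not the actual Doris code.

    // Hedged sketch: hypothetical helper, not the Doris implementation.
    #include <cstddef>
    #include <cstdint>

    // A row found in historical data but marked deleted there is ignored, so the
    // missing columns of the new row fall back to defaults / null instead.
    bool should_fill_from_defaults(bool use_default_or_null_flag,
                                   const int8_t* old_delete_signs,  // guaranteed non-null after this change
                                   std::size_t pos_in_old_block) {
        return use_default_or_null_flag || old_delete_signs[pos_in_old_block] != 0;
    }
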
diff --git a/be/src/olap/partial_update_info.cpp b/be/src/olap/partial_update_info.cpp
index 247353103dfdcf..a1ae19a6aa7b02 100644
--- a/be/src/olap/partial_update_info.cpp
+++ b/be/src/olap/partial_update_info.cpp
@@ -24,7 +24,6 @@
 #include "olap/olap_common.h"
 #include "olap/rowset/rowset.h"
 #include "olap/rowset/rowset_writer_context.h"
-#include "olap/tablet_meta.h"
 #include "olap/tablet_schema.h"
 #include "olap/utils.h"
 #include "util/bitmap_value.h"
@@ -206,9 +205,21 @@ void PartialUpdateReadPlan::prepare_to_read(const RowLocation& row_location, siz
 // read columns by read plan
 // read_index: ori_pos-> block_idx
 Status PartialUpdateReadPlan::read_columns_by_plan(
-        const TabletSchema& tablet_schema, const std::vector<uint32_t> cids_to_read,
+        const TabletSchema& tablet_schema, std::vector<uint32_t> cids_to_read,
         const std::map<RowsetId, RowsetSharedPtr>& rsid_to_rowset, vectorized::Block& block,
-        std::map<uint32_t, uint32_t>* read_index, const signed char* __restrict skip_map) const {
+        std::map<uint32_t, uint32_t>* read_index, bool force_read_old_delete_signs,
+        const signed char* __restrict cur_delete_signs) const {
+    if (force_read_old_delete_signs) {
+        // always read delete sign column from historical data
+        if (const vectorized::ColumnWithTypeAndName* old_delete_sign_column =
+                    block.try_get_by_name(DELETE_SIGN);
+            old_delete_sign_column == nullptr) {
+            auto del_col_cid = tablet_schema.field_index(DELETE_SIGN);
+            cids_to_read.emplace_back(del_col_cid);
+            block.swap(tablet_schema.create_block_by_cids(cids_to_read));
+        }
+    }
+
     bool has_row_column = tablet_schema.has_row_store_for_all_columns();
     auto mutable_columns = block.mutate_columns();
     size_t read_idx = 0;
@@ -218,7 +229,7 @@ Status PartialUpdateReadPlan::read_columns_by_plan(
         CHECK(rowset_iter != rsid_to_rowset.end());
         std::vector<uint32_t> rids;
         for (auto [rid, pos] : mappings) {
-            if (skip_map && skip_map[pos]) {
+            if (cur_delete_signs && cur_delete_signs[pos]) {
                 continue;
             }
             rids.emplace_back(rid);
@@ -263,17 +274,15 @@ Status PartialUpdateReadPlan::fill_missing_columns(
     // record real pos, key is input line num, value is old_block line num
     std::map<uint32_t, uint32_t> read_index;
     RETURN_IF_ERROR(read_columns_by_plan(tablet_schema, missing_cids, rsid_to_rowset,
-                                         old_value_block, &read_index, nullptr));
-
-    const auto* delete_sign_column_data = BaseTablet::get_delete_sign_column_data(old_value_block);
+                                         old_value_block, &read_index, true, nullptr));
+    const auto* old_delete_signs = BaseTablet::get_delete_sign_column_data(old_value_block);
+    DCHECK(old_delete_signs != nullptr);
 
     // build default value columns
     auto default_value_block = old_value_block.clone_empty();
-    if (has_default_or_nullable || delete_sign_column_data != nullptr) {
-        RETURN_IF_ERROR(BaseTablet::generate_default_value_block(
-                tablet_schema, missing_cids, rowset_ctx->partial_update_info->default_values,
-                old_value_block, default_value_block));
-    }
+    RETURN_IF_ERROR(BaseTablet::generate_default_value_block(
+            tablet_schema, missing_cids, rowset_ctx->partial_update_info->default_values,
+            old_value_block, default_value_block));
     auto mutable_default_value_columns = default_value_block.mutate_columns();
 
     // fill all missing value from mutable_old_columns, need to consider default value and null value
@@ -285,8 +294,8 @@ Status PartialUpdateReadPlan::fill_missing_columns(
         // read values from old rows for missing values in this occasion. So we should read the DELETE_SIGN column
         // to check if a row REALLY exists in the table.
         auto pos_in_old_block = read_index[idx + segment_start_pos];
-        if (use_default_or_null_flag[idx] || (delete_sign_column_data != nullptr &&
-                                              delete_sign_column_data[pos_in_old_block] != 0)) {
+        if (use_default_or_null_flag[idx] ||
+            (old_delete_signs != nullptr && old_delete_signs[pos_in_old_block] != 0)) {
             for (auto i = 0; i < missing_cids.size(); ++i) {
                 // if the column has default value, fill it with default value
                 // otherwise, if the column is nullable, fill it with null value
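
The core of the change is that read_columns_by_plan no longer silently skips the hidden delete-sign column: when force_read_old_delete_signs is true it appends the DELETE_SIGN column id to cids_to_read and rebuilds the output block, so the later get_delete_sign_column_data call always finds the column. A simplified, self-contained sketch of that "make sure the required column gets read" step is shown below; FakeSchema and the free function are hypothetical stand-ins, and the real code checks the block by column name and also rebuilds the vectorized::Block.

    // Hedged sketch with invented types; it only illustrates appending the column id.
    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    struct FakeSchema {
        std::vector<std::string> column_names;
        int field_index(const std::string& name) const {
            auto it = std::find(column_names.begin(), column_names.end(), name);
            return it == column_names.end() ? -1 : static_cast<int>(it - column_names.begin());
        }
    };

    // Append the hidden delete-sign column id if the caller did not already ask for it.
    void ensure_delete_sign_is_read(const FakeSchema& schema, std::vector<uint32_t>& cids_to_read) {
        int cid = schema.field_index("__DORIS_DELETE_SIGN__");
        if (cid < 0) {
            return;  // schema has no delete-sign column
        }
        bool already_requested = std::find(cids_to_read.begin(), cids_to_read.end(),
                                           static_cast<uint32_t>(cid)) != cids_to_read.end();
        if (!already_requested) {
            cids_to_read.push_back(static_cast<uint32_t>(cid));
        }
    }
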
diff --git a/be/src/olap/partial_update_info.h b/be/src/olap/partial_update_info.h
index 278b027942eb20..88fac1a3e9c5d6 100644
--- a/be/src/olap/partial_update_info.h
+++ b/be/src/olap/partial_update_info.h
@@ -80,10 +80,11 @@ class PartialUpdateReadPlan {
 public:
     void prepare_to_read(const RowLocation& row_location, size_t pos);
     Status read_columns_by_plan(const TabletSchema& tablet_schema,
-                                const std::vector<uint32_t> cids_to_read,
+                                std::vector<uint32_t> cids_to_read,
                                 const std::map<RowsetId, RowsetSharedPtr>& rsid_to_rowset,
                                 vectorized::Block& block, std::map<uint32_t, uint32_t>* read_index,
-                                const signed char* __restrict skip_map = nullptr) const;
+                                bool force_read_old_delete_signs,
+                                const signed char* __restrict cur_delete_signs = nullptr) const;
     Status fill_missing_columns(RowsetWriterContext* rowset_ctx,
                                 const std::map<RowsetId, RowsetSharedPtr>& rsid_to_rowset,
                                 const TabletSchema& tablet_schema, vectorized::Block& full_block,
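
One detail in the new declaration: cids_to_read was already passed by value, and dropping const lets the implementation append the delete-sign column id to its own copy without the addition leaking back into the caller's column list. A tiny self-contained illustration of that point, independent of any Doris types:

    // Hedged illustration of the by-value parameter; the column ids are made up.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    void read_with_extra_column(std::vector<uint32_t> cids) {  // by value: mutable local copy
        cids.push_back(99);  // e.g. the hidden delete-sign column id
        assert(cids.size() == 3);
    }

    int main() {
        std::vector<uint32_t> caller_cids = {0, 1};
        read_with_extra_column(caller_cids);
        assert(caller_cids.size() == 2);  // the caller's list is untouched
        return 0;
    }
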
diff --git a/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out
new file mode 100644
index 00000000000000..39b43afd898120
--- /dev/null
+++ b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out
@@ -0,0 +1,15 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+
+-- !sql --
+1 1 1 987 987
+2 \N \N 987 987
+3 3 3 3 3
+4 -1 -1 987 987
+5 \N \N 987 987
+
diff --git a/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_col_delete.out b/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_col_delete.out
index afa7ccfc9bbfb2..ad704cac25149b 100644
Binary files a/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_col_delete.out and b/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_col_delete.out differ
diff --git a/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_type_delete.out b/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_type_delete.out
index 05f0c5dab4e12d..8621761c3a636f 100644
Binary files a/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_type_delete.out and b/regression-test/data/unique_with_mow_c_p0/partial_update/test_partial_update_seq_type_delete.out differ
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
index 04e4b07f2dce3f..d21693cda10719 100644
--- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
@@ -28,7 +28,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !sql_2_1 --
 0 0 0 0
@@ -36,7 +36,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !sql_2_2 --
 0 0 0 0
@@ -90,7 +90,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !inspect --
 0 0 0 0 1 0
@@ -109,7 +109,7 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
 
 -- !sql_4_1 --
 0 0 0 0
@@ -117,7 +117,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !inspect --
 0 0 0 0 1 0
@@ -138,7 +138,7 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
 
 -- !sql_4_2 --
 0 0 0 0
@@ -166,8 +166,8 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
-11 \N 888 \N 5 1
+11 \N \N \N 3 0
+11 \N \N \N 5 1
 
 -- !sql --
 0 0 0 0
@@ -198,7 +198,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !sql_2_1 --
 0 0 0 0
@@ -206,7 +206,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !sql_2_2 --
 0 0 0 0
@@ -260,7 +260,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !inspect --
 0 0 0 0 1 0
@@ -279,7 +279,7 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
 
 -- !sql_4_1 --
 0 0 0 0
@@ -287,7 +287,7 @@
 7 7 7 7
 8 8 8 8
 10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
 
 -- !inspect --
 0 0 0 0 1 0
@@ -308,7 +308,7 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
 
 -- !sql_4_2 --
 0 0 0 0
@@ -336,6 +336,6 @@
 8 8 8 8 1 0
 10 \N 999 \N 2 0
 11 \N 888 \N 2 1
-11 \N 888 \N 3 0
-11 \N 888 \N 5 1
+11 \N \N \N 3 0
+11 \N \N \N 5 1
 
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
index 3818f6612023e7..ab2c14cf8bca85 100644
--- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
@@ -13,7 +13,7 @@
 
 -- !partial_update_with_seq_hidden_columns --
 1 doris 200 123 1 2023-01-01 1 3 2023-01-01
-2 doris2 2600 223 1 2023-07-20 1 4 2023-07-20
+2 unknown 2600 \N 4321 2023-07-20 1 4 2023-07-20
 3 unknown 1500 \N 4321 2022-07-20 1 4 2022-07-20
 
 -- !select_default --
@@ -30,6 +30,6 @@
 
 -- !partial_update_with_seq_hidden_columns --
 1 doris 200 123 1 2023-01-01 1 3 2023-01-01
-2 doris2 2600 223 1 2023-07-20 1 4 2023-07-20
+2 unknown 2600 \N 4321 2023-07-20 1 4 2023-07-20
 3 unknown 1500 \N 4321 2022-07-20 1 4 2022-07-20
 
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
index 90ea414b995352..266ebff5714665 100644
--- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
@@ -13,7 +13,7 @@
 -- !partial_update_with_seq_test --
 
 -- !partial_update_with_seq_test_hidden --
-1 doris 2300 2300 1 2021-05-19 1 4 2300
+1 unknown 2300 2300 4321 2021-05-19 1 4 2300
 2 doris2 3600 2400 1 2019-01-23 1 3 3600
 3 unknown 1500 2500 4321 2022-03-31 1 4 2500
 
@@ -41,7 +41,7 @@
 -- !partial_update_with_seq_test --
 
 -- !partial_update_with_seq_test_hidden --
-1 doris 2300 2300 1 2021-05-19 1 4 2300
+1 unknown 2300 2300 4321 2021-05-19 1 4 2300
 2 doris2 3600 2400 1 2019-01-23 1 3 3600
 3 unknown 1500 2500 4321 2022-03-31 1 4 2500
 
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out b/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out
new file mode 100644
index 00000000000000..66960554a61c80
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out
@@ -0,0 +1,133 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv b/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv
new file mode 100644
index 00000000000000..c13ead6ffadb5d
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv
@@ -0,0 +1,6 @@
+2,22,22,1
+3,33,33,1
+5,55,55,0
+6,66,66,1
+7,77,77,1
+10,1010,1010,0
diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy
new file mode 100644
index 00000000000000..caae2c59af8ea5
--- /dev/null
+++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy
@@ -0,0 +1,104 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.Assert
+import java.util.concurrent.TimeUnit
+import org.awaitility.Awaitility
+
+suite("test_partial_update_with_delete_col_in_publish", "nonConcurrent") {
+
+    def tableName = "test_partial_update_with_delete_col_in_publish"
+    sql """ DROP TABLE IF EXISTS ${tableName} force;"""
+    sql """ CREATE TABLE ${tableName} (
+        `k` int(11) NULL,
+        `v1` BIGINT NULL,
+        `v2` BIGINT NULL,
+        `v3` BIGINT NULL,
+        `v4` BIGINT NULL,
+        ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1
+        PROPERTIES(
+            "replication_num" = "1",
+            "enable_unique_key_merge_on_write" = "true",
+            "light_schema_change" = "true",
+            "store_row_column" = "false"); """
+    def show_res = sql "show create table ${tableName}"
+    sql """insert into ${tableName} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);"""
+    qt_sql "select * from ${tableName} order by k;"
+
+    def enable_publish_spin_wait = {
+        if (isCloudMode()) {
+            GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.enable_spin_wait")
+        } else {
+            GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait")
+        }
+    }
+
+    def enable_block_in_publish = {
+        if (isCloudMode()) {
+            GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block")
+        } else {
+            GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+        }
+    }
+
+    def disable_block_in_publish = {
+        if (isCloudMode()) {
+            GetDebugPoint().disableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block")
+        } else {
+            GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+        }
+    }
+
+    try {
+        GetDebugPoint().clearDebugPointsForAllFEs()
+        GetDebugPoint().clearDebugPointsForAllBEs()
+
+        // block the partial update in publish phase
+        enable_publish_spin_wait()
+        enable_block_in_publish()
+
+        def threads = []
+
+        threads << Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false"
+            sql "sync;"
+            sql "insert into ${tableName}(k,v1,v2,__DORIS_DELETE_SIGN__) values(2,222,222,1),(4,-1,-1,0),(5,555,555,1);"
+        }
+
+        Thread.sleep(500)
+
+        threads << Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "sync;"
+            sql "insert into ${tableName}(k,v3,v4) values(1,987,987),(2,987,987),(4,987,987),(5,987,987);"
+        }
+
+        Thread.sleep(500)
+
+        disable_block_in_publish()
+        threads.each { t -> t.join() }
+
+        qt_sql "select * from ${tableName} order by k;"
+    } catch(Exception e) {
+        logger.info(e.getMessage())
+        throw e
+    } finally {
+        GetDebugPoint().clearDebugPointsForAllFEs()
+        GetDebugPoint().clearDebugPointsForAllBEs()
+    }
+}
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy
new file mode 100644
index 00000000000000..ee2a56f7ee5c6c
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy
@@ -0,0 +1,98 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_with_delete_sign_col", "p0") {
+
+    String db = context.config.getDbNameByFile(context.file)
+    sql "select 1;" // to create database
+
+    def inspectRows = { sqlStr ->
+        sql "set skip_delete_sign=true;"
+        sql "set skip_delete_bitmap=true;"
+        sql "sync"
+        qt_inspect sqlStr
+        sql "set skip_delete_sign=false;"
+        sql "set skip_delete_bitmap=false;"
+        sql "sync"
+    }
+
+
+    for (def use_row_store : [false, true]) {
+        logger.info("current params: use_row_store: ${use_row_store}")
+
+        connect( context.config.jdbcUser, context.config.jdbcPassword, context.config.jdbcUrl) {
+            sql "use ${db};"
+            def table1 = "test_with_delete_sign_col"
+            sql "DROP TABLE IF EXISTS ${table1} FORCE;"
+            sql """ CREATE TABLE IF NOT EXISTS ${table1} (
+                `k1` int NOT NULL,
+                `c1` int,
+                `c2` int default "999",
+                `c3` int default "888",
+                `c4` int default "777"
+                )UNIQUE KEY(k1)
+            DISTRIBUTED BY HASH(k1) BUCKETS 1
+            PROPERTIES (
+                "enable_mow_light_delete" = "false",
+                "disable_auto_compaction" = "true",
+                "replication_num" = "1"); """
+
+            sql """insert into ${table1} select number,number,number,number,number from numbers("number"="11");"""
+            qt_sql_1 "select * from ${table1} order by k1;"
+
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+
+            sql "insert into ${table1}(k1,c3,c4,__DORIS_DELETE_SIGN__) values(2,22,22,1),(3,33,33,1),(5,55,55,0),(6,66,66,1),(7,77,77,1),(10,1010,1010,0);"
+            qt_sql_1 "select * from ${table1} order by k1;"
+
+            sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__) values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
+            sql "set enable_unique_key_partial_update=false;"
+            sql "set enable_insert_strict=true;"
+            sql "sync;"
+            qt_sql_1 "select * from ${table1} order by k1;"
+
+
+            sql "truncate table ${table1};"
+            sql """insert into ${table1} select number,number,number,number,number from numbers("number"="11");"""
+            qt_sql_2 "select * from ${table1} order by k1;"
+
+            streamLoad {
+                table "${table1}"
+                set 'column_separator', ','
+                set 'format', 'csv'
+                set 'columns', 'k1,c3,c4,del'
+                set 'partial_columns', 'true'
+                set 'merge_type', 'MERGE'
+                set 'delete', 'del=1'
+                file 'with_delete1.csv'
+                time 10000
+            }
+            qt_sql_2 "select * from ${table1} order by k1;"
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__) values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
+            sql "set enable_unique_key_partial_update=false;"
+            sql "set enable_insert_strict=true;"
+            sql "sync;"
+            qt_sql_1 "select * from ${table1} order by k1;"
+        }
+    }
+}