Skip to content

Commit

Permalink
add case
Browse files Browse the repository at this point in the history
  • Loading branch information
bobhan1 committed Jan 7, 2025
1 parent 56ee1cb commit 0ee059e
Show file tree
Hide file tree
Showing 6 changed files with 346 additions and 2 deletions.
4 changes: 2 additions & 2 deletions be/src/olap/rowset/segment_v2/segment_writer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -660,7 +660,7 @@ Status SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
auto tablet = static_cast<Tablet*>(_tablet.get());
// create old value columns
const auto& cids_missing = _opts.rowset_ctx->partial_update_info->missing_cids;
auto cids_to_read = missing_cids;
auto cids_to_read = cids_missing;
auto old_value_block = _tablet_schema->create_block_by_cids(cids_missing);
CHECK_EQ(cids_missing.size(), old_value_block.columns());
// always read delete sign column from historical data
Expand Down Expand Up @@ -695,7 +695,7 @@ Status SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
continue;
}
auto mutable_old_columns = old_value_block.mutate_columns();
for (size_t cid = 0; cid < cids_to_read.size(); ++cid) {
for (size_t cid = 0; cid < mutable_old_columns.size(); ++cid) {
TabletColumn tablet_column = _tablet_schema->column(cids_to_read[cid]);
auto st = tablet->fetch_value_by_rowids(rowset, seg_it.first, rids, tablet_column,
mutable_old_columns[cid]);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !sql --
1 1 1 1 1
2 2 2 2 2
3 3 3 3 3
4 4 4 4 4
5 5 5 5 5

-- !sql --
1 1 1 987 987
2 \N \N 987 987
3 3 3 3 3
4 -1 -1 987 987
5 \N \N 987 987

Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !sql_1 --
0 0 0 0 0
1 1 1 1 1
2 2 2 2 2
3 3 3 3 3
4 4 4 4 4
5 5 5 5 5
6 6 6 6 6
7 7 7 7 7
8 8 8 8 8
9 9 9 9 9
10 10 10 10 10

-- !sql_1 --
0 0 0 0 0
1 1 1 1 1
4 4 4 4 4
5 5 5 55 55
8 8 8 8 8
9 9 9 9 9
10 10 10 1010 1010

-- !sql_1 --
0 0 0 0 0
1 11 11 1 1
2 22 22 888 777
3 33 33 888 777
4 4 4 4 4
5 5 5 55 55
6 66 66 888 777
8 8 8 8 8
10 10 10 1010 1010

-- !sql_2 --
0 0 0 0 0
1 1 1 1 1
2 2 2 2 2
3 3 3 3 3
4 4 4 4 4
5 5 5 5 5
6 6 6 6 6
7 7 7 7 7
8 8 8 8 8
9 9 9 9 9
10 10 10 10 10

-- !sql_2 --
0 0 0 0 0
1 1 1 1 1
4 4 4 4 4
5 5 5 55 55
8 8 8 8 8
9 9 9 9 9
10 10 10 1010 1010

-- !sql_1 --
0 0 0 0 0
1 11 11 1 1
2 22 22 888 777
3 33 33 888 777
4 4 4 4 4
5 5 5 55 55
6 66 66 888 777
8 8 8 8 8
10 10 10 1010 1010

-- !sql_1 --
0 0 0 0 0
1 1 1 1 1
2 2 2 2 2
3 3 3 3 3
4 4 4 4 4
5 5 5 5 5
6 6 6 6 6
7 7 7 7 7
8 8 8 8 8
9 9 9 9 9
10 10 10 10 10

-- !sql_1 --
0 0 0 0 0
1 1 1 1 1
4 4 4 4 4
5 5 5 55 55
8 8 8 8 8
9 9 9 9 9
10 10 10 1010 1010

-- !sql_1 --
0 0 0 0 0
1 11 11 1 1
2 22 22 888 777
3 33 33 888 777
4 4 4 4 4
5 5 5 55 55
6 66 66 888 777
8 8 8 8 8
10 10 10 1010 1010

-- !sql_2 --
0 0 0 0 0
1 1 1 1 1
2 2 2 2 2
3 3 3 3 3
4 4 4 4 4
5 5 5 5 5
6 6 6 6 6
7 7 7 7 7
8 8 8 8 8
9 9 9 9 9
10 10 10 10 10

-- !sql_2 --
0 0 0 0 0
1 1 1 1 1
4 4 4 4 4
5 5 5 55 55
8 8 8 8 8
9 9 9 9 9
10 10 10 1010 1010

-- !sql_1 --
0 0 0 0 0
1 11 11 1 1
2 22 22 888 777
3 33 33 888 777
4 4 4 4 4
5 5 5 55 55
6 66 66 888 777
8 8 8 8 8
10 10 10 1010 1010

Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
2,22,22,1
3,33,33,1
5,55,55,0
6,66,66,1
7,77,77,1
10,1010,1010,0
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

import org.junit.Assert
import java.util.concurrent.TimeUnit
import org.awaitility.Awaitility

// Regression test: two conflicting partial-update loads on a Merge-on-Write
// unique-key table, where the first load carries the __DORIS_DELETE_SIGN__
// column and both loads are forced to conflict in the publish phase via BE
// debug points. Tagged "nonConcurrent" because the debug points are BE-wide
// and would interfere with suites running in parallel.
suite("test_partial_update_with_delete_col_in_publish", "nonConcurrent") {

// Fresh MOW table; row store disabled so reads go through the segment path.
def tableName = "test_partial_update_with_delete_col_in_publish"
sql """ DROP TABLE IF EXISTS ${tableName} force;"""
sql """ CREATE TABLE ${tableName} (
`k` int(11) NULL,
`v1` BIGINT NULL,
`v2` BIGINT NULL,
`v3` BIGINT NULL,
`v4` BIGINT NULL,
) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1
PROPERTIES(
"replication_num" = "1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"store_row_column" = "false"); """
sql """insert into ${tableName} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);"""
qt_sql "select * from ${tableName} order by k;"

// Debug-point helpers: spin-wait makes publish tasks poll instead of fail,
// block holds them until explicitly released.
def enable_publish_spin_wait = {
GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait")
}

def enable_block_in_publish = {
GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
}

def disable_block_in_publish = {
GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
}

try {
GetDebugPoint().clearDebugPointsForAllFEs()
GetDebugPoint().clearDebugPointsForAllBEs()

// block the partial update in publish phase
enable_publish_spin_wait()
enable_block_in_publish()

def threads = []

// Load 1: partial update on (v1,v2) that also deletes keys 2 and 5 via
// the delete-sign column; it reaches publish and is held by the block point.
threads << Thread.start {
sql "set enable_unique_key_partial_update=true;"
sql "set enable_insert_strict=false"
sql "sync;"
sql "insert into ${tableName}(k,v1,v2,__DORIS_DELETE_SIGN__) values(2,222,222,1),(4,-1,-1,0),(5,555,555,1);"
}

Thread.sleep(500)

// Load 2: partial update on (v3,v4) for overlapping keys. When it aligns
// in publish it must account for load 1's delete-sign rows while filling
// the columns it does not write.
threads << Thread.start {
sql "set enable_unique_key_partial_update=true;"
sql "sync;"
sql "insert into ${tableName}(k,v3,v4) values(1,987,987),(2,987,987),(4,987,987),(5,987,987);"
}

Thread.sleep(500)

// Release publish and wait for both loads before checking the final rows.
disable_block_in_publish()
threads.each { t -> t.join() }

qt_sql "select * from ${tableName} order by k;"
} catch(Exception e) {
logger.info(e.getMessage())
throw e
} finally {
// Always clear debug points so later suites start from a clean BE/FE state.
GetDebugPoint().clearDebugPointsForAllFEs()
GetDebugPoint().clearDebugPointsForAllBEs()
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Tests partial updates that carry the __DORIS_DELETE_SIGN__ column on a
// Merge-on-Write unique-key table, via INSERT and via stream load
// (merge_type=MERGE with a delete condition), under both row-store layouts.
suite("test_with_delete_sign_col", "p0") {

String db = context.config.getDbNameByFile(context.file)
sql "select 1;" // to create database

// Debug helper: dump rows ignoring delete signs/bitmaps, then restore the
// session flags. Kept for manual troubleshooting; not called by the test.
def inspectRows = { sqlStr ->
sql "set skip_delete_sign=true;"
sql "set skip_delete_bitmap=true;"
sql "sync"
qt_inspect sqlStr
sql "set skip_delete_sign=false;"
sql "set skip_delete_bitmap=false;"
sql "sync"
}


// Run the whole scenario once per row-store layout.
for (def use_row_store : [false, true]) {
logger.info("current params: use_row_store: ${use_row_store}")

connect( context.config.jdbcUser, context.config.jdbcPassword, context.config.jdbcUrl) {
sql "use ${db};"
def table1 = "test_with_delete_sign_col"
sql "DROP TABLE IF EXISTS ${table1} FORCE;"
// FIX: apply the loop variable via "store_row_column"; previously both
// iterations created identical non-row-store tables, so the row-store
// variant was never actually tested.
sql """ CREATE TABLE IF NOT EXISTS ${table1} (
`k1` int NOT NULL,
`c1` int,
`c2` int default "999",
`c3` int default "888",
`c4` int default "777"
)UNIQUE KEY(k1)
DISTRIBUTED BY HASH(k1) BUCKETS 1
PROPERTIES (
"enable_mow_light_delete" = "false",
"disable_auto_compaction" = "true",
"store_row_column" = "${use_row_store}",
"replication_num" = "1"); """

sql """insert into ${table1} select number,number,number,number,number from numbers("number"="11");"""
qt_sql_1 "select * from ${table1} order by k1;"

// Partial update writing (c3,c4) plus delete sign: keys 2,3,6,7 are
// deleted (sign=1); keys 5 and 10 (sign=0) are updated/upserted.
sql "set enable_unique_key_partial_update=true;"
sql "set enable_insert_strict=false;"
sql "sync;"

sql "insert into ${table1}(k1,c3,c4,__DORIS_DELETE_SIGN__) values(2,22,22,1),(3,33,33,1),(5,55,55,0),(6,66,66,1),(7,77,77,1),(10,1010,1010,0);"
qt_sql_1 "select * from ${table1} order by k1;"

// Partial update on (c1,c2): previously-deleted keys 2,3,6 come back with
// defaults for the missing columns; key 9 is deleted via sign=1.
sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__) values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
sql "set enable_unique_key_partial_update=false;"
sql "set enable_insert_strict=true;"
sql "sync;"
qt_sql_1 "select * from ${table1} order by k1;"


// Same scenario driven by stream load: MERGE with delete condition del=1
// over the partial columns (k1,c3,c4).
sql "truncate table ${table1};"
sql """insert into ${table1} select number,number,number,number,number from numbers("number"="11");"""
qt_sql_2 "select * from ${table1} order by k1;"

streamLoad {
table "${table1}"
set 'column_separator', ','
set 'format', 'csv'
set 'columns', 'k1,c3,c4,del'
set 'partial_columns', 'true'
set 'merge_type', 'MERGE'
set 'delete', 'del=1'
file 'with_delete1.csv'
time 10000
}
qt_sql_2 "select * from ${table1} order by k1;"
// Follow-up partial update on (c1,c2) over the stream-loaded state; the
// expected rows match the INSERT-driven variant above.
sql "set enable_unique_key_partial_update=true;"
sql "set enable_insert_strict=false;"
sql "sync;"
sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__) values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
sql "set enable_unique_key_partial_update=false;"
sql "set enable_insert_strict=true;"
sql "sync;"
qt_sql_1 "select * from ${table1} order by k1;"
}
}
}

0 comments on commit 0ee059e

Please sign in to comment.