[Fix] Add compile check for SchemaScanner (#45797)
wangbo authored Dec 24, 2024
1 parent 583e6cd commit cd42ec1
Showing 15 changed files with 70 additions and 42 deletions.
6 changes: 4 additions & 2 deletions be/src/exec/schema_scanner/schema_active_queries_scanner.cpp
@@ -26,6 +26,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaActiveQueriesScanner::_s_tbls_columns = {
// name, type, size
{"QUERY_ID", TYPE_VARCHAR, sizeof(StringRef), true},
@@ -92,7 +94,7 @@ Status SchemaActiveQueriesScanner::_get_active_queries_block_from_fe() {
_active_query_block->reserve(_block_rows_limit);

if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>("active queries schema is not match for FE and BE");
}
@@ -119,7 +121,7 @@ Status SchemaActiveQueriesScanner::get_next_block_internal(vectorized::Block* bl

if (_active_query_block == nullptr) {
RETURN_IF_ERROR(_get_active_queries_block_from_fe());
- _total_rows = _active_query_block->rows();
+ _total_rows = (int)_active_query_block->rows();
}

if (_row_idx == _total_rows) {
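The new #include "common/compile_check_begin.h" line sits inside namespace doris, after every other include, so whatever stricter diagnostics it turns on apply only to this scanner's own code and not to the headers pulled in above. The header's contents are not shown in this commit; the following is only a rough sketch, assuming it promotes clang's implicit-narrowing warnings to errors and that a matching common/compile_check_end.h restores the previous state:

// Hypothetical compile_check_begin.h -- an assumption, not taken from this commit.
#if defined(__clang__)
#pragma clang diagnostic push
// Make implicit narrowing a hard error, so conversions such as size_t -> int
// below must be written out explicitly.
#pragma clang diagnostic error "-Wconversion"
#pragma clang diagnostic error "-Wshorten-64-to-32"
#endif

// Hypothetical compile_check_end.h -- the matching pop.
#if defined(__clang__)
#pragma clang diagnostic pop
#endif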
4 changes: 3 additions & 1 deletion be/src/exec/schema_scanner/schema_backend_active_tasks.cpp
@@ -25,6 +25,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaBackendActiveTasksScanner::_s_tbls_columns = {
// name, type, size
{"BE_ID", TYPE_BIGINT, sizeof(int64_t), false},
@@ -76,7 +78,7 @@ Status SchemaBackendActiveTasksScanner::get_next_block_internal(vectorized::Bloc

ExecEnv::GetInstance()->runtime_query_statistics_mgr()->get_active_be_tasks_block(
_task_stats_block.get());
- _total_rows = _task_stats_block->rows();
+ _total_rows = (int)_task_stats_block->rows();
}

if (_row_idx == _total_rows) {
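vectorized::Block::rows() returns size_t while _total_rows in these scanners is an int, so with narrowing conversions treated as errors the plain assignment no longer compiles; the commit keeps the int member and writes the truncation out as (int). A minimal, self-contained illustration with toy types (none of these names are Doris code):

#include <cstddef>
#include <vector>

struct ToyBlock {
    std::vector<int> storage;
    size_t rows() const { return storage.size(); }  // 64-bit value, like Block::rows()
};

struct ToyScanner {
    int _total_rows = 0;  // the scanners keep a 32-bit counter, as in the diff

    void set_total(const ToyBlock& b) {
        // _total_rows = b.rows();      // implicit size_t -> int: rejected once the check is on
        _total_rows = (int)b.rows();    // the fix this commit applies: an explicit, visible cast
        // An alternative fix would be declaring _total_rows as size_t and dropping the cast.
    }
};

int main() {
    ToyBlock b;
    b.storage.resize(3);
    ToyScanner s;
    s.set_total(b);
    return s._total_rows;  // 3
}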
@@ -27,6 +27,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaCatalogMetaCacheStatsScanner::_s_tbls_columns = {
{"CATALOG_NAME", TYPE_STRING, sizeof(StringRef), true},
{"CACHE_NAME", TYPE_STRING, sizeof(StringRef), true},
@@ -86,7 +88,7 @@ Status SchemaCatalogMetaCacheStatsScanner::_get_meta_cache_from_fe() {
_block->reserve(_block_rows_limit);

if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>(
"catalog meta cache stats schema is not match for FE and BE");
@@ -115,7 +117,7 @@ Status SchemaCatalogMetaCacheStatsScanner::get_next_block_internal(vectorized::B

if (_block == nullptr) {
RETURN_IF_ERROR(_get_meta_cache_from_fe());
- _total_rows = _block->rows();
+ _total_rows = (int)_block->rows();
}

if (_row_idx == _total_rows) {
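Switching int col_size to auto col_size lets the variable pick up the size_t that column_value.size() actually returns, which removes the narrowing assignment and keeps the later col_size != _s_tbls_columns.size() comparison unsigned-versus-unsigned. A small stand-alone sketch of both points (illustrative data only, not Doris code):

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> column_value = {1, 2, 3};       // one row's values from the FE result
    std::vector<int> schema_columns = {1, 2, 3, 4};  // stands in for _s_tbls_columns

    // int col_size = column_value.size();   // size_t -> int narrowing, an error under the new check
    auto col_size = column_value.size();     // deduces size_t, so no conversion happens at all

    if (col_size != schema_columns.size()) { // unsigned vs unsigned, no sign-compare issue either
        std::puts("schema mismatch between FE and BE");
    }
    return 0;
}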
12 changes: 7 additions & 5 deletions be/src/exec/schema_scanner/schema_columns_scanner.cpp
@@ -30,6 +30,8 @@
#include "vec/common/string_ref.h"

namespace doris {
#include "common/compile_check_begin.h"

class RuntimeState;

namespace vectorized {
@@ -411,7 +413,7 @@ Status SchemaColumnsScanner::_fill_block_impl(vectorized::Block* block) {
{
std::vector<StringRef> strs(columns_num);
int offset_index = 0;
- int cur_table_index = _table_index - _desc_result.tables_offset.size();
+ int cur_table_index = int(_table_index - _desc_result.tables_offset.size());

for (int i = 0; i < columns_num; ++i) {
while (_desc_result.tables_offset[offset_index] <= i) {
@@ -609,14 +611,14 @@ Status SchemaColumnsScanner::_fill_block_impl(vectorized::Block* block) {
// EXTRA
{
StringRef str = StringRef("", 0);
- std::vector<void*> datas(columns_num, &str);
- RETURN_IF_ERROR(fill_dest_column_for_range(block, 17, datas));
+ std::vector<void*> filled_values(columns_num, &str);
+ RETURN_IF_ERROR(fill_dest_column_for_range(block, 17, filled_values));
}
// PRIVILEGES
{
StringRef str = StringRef("", 0);
- std::vector<void*> datas(columns_num, &str);
- RETURN_IF_ERROR(fill_dest_column_for_range(block, 18, datas));
+ std::vector<void*> filled_values(columns_num, &str);
+ RETURN_IF_ERROR(fill_dest_column_for_range(block, 18, filled_values));
}
// COLUMN_COMMENT
{
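In the columns scanner both operands of _table_index - _desc_result.tables_offset.size() are unsigned, so the subtraction happens in size_t and only the final result is narrowed; wrapping it in a functional-style int(...) keeps that truncation explicit without changing the arithmetic. (The datas -> filled_values change in the same file looks like a pure readability rename.) A hypothetical stand-in for the computation, with invented parameter names:

#include <cstddef>
#include <vector>

// Illustrative only: the real members and invariants live in SchemaColumnsScanner.
int current_table_index(size_t table_index, const std::vector<int>& tables_offset) {
    // The subtraction stays in size_t, so the caller must guarantee it cannot
    // underflow; int(...) then states the narrowing explicitly, which is the
    // only thing this commit changes about the expression.
    return int(table_index - tables_offset.size());
}

int main() {
    return current_table_index(5, std::vector<int>(3));  // 5 - 3 = 2
}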
3 changes: 2 additions & 1 deletion be/src/exec/schema_scanner/schema_file_cache_statistics.cpp
@@ -25,6 +25,7 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaFileCacheStatisticsScanner::_s_tbls_columns = {
// name, type, size
@@ -68,7 +69,7 @@ Status SchemaFileCacheStatisticsScanner::get_next_block_internal(vectorized::Blo
_stats_block->reserve(_block_rows_limit);

ExecEnv::GetInstance()->file_cache_factory()->get_cache_stats_block(_stats_block.get());
- _total_rows = _stats_block->rows();
+ _total_rows = (int)_stats_block->rows();
}

if (_row_idx == _total_rows) {
6 changes: 4 additions & 2 deletions be/src/exec/schema_scanner/schema_partitions_scanner.cpp
@@ -31,6 +31,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

class RuntimeState;
namespace vectorized {
class Block;
@@ -138,7 +140,7 @@ Status SchemaPartitionsScanner::get_onedb_info_from_fe(int64_t dbId) {
}
_partitions_block->reserve(_block_rows_limit);
if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>("table options schema is not match for FE and BE");
}
@@ -178,7 +180,7 @@ Status SchemaPartitionsScanner::get_next_block_internal(vectorized::Block* block
if (_db_index < _db_result.db_ids.size()) {
RETURN_IF_ERROR(get_onedb_info_from_fe(_db_result.db_ids[_db_index]));
_row_idx = 0; // reset row index so that it start filling for next block.
- _total_rows = _partitions_block->rows();
+ _total_rows = (int)_partitions_block->rows();
_db_index++;
}
}
3 changes: 2 additions & 1 deletion be/src/exec/schema_scanner/schema_processlist_scanner.cpp
@@ -30,6 +30,7 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaProcessListScanner::_s_processlist_columns = {
{"CURRENT_CONNECTED", TYPE_VARCHAR, sizeof(StringRef), false},
@@ -126,7 +127,7 @@ Status SchemaProcessListScanner::_fill_block_impl(vectorized::Block* block) {
datas[row_idx] = &int_vals[row_idx];
} else if (_s_processlist_columns[col_idx].type == TYPE_DATETIMEV2) {
auto* dv = reinterpret_cast<DateV2Value<DateTimeV2ValueType>*>(&int_vals[row_idx]);
- if (!dv->from_date_str(column_value.data(), column_value.size(), -1,
+ if (!dv->from_date_str(column_value.data(), (int)column_value.size(), -1,
config::allow_zero_date)) {
return Status::InternalError(
"process list meet invalid data, column={}, data={}, reason={}",
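Here the length argument feeds from_date_str, which takes it as a narrower signed integer, while column_value.size() is a size_t; the commit makes that narrowing explicit with (int) at the call site. The same shape appears whenever a 64-bit length is handed to a 32-bit API parameter; a generic, self-contained sketch with a toy parser standing in for the real call:

#include <cstdio>
#include <cstring>
#include <string>

// Toy stand-in for an API that takes (const char*, int), like the call above.
bool looks_like_2024(const char* data, int len) {
    return len >= 4 && std::strncmp(data, "2024", 4) == 0;
}

int main() {
    std::string column_value = "2024-12-24 00:00:00";
    // looks_like_2024(column_value.data(), column_value.size());             // size_t -> int narrowing
    bool ok = looks_like_2024(column_value.data(), (int)column_value.size()); // explicit, as in the diff
    std::printf("parsed: %d\n", ok ? 1 : 0);
    return 0;
}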
6 changes: 4 additions & 2 deletions be/src/exec/schema_scanner/schema_routine_scanner.cpp
@@ -26,6 +26,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaRoutinesScanner::_s_tbls_columns = {
{"SPECIFIC_NAME", TYPE_VARCHAR, sizeof(StringRef), true},
{"ROUTINE_CATALOG", TYPE_VARCHAR, sizeof(StringRef), true},
@@ -94,7 +96,7 @@ Status SchemaRoutinesScanner::get_block_from_fe() {
}
_routines_block->reserve(_block_rows_limit);
if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>("routine table schema is not match for FE and BE");
}
@@ -121,7 +123,7 @@ Status SchemaRoutinesScanner::get_next_block_internal(vectorized::Block* block,

if (_routines_block == nullptr) {
RETURN_IF_ERROR(get_block_from_fe());
- _total_rows = _routines_block->rows();
+ _total_rows = (int)_routines_block->rows();
}

if (_row_idx == _total_rows) {
32 changes: 17 additions & 15 deletions be/src/exec/schema_scanner/schema_rowsets_scanner.cpp
@@ -48,6 +48,8 @@ namespace vectorized {
class Block;
} // namespace vectorized

#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaRowsetsScanner::_s_tbls_columns = {
// name, type, size, is_null
{"BACKEND_ID", TYPE_BIGINT, sizeof(int64_t), true},
@@ -132,13 +134,13 @@ Status SchemaRowsetsScanner::get_next_block_internal(vectorized::Block* block, b
Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
SCOPED_TIMER(_fill_block_timer);
size_t fill_rowsets_num = std::min(1000UL, rowsets_.size() - _rowsets_idx);
- auto fill_idx_begin = _rowsets_idx;
- auto fill_idx_end = _rowsets_idx + fill_rowsets_num;
+ size_t fill_idx_begin = _rowsets_idx;
+ size_t fill_idx_end = _rowsets_idx + fill_rowsets_num;
std::vector<void*> datas(fill_rowsets_num);
// BACKEND_ID
{
int64_t src = backend_id_;
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
datas[i - fill_idx_begin] = &src;
}
RETURN_IF_ERROR(fill_dest_column_for_range(block, 0, datas));
@@ -147,7 +149,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
{
std::vector<std::string> rowset_ids(fill_rowsets_num);
std::vector<StringRef> strs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
rowset_ids[i - fill_idx_begin] = rowset->rowset_id().to_string();
strs[i - fill_idx_begin] = StringRef(rowset_ids[i - fill_idx_begin].c_str(),
@@ -159,7 +161,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// TABLET_ID
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->rowset_meta()->tablet_id();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -169,7 +171,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// ROWSET_NUM_ROWS
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->num_rows();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -179,7 +181,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// TXN_ID
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->txn_id();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -189,7 +191,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// NUM_SEGMENTS
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->num_segments();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -199,7 +201,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// START_VERSION
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->start_version();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -209,7 +211,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// END_VERSION
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->end_version();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -219,7 +221,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// INDEX_DISK_SIZE
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->index_disk_size();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -229,7 +231,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// DATA_DISK_SIZE
{
std::vector<int64_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->data_disk_size();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
@@ -239,7 +241,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// CREATION_TIME
{
std::vector<VecDateTimeValue> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
int64_t creation_time = rowset->creation_time();
srcs[i - fill_idx_begin].from_unixtime(creation_time, TimezoneUtils::default_time_zone);
@@ -250,7 +252,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// NEWEST_WRITE_TIMESTAMP
{
std::vector<VecDateTimeValue> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
int64_t newest_write_timestamp = rowset->newest_write_timestamp();
srcs[i - fill_idx_begin].from_unixtime(newest_write_timestamp,
@@ -262,7 +264,7 @@ Status SchemaRowsetsScanner::_fill_block_impl(vectorized::Block* block) {
// SCHEMA_VERSION
{
std::vector<int32_t> srcs(fill_rowsets_num);
- for (int i = fill_idx_begin; i < fill_idx_end; ++i) {
+ for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
RowsetSharedPtr rowset = rowsets_[i];
srcs[i - fill_idx_begin] = rowset->tablet_schema()->schema_version();
datas[i - fill_idx_begin] = srcs.data() + i - fill_idx_begin;
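In _fill_block_impl the batch bounds and every loop index move from int/auto to size_t, so the whole index calculation stays in the same unsigned type as rowsets_.size() and nothing is left for the conversion check to flag inside the loops. A compact stand-alone version of that loop pattern (the variable names mirror the diff, but the data is invented):

#include <algorithm>
#include <cstddef>
#include <vector>

int main() {
    std::vector<long> rowsets(2500, 42);  // pretend rowset payload
    size_t rowsets_idx = 1000;            // resume point, like _rowsets_idx

    // Batch size, begin and end all stay size_t, matching rowsets.size().
    size_t fill_num = std::min<size_t>(1000, rowsets.size() - rowsets_idx);
    size_t fill_idx_begin = rowsets_idx;
    size_t fill_idx_end = rowsets_idx + fill_num;

    std::vector<long> batch(fill_num);
    // for (int i = fill_idx_begin; ...)          // old form: size_t -> int, then a mixed comparison
    for (size_t i = fill_idx_begin; i < fill_idx_end; ++i) {
        batch[i - fill_idx_begin] = rowsets[i];   // same relative addressing as datas[i - fill_idx_begin]
    }
    return batch.front() == 42 ? 0 : 1;
}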
6 changes: 4 additions & 2 deletions be/src/exec/schema_scanner/schema_table_options_scanner.cpp
@@ -27,6 +27,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaTableOptionsScanner::_s_tbls_columns = {
{"TABLE_CATALOG", TYPE_VARCHAR, sizeof(StringRef), true},
{"TABLE_SCHEMA", TYPE_VARCHAR, sizeof(StringRef), true},
@@ -110,7 +112,7 @@ Status SchemaTableOptionsScanner::get_onedb_info_from_fe(int64_t dbId) {
}
_tableoptions_block->reserve(_block_rows_limit);
if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>("table options schema is not match for FE and BE");
}
@@ -150,7 +152,7 @@ Status SchemaTableOptionsScanner::get_next_block_internal(vectorized::Block* blo
if (_db_index < _db_result.db_ids.size()) {
RETURN_IF_ERROR(get_onedb_info_from_fe(_db_result.db_ids[_db_index]));
_row_idx = 0; // reset row index so that it start filling for next block.
- _total_rows = _tableoptions_block->rows();
+ _total_rows = (int)_tableoptions_block->rows();
_db_index++;
}
}
@@ -27,6 +27,8 @@
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
#include "common/compile_check_begin.h"

std::vector<SchemaScanner::ColumnDesc> SchemaTablePropertiesScanner::_s_tbls_columns = {
{"TABLE_CATALOG", TYPE_VARCHAR, sizeof(StringRef), true},
{"TABLE_SCHEMA", TYPE_VARCHAR, sizeof(StringRef), true},
@@ -108,7 +110,7 @@ Status SchemaTablePropertiesScanner::get_onedb_info_from_fe(int64_t dbId) {
}
_tableproperties_block->reserve(_block_rows_limit);
if (result_data.size() > 0) {
- int col_size = result_data[0].column_value.size();
+ auto col_size = result_data[0].column_value.size();
if (col_size != _s_tbls_columns.size()) {
return Status::InternalError<false>("table options schema is not match for FE and BE");
}
@@ -148,7 +150,7 @@ Status SchemaTablePropertiesScanner::get_next_block_internal(vectorized::Block*
if (_db_index < _db_result.db_ids.size()) {
RETURN_IF_ERROR(get_onedb_info_from_fe(_db_result.db_ids[_db_index]));
_row_idx = 0; // reset row index so that it start filling for next block.
- _total_rows = _tableproperties_block->rows();
+ _total_rows = (int)_tableproperties_block->rows();
_db_index++;
}
}
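Across these scanners the control flow that the casts protect is the same: fetch one block of rows (from the FE or a local manager), remember its size in _total_rows, then hand the rows out slice by slice, driven by _row_idx, until the block is exhausted. A much-simplified sketch of that pagination loop; everything here (ToyScanner, the 4096 limit, the bool return) is invented for illustration and only mirrors the shape visible in the hunks:

#include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>

struct ToyScanner {
    std::unique_ptr<std::vector<int>> _block;  // stands in for _active_query_block and friends
    int _total_rows = 0;
    int _row_idx = 0;
    int _block_rows_limit = 4096;              // assumed batch size knob

    bool get_next_block(std::vector<int>* out, bool* eos) {
        if (_block == nullptr) {
            _block = std::make_unique<std::vector<int>>(10000, 7);  // "fetch from FE"
            _total_rows = (int)_block->size();  // same explicit narrowing as in the diff
        }
        if (_row_idx == _total_rows) {          // everything already handed out
            *eos = true;
            return true;
        }
        int batch = std::min(_block_rows_limit, _total_rows - _row_idx);
        out->assign(_block->begin() + _row_idx, _block->begin() + _row_idx + batch);
        _row_idx += batch;
        *eos = (_row_idx == _total_rows);
        return true;
    }
};

int main() {
    ToyScanner s;
    std::vector<int> out;
    bool eos = false;
    int handed_out = 0;
    while (!eos) {
        s.get_next_block(&out, &eos);
        handed_out += (int)out.size();
    }
    return handed_out == 10000 ? 0 : 1;
}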