diff --git a/.gitmodules b/.gitmodules
index a778699774..f45803cf28 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -32,12 +32,6 @@
 [submodule "third_party/librdkafka"]
 	path = third_party/librdkafka
 	url = https://github.com/edenhill/librdkafka.git
-[submodule "third_party/protobuf"]
-	path = third_party/protobuf
-	url = https://github.com/protocolbuffers/protobuf.git
-[submodule "third_party/hadoop"]
-	path = third_party/hadoop
-	url = https://github.com/apache/hadoop.git
 [submodule "third_party/HierarchicalKV"]
 	path = third_party/HierarchicalKV
 	url = https://github.com/NVIDIA-Merlin/HierarchicalKV.git
diff --git a/.nspect-vuln-allowlist.toml b/.nspect-vuln-allowlist.toml
index dede93276b..6efea76379 100644
--- a/.nspect-vuln-allowlist.toml
+++ b/.nspect-vuln-allowlist.toml
@@ -1,12 +1,57 @@
-version = "4.3.0"
+version = "24.06"
 
 [oss]
 
 [oss.excluded]
-
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-mapreduce-project']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-tools/hadoop-azure']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-yarn-project/hadoop-yarn']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-tools']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop/hadoop-common']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
+
 [[oss.excluded.directories]]
-paths = ['third_party/hadoop/*']
-comment = 'We do not use and are not planning on using the Hadoop Yarn Web UI'
+paths = ['third_party/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/*']
+comment = 'No Use'
 nspect_ids = ['NSPECT-OZP9-WUQA']
 
 [[oss.excluded.directories]]
@@ -18,3 +63,8 @@ nspect_ids = ['NSPECT-OZP9-WUQA']
 paths = ['third_party/protobuf/*']
 comment = 'We never use csharp, java, php, the thir party googletest, etc., inside ptotobuf'
 nspect_ids = ['NSPECT-OZP9-WUQA']
+
+[[oss.excluded.directories]]
+paths = ['third_party/hadoop', 'third_party/hadoop/*']
+comment = 'No Use'
+nspect_ids = ['NSPECT-OZP9-WUQA']
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 63b5ab334a..6e247b964b 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,6 +14,7 @@
 #
 cmake_minimum_required(VERSION 3.17)
+
 project(HugeCTR LANGUAGES CXX CUDA)
 
 list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
@@ -351,17 +352,57 @@ add_subdirectory(gpu_cache/src)
 
 option(ENABLE_HDFS "Enable HDFS" OFF)
 if(ENABLE_HDFS)
-  if(ENABLE_HDFS STREQUAL "MINIMAL")
-    message("HDFS build mode: Client only")
-  else()
-    message("HDFS build mode: Full")
-  endif()
+  message(STATUS "HDFS build mode: Client only")
 
   set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DENABLE_HDFS")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_HDFS")
+
+  set(FETCHCONTENT_QUIET OFF)
+
+  # Java.
+  if (NOT EXISTS /usr/bin/mvn)
+    execute_process(WORKING_DIRECTORY "${CMAKE_BINARY_DIR}"
+      COMMAND /bin/bash ${PROJECT_SOURCE_DIR}/sbin/install-jdk-and-maven.sh
+      COMMAND_ERROR_IS_FATAL ANY
+    )
+  endif()
 
-  # Build and Install Hadoop
-  include(SetupHadoop)
-  hadoop_setup(${ENABLE_HDFS})
+  # Hadoop.
+  # sudo apt install libboost-date-time-dev
+  # sudo apt install libboost-program-options-dev
+  # sudo apt install libprotobuf-dev
+  # sudo apt install libfuse-dev
+  # sudo apt install libprotoc-dev
+  FetchContent_Declare(hadoop
+    DOWNLOAD_COMMAND git clone
+      --branch rel/release-3.4.0
+      --depth 1
+      --progress https://github.com/apache/hadoop.git
+      "${CMAKE_BINARY_DIR}/_deps/hadoop-src"
+  )
+  FetchContent_Populate(hadoop)
+  set(hadoop_SOURCE_DIR "${hadoop_SOURCE_DIR}/hadoop-hdfs-project/hadoop-hdfs-native-client")
+  set(hadoop_BINARY_DIR "${hadoop_SOURCE_DIR}/target/hadoop-hdfs-native-client-3.4.0")
+  if(EXISTS ${hadoop_BINARY_DIR}/include/hdfs.h AND EXISTS ${hadoop_BINARY_DIR}/lib/native/libhdfs.a)
+    message(STATUS "Found hdfs library in ${hadoop_BINARY_DIR}")
+  else()
+    execute_process(WORKING_DIRECTORY "${hadoop_SOURCE_DIR}"
+      COMMAND mvn clean package
+        -Pdist,native
+        -DskipTests
+        -Dtar
+        -Dmaven.javadoc.skip=true
+        -Drequire.snappy
+        -Drequire.zstd
+        -Drequire.openssl
+        -Drequire.pmdk
+      COMMAND_ERROR_IS_FATAL ANY
+    )
+  endif()
+  set(FETCHCONTENT_QUIET ON)
+
+  include_directories("${hadoop_BINARY_DIR}/include")
+  link_directories("${hadoop_BINARY_DIR}/lib/native")
+  set(ENABLE_HDFS ON)
 endif()
diff --git a/HugeCTR/embedding/all2all_embedding_collection.cu b/HugeCTR/embedding/all2all_embedding_collection.cu
index 8eba46bd6d..63acc5ee29 100644
--- a/HugeCTR/embedding/all2all_embedding_collection.cu
+++ b/HugeCTR/embedding/all2all_embedding_collection.cu
@@ -380,7 +380,7 @@ __global__ void cal_lookup_idx(size_t lookup_num, offset_t *bucket_after_filter,
 }
 
 template
-__global__ void count_ratio_filter(size_t bucket_num, char *filterd, const offset_t *bucket_range,
+__global__ void count_ratio_filter(size_t bucket_num, char *filtered, const offset_t *bucket_range,
                                    offset_t *bucket_after_filter) {
   int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
   int32_t step = blockDim.x * gridDim.x;
@@ -389,7 +389,7 @@ __global__ void count_ratio_filter(size_t bucket_num, char *filterd, const offse
     offset_t end = bucket_range[i + 1];
     bucket_after_filter[i + 1] = 0;
     for (offset_t idx = start; idx < end; idx++) {
-      if (filterd[idx] == 1) {
+      if (filtered[idx] == 1) {
         bucket_after_filter[i + 1]++;
       }
     }
@@ -400,7 +400,7 @@ __global__ void count_ratio_filter(size_t bucket_num, char *filterd, const offse
 }
 
 void filter(std::shared_ptr core,
-            const UniformModelParallelEmbeddingMeta &meta, const core23::Tensor &filterd,
+            const UniformModelParallelEmbeddingMeta &meta, const core23::Tensor &filtered,
             core23::Tensor &bucket_range, core23::Tensor &bucket_after_filter,
             core23::TensorParams &params, EmbeddingInput &emb_input, core23::Tensor &lookup_offset,
             core23::Tensor &temp_scan_storage, core23::Tensor &temp_select_storage,
@@ -416,7 +416,7 @@ void filter(std::shared_ptr core,
   DISPATCH_INTEGRAL_FUNCTION_CORE23(keys_after_filter.data_type().type(), key_t, [&] {
       offset_t *bucket_after_filter_ptr = bucket_after_filter.data();
      const offset_t *bucket_range_ptr = bucket_range.data();
-      char *filterd_ptr = filterd.data();
+      char *filterd_ptr = filtered.data();
       count_ratio_filter<<>>(
           bucket_num, filterd_ptr, bucket_range_ptr, bucket_after_filter_ptr);
       cub::DeviceScan::InclusiveSum(
diff --git a/HugeCTR/include/data_readers/multi_hot/detail/aio_context.hpp b/HugeCTR/include/data_readers/multi_hot/detail/aio_context.hpp
index ab05b31fa0..408636ac7a 100644
--- a/HugeCTR/include/data_readers/multi_hot/detail/aio_context.hpp
+++ b/HugeCTR/include/data_readers/multi_hot/detail/aio_context.hpp
@@ -36,10 +36,11 @@ class AIOContext : public IOContext {
 
   size_t io_depth_ = 0;
   size_t num_inflight_ = 0;
+  size_t alignment_ = 0;
   io_context_t ctx_ = 0;
 
   std::vector tmp_events_;  // prevent dynamic memory allocation
   std::vector iocb_buffer_;
   std::queue free_cbs_;
 };
-}  // namespace HugeCTR
\ No newline at end of file
+}  // namespace HugeCTR
diff --git a/HugeCTR/src/CMakeLists.txt b/HugeCTR/src/CMakeLists.txt
index 66be2d3b9f..6a6158ea9f 100755
--- a/HugeCTR/src/CMakeLists.txt
+++ b/HugeCTR/src/CMakeLists.txt
@@ -67,7 +67,7 @@ target_link_libraries(huge_ctr_shared PRIVATE nlohmann_json::nlohmann_json)
 target_link_libraries(huge_ctr_shared PUBLIC gpu_cache)
 
 if(ENABLE_HDFS)
-  target_link_libraries(huge_ctr_shared PUBLIC ${DB_LIB_PATHS}/libhdfs.so)
+  target_link_libraries(huge_ctr_shared PUBLIC hdfs)
 endif()
 
 if(ENABLE_S3)
diff --git a/HugeCTR/src/data_readers/multi_hot/detail/aio_context.cpp b/HugeCTR/src/data_readers/multi_hot/detail/aio_context.cpp
index d9a8c6ee94..29cb99b73d 100644
--- a/HugeCTR/src/data_readers/multi_hot/detail/aio_context.cpp
+++ b/HugeCTR/src/data_readers/multi_hot/detail/aio_context.cpp
@@ -35,6 +35,12 @@ AIOContext::AIOContext(size_t io_depth) : io_depth_(io_depth), iocb_buffer_(io_d
   if (io_queue_init(io_depth, &ctx_) < 0) {
     throw std::runtime_error("io_queue_init failed");
   }
+
+  long page_size = sysconf(_SC_PAGESIZE);
+  if (page_size == -1) {
+    throw std::runtime_error("sysconf failed to return page size.");
+  }
+  alignment_ = static_cast<size_t>(page_size);
 }
 
 AIOContext::~AIOContext() {
@@ -118,8 +124,6 @@ IOError AIOContext::errno_to_enum(int err) {
   }
 }
 
-size_t AIOContext::get_alignment() const {
-  return 4096;  // O_DIRECT requirement
-}
+size_t AIOContext::get_alignment() const { return alignment_; }
 
-}  // namespace HugeCTR
\ No newline at end of file
+}  // namespace HugeCTR
diff --git a/HugeCTR/src/hps/CMakeLists.txt b/HugeCTR/src/hps/CMakeLists.txt
index db9a9b28a9..a481e9ca86 100644
--- a/HugeCTR/src/hps/CMakeLists.txt
+++ b/HugeCTR/src/hps/CMakeLists.txt
@@ -36,11 +36,7 @@ add_compile_definitions(LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE)
 add_library(huge_ctr_hps SHARED ${huge_ctr_hps_src})
 
 if(ENABLE_HDFS)
-  target_link_libraries(
-    huge_ctr_hps
-    PUBLIC
-      ${DB_LIB_PATHS}/libhdfs.so # from Hugectr
-  )
+  target_link_libraries(huge_ctr_hps PUBLIC hdfs)
 endif()
 
 if(ENABLE_S3)
diff --git a/HugeCTR/src/inference_benchmark/CMakeLists.txt b/HugeCTR/src/inference_benchmark/CMakeLists.txt
index bd7add40da..5873701c10 100644
--- a/HugeCTR/src/inference_benchmark/CMakeLists.txt
+++ b/HugeCTR/src/inference_benchmark/CMakeLists.txt
@@ -20,11 +20,7 @@ file(GLOB hps_benchmark_src
 )
 
 if(ENABLE_HDFS)
-  target_link_libraries(
-    huge_ctr_inference
-    PUBLIC
-      ${DB_LIB_PATHS}/libhdfs.so # from Hugectr
-  )
+  target_link_libraries(huge_ctr_inference PUBLIC hdfs)
 endif()
 
 if(ENABLE_S3)
diff --git a/docs/source/hugectr_contributor_guide.md b/docs/source/hugectr_contributor_guide.md
index 50b8fe2a90..431a5341f1 100755
--- a/docs/source/hugectr_contributor_guide.md
+++ b/docs/source/hugectr_contributor_guide.md
@@ -104,10 +104,10 @@ To build HugeCTR Training Container from source, do the following:
 
   - **ENABLE_INFERENCE**: You can use this option to build HugeCTR in inference mode, which was designed for the inference framework. In this mode, an inference shared library will be built for the HugeCTR Backend. Only interfaces that support the HugeCTR Backend can be used. Therefore, you can’t train models in this mode. This option is set to OFF by default. For building inference container, please refer to [Build HugeCTR Inference Container from Source](#build-hugectr-inference-container-from-source)
 
-  - **ENABLE_HDFS**: You can use this option to build HugeCTR together with HDFS to enable HDFS related functions. Permissible values are `ON`, `MINIMAL` and `OFF` *(default)*. Setting this option to `ON` leads to building all necessary Hadoop modules that are required for building AND running both HugeCTR and HDFS. In contrast, `MINIMAL` restricts building only the minimum necessary set of components for building HugeCTR.
+  - **ENABLE_HDFS**: You can use this option to build HugeCTR together with HDFS to enable HDFS-related functions. Permissible values are `ON` and `OFF` *(default)*. Setting this option to `ON` builds the Hadoop HDFS client components that HugeCTR requires so that it can connect to HDFS deployments.
 
   - **ENABLE_S3**: You can use this option to build HugeCTR together with Amazon AWS S3 SDK to enable S3 related functions. Permissible values are `ON` and `OFF` *(default)*. Setting this option to `ON` leads to building all necessary AWS SKKs and dependencies that are required for building AND running both HugeCTR and S3.
 
-  **Please note that setting DENABLE_HDFS=ON/MINIMAL or DENABLE_S3=ON requires root permission. So before using these two options to do the customized building, make sure you use `-u root` when you run the docker container.**
+  **Please note that setting DENABLE_HDFS=ON or DENABLE_S3=ON requires root permission. So before using these two options to do the customized building, make sure you use `-u root` when you run the docker container.**
 
   Here are some examples of how you can build HugeCTR using these build options:
   ```shell
@@ -124,7 +124,7 @@
 
   ```shell
   $ mkdir -p build && cd build
-  $ cmake -DCMAKE_BUILD_TYPE=Release -DSM="70;80" -DENABLE_HDFS=MINIMAL .. # Target is NVIDIA V100 / A100 with only minimum HDFS components mode on.
+  $ cmake -DCMAKE_BUILD_TYPE=Release -DSM="70;80" -DENABLE_HDFS=ON .. # Target is NVIDIA V100 / A100 with HDFS support enabled.
   $ make -j && make install
   ```
diff --git a/sbin/install-hadoop.sh b/sbin/install-hadoop.sh
index a16905d479..d7c4660894 100755
--- a/sbin/install-hadoop.sh
+++ b/sbin/install-hadoop.sh
@@ -40,7 +40,7 @@ if [[ ! -f "${HADOOP_HOME}/include/hdfs.h" ]]; then
   cp hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h ${HADOOP_HOME}/include
 fi
 
-# Cleanup reundant files.
+# Cleanup redundant files.
 for f in $(find ${HADOOP_HOME} -name *.cmd); do
   rm -rf $f
 done
diff --git a/sparse_operation_kit/CMakeLists.txt b/sparse_operation_kit/CMakeLists.txt
index 9c6de27b0e..91f458d0c8 100644
--- a/sparse_operation_kit/CMakeLists.txt
+++ b/sparse_operation_kit/CMakeLists.txt
@@ -32,6 +32,8 @@ if (NOT TF_RESULT)
   list(GET TF_VERSION_LIST 0 TF_VERSION_MAJOR)
   list(GET TF_VERSION_LIST 1 TF_VERSION_MINOR)
   list(GET TF_VERSION_LIST 2 TF_VERSION_PATCH)
+  message(STATUS "TF_VERSION_MAJOR = ${TF_VERSION_MAJOR}")
+  message(STATUS "TF_VERSION_MINOR = ${TF_VERSION_MINOR}")
   if(${TF_VERSION_MAJOR} GREATER 1 AND ${TF_VERSION_MINOR} GREATER 9)
     add_definitions(-DTF_GE_210)
     set_property(GLOBAL PROPERTY SOK_CXX_STANDARD_PROPERTY cxx_std_17)
@@ -51,6 +53,11 @@ if (NOT TF_RESULT)
   if(${TF_VERSION_MAJOR} GREATER 1 AND ${TF_VERSION_MINOR} GREATER 11)
     add_definitions(-DTF_GE_212)
   endif()
+
+
+  if(${TF_VERSION_MAJOR} GREATER 1 AND ${TF_VERSION_MINOR} GREATER 15)
+    add_definitions(-DTF_GE_216)
+  endif()
 else()
   message(FATAL_ERROR "Can not detect tensorflow in your environment,please install tensorflow(tf1 support version 1.15, for tf2 support version 2.60~latest) ")
 endif()
diff --git a/sparse_operation_kit/ReadMe.md b/sparse_operation_kit/ReadMe.md
index 99e710ec04..08d3adbbc4 100644
--- a/sparse_operation_kit/ReadMe.md
+++ b/sparse_operation_kit/ReadMe.md
@@ -87,7 +87,7 @@ You can also build the SOK module from source code. Here are the steps to follow
 ### Pre-requisites ###
 
 CUDA Version:>= 11.2
 
-TF2 Version:2.6.0~2.14.0
+TF2 Version:2.6.0~2.16.0
 
 TF1 Version:1.15
diff --git a/sparse_operation_kit/kit_src/lookup/impl/core_impl/gpu_resource_impl.hpp b/sparse_operation_kit/kit_src/lookup/impl/core_impl/gpu_resource_impl.hpp
index e16b7fac8a..b8d3e90844 100644
--- a/sparse_operation_kit/kit_src/lookup/impl/core_impl/gpu_resource_impl.hpp
+++ b/sparse_operation_kit/kit_src/lookup/impl/core_impl/gpu_resource_impl.hpp
@@ -27,6 +27,10 @@
 #include "tensorflow/core/common_runtime/gpu_device_context.h"
 #endif
 
+#ifdef TF_GE_216
+#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
+#endif
+
 #include "tensorflow/core/framework/device_base.h"
 #include "tensorflow/core/framework/op_kernel.h"
 #include "tensorflow/core/platform/stream_executor.h"
@@ -49,6 +53,16 @@ class GPUResource final : public GPUResourceBase {
       LOG(FATAL) << "Get DeviceContext fail! please check OpKernel running on GPU.";
     }
     const GPUDeviceContext *gpu_dc = static_cast(dc);
+
+#ifdef TF_GE_216
+    cudaStream_t stream =
+        reinterpret_cast<cudaStream_t>(gpu_dc->stream()->platform_specific_handle().stream);
+
+    if (!stream) {
+      LOG(FATAL) << "Get default CUDA stream fail!";
+    }
+    stream_map_[current_stream_name_] = stream;
+#else
     cudaStream_t *stream =
         reinterpret_cast(gpu_dc->stream()->implementation()->GpuStreamMemberHack());
 
@@ -62,6 +76,8 @@ class GPUResource final : public GPUResourceBase {
       LOG(FATAL) << "Get default CUDA stream fail!";
     }
     stream_map_[current_stream_name_] = *stream;
+
+#endif
   }
 
   void set_stream(const std::string &name) override { current_stream_name_ = name; }
@@ -84,4 +100,4 @@ class GPUResource final : public GPUResourceBase {
   std::string current_stream_name_;
   std::unordered_map stream_map_;
 };
-}  // namespace tf_internal
\ No newline at end of file
+}  // namespace tf_internal
diff --git a/third_party/hadoop b/third_party/hadoop
deleted file mode 160000
index a585a73c3e..0000000000
--- a/third_party/hadoop
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit a585a73c3e02ac62350c136643a5e7f6095a3dbb
diff --git a/third_party/protobuf b/third_party/protobuf
deleted file mode 160000
index 22d0e265de..0000000000
--- a/third_party/protobuf
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 22d0e265de7d2b3d2e9a00d071313502e7d4cccf
diff --git a/tools/dlrm_script/dlrm_raw.cu b/tools/dlrm_script/dlrm_raw.cu
index 1a457098fe..54d666ed7c 100644
--- a/tools/dlrm_script/dlrm_raw.cu
+++ b/tools/dlrm_script/dlrm_raw.cu
@@ -156,7 +156,7 @@ void process_kaggle_dataset(const std::string &input_dir_path, const std::string
       if (col.type().id() == cudf::type_id::STRING) {
         auto str_col = cudf::strings_column_view(col.view());
         int64_t num_strings = str_col.size();
-        char *char_array = const_cast(str_col.chars().data());
+        char *char_array = const_cast<char *>(str_col.chars_begin(cudf::get_default_stream()));
         int32_t *offsets = const_cast(str_col.offsets().data());
 
         build_categorical_index<<>>(
@@ -517,7 +517,7 @@ void process_terabyte_dataset(const std::string &input_dir_path, const std::stri
       if (col.type().id() == cudf::type_id::STRING) {
         auto str_col = cudf::strings_column_view(col.view());
         int64_t num_strings = str_col.size();
-        char *char_array = const_cast(str_col.chars().data());
+        char *char_array = const_cast<char *>(str_col.chars_begin(cudf::get_default_stream()));
         int32_t *offsets = const_cast(str_col.offsets().data());
 
         build_categorical_index<<>>(
diff --git a/tools/dlrm_script/dlrm_raw_utils.hpp b/tools/dlrm_script/dlrm_raw_utils.hpp
index 5f21102c90..821f644ffe 100644
--- a/tools/dlrm_script/dlrm_raw_utils.hpp
+++ b/tools/dlrm_script/dlrm_raw_utils.hpp
@@ -574,7 +574,7 @@ size_t convert_input_binaries(rmm::mr::device_memory_resource *mr, std::string i
 
       for (int k = 0; k < num_categoricals; k++) {
         auto str_col_view = cudf::strings_column_view((col_logs[k + num_numericals]->view()));
-        char_ptrs.push_back(const_cast(str_col_view.chars().data()));
+        char_ptrs.push_back(const_cast<char *>(str_col_view.chars_begin(cudf::get_default_stream())));
         offset_ptrs.push_back(const_cast(str_col_view.offsets().data()));
       }
 
diff --git a/tools/dlrm_script/hash/concurrent_unordered_map.cuh b/tools/dlrm_script/hash/concurrent_unordered_map.cuh
index 2952b76008..c9febba3bb 100644
--- a/tools/dlrm_script/hash/concurrent_unordered_map.cuh
+++ b/tools/dlrm_script/hash/concurrent_unordered_map.cuh
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include
 #ifndef CUDF_GE_2306
 #include
 #endif
@@ -56,8 +55,8 @@ struct packed {
   using type = void;
 };
 template <>
-struct packed<uint64_t> {
-  using type = uint64_t;
+struct packed<unsigned long long> {
+  using type = unsigned long long;
 };
 template <>
 struct packed {
@@ -170,7 +169,6 @@ class concurrent_unordered_map {
       const key_type unused_key = std::numeric_limits::max(),
       const Hasher& hash_function = hasher(), const Equality& equal = key_equal(),
       const allocator_type& allocator = allocator_type(), cudaStream_t stream = 0) {
-    CUDF_FUNC_RANGE();
     using Self = concurrent_unordered_map;
 
     // Note: need `(*p).destroy` instead of `p->destroy` here
diff --git a/tools/dockerfiles/Dockerfile.optimized b/tools/dockerfiles/Dockerfile.optimized
index 415b797e1e..7522db9eba 100644
--- a/tools/dockerfiles/Dockerfile.optimized
+++ b/tools/dockerfiles/Dockerfile.optimized
@@ -26,6 +26,11 @@ ARG RELEASE=true
 
 RUN apt-get update -y && \
     DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+        libboost-date-time-dev \
+        libboost-program-options-dev \
+        libprotobuf-dev \
+        libprotoc-dev \
+        libfuse-dev \
         clang-format \
        libtbb-dev \
        libaio-dev && \