From 2ba7c4714b216fb529d98038f238ffc81531a828 Mon Sep 17 00:00:00 2001 From: xiaoxmeng Date: Sat, 13 Apr 2024 22:37:35 -0700 Subject: [PATCH] Add faulty file system for io failure injections (#9457) Summary: Add a faulty file system to allow us to inject failures for low-level file operations in unit and fuzzer tests. It is implemented under the velox filesystem. It is a wrapper on top of the real file system; it either injects errors or delegates the file operations to the underlying file system. We extend TempDirectoryPath and TempFilePath to allow the user to specify whether to enable fault injection in tests. If enabled, they return the file or directory path with the faulty file system scheme prefix (faulty:). This allows the velox file system to open the file through the faulty file system, and by removing the faulty fs scheme prefix, the faulty file system can delegate the file operation to the corresponding underlying file system. We support three kinds of file fault injections: (1) error injection, which throws for a set (or all) of file operation types; (2) delay injection for a set (or all) of file operation types; (3) custom injection with a user-provided fault injection hook. We define the base structure FaultFileOperation to capture the file fault injection parameters, and each file API extends it with its own operation type. This PR only supports fault injection for read file operations. Next we will extend it to other file operation types as well as fs operation types. 
Pull Request resolved: https://github.com/facebookincubator/velox/pull/9457 Reviewed By: tanjialiang, oerling Differential Revision: D56079500 Pulled By: xiaoxmeng --- velox/common/base/SpillConfig.h | 2 +- velox/common/base/tests/FsTest.cpp | 5 +- .../caching/tests/AsyncDataCacheTest.cpp | 4 +- velox/common/caching/tests/SsdFileTest.cpp | 2 +- velox/common/file/File.cpp | 6 +- velox/common/file/File.h | 9 +- velox/common/file/tests/CMakeLists.txt | 4 +- velox/common/file/tests/FaultyFile.cpp | 79 ++++ velox/common/file/tests/FaultyFile.h | 150 +++++++ velox/common/file/tests/FaultyFileSystem.cpp | 192 +++++++++ velox/common/file/tests/FaultyFileSystem.h | 131 ++++++ velox/common/file/tests/FileTest.cpp | 405 +++++++++++++++--- .../memory/tests/SharedArbitratorTest.cpp | 8 +- .../hive/iceberg/tests/IcebergReadTest.cpp | 16 +- .../abfs/tests/AbfsFileSystemTest.cpp | 2 +- .../hdfs/tests/HdfsFileSystemTest.cpp | 2 +- .../connectors/hive/tests/FileHandleTest.cpp | 4 +- .../hive/tests/HiveDataSinkTest.cpp | 22 +- velox/dwio/common/tests/LocalFileSinkTest.cpp | 2 +- .../common/tests/ReadFileInputStreamTests.cpp | 4 +- velox/dwio/dwrf/test/CacheInputTest.cpp | 2 +- .../tests/writer/ParquetWriterTest.cpp | 3 +- velox/examples/ScanAndSort.cpp | 2 +- velox/exec/fuzzer/AggregationFuzzer.cpp | 6 +- velox/exec/fuzzer/AggregationFuzzerBase.cpp | 2 +- velox/exec/fuzzer/WindowFuzzer.cpp | 2 +- velox/exec/tests/AggregationTest.cpp | 42 +- velox/exec/tests/AssertQueryBuilderTest.cpp | 12 +- velox/exec/tests/GroupedExecutionTest.cpp | 80 ++-- velox/exec/tests/HashJoinBridgeTest.cpp | 4 +- velox/exec/tests/HashJoinTest.cpp | 109 ++--- velox/exec/tests/JoinFuzzer.cpp | 4 +- velox/exec/tests/LimitTest.cpp | 4 +- velox/exec/tests/LocalPartitionTest.cpp | 12 +- velox/exec/tests/MergeJoinTest.cpp | 8 +- velox/exec/tests/MultiFragmentTest.cpp | 10 +- velox/exec/tests/OrderByTest.cpp | 28 +- velox/exec/tests/PrintPlanWithStatsTest.cpp | 4 +- velox/exec/tests/RowNumberTest.cpp | 8 +- 
velox/exec/tests/SortBufferTest.cpp | 6 +- velox/exec/tests/SpillTest.cpp | 12 +- velox/exec/tests/SpillerBenchmarkBase.cpp | 2 +- velox/exec/tests/SpillerTest.cpp | 9 +- velox/exec/tests/TableScanTest.cpp | 293 ++++++------- velox/exec/tests/TableWriteTest.cpp | 180 ++++---- velox/exec/tests/TaskTest.cpp | 24 +- velox/exec/tests/TopNRowNumberTest.cpp | 6 +- velox/exec/tests/WindowTest.cpp | 2 +- velox/exec/tests/utils/ArbitratorTestUtil.cpp | 14 +- .../tests/utils/HiveConnectorTestBase.cpp | 2 +- velox/exec/tests/utils/TempDirectoryPath.cpp | 19 +- velox/exec/tests/utils/TempDirectoryPath.h | 34 +- velox/exec/tests/utils/TempFilePath.cpp | 21 +- velox/exec/tests/utils/TempFilePath.h | 50 ++- velox/expression/tests/ExprTest.cpp | 4 +- .../tests/ExpressionRunnerUnitTest.cpp | 12 +- .../tests/ExpressionVerifierUnitTest.cpp | 2 +- .../tests/utils/AggregationTestBase.cpp | 12 +- .../aggregates/tests/ArbitraryTest.cpp | 2 +- .../aggregates/tests/FirstAggregateTest.cpp | 2 +- velox/vector/tests/VectorSaverTest.cpp | 15 +- 61 files changed, 1495 insertions(+), 619 deletions(-) create mode 100644 velox/common/file/tests/FaultyFile.cpp create mode 100644 velox/common/file/tests/FaultyFile.h create mode 100644 velox/common/file/tests/FaultyFileSystem.cpp create mode 100644 velox/common/file/tests/FaultyFileSystem.h diff --git a/velox/common/base/SpillConfig.h b/velox/common/base/SpillConfig.h index a51801a62a49..2f99ed0c6e2a 100644 --- a/velox/common/base/SpillConfig.h +++ b/velox/common/base/SpillConfig.h @@ -35,7 +35,7 @@ namespace facebook::velox::common { /// Defining type for a callback function that returns the spill directory path. /// Implementations can use it to ensure the path exists before returning. -using GetSpillDirectoryPathCB = std::function; +using GetSpillDirectoryPathCB = std::function; /// The callback used to update the aggregated spill bytes of a query. 
If the /// query spill limit is set, the callback throws if the aggregated spilled diff --git a/velox/common/base/tests/FsTest.cpp b/velox/common/base/tests/FsTest.cpp index a9dbd822c0fa..ebe7a7ee2e51 100644 --- a/velox/common/base/tests/FsTest.cpp +++ b/velox/common/base/tests/FsTest.cpp @@ -24,7 +24,8 @@ namespace facebook::velox::common { class FsTest : public testing::Test {}; TEST_F(FsTest, createDirectory) { - auto rootPath = exec::test::TempDirectoryPath::createTempDirectory(); + auto dir = exec::test::TempDirectoryPath::create(); + auto rootPath = dir->path(); auto tmpDirectoryPath = rootPath + "/first/second/third"; // First time should generate directory successfully. EXPECT_FALSE(fs::exists(tmpDirectoryPath.c_str())); @@ -34,7 +35,7 @@ TEST_F(FsTest, createDirectory) { // Directory already exist, not creating but should return success. EXPECT_TRUE(generateFileDirectory(tmpDirectoryPath.c_str())); EXPECT_TRUE(fs::exists(tmpDirectoryPath.c_str())); - boost::filesystem::remove_all(rootPath); + dir.reset(); EXPECT_FALSE(fs::exists(rootPath.c_str())); } diff --git a/velox/common/caching/tests/AsyncDataCacheTest.cpp b/velox/common/caching/tests/AsyncDataCacheTest.cpp index b74afaabbab4..56427078007f 100644 --- a/velox/common/caching/tests/AsyncDataCacheTest.cpp +++ b/velox/common/caching/tests/AsyncDataCacheTest.cpp @@ -106,7 +106,7 @@ class AsyncDataCacheTest : public testing::Test { tempDirectory_ = exec::test::TempDirectoryPath::create(); } ssdCache = std::make_unique( - fmt::format("{}/cache", tempDirectory_->path), + fmt::format("{}/cache", tempDirectory_->path()), ssdBytes, 4, executor(), @@ -816,7 +816,7 @@ TEST_F(AsyncDataCacheTest, DISABLED_ssd) { cache_->ssdCache()->clear(); // We cut the tail off one of the cache shards. - corruptFile(fmt::format("{}/cache0.cpt", tempDirectory_->path)); + corruptFile(fmt::format("{}/cache0.cpt", tempDirectory_->path())); // We open the cache from checkpoint. 
Reading checks the data integrity, here // we check that more data was read than written. initializeCache(kRamBytes, kSsdBytes); diff --git a/velox/common/caching/tests/SsdFileTest.cpp b/velox/common/caching/tests/SsdFileTest.cpp index 9cb89f2fabb9..80ca323517ad 100644 --- a/velox/common/caching/tests/SsdFileTest.cpp +++ b/velox/common/caching/tests/SsdFileTest.cpp @@ -67,7 +67,7 @@ class SsdFileTest : public testing::Test { tempDirectory_ = exec::test::TempDirectoryPath::create(); ssdFile_ = std::make_unique( - fmt::format("{}/ssdtest", tempDirectory_->path), + fmt::format("{}/ssdtest", tempDirectory_->path()), 0, // shardId bits::roundUp(ssdBytes, SsdFile::kRegionSize) / SsdFile::kRegionSize, 0, // checkpointInternalBytes diff --git a/velox/common/file/File.cpp b/velox/common/file/File.cpp index ea5ef04b4b7e..a7f3f0c03f45 100644 --- a/velox/common/file/File.cpp +++ b/velox/common/file/File.cpp @@ -238,10 +238,8 @@ LocalWriteFile::LocalWriteFile( { if (shouldThrowOnFileAlreadyExists) { FILE* exists = fopen(buf.get(), "rb"); - VELOX_CHECK( - !exists, - "Failure in LocalWriteFile: path '{}' already exists.", - path); + VELOX_CHECK_NULL( + exists, "Failure in LocalWriteFile: path '{}' already exists.", path); } } auto* file = fopen(buf.get(), "ab"); diff --git a/velox/common/file/File.h b/velox/common/file/File.h index 2ce64cad3b98..1193f7c65f79 100644 --- a/velox/common/file/File.h +++ b/velox/common/file/File.h @@ -225,14 +225,15 @@ class InMemoryWriteFile final : public WriteFile { std::string* file_; }; -// Current implementation for the local version is quite simple (e.g. no -// internal arenaing), as local disk writes are expected to be cheap. Local -// files match against any filepath starting with '/'. - +/// Current implementation for the local version is quite simple (e.g. no +/// internal arenaing), as local disk writes are expected to be cheap. Local +/// files match against any filepath starting with '/'. 
class LocalReadFile final : public ReadFile { public: explicit LocalReadFile(std::string_view path); + /// TODO: deprecate this after creating local file all through velox fs + /// interface. explicit LocalReadFile(int32_t fd); ~LocalReadFile(); diff --git a/velox/common/file/tests/CMakeLists.txt b/velox/common/file/tests/CMakeLists.txt index 446ef6a859e1..7ea068f3c879 100644 --- a/velox/common/file/tests/CMakeLists.txt +++ b/velox/common/file/tests/CMakeLists.txt @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -add_library(velox_file_test_utils TestUtils.cpp) +add_library(velox_file_test_utils TestUtils.cpp FaultyFile.cpp + FaultyFileSystem.cpp) + target_link_libraries(velox_file_test_utils PUBLIC velox_file) add_executable(velox_file_test FileTest.cpp UtilsTest.cpp) diff --git a/velox/common/file/tests/FaultyFile.cpp b/velox/common/file/tests/FaultyFile.cpp new file mode 100644 index 000000000000..e17703aafe16 --- /dev/null +++ b/velox/common/file/tests/FaultyFile.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "velox/common/file/tests/FaultyFile.h" + +namespace facebook::velox::tests::utils { + +FaultyReadFile::FaultyReadFile( + const std::string& path, + std::shared_ptr delegatedFile, + FileFaultInjectionHook injectionHook) + : path_(path), + delegatedFile_(std::move(delegatedFile)), + injectionHook_(std::move(injectionHook)) { + VELOX_CHECK_NOT_NULL(delegatedFile_); +} + +std::string_view +FaultyReadFile::pread(uint64_t offset, uint64_t length, void* buf) const { + if (injectionHook_ != nullptr) { + FaultFileReadOperation op(path_, offset, length, buf); + injectionHook_(&op); + if (!op.delegate) { + return std::string_view(static_cast(op.buf), op.length); + } + } + return delegatedFile_->pread(offset, length, buf); +} + +uint64_t FaultyReadFile::preadv( + uint64_t offset, + const std::vector>& buffers) const { + if (injectionHook_ != nullptr) { + FaultFileReadvOperation op(path_, offset, buffers); + injectionHook_(&op); + if (!op.delegate) { + return op.readBytes; + } + } + return delegatedFile_->preadv(offset, buffers); +} + +FaultyWriteFile::FaultyWriteFile( + std::shared_ptr delegatedFile, + FileFaultInjectionHook injectionHook) + : delegatedFile_(std::move(delegatedFile)), + injectionHook_(std::move(injectionHook)) { + VELOX_CHECK_NOT_NULL(delegatedFile_); +} + +void FaultyWriteFile::append(std::string_view data) { + delegatedFile_->append(data); +} + +void FaultyWriteFile::append(std::unique_ptr data) { + delegatedFile_->append(std::move(data)); +} + +void FaultyWriteFile::flush() { + delegatedFile_->flush(); +} + +void FaultyWriteFile::close() { + delegatedFile_->close(); +} +} // namespace facebook::velox::tests::utils diff --git a/velox/common/file/tests/FaultyFile.h b/velox/common/file/tests/FaultyFile.h new file mode 100644 index 000000000000..4916fad3e2f8 --- /dev/null +++ b/velox/common/file/tests/FaultyFile.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "velox/common/file/File.h" + +namespace facebook::velox::tests::utils { + +/// Defines the per-file operation fault injection. +struct FaultFileOperation { + enum class Type { + /// Injects faults for file read operations. + kRead, + kReadv, + /// TODO: add to support fault injections for the other file operation + /// types. + }; + + const Type type; + + /// The delegated file path. + const std::string path; + + /// Indicates to forward this operation to the delegated file or not. If not, + /// then the file fault injection hook must have processed the request. For + /// instance, if this is a file read injection, then the hook must have filled + /// the fake read data for data corruption tests. + bool delegate{true}; + + FaultFileOperation(Type _type, const std::string& _path) + : type(_type), path(_path) {} +}; + +/// Fault injection parameters for file read API. +struct FaultFileReadOperation : FaultFileOperation { + const uint64_t offset; + const uint64_t length; + void* const buf; + + FaultFileReadOperation( + const std::string& _path, + uint64_t _offset, + uint64_t _length, + void* _buf) + : FaultFileOperation(FaultFileOperation::Type::kRead, _path), + offset(_offset), + length(_length), + buf(_buf) {} +}; + +/// Fault injection parameters for file readv API. 
+struct FaultFileReadvOperation : FaultFileOperation { + const uint64_t offset; + const std::vector>& buffers; + uint64_t readBytes{0}; + + FaultFileReadvOperation( + const std::string& _path, + uint64_t _offset, + const std::vector>& _buffers) + : FaultFileOperation(FaultFileOperation::Type::kReadv, _path), + offset(_offset), + buffers(_buffers) {} +}; + +/// The fault injection hook on the file operation path. +using FileFaultInjectionHook = std::function; + +class FaultyReadFile : public ReadFile { + public: + FaultyReadFile( + const std::string& path, + std::shared_ptr delegatedFile, + FileFaultInjectionHook injectionHook); + + ~FaultyReadFile() override{}; + + uint64_t size() const override { + return delegatedFile_->size(); + } + + std::string_view pread(uint64_t offset, uint64_t length, void* buf) + const override; + + uint64_t preadv( + uint64_t offset, + const std::vector>& buffers) const override; + + uint64_t memoryUsage() const override { + return delegatedFile_->memoryUsage(); + } + + bool shouldCoalesce() const override { + return delegatedFile_->shouldCoalesce(); + } + + std::string getName() const override { + return delegatedFile_->getName(); + } + + uint64_t getNaturalReadSize() const override { + return delegatedFile_->getNaturalReadSize(); + } + + private: + const std::string path_; + const std::shared_ptr delegatedFile_; + const FileFaultInjectionHook injectionHook_; +}; + +class FaultyWriteFile : public WriteFile { + public: + FaultyWriteFile( + std::shared_ptr delegatedFile, + FileFaultInjectionHook injectionHook); + + ~FaultyWriteFile() override{}; + + void append(std::string_view data) override; + + void append(std::unique_ptr data) override; + + void flush() override; + + void close() override; + + uint64_t size() const override { + return delegatedFile_->size(); + } + + private: + const std::shared_ptr delegatedFile_; + const FileFaultInjectionHook injectionHook_; +}; + +} // namespace facebook::velox::tests::utils diff --git 
a/velox/common/file/tests/FaultyFileSystem.cpp b/velox/common/file/tests/FaultyFileSystem.cpp new file mode 100644 index 000000000000..0dacb46ff6a0 --- /dev/null +++ b/velox/common/file/tests/FaultyFileSystem.cpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "velox/common/file/tests/FaultyFileSystem.h" +#include + +#include + +namespace facebook::velox::tests::utils { +namespace { +// Extracts the delegated real file path by removing the faulty file system +// scheme prefix. +inline std::string extractPath(std::string_view path) { + VELOX_CHECK_EQ(path.find(FaultyFileSystem::scheme()), 0); + return std::string(path.substr(FaultyFileSystem::scheme().length())); +} + +// Constructs the faulty file path based on the delegated read file 'path'. It +// pre-appends the faulty file system scheme. +inline std::string faultyPath(const std::string& path) { + return fmt::format("{}{}", FaultyFileSystem::scheme(), path); +} + +std::function schemeMatcher() { + // Note: presto behavior is to prefix local paths with 'file:'. + // Check for that prefix and prune to absolute regular paths as needed. 
+ return [](std::string_view filePath) { + return filePath.find(FaultyFileSystem::scheme()) == 0; + }; +} + +folly::once_flag faultFilesystemInitOnceFlag; + +std::function(std::shared_ptr, std::string_view)> +fileSystemGenerator() { + return [](std::shared_ptr properties, + std::string_view /*unused*/) { + // One instance of faulty FileSystem is sufficient. Initializes on first + // access and reuse after that. + static std::shared_ptr lfs; + folly::call_once(faultFilesystemInitOnceFlag, [&properties]() { + lfs = std::make_shared(std::move(properties)); + }); + return lfs; + }; +} +} // namespace + +std::unique_ptr FaultyFileSystem::openFileForRead( + std::string_view path, + const FileOptions& options) { + const std::string delegatedPath = extractPath(path); + auto delegatedFile = getFileSystem(delegatedPath, config_) + ->openFileForRead(delegatedPath, options); + return std::make_unique( + std::string(path), std::move(delegatedFile), [&](FaultFileOperation* op) { + maybeInjectFileFault(op); + }); +} + +std::unique_ptr FaultyFileSystem::openFileForWrite( + std::string_view path, + const FileOptions& options) { + const std::string delegatedPath = extractPath(path); + auto delegatedFile = getFileSystem(delegatedPath, config_) + ->openFileForWrite(delegatedPath, options); + return std::make_unique( + std::move(delegatedFile), + [&](FaultFileOperation* op) { maybeInjectFileFault(op); }); +} + +void FaultyFileSystem::remove(std::string_view path) { + const std::string delegatedPath = extractPath(path); + getFileSystem(delegatedPath, config_)->remove(delegatedPath); +} + +void FaultyFileSystem::rename( + std::string_view oldPath, + std::string_view newPath, + bool overwrite) { + const auto delegatedOldPath = extractPath(oldPath); + const auto delegatedNewPath = extractPath(newPath); + getFileSystem(delegatedOldPath, config_) + ->rename(delegatedOldPath, delegatedNewPath, overwrite); +} + +bool FaultyFileSystem::exists(std::string_view path) { + const auto delegatedPath 
= extractPath(path); + return getFileSystem(delegatedPath, config_)->exists(delegatedPath); +} + +std::vector FaultyFileSystem::list(std::string_view path) { + const auto delegatedDirPath = extractPath(path); + const auto delegatedFiles = + getFileSystem(delegatedDirPath, config_)->list(delegatedDirPath); + // NOTE: we shall return the faulty file paths instead of the delegated file + // paths for list result. + std::vector files; + files.reserve(delegatedFiles.size()); + for (const auto& delegatedFile : delegatedFiles) { + files.push_back(faultyPath(delegatedFile)); + } + return files; +} + +void FaultyFileSystem::mkdir(std::string_view path) { + const auto delegatedDirPath = extractPath(path); + getFileSystem(delegatedDirPath, config_)->mkdir(delegatedDirPath); +} + +void FaultyFileSystem::rmdir(std::string_view path) { + const auto delegatedDirPath = extractPath(path); + getFileSystem(delegatedDirPath, config_)->rmdir(delegatedDirPath); +} + +void FaultyFileSystem::setFileInjectionHook( + FileFaultInjectionHook injectionHook) { + std::lock_guard l(mu_); + fileInjections_ = FileInjections(std::move(injectionHook)); +} + +void FaultyFileSystem::setFileInjectionError( + std::exception_ptr error, + std::unordered_set opTypes) { + std::lock_guard l(mu_); + fileInjections_ = FileInjections(std::move(error), std::move(opTypes)); +} + +void FaultyFileSystem::setFileInjectionDelay( + uint64_t delayUs, + std::unordered_set opTypes) { + std::lock_guard l(mu_); + fileInjections_ = FileInjections(delayUs, std::move(opTypes)); +} + +void FaultyFileSystem::clearFileFaultInjections() { + std::lock_guard l(mu_); + fileInjections_.reset(); +} + +void FaultyFileSystem::maybeInjectFileFault(FaultFileOperation* op) { + FileInjections injections; + { + std::lock_guard l(mu_); + if (!fileInjections_.has_value()) { + return; + } + injections = fileInjections_.value(); + } + + if (injections.fileInjectionHook != nullptr) { + injections.fileInjectionHook(op); + return; + } + + if 
(!injections.opTypes.empty() && injections.opTypes.count(op->type) == 0) { + return; + } + + if (injections.fileException != nullptr) { + std::rethrow_exception(injections.fileException); + } + + if (injections.fileDelayUs != 0) { + std::this_thread::sleep_for( + std::chrono::microseconds(injections.fileDelayUs)); + } +} + +void registerFaultyFileSystem() { + registerFileSystem(schemeMatcher(), fileSystemGenerator()); +} + +std::shared_ptr faultyFileSystem() { + return std::dynamic_pointer_cast( + getFileSystem(FaultyFileSystem::scheme(), {})); +} +} // namespace facebook::velox::tests::utils diff --git a/velox/common/file/tests/FaultyFileSystem.h b/velox/common/file/tests/FaultyFileSystem.h new file mode 100644 index 000000000000..38fe37a7d108 --- /dev/null +++ b/velox/common/file/tests/FaultyFileSystem.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "velox/common/file/FileSystems.h" + +#include +#include +#include +#include "velox/common/file/tests/FaultyFile.h" + +namespace facebook::velox::tests::utils { + +using namespace filesystems; + +/// Implements faulty filesystem for io fault injection in unit test. It is a +/// wrapper on top of a real file system, and by default it delegates the the +/// file operation to the real file system underneath. 
+class FaultyFileSystem : public FileSystem { + public: + explicit FaultyFileSystem(std::shared_ptr config) + : FileSystem(std::move(config)) {} + + ~FaultyFileSystem() override {} + + static inline std::string scheme() { + return "faulty:"; + } + + std::string name() const override { + return "Faulty FS"; + } + + std::unique_ptr openFileForRead( + std::string_view path, + const FileOptions& options) override; + + std::unique_ptr openFileForWrite( + std::string_view path, + const FileOptions& options) override; + + void remove(std::string_view path) override; + + void rename( + std::string_view oldPath, + std::string_view newPath, + bool overwrite) override; + + bool exists(std::string_view path) override; + + std::vector list(std::string_view path) override; + + void mkdir(std::string_view path) override; + + void rmdir(std::string_view path) override; + + /// Setups hook for file fault injection. + void setFileInjectionHook(FileFaultInjectionHook hook); + + /// Setups to inject 'error' for a particular set of file operation types. If + /// 'opTypes' is empty, it injects error for all kinds of file operation + /// types. + void setFileInjectionError( + std::exception_ptr error, + std::unordered_set opTypes = {}); + + /// Setups to inject delay for a particular set of file operation types. If + /// 'opTypes' is empty, it injects delay for all kinds of file operation + /// types. + void setFileInjectionDelay( + uint64_t delayUs, + std::unordered_set opTypes = {}); + + /// Clears the file fault injections. + void clearFileFaultInjections(); + + private: + // Defines the file injection setup and only one type of injection can be set + // at a time. 
+ struct FileInjections { + FileFaultInjectionHook fileInjectionHook{nullptr}; + + std::exception_ptr fileException{nullptr}; + + uint64_t fileDelayUs{0}; + + std::unordered_set opTypes{}; + + FileInjections() = default; + + explicit FileInjections(FileFaultInjectionHook _fileInjectionHook) + : fileInjectionHook(std::move(_fileInjectionHook)) {} + + FileInjections( + uint64_t _fileDelayUs, + std::unordered_set _opTypes) + : fileDelayUs(_fileDelayUs), opTypes(std::move(_opTypes)) {} + + FileInjections( + std::exception_ptr _fileException, + std::unordered_set _opTypes) + : fileException(std::move(_fileException)), + opTypes(std::move(_opTypes)) {} + }; + + // Invoked to inject file fault to 'op' if configured. + void maybeInjectFileFault(FaultFileOperation* op); + + mutable std::mutex mu_; + std::optional fileInjections_; +}; + +/// Registers the faulty filesystem. +void registerFaultyFileSystem(); + +/// Gets the fault filesystem instance. +std::shared_ptr faultyFileSystem(); +} // namespace facebook::velox::tests::utils diff --git a/velox/common/file/tests/FileTest.cpp b/velox/common/file/tests/FileTest.cpp index 9f837fc94a23..94fdbf4d29fc 100644 --- a/velox/common/file/tests/FileTest.cpp +++ b/velox/common/file/tests/FileTest.cpp @@ -19,6 +19,7 @@ #include "velox/common/base/tests/GTestUtils.h" #include "velox/common/file/File.h" #include "velox/common/file/FileSystems.h" +#include "velox/common/file/tests/FaultyFileSystem.h" #include "velox/exec/tests/utils/TempDirectoryPath.h" #include "velox/exec/tests/utils/TempFilePath.h" @@ -26,6 +27,7 @@ using namespace facebook::velox; using facebook::velox::common::Region; +using namespace facebook::velox::tests::utils; constexpr int kOneMB = 1 << 20; @@ -126,45 +128,58 @@ TEST(InMemoryFile, preadv) { EXPECT_EQ(expected, values); } -TEST(LocalFile, writeAndRead) { +class LocalFileTest : public ::testing::TestWithParam { + protected: + LocalFileTest() : useFaultyFs_(GetParam()) {} + + static void SetUpTestCase() { + 
filesystems::registerLocalFileSystem(); + tests::utils::registerFaultyFileSystem(); + } + + const bool useFaultyFs_; +}; + +TEST_P(LocalFileTest, writeAndRead) { for (bool useIOBuf : {true, false}) { - auto tempFile = ::exec::test::TempFilePath::create(); - const auto& filename = tempFile->path.c_str(); - remove(filename); + SCOPED_TRACE(fmt::format("useIOBuf: {}", useIOBuf)); + + auto tempFile = exec::test::TempFilePath::create(useFaultyFs_); + const auto& filename = tempFile->path(); + auto fs = filesystems::getFileSystem(filename, {}); + fs->remove(filename); { - LocalWriteFile writeFile(filename); - writeData(&writeFile, useIOBuf); - writeFile.close(); - ASSERT_EQ(writeFile.size(), 15 + kOneMB); + auto writeFile = fs->openFileForWrite(filename); + writeData(writeFile.get(), useIOBuf); + writeFile->close(); + ASSERT_EQ(writeFile->size(), 15 + kOneMB); } - LocalReadFile readFile(filename); - readData(&readFile); + auto readFile = fs->openFileForRead(filename); + readData(readFile.get()); } } -TEST(LocalFile, viaRegistry) { - filesystems::registerLocalFileSystem(); - auto tempFile = ::exec::test::TempFilePath::create(); - const auto& filename = tempFile->path.c_str(); - remove(filename); - auto lfs = filesystems::getFileSystem(filename, nullptr); +TEST_P(LocalFileTest, viaRegistry) { + auto tempFile = exec::test::TempFilePath::create(useFaultyFs_); + const auto& filename = tempFile->path(); + auto fs = filesystems::getFileSystem(filename, {}); + fs->remove(filename); { - auto writeFile = lfs->openFileForWrite(filename); + auto writeFile = fs->openFileForWrite(filename); writeFile->append("snarf"); } - auto readFile = lfs->openFileForRead(filename); + auto readFile = fs->openFileForRead(filename); ASSERT_EQ(readFile->size(), 5); char buffer1[5]; ASSERT_EQ(readFile->pread(0, 5, &buffer1), "snarf"); - lfs->remove(filename); + fs->remove(filename); } -TEST(LocalFile, rename) { - filesystems::registerLocalFileSystem(); - auto tempFolder = 
::exec::test::TempDirectoryPath::create(); - auto a = fmt::format("{}/a", tempFolder->path); - auto b = fmt::format("{}/b", tempFolder->path); - auto newA = fmt::format("{}/newA", tempFolder->path); +TEST_P(LocalFileTest, rename) { + const auto tempFolder = ::exec::test::TempDirectoryPath::create(useFaultyFs_); + const auto a = fmt::format("{}/a", tempFolder->path()); + const auto b = fmt::format("{}/b", tempFolder->path()); + const auto newA = fmt::format("{}/newA", tempFolder->path()); const std::string data("aaaaa"); auto localFs = filesystems::getFileSystem(a, nullptr); { @@ -176,7 +191,7 @@ TEST(LocalFile, rename) { ASSERT_TRUE(localFs->exists(a)); ASSERT_TRUE(localFs->exists(b)); ASSERT_FALSE(localFs->exists(newA)); - EXPECT_THROW(localFs->rename(a, b), VeloxUserError); + VELOX_ASSERT_USER_THROW(localFs->rename(a, b), ""); localFs->rename(a, newA); ASSERT_FALSE(localFs->exists(a)); ASSERT_TRUE(localFs->exists(b)); @@ -187,11 +202,10 @@ TEST(LocalFile, rename) { ASSERT_EQ(readFile->pread(0, 5, &buffer), data); } -TEST(LocalFile, exists) { - filesystems::registerLocalFileSystem(); - auto tempFolder = ::exec::test::TempDirectoryPath::create(); - auto a = fmt::format("{}/a", tempFolder->path); - auto b = fmt::format("{}/b", tempFolder->path); +TEST_P(LocalFileTest, exists) { + auto tempFolder = ::exec::test::TempDirectoryPath::create(useFaultyFs_); + auto a = fmt::format("{}/a", tempFolder->path()); + auto b = fmt::format("{}/b", tempFolder->path()); auto localFs = filesystems::getFileSystem(a, nullptr); { auto writeFile = localFs->openFileForWrite(a); @@ -207,47 +221,50 @@ TEST(LocalFile, exists) { ASSERT_FALSE(localFs->exists(b)); } -TEST(LocalFile, list) { - filesystems::registerLocalFileSystem(); - auto tempFolder = ::exec::test::TempDirectoryPath::create(); - auto a = fmt::format("{}/1", tempFolder->path); - auto b = fmt::format("{}/2", tempFolder->path); +TEST_P(LocalFileTest, list) { + const auto tempFolder = 
::exec::test::TempDirectoryPath::create(useFaultyFs_); + const auto a = fmt::format("{}/1", tempFolder->path()); + const auto b = fmt::format("{}/2", tempFolder->path()); auto localFs = filesystems::getFileSystem(a, nullptr); { auto writeFile = localFs->openFileForWrite(a); writeFile = localFs->openFileForWrite(b); } - auto files = localFs->list(std::string_view(tempFolder->path)); + auto files = localFs->list(std::string_view(tempFolder->path())); std::sort(files.begin(), files.end()); ASSERT_EQ(files, std::vector({a, b})); localFs->remove(a); ASSERT_EQ( - localFs->list(std::string_view(tempFolder->path)), + localFs->list(std::string_view(tempFolder->path())), std::vector({b})); localFs->remove(b); - ASSERT_TRUE(localFs->list(std::string_view(tempFolder->path)).empty()); + ASSERT_TRUE(localFs->list(std::string_view(tempFolder->path())).empty()); } -TEST(LocalFile, readFileDestructor) { - auto tempFile = ::exec::test::TempFilePath::create(); - const auto& filename = tempFile->path.c_str(); - remove(filename); +TEST_P(LocalFileTest, readFileDestructor) { + if (useFaultyFs_) { + return; + } + auto tempFile = exec::test::TempFilePath::create(useFaultyFs_); + const auto& filename = tempFile->path(); + auto fs = filesystems::getFileSystem(filename, {}); + fs->remove(filename); { - LocalWriteFile writeFile(filename); - writeData(&writeFile); + auto writeFile = fs->openFileForWrite(filename); + writeData(writeFile.get()); } { - LocalReadFile readFile(filename); - readData(&readFile); + auto readFile = fs->openFileForRead(filename); + readData(readFile.get()); } int32_t readFd; { - std::unique_ptr buf(new char[tempFile->path.size() + 1]); - buf[tempFile->path.size()] = 0; - memcpy(buf.get(), tempFile->path.data(), tempFile->path.size()); - readFd = open(buf.get(), O_RDONLY); + std::unique_ptr buf(new char[tempFile->path().size() + 1]); + buf[tempFile->path().size()] = 0; + ::memcpy(buf.get(), tempFile->path().c_str(), tempFile->path().size()); + readFd = ::open(buf.get(), 
O_RDONLY); } { LocalReadFile readFile(readFd); @@ -260,11 +277,10 @@ TEST(LocalFile, readFileDestructor) { } } -TEST(LocalFile, mkdir) { - filesystems::registerLocalFileSystem(); - auto tempFolder = ::exec::test::TempDirectoryPath::create(); +TEST_P(LocalFileTest, mkdir) { + auto tempFolder = exec::test::TempDirectoryPath::create(useFaultyFs_); - std::string path = tempFolder->path; + std::string path = tempFolder->path(); auto localFs = filesystems::getFileSystem(path, nullptr); // Create 3 levels of directories and ensure they exist. @@ -286,11 +302,10 @@ TEST(LocalFile, mkdir) { EXPECT_TRUE(localFs->exists(path)); } -TEST(LocalFile, rmdir) { - filesystems::registerLocalFileSystem(); - auto tempFolder = ::exec::test::TempDirectoryPath::create(); +TEST_P(LocalFileTest, rmdir) { + auto tempFolder = exec::test::TempDirectoryPath::create(useFaultyFs_); - std::string path = tempFolder->path; + std::string path = tempFolder->path(); auto localFs = filesystems::getFileSystem(path, nullptr); // Create 3 levels of directories and ensure they exist. @@ -309,24 +324,282 @@ TEST(LocalFile, rmdir) { EXPECT_TRUE(localFs->exists(path)); // Now delete the whole temp folder and ensure it is gone. - EXPECT_NO_THROW(localFs->rmdir(tempFolder->path)); - EXPECT_FALSE(localFs->exists(tempFolder->path)); + EXPECT_NO_THROW(localFs->rmdir(tempFolder->path())); + EXPECT_FALSE(localFs->exists(tempFolder->path())); // Delete a non-existing directory. path += "/does_not_exist/subdir"; EXPECT_FALSE(localFs->exists(path)); // The function does not throw, but will return zero files and folders // deleted, which is not an error. 
- EXPECT_NO_THROW(localFs->rmdir(tempFolder->path)); + EXPECT_NO_THROW(localFs->rmdir(tempFolder->path())); } -TEST(LocalFile, fileNotFound) { - filesystems::registerLocalFileSystem(); - auto tempFolder = ::exec::test::TempDirectoryPath::create(); - auto path = fmt::format("{}/file", tempFolder->path); +TEST_P(LocalFileTest, fileNotFound) { + auto tempFolder = exec::test::TempDirectoryPath::create(useFaultyFs_); + auto path = fmt::format("{}/file", tempFolder->path()); auto localFs = filesystems::getFileSystem(path, nullptr); VELOX_ASSERT_RUNTIME_THROW_CODE( localFs->openFileForRead(path), error_code::kFileNotFound, "No such file or directory"); } + +INSTANTIATE_TEST_SUITE_P( + LocalFileTestSuite, + LocalFileTest, + ::testing::Values(false, true)); + +class FaultyFsTest : public ::testing::Test { + protected: + FaultyFsTest() {} + + static void SetUpTestCase() { + filesystems::registerLocalFileSystem(); + tests::utils::registerFaultyFileSystem(); + } + + void SetUp() { + dir_ = exec::test::TempDirectoryPath::create(true); + fs_ = std::dynamic_pointer_cast( + filesystems::getFileSystem(dir_->path(), {})); + VELOX_CHECK_NOT_NULL(fs_); + filePath_ = fmt::format("{}/faultyTestFile", dir_->path()); + const int bufSize = 1024; + buffer_.resize(bufSize); + for (int i = 0; i < bufSize; ++i) { + buffer_[i] = i % 256; + } + { + auto writeFile = fs_->openFileForWrite(filePath_, {}); + writeData(writeFile.get()); + } + auto readFile = fs_->openFileForRead(filePath_, {}); + readData(readFile.get(), true); + try { + VELOX_FAIL("InjectedFaultFileError"); + } catch (VeloxRuntimeError&) { + fileError_ = std::current_exception(); + } + } + + void TearDown() { + fs_->clearFileFaultInjections(); + } + + void writeData(WriteFile* file) { + file->append(std::string_view(buffer_)); + file->flush(); + } + + void readData(ReadFile* file, bool useReadv = false) { + char readBuf[buffer_.size()]; + if (!useReadv) { + file->pread(0, buffer_.size(), readBuf); + } else { + std::vector> buffers; 
+      buffers.push_back(folly::Range<char*>(readBuf, buffer_.size()));
+      file->preadv(0, buffers);
+    }
+    for (int i = 0; i < buffer_.size(); ++i) {
+      if (buffer_[i] != readBuf[i]) {
+        VELOX_FAIL("Data Mismatch");
+      }
+    }
+  }
+
+  std::shared_ptr<exec::test::TempDirectoryPath> dir_;
+  std::string filePath_;
+  std::shared_ptr<tests::utils::FaultyFileSystem> fs_;
+  std::string buffer_;
+  std::exception_ptr fileError_;
+};
+
+TEST_F(FaultyFsTest, fileReadErrorInjection) {
+  // Set read error.
+  fs_->setFileInjectionError(fileError_, {FaultFileOperation::Type::kRead});
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    VELOX_ASSERT_THROW(
+        readData(readFile.get(), false), "InjectedFaultFileError");
+  }
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // We only inject error for pread API so preadv should be fine.
+    readData(readFile.get(), true);
+  }
+
+  // Set readv error.
+  fs_->setFileInjectionError(fileError_, {FaultFileOperation::Type::kReadv});
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    VELOX_ASSERT_THROW(
+        readData(readFile.get(), true), "InjectedFaultFileError");
+  }
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // We only inject error for preadv API so pread should be fine.
+    readData(readFile.get(), false);
+  }
+
+  // Set error for all kinds of operations.
+  fs_->setFileInjectionError(fileError_);
+  auto readFile = fs_->openFileForRead(filePath_, {});
+  VELOX_ASSERT_THROW(readData(readFile.get(), true), "InjectedFaultFileError");
+  VELOX_ASSERT_THROW(readData(readFile.get(), false), "InjectedFaultFileError");
+  fs_->remove(filePath_);
+  // Check there is no interference on write as we don't support it for now.
+  auto writeFile = fs_->openFileForWrite(filePath_, {});
+  writeData(writeFile.get());
+}
+
+TEST_F(FaultyFsTest, fileReadDelayInjection) {
+  // Set 2 seconds delay.
+  const uint64_t injectDelay{2'000'000};
+  fs_->setFileInjectionDelay(injectDelay, {FaultFileOperation::Type::kRead});
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), false);
+    }
+    ASSERT_GE(readDurationUs, injectDelay);
+  }
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // We only inject delay for pread API so preadv should be fine.
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), true);
+    }
+    ASSERT_LT(readDurationUs, injectDelay);
+  }
+
+  // Set readv delay.
+  fs_->setFileInjectionDelay(injectDelay, {FaultFileOperation::Type::kReadv});
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), true);
+    }
+    ASSERT_GE(readDurationUs, injectDelay);
+  }
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // We only inject delay for preadv API so pread should be fine.
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), false);
+    }
+    ASSERT_LT(readDurationUs, injectDelay);
+  }
+
+  // Set delay for all kinds of operations.
+  fs_->setFileInjectionDelay(injectDelay);
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // Delay is injected for all operations so pread is delayed too.
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), false);
+    }
+    ASSERT_GE(readDurationUs, injectDelay);
+  }
+  {
+    auto readFile = fs_->openFileForRead(filePath_, {});
+    // Delay is injected for all operations so pread is delayed too.
+    uint64_t readDurationUs{0};
+    {
+      MicrosecondTimer readTimer(&readDurationUs);
+      readData(readFile.get(), false);
+    }
+    ASSERT_GE(readDurationUs, injectDelay);
+  }
+
+  fs_->remove(filePath_);
+  // Check there is no interference on write as we don't support it for now.
+  auto writeFile = fs_->openFileForWrite(filePath_, {});
+  uint64_t writeDurationUs{0};
+  {
+    MicrosecondTimer writeTimer(&writeDurationUs);
+    writeData(writeFile.get());
+  }
+  ASSERT_LT(writeDurationUs, injectDelay);
+}
+
+TEST_F(FaultyFsTest, fileReadFaultHookInjection) {
+  const std::string path1 = fmt::format("{}/hookFile1", dir_->path());
+  {
+    auto writeFile = fs_->openFileForWrite(path1, {});
+    writeData(writeFile.get());
+    auto readFile = fs_->openFileForRead(path1, {});
+    readData(readFile.get());
+  }
+  const std::string path2 = fmt::format("{}/hookFile2", dir_->path());
+  {
+    auto writeFile = fs_->openFileForWrite(path2, {});
+    writeData(writeFile.get());
+    auto readFile = fs_->openFileForRead(path2, {});
+    readData(readFile.get());
+  }
+  // Set a read fault hook which only fails preadv calls on path2.
+  fs_->setFileInjectionHook([&](FaultFileOperation* op) {
+    // Only inject error for readv.
+    if (op->type != FaultFileOperation::Type::kReadv) {
+      return;
+    }
+    // Only inject error for path2.
+    if (op->path != path2) {
+      return;
+    }
+    VELOX_FAIL("inject hook read failure");
+  });
+  {
+    auto readFile = fs_->openFileForRead(path1, {});
+    readData(readFile.get(), false);
+    readData(readFile.get(), true);
+  }
+  {
+    auto readFile = fs_->openFileForRead(path2, {});
+    // Verify only throw for readv.
+    readData(readFile.get(), false);
+    VELOX_ASSERT_THROW(
+        readData(readFile.get(), true), "inject hook read failure");
+  }
+
+  // Set to return fake data.
+  fs_->setFileInjectionHook([&](FaultFileOperation* op) {
+    // Only inject error for path1.
+    if (op->path != path1) {
+      return;
+    }
+    // Only inject error for read.
+ if (op->type != FaultFileOperation::Type::kRead) { + return; + } + auto* readOp = static_cast(op); + char* readBuf = static_cast(readOp->buf); + for (int i = 0; i < readOp->length; ++i) { + readBuf[i] = 0; + } + readOp->delegate = false; + }); + + { + auto readFile = fs_->openFileForRead(path2, {}); + readData(readFile.get(), false); + readData(readFile.get(), true); + } + { + auto readFile = fs_->openFileForRead(path1, {}); + // Verify only throw for read. + readData(readFile.get(), true); + VELOX_ASSERT_THROW(readData(readFile.get(), false), "Data Mismatch"); + } +} diff --git a/velox/common/memory/tests/SharedArbitratorTest.cpp b/velox/common/memory/tests/SharedArbitratorTest.cpp index 05badf0306b1..7a5d61bca494 100644 --- a/velox/common/memory/tests/SharedArbitratorTest.cpp +++ b/velox/common/memory/tests/SharedArbitratorTest.cpp @@ -723,7 +723,7 @@ DEBUG_ONLY_TEST_F( VELOX_ASSERT_THROW( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kJoinSpillEnabled, "true") .config(core::QueryConfig::kSpillNumPartitionBits, "2") @@ -794,7 +794,7 @@ DEBUG_ONLY_TEST_F(SharedArbitrationTest, asyncArbitratonFromNonDriverContext) { std::thread queryThread([&]() { task = AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kJoinSpillEnabled, "true") .config(core::QueryConfig::kSpillNumPartitionBits, "2") @@ -865,7 +865,7 @@ DEBUG_ONLY_TEST_F(SharedArbitrationTest, runtimeStats) { auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .singleAggregation( {}, {fmt::format("sum({})", TableWriteTraits::rowCountColumnName())}) @@ -875,7 +875,7 @@ 
DEBUG_ONLY_TEST_F(SharedArbitrationTest, runtimeStats) { AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kWriterSpillEnabled, "true") // Set 0 file writer flush threshold to always trigger flush in diff --git a/velox/connectors/hive/iceberg/tests/IcebergReadTest.cpp b/velox/connectors/hive/iceberg/tests/IcebergReadTest.cpp index b6ddd2507fa9..5a3e5419b9e9 100644 --- a/velox/connectors/hive/iceberg/tests/IcebergReadTest.cpp +++ b/velox/connectors/hive/iceberg/tests/IcebergReadTest.cpp @@ -63,22 +63,22 @@ class HiveIcebergTest : public HiveConnectorTestBase { deleteFiles.reserve(deleteRowsVec.size()); for (auto const& deleteRows : deleteRowsVec) { std::shared_ptr deleteFilePath = writePositionDeleteFile( - dataFilePath->path, + dataFilePath->path(), deleteRows, numDeleteRowsBefore, numDeleteRowsAfter); + auto path = deleteFilePath->path(); IcebergDeleteFile deleteFile( FileContent::kPositionalDeletes, - deleteFilePath->path, + deleteFilePath->path(), fileFomat_, deleteRows.size() + numDeleteRowsBefore + numDeleteRowsAfter, - testing::internal::GetFileSize( - std::fopen(deleteFilePath->path.c_str(), "r"))); + testing::internal::GetFileSize(std::fopen(path.c_str(), "r"))); deleteFilePaths.emplace_back(deleteFilePath); deleteFiles.emplace_back(deleteFile); } - auto icebergSplit = makeIcebergSplit(dataFilePath->path, deleteFiles); + auto icebergSplit = makeIcebergSplit(dataFilePath->path(), deleteFiles); auto plan = tableScanNode(); auto task = OperatorTestBase::assertQuery(plan, {icebergSplit}, duckdbSql); @@ -162,7 +162,7 @@ class HiveIcebergTest : public HiveConnectorTestBase { auto dataVectors = makeVectors(1, numRows); auto dataFilePath = TempFilePath::create(); - writeToFile(dataFilePath->path, dataVectors); + writeToFile(dataFilePath->path(), dataVectors); 
createDuckDbTable(dataVectors); return dataFilePath; } @@ -217,7 +217,7 @@ class HiveIcebergTest : public HiveConnectorTestBase { {filePathVector, deletePositionsVector}); auto deleteFilePath = TempFilePath::create(); - writeToFile(deleteFilePath->path, deleteFileVectors); + writeToFile(deleteFilePath->path(), deleteFileVectors); return deleteFilePath; } @@ -252,7 +252,7 @@ class HiveIcebergTest : public HiveConnectorTestBase { std::shared_ptr dataFilePath, const std::vector& deleteFiles, const std::string& duckDbSql) { - auto icebergSplit = makeIcebergSplit(dataFilePath->path, deleteFiles); + auto icebergSplit = makeIcebergSplit(dataFilePath->path(), deleteFiles); return OperatorTestBase::assertQuery(plan, {icebergSplit}, duckDbSql); } diff --git a/velox/connectors/hive/storage_adapters/abfs/tests/AbfsFileSystemTest.cpp b/velox/connectors/hive/storage_adapters/abfs/tests/AbfsFileSystemTest.cpp index b7bffa997270..da3bd28176a0 100644 --- a/velox/connectors/hive/storage_adapters/abfs/tests/AbfsFileSystemTest.cpp +++ b/velox/connectors/hive/storage_adapters/abfs/tests/AbfsFileSystemTest.cpp @@ -100,7 +100,7 @@ class AbfsFileSystemTest : public testing::Test { private: static std::shared_ptr<::exec::test::TempFilePath> createFile( uint64_t size = -1) { - auto tempFile = ::exec::test::TempFilePath::create(); + auto tempFile = exec::test::TempFilePath::create(); if (size == -1) { tempFile->append("aaaaa"); tempFile->append("bbbbb"); diff --git a/velox/connectors/hive/storage_adapters/hdfs/tests/HdfsFileSystemTest.cpp b/velox/connectors/hive/storage_adapters/hdfs/tests/HdfsFileSystemTest.cpp index ac8ed66f7c0f..c316d1891e13 100644 --- a/velox/connectors/hive/storage_adapters/hdfs/tests/HdfsFileSystemTest.cpp +++ b/velox/connectors/hive/storage_adapters/hdfs/tests/HdfsFileSystemTest.cpp @@ -64,7 +64,7 @@ class HdfsFileSystemTest : public testing::Test { private: static std::shared_ptr<::exec::test::TempFilePath> createFile() { - auto tempFile = 
::exec::test::TempFilePath::create(); + auto tempFile = exec::test::TempFilePath::create(); tempFile->append("aaaaa"); tempFile->append("bbbbb"); tempFile->append(std::string(kOneMB, 'c')); diff --git a/velox/connectors/hive/tests/FileHandleTest.cpp b/velox/connectors/hive/tests/FileHandleTest.cpp index 659f0299f9ee..3832e303d5ee 100644 --- a/velox/connectors/hive/tests/FileHandleTest.cpp +++ b/velox/connectors/hive/tests/FileHandleTest.cpp @@ -27,8 +27,8 @@ using namespace facebook::velox; TEST(FileHandleTest, localFile) { filesystems::registerLocalFileSystem(); - auto tempFile = ::exec::test::TempFilePath::create(); - const auto& filename = tempFile->path; + auto tempFile = exec::test::TempFilePath::create(); + const auto& filename = tempFile->path(); remove(filename.c_str()); { diff --git a/velox/connectors/hive/tests/HiveDataSinkTest.cpp b/velox/connectors/hive/tests/HiveDataSinkTest.cpp index 7ee1e3a065dd..1afd9bb98e8e 100644 --- a/velox/connectors/hive/tests/HiveDataSinkTest.cpp +++ b/velox/connectors/hive/tests/HiveDataSinkTest.cpp @@ -475,7 +475,7 @@ TEST_F(HiveDataSinkTest, hiveBucketProperty) { TEST_F(HiveDataSinkTest, basic) { const auto outputDirectory = TempDirectoryPath::create(); - auto dataSink = createDataSink(rowType_, outputDirectory->path); + auto dataSink = createDataSink(rowType_, outputDirectory->path()); auto stats = dataSink->stats(); ASSERT_TRUE(stats.empty()) << stats.toString(); ASSERT_EQ( @@ -503,14 +503,14 @@ TEST_F(HiveDataSinkTest, basic) { ASSERT_EQ(partitions.size(), 1); createDuckDbTable(vectors); - verifyWrittenData(outputDirectory->path); + verifyWrittenData(outputDirectory->path()); } TEST_F(HiveDataSinkTest, close) { for (bool empty : {true, false}) { SCOPED_TRACE(fmt::format("Data sink is empty: {}", empty)); const auto outputDirectory = TempDirectoryPath::create(); - auto dataSink = createDataSink(rowType_, outputDirectory->path); + auto dataSink = createDataSink(rowType_, outputDirectory->path()); auto vectors = 
createVectors(500, 1); @@ -532,7 +532,7 @@ TEST_F(HiveDataSinkTest, close) { ASSERT_EQ(partitions.size(), 1); ASSERT_GT(stats.numWrittenBytes, 0); createDuckDbTable(vectors); - verifyWrittenData(outputDirectory->path); + verifyWrittenData(outputDirectory->path()); } else { ASSERT_TRUE(partitions.empty()); ASSERT_EQ(stats.numWrittenBytes, 0); @@ -544,7 +544,7 @@ TEST_F(HiveDataSinkTest, abort) { for (bool empty : {true, false}) { SCOPED_TRACE(fmt::format("Data sink is empty: {}", empty)); const auto outputDirectory = TempDirectoryPath::create(); - auto dataSink = createDataSink(rowType_, outputDirectory->path); + auto dataSink = createDataSink(rowType_, outputDirectory->path()); auto vectors = createVectors(1, 1); int initialBytes = 0; @@ -634,7 +634,7 @@ TEST_F(HiveDataSinkTest, memoryReclaim) { if (testData.writerSpillEnabled) { spillDirectory = exec::test::TempDirectoryPath::create(); spillConfig = - getSpillConfig(spillDirectory->path, testData.writerFlushThreshold); + getSpillConfig(spillDirectory->path(), testData.writerFlushThreshold); auto connectorQueryCtx = std::make_unique( opPool_.get(), connectorPool_.get(), @@ -664,7 +664,7 @@ TEST_F(HiveDataSinkTest, memoryReclaim) { auto dataSink = createDataSink( rowType_, - outputDirectory->path, + outputDirectory->path(), testData.format, partitionBy, bucketProperty); @@ -771,7 +771,7 @@ TEST_F(HiveDataSinkTest, memoryReclaimAfterClose) { std::unique_ptr spillConfig; if (testData.writerSpillEnabled) { spillDirectory = exec::test::TempDirectoryPath::create(); - spillConfig = getSpillConfig(spillDirectory->path, 0); + spillConfig = getSpillConfig(spillDirectory->path(), 0); auto connectorQueryCtx = std::make_unique( opPool_.get(), connectorPool_.get(), @@ -801,7 +801,7 @@ TEST_F(HiveDataSinkTest, memoryReclaimAfterClose) { auto dataSink = createDataSink( rowType_, - outputDirectory->path, + outputDirectory->path(), testData.format, partitionBy, bucketProperty); @@ -865,7 +865,7 @@ DEBUG_ONLY_TEST_F(HiveDataSinkTest, 
sortWriterFailureTest) { const std::shared_ptr spillDirectory = exec::test::TempDirectoryPath::create(); std::unique_ptr spillConfig = - getSpillConfig(spillDirectory->path, 0); + getSpillConfig(spillDirectory->path(), 0); // Triggers the memory reservation in sort buffer. spillConfig->minSpillableReservationPct = 1'000; auto connectorQueryCtx = std::make_unique( @@ -883,7 +883,7 @@ DEBUG_ONLY_TEST_F(HiveDataSinkTest, sortWriterFailureTest) { auto dataSink = createDataSink( rowType_, - outputDirectory->path, + outputDirectory->path(), dwio::common::FileFormat::DWRF, partitionBy, bucketProperty); diff --git a/velox/dwio/common/tests/LocalFileSinkTest.cpp b/velox/dwio/common/tests/LocalFileSinkTest.cpp index 90394f59d176..cf7795f9de64 100644 --- a/velox/dwio/common/tests/LocalFileSinkTest.cpp +++ b/velox/dwio/common/tests/LocalFileSinkTest.cpp @@ -34,7 +34,7 @@ class LocalFileSinkTest : public testing::Test { void runTest() { auto root = TempDirectoryPath::create(); - auto filePath = fs::path(root->path) / "xxx/yyy/zzz/test_file.ext"; + auto filePath = fs::path(root->path()) / "xxx/yyy/zzz/test_file.ext"; ASSERT_FALSE(fs::exists(filePath.string())); diff --git a/velox/dwio/common/tests/ReadFileInputStreamTests.cpp b/velox/dwio/common/tests/ReadFileInputStreamTests.cpp index 4beab0445847..0d2ce3bfa5bd 100644 --- a/velox/dwio/common/tests/ReadFileInputStreamTests.cpp +++ b/velox/dwio/common/tests/ReadFileInputStreamTests.cpp @@ -35,8 +35,8 @@ class ReadFileInputStreamTest : public testing::Test { }; TEST_F(ReadFileInputStreamTest, LocalReadFile) { - auto tempFile = ::exec::test::TempFilePath::create(); - const auto& filename = tempFile->path; + auto tempFile = exec::test::TempFilePath::create(); + const auto& filename = tempFile->path(); remove(filename.c_str()); { LocalWriteFile writeFile(filename); diff --git a/velox/dwio/dwrf/test/CacheInputTest.cpp b/velox/dwio/dwrf/test/CacheInputTest.cpp index 85e8663eb3a8..23cee653e05a 100644 --- 
a/velox/dwio/dwrf/test/CacheInputTest.cpp +++ b/velox/dwio/dwrf/test/CacheInputTest.cpp @@ -78,7 +78,7 @@ class CacheTest : public testing::Test { FLAGS_ssd_odirect = false; tempDirectory_ = exec::test::TempDirectoryPath::create(); ssd = std::make_unique( - fmt::format("{}/cache", tempDirectory_->path), + fmt::format("{}/cache", tempDirectory_->path()), ssdBytes, 1, executor_.get()); diff --git a/velox/dwio/parquet/tests/writer/ParquetWriterTest.cpp b/velox/dwio/parquet/tests/writer/ParquetWriterTest.cpp index 6fed5d3da28a..6fa07bdfd558 100644 --- a/velox/dwio/parquet/tests/writer/ParquetWriterTest.cpp +++ b/velox/dwio/parquet/tests/writer/ParquetWriterTest.cpp @@ -181,7 +181,8 @@ DEBUG_ONLY_TEST_F(ParquetWriterTest, unitFromHiveConfig) { const auto plan = PlanBuilder() .values({data}) - .tableWrite(outputDirectory->path, dwio::common::FileFormat::PARQUET) + .tableWrite( + outputDirectory->path(), dwio::common::FileFormat::PARQUET) .planNode(); CursorParameters params; diff --git a/velox/examples/ScanAndSort.cpp b/velox/examples/ScanAndSort.cpp index ec73732c2d58..0f46019a051a 100644 --- a/velox/examples/ScanAndSort.cpp +++ b/velox/examples/ScanAndSort.cpp @@ -168,7 +168,7 @@ int main(int argc, char** argv) { // HiveConnectorSplit for each file, using the same HiveConnector id defined // above, the local file path (the "file:" prefix specifies which FileSystem // to use; local, in this case), and the file format (DWRF/ORC). 
- for (auto& filePath : fs::directory_iterator(tempDir->path)) { + for (auto& filePath : fs::directory_iterator(tempDir->path())) { auto connectorSplit = std::make_shared( kHiveConnectorId, "file:" + filePath.path().string(), diff --git a/velox/exec/fuzzer/AggregationFuzzer.cpp b/velox/exec/fuzzer/AggregationFuzzer.cpp index 89ea2ac08fbb..8add3432715e 100644 --- a/velox/exec/fuzzer/AggregationFuzzer.cpp +++ b/velox/exec/fuzzer/AggregationFuzzer.cpp @@ -741,7 +741,7 @@ bool AggregationFuzzer::verifyAggregation( const auto inputRowType = asRowType(input[0]->type()); if (isTableScanSupported(inputRowType) && vectorFuzzer_.coinToss(0.5)) { - auto splits = makeSplits(input, directory->path); + auto splits = makeSplits(input, directory->path()); std::vector tableScanPlans; makeAlternativePlansWithTableScan( @@ -856,7 +856,7 @@ bool AggregationFuzzer::verifySortedAggregation( const auto inputRowType = asRowType(input[0]->type()); if (isTableScanSupported(inputRowType)) { directory = exec::test::TempDirectoryPath::create(); - auto splits = makeSplits(input, directory->path); + auto splits = makeSplits(input, directory->path()); plans.push_back( {PlanBuilder() @@ -1160,7 +1160,7 @@ bool AggregationFuzzer::verifyDistinctAggregation( const auto inputRowType = asRowType(input[0]->type()); if (isTableScanSupported(inputRowType) && vectorFuzzer_.coinToss(0.5)) { directory = exec::test::TempDirectoryPath::create(); - auto splits = makeSplits(input, directory->path); + auto splits = makeSplits(input, directory->path()); plans.push_back( {PlanBuilder() diff --git a/velox/exec/fuzzer/AggregationFuzzerBase.cpp b/velox/exec/fuzzer/AggregationFuzzerBase.cpp index d826f6fbc232..397318d57610 100644 --- a/velox/exec/fuzzer/AggregationFuzzerBase.cpp +++ b/velox/exec/fuzzer/AggregationFuzzerBase.cpp @@ -405,7 +405,7 @@ velox::test::ResultOrError AggregationFuzzerBase::execute( int32_t spillPct{0}; if (injectSpill) { spillDirectory = exec::test::TempDirectoryPath::create(); - 
builder.spillDirectory(spillDirectory->path) + builder.spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true") .config(core::QueryConfig::kMaxSpillRunRows, randInt(32, 1L << 30)); diff --git a/velox/exec/fuzzer/WindowFuzzer.cpp b/velox/exec/fuzzer/WindowFuzzer.cpp index 3d73cbaa676e..9d00ed54efb8 100644 --- a/velox/exec/fuzzer/WindowFuzzer.cpp +++ b/velox/exec/fuzzer/WindowFuzzer.cpp @@ -333,7 +333,7 @@ void WindowFuzzer::testAlternativePlans( auto directory = exec::test::TempDirectoryPath::create(); const auto inputRowType = asRowType(input[0]->type()); if (isTableScanSupported(inputRowType)) { - auto splits = makeSplits(input, directory->path); + auto splits = makeSplits(input, directory->path()); plans.push_back( {PlanBuilder() diff --git a/velox/exec/tests/AggregationTest.cpp b/velox/exec/tests/AggregationTest.cpp index ac721e4420e2..bd1dbeaa3ec3 100644 --- a/velox/exec/tests/AggregationTest.cpp +++ b/velox/exec/tests/AggregationTest.cpp @@ -1130,7 +1130,7 @@ TEST_F(AggregationTest, spillAll) { auto queryCtx = std::make_shared(executor_.get()); TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(plan) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .assertResults(results); @@ -1581,7 +1581,7 @@ TEST_F(AggregationTest, outputBatchSizeCheckWithSpill) { TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .config( @@ -1624,7 +1624,7 @@ TEST_F(AggregationTest, spillDuringOutputProcessing) { TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - 
.spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) // Set very large output buffer size, the number of output rows is @@ -1765,7 +1765,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, minSpillableMemoryReservation) { auto spillDirectory = exec::test::TempDirectoryPath::create(); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .config( @@ -1791,7 +1791,7 @@ TEST_F(AggregationTest, distinctWithSpilling) { core::PlanNodeId aggrNodeId; TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .plan(PlanBuilder() @@ -1815,7 +1815,7 @@ TEST_F(AggregationTest, spillingForAggrsWithDistinct) { TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .plan(PlanBuilder() @@ -1843,7 +1843,7 @@ TEST_F(AggregationTest, spillingForAggrsWithSorting) { SCOPED_TRACE(sql); TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .plan(plan) @@ -1889,7 +1889,7 @@ TEST_F(AggregationTest, preGroupedAggregationWithSpilling) { TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(duckDbQueryRunner_) - 
.spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .plan(PlanBuilder() @@ -2044,7 +2044,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimDuringInputProcessing) { .singleAggregation({"c0", "c1"}, {"array_agg(c2)"}) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) @@ -2184,7 +2184,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimDuringReserve) { .singleAggregation({"c0", "c1"}, {"array_agg(c2)"}) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) @@ -2300,7 +2300,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimDuringAllocation) { .singleAggregation({"c0", "c1"}, {"array_agg(c2)"}) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) @@ -2414,7 +2414,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimDuringOutputProcessing) { .singleAggregation({"c0", "c1"}, {"array_agg(c2)"}) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) @@ -2588,7 +2588,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimDuringNonReclaimableSection) { .capturePlanNodeId(aggregationPlanNodeId) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) 
@@ -2704,7 +2704,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimWithEmptyAggregationTable) { AssertQueryBuilder(nullptr) .plan(aggregationPlan) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) .maxDrivers(1) @@ -3032,7 +3032,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimEmptyInput) { .capturePlanNodeId(aggNodeId) .planNode(), duckDbQueryRunner_) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .queryCtx(queryCtx) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) @@ -3097,7 +3097,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimEmptyOutput) { .singleAggregation({"c0", "c1"}, {"array_agg(c2)"}) .capturePlanNodeId(aggNodeId) .planNode()) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .queryCtx(queryCtx) .config(QueryConfig::kSpillEnabled, true) .config(QueryConfig::kAggregationSpillEnabled, true) @@ -3141,7 +3141,7 @@ TEST_F(AggregationTest, maxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) @@ -3186,7 +3186,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimFromAggregation) { core::PlanNodeId aggrNodeId; auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) .config( @@ -3233,7 +3233,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimFromDistinctAggregation) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); core::PlanNodeId aggrNodeId; auto task = 
AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) .config( @@ -3305,7 +3305,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimFromAggregationOnNoMoreInput) { std::thread aggregationThread([&]() { auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) .queryCtx(aggregationQueryCtx) @@ -3389,7 +3389,7 @@ DEBUG_ONLY_TEST_F(AggregationTest, reclaimFromAggregationDuringOutput) { std::thread aggregationThread([&]() { auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) .config( diff --git a/velox/exec/tests/AssertQueryBuilderTest.cpp b/velox/exec/tests/AssertQueryBuilderTest.cpp index f563842fd826..f3cbd29405b7 100644 --- a/velox/exec/tests/AssertQueryBuilderTest.cpp +++ b/velox/exec/tests/AssertQueryBuilderTest.cpp @@ -83,13 +83,13 @@ TEST_F(AssertQueryBuilderTest, hiveSplits) { auto data = makeRowVector({makeFlatVector({1, 2, 3})}); auto file = TempFilePath::create(); - writeToFile(file->path, {data}); + writeToFile(file->path(), {data}); // Single leaf node. AssertQueryBuilder( PlanBuilder().tableScan(asRowType(data->type())).planNode(), duckDbQueryRunner_) - .split(makeHiveConnectorSplit(file->path)) + .split(makeHiveConnectorSplit(file->path())) .assertResults("VALUES (1), (2), (3)"); // Split with partition key. 
@@ -106,7 +106,7 @@ TEST_F(AssertQueryBuilderTest, hiveSplits) { .endTableScan() .planNode(), duckDbQueryRunner_) - .split(HiveConnectorSplitBuilder(file->path) + .split(HiveConnectorSplitBuilder(file->path()) .partitionKey("ds", "2022-05-10") .build()) .assertResults( @@ -115,7 +115,7 @@ TEST_F(AssertQueryBuilderTest, hiveSplits) { // Two leaf nodes. auto buildData = makeRowVector({makeFlatVector({2, 3})}); auto buildFile = TempFilePath::create(); - writeToFile(buildFile->path, {buildData}); + writeToFile(buildFile->path(), {buildData}); auto planNodeIdGenerator = std::make_shared(); core::PlanNodeId probeScanId; @@ -137,8 +137,8 @@ TEST_F(AssertQueryBuilderTest, hiveSplits) { .planNode(); AssertQueryBuilder(joinPlan, duckDbQueryRunner_) - .split(probeScanId, makeHiveConnectorSplit(file->path)) - .split(buildScanId, makeHiveConnectorSplit(buildFile->path)) + .split(probeScanId, makeHiveConnectorSplit(file->path())) + .split(buildScanId, makeHiveConnectorSplit(buildFile->path())) .assertResults("SELECT 2"); } diff --git a/velox/exec/tests/GroupedExecutionTest.cpp b/velox/exec/tests/GroupedExecutionTest.cpp index ed4c8c264742..2fea9c47b174 100644 --- a/velox/exec/tests/GroupedExecutionTest.cpp +++ b/velox/exec/tests/GroupedExecutionTest.cpp @@ -180,7 +180,7 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithOutputBuffer) { // Create source file - we will read from it in 6 splits. auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); // A chain of three pipelines separated by local exchange with the leaf one // having scan running grouped execution - this will make all three pipelines @@ -211,7 +211,7 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithOutputBuffer) { EXPECT_EQ(0, task->numRunningDrivers()); // Add one split for group (8). 
- task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); // Only one split group should be in the processing mode, so 9 drivers (3 per // pipeline). @@ -219,11 +219,11 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithOutputBuffer) { EXPECT_EQ(std::unordered_set{}, getCompletedSplitGroups(task)); // Add the rest of splits - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 1)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 1)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); // One split group should be in the processing mode, so 9 drivers. EXPECT_EQ(9, task->numRunningDrivers()); @@ -289,7 +289,7 @@ DEBUG_ONLY_TEST_F( // Create source file to read as split input. auto vectors = makeVectors(24, 20); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); const int numDriversPerGroup{3}; @@ -400,7 +400,7 @@ DEBUG_ONLY_TEST_F( "0", std::move(planFragment), 0, std::move(queryCtx)); const auto spillDirectory = exec::test::TempDirectoryPath::create(); if (testData.enableSpill) { - task->setSpillDirectory(spillDirectory->path); + task->setSpillDirectory(spillDirectory->path()); } // 'numDriversPerGroup' drivers max to execute one group at a time. @@ -409,17 +409,19 @@ DEBUG_ONLY_TEST_F( // Add split(s) to the build scan. 
if (testData.mixedExecutionMode) { - task->addSplit(buildScanNodeId, makeHiveSplit(filePath->path)); + task->addSplit(buildScanNodeId, makeHiveSplit(filePath->path())); } else { task->addSplit( - buildScanNodeId, makeHiveSplitWithGroup(filePath->path, 0)); + buildScanNodeId, makeHiveSplitWithGroup(filePath->path(), 0)); task->addSplit( - buildScanNodeId, makeHiveSplitWithGroup(filePath->path, 1)); + buildScanNodeId, makeHiveSplitWithGroup(filePath->path(), 1)); } // Add one split for probe split group (0). - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 0)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 0)); // Add one split for probe split group (1). - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 1)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 1)); // Finalize the build split(s). if (testData.mixedExecutionMode) { @@ -458,7 +460,7 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithHashAndNestedLoopJoin) { // Create source file - we will read from it in 6 splits. auto vectors = makeVectors(4, 20); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); // Run the test twice - for Hash and Cross Join. for (size_t i = 0; i < 2; ++i) { @@ -520,10 +522,11 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithHashAndNestedLoopJoin) { EXPECT_EQ(3, task->numRunningDrivers()); // Add single split to the build scan. - task->addSplit(buildScanNodeId, makeHiveSplit(filePath->path)); + task->addSplit(buildScanNodeId, makeHiveSplit(filePath->path())); // Add one split for group (8). - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 8)); // Only one split group should be in the processing mode, so 9 drivers (3 // per pipeline) grouped + 3 ungrouped. 
@@ -531,11 +534,16 @@ TEST_F(GroupedExecutionTest, groupedExecutionWithHashAndNestedLoopJoin) { EXPECT_EQ(std::unordered_set{}, getCompletedSplitGroups(task)); // Add the rest of splits - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 1)); - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 8)); - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit(probeScanNodeId, makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 1)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 8)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit( + probeScanNodeId, makeHiveSplitWithGroup(filePath->path(), 8)); // One split group should be in the processing mode, so 9 drivers (3 per // pipeline) grouped + 3 ungrouped. @@ -621,7 +629,7 @@ TEST_F(GroupedExecutionTest, groupedExecution) { const size_t numSplits{6}; auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); @@ -641,7 +649,7 @@ TEST_F(GroupedExecutionTest, groupedExecution) { auto task = cursor->task(); // Add one splits before start to ensure we can handle such cases. - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); // Start task now. 
cursor->start(); @@ -651,11 +659,11 @@ TEST_F(GroupedExecutionTest, groupedExecution) { EXPECT_EQ(std::unordered_set{}, getCompletedSplitGroups(task)); // Add the rest of splits - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 1)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 5)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 1)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 5)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 8)); // Only two split groups should be in the processing mode, so 4 drivers. EXPECT_EQ(4, task->numRunningDrivers()); @@ -714,7 +722,7 @@ TEST_F(GroupedExecutionTest, allGroupSplitsReceivedBeforeTaskStart) { const size_t numSplits{6}; auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); @@ -729,12 +737,12 @@ TEST_F(GroupedExecutionTest, allGroupSplitsReceivedBeforeTaskStart) { auto task = cursor->task(); // Add all split groups before start to ensure we can handle such cases. 
- task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 0)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 1)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 2)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 0)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 1)); - task->addSplit("0", makeHiveSplitWithGroup(filePath->path, 2)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 0)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 1)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 2)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 0)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 1)); + task->addSplit("0", makeHiveSplitWithGroup(filePath->path(), 2)); task->noMoreSplits("0"); // Start task now. diff --git a/velox/exec/tests/HashJoinBridgeTest.cpp b/velox/exec/tests/HashJoinBridgeTest.cpp index 1f825418a422..e7e39b547504 100644 --- a/velox/exec/tests/HashJoinBridgeTest.cpp +++ b/velox/exec/tests/HashJoinBridgeTest.cpp @@ -112,13 +112,13 @@ class HashJoinBridgeTest : public testing::Test, static uint32_t fakeFileId{0}; SpillFiles files; files.reserve(numFiles); - const std::string filePathPrefix = tempDir_->path + "/Spill"; + const std::string filePathPrefix = tempDir_->path() + "/Spill"; for (int32_t i = 0; i < numFiles; ++i) { const auto fileId = fakeFileId; files.push_back( {fileId, rowType_, - tempDir_->path + "/Spill_" + std::to_string(fileId), + tempDir_->path() + "/Spill_" + std::to_string(fileId), 1024, 1, std::vector({}), diff --git a/velox/exec/tests/HashJoinTest.cpp b/velox/exec/tests/HashJoinTest.cpp index 84f9c6370cc1..31d0543b7d22 100644 --- a/velox/exec/tests/HashJoinTest.cpp +++ b/velox/exec/tests/HashJoinTest.cpp @@ -617,7 +617,7 @@ class HashJoinBuilder { int32_t spillPct{0}; if (injectSpill) { spillDirectory = exec::test::TempDirectoryPath::create(); - builder.spillDirectory(spillDirectory->path); + 
builder.spillDirectory(spillDirectory->path()); config(core::QueryConfig::kSpillEnabled, "true"); config(core::QueryConfig::kMaxSpillLevel, std::to_string(maxSpillLevel)); config(core::QueryConfig::kJoinSpillEnabled, "true"); @@ -802,7 +802,7 @@ class HashJoinTest : public HiveConnectorTestBase { std::vector splits; splits.reserve(files[i].size()); for (const auto& file : files[i]) { - splits.push_back(exec::Split(makeHiveConnectorSplit(file->path))); + splits.push_back(exec::Split(makeHiveConnectorSplit(file->path()))); } splitInput.emplace(nodeIds[i], std::move(splits)); } @@ -1704,10 +1704,10 @@ TEST_P(MultiThreadedHashJoinTest, semiFilterOverLazyVectors) { }); std::shared_ptr probeFile = TempFilePath::create(); - writeToFile(probeFile->path, probeVectors); + writeToFile(probeFile->path(), probeVectors); std::shared_ptr buildFile = TempFilePath::create(); - writeToFile(buildFile->path, buildVectors); + writeToFile(buildFile->path(), buildVectors); createDuckDbTable("t", probeVectors); createDuckDbTable("u", buildVectors); @@ -1731,8 +1731,8 @@ TEST_P(MultiThreadedHashJoinTest, semiFilterOverLazyVectors) { .planNode(); SplitInput splitInput = { - {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path))}}, - {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path))}}, + {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path()))}}, + {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path()))}}, }; HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) @@ -3197,10 +3197,10 @@ TEST_F(HashJoinTest, nullAwareRightSemiProjectOverScan) { }); std::shared_ptr probeFile = TempFilePath::create(); - writeToFile(probeFile->path, {probe}); + writeToFile(probeFile->path(), {probe}); std::shared_ptr buildFile = TempFilePath::create(); - writeToFile(buildFile->path, {build}); + writeToFile(buildFile->path(), {build}); createDuckDbTable("t", {probe}); createDuckDbTable("u", {build}); @@ -3225,8 +3225,8 @@ TEST_F(HashJoinTest, 
nullAwareRightSemiProjectOverScan) { .planNode(); SplitInput splitInput = { - {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path))}}, - {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path))}}, + {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path()))}}, + {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path()))}}, }; HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) @@ -3797,10 +3797,10 @@ TEST_F(HashJoinTest, semiProjectOverLazyVectors) { }); std::shared_ptr probeFile = TempFilePath::create(); - writeToFile(probeFile->path, probeVectors); + writeToFile(probeFile->path(), probeVectors); std::shared_ptr buildFile = TempFilePath::create(); - writeToFile(buildFile->path, buildVectors); + writeToFile(buildFile->path(), buildVectors); createDuckDbTable("t", probeVectors); createDuckDbTable("u", buildVectors); @@ -3824,8 +3824,8 @@ TEST_F(HashJoinTest, semiProjectOverLazyVectors) { .planNode(); SplitInput splitInput = { - {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path))}}, - {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path))}}, + {probeScanId, {exec::Split(makeHiveConnectorSplit(probeFile->path()))}}, + {buildScanId, {exec::Split(makeHiveConnectorSplit(buildFile->path()))}}, }; HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) @@ -3947,13 +3947,13 @@ TEST_F(HashJoinTest, lazyVectors) { for (const auto& probeVector : probeVectors) { tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, probeVector); + writeToFile(tempFiles.back()->path(), probeVector); } createDuckDbTable("t", probeVectors); for (const auto& buildVector : buildVectors) { tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, buildVector); + writeToFile(tempFiles.back()->path(), buildVector); } createDuckDbTable("u", buildVectors); @@ -3963,12 +3963,12 @@ TEST_F(HashJoinTest, lazyVectors) { std::vector probeSplits; for (int i 
= 0; i < probeVectors.size(); ++i) { probeSplits.push_back( - exec::Split(makeHiveConnectorSplit(tempFiles[i]->path))); + exec::Split(makeHiveConnectorSplit(tempFiles[i]->path()))); } std::vector buildSplits; for (int i = 0; i < buildVectors.size(); ++i) { buildSplits.push_back(exec::Split( - makeHiveConnectorSplit(tempFiles[probeSplits.size() + i]->path))); + makeHiveConnectorSplit(tempFiles[probeSplits.size() + i]->path()))); } SplitInput splits; splits.emplace(probeScanId, probeSplits); @@ -4052,13 +4052,14 @@ TEST_F(HashJoinTest, dynamicFilters) { }); probeVectors.push_back(rowVector); tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, rowVector); + writeToFile(tempFiles.back()->path(), rowVector); } auto makeInputSplits = [&](const core::PlanNodeId& nodeId) { return [&] { std::vector probeSplits; for (auto& file : tempFiles) { - probeSplits.push_back(exec::Split(makeHiveConnectorSplit(file->path))); + probeSplits.push_back( + exec::Split(makeHiveConnectorSplit(file->path()))); } SplitInput splits; splits.emplace(nodeId, probeSplits); @@ -4577,18 +4578,19 @@ TEST_F(HashJoinTest, dynamicFiltersWithSkippedSplits) { }); probeVectors.push_back(rowVector); tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, rowVector); + writeToFile(tempFiles.back()->path(), rowVector); } auto makeInputSplits = [&](const core::PlanNodeId& nodeId) { return [&] { std::vector probeSplits; for (auto& file : tempFiles) { - probeSplits.push_back(exec::Split(makeHiveConnectorSplit(file->path))); + probeSplits.push_back( + exec::Split(makeHiveConnectorSplit(file->path()))); } // We add splits that have no rows. 
auto makeEmpty = [&]() { - return exec::Split(HiveConnectorSplitBuilder(tempFiles.back()->path) + return exec::Split(HiveConnectorSplitBuilder(tempFiles.back()->path()) .start(10000000) .length(1) .build()); @@ -4791,8 +4793,8 @@ TEST_F(HashJoinTest, dynamicFiltersAppliedToPreloadedSplits) { }); probeVectors.push_back(rowVector); tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, rowVector); - auto split = HiveConnectorSplitBuilder(tempFiles.back()->path) + writeToFile(tempFiles.back()->path(), rowVector); + auto split = HiveConnectorSplitBuilder(tempFiles.back()->path()) .partitionKey("p1", std::to_string(i)) .build(); probeSplits.push_back(exec::Split(split)); @@ -5041,7 +5043,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, buildReservationReleaseCheck) { // only gets executed when spilling is enabled. We don't care about if // spilling is really triggered in test or not. auto spillDirectory = exec::test::TempDirectoryPath::create(); - params.spillDirectory = spillDirectory->path; + params.spillDirectory = spillDirectory->path(); params.queryCtx->testingOverrideConfigUnsafe( {{core::QueryConfig::kSpillEnabled, "true"}, {core::QueryConfig::kMaxSpillLevel, "0"}}); @@ -5070,14 +5072,14 @@ TEST_F(HashJoinTest, dynamicFilterOnPartitionKey) { auto rowVector = makeRowVector( {makeFlatVector(size, [&](auto row) { return row; })}); createDuckDbTable("u", {rowVector}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); std::vector buildVectors{ makeRowVector({"c0"}, {makeFlatVector({0, 1, 2})})}; createDuckDbTable("t", buildVectors); - auto split = - facebook::velox::exec::test::HiveConnectorSplitBuilder(filePaths[0]->path) - .partitionKey("k", "0") - .build(); + auto split = facebook::velox::exec::test::HiveConnectorSplitBuilder( + filePaths[0]->path()) + .partitionKey("k", "0") + .build(); auto outputType = ROW({"n1_0", "n1_1"}, {BIGINT(), BIGINT()}); ColumnHandleMap assignments = { {"n1_0", 
regularColumn("c0", BIGINT())}, @@ -5209,7 +5211,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringInputProcessing) { .planNode(plan) .queryPool(std::move(queryPool)) .injectSpill(false) - .spillDirectory(testData.spillEnabled ? tempDirectory->path : "") + .spillDirectory(testData.spillEnabled ? tempDirectory->path() : "") .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .config(core::QueryConfig::kSpillStartPartitionBit, "29") @@ -5361,7 +5363,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringReserve) { .planNode(plan) .queryPool(std::move(queryPool)) .injectSpill(false) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .config(core::QueryConfig::kSpillStartPartitionBit, "29") @@ -5490,7 +5492,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringAllocation) { .planNode(plan) .queryPool(std::move(queryPool)) .injectSpill(false) - .spillDirectory(enableSpilling ? tempDirectory->path : "") + .spillDirectory(enableSpilling ? tempDirectory->path() : "") .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .verifier([&](const std::shared_ptr& task, bool /*unused*/) { @@ -5608,7 +5610,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringOutputProcessing) { .planNode(plan) .queryPool(std::move(queryPool)) .injectSpill(false) - .spillDirectory(enableSpilling ? tempDirectory->path : "") + .spillDirectory(enableSpilling ? 
tempDirectory->path() : "") .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .verifier([&](const std::shared_ptr& task, bool /*unused*/) { @@ -5753,7 +5755,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringWaitForProbe) { .planNode(plan) .queryPool(std::move(queryPool)) .injectSpill(false) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .config(core::QueryConfig::kSpillStartPartitionBit, "29") @@ -6255,7 +6257,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, minSpillableMemoryReservation) { .numDrivers(numDrivers_) .planNode(plan) .injectSpill(false) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .run(); @@ -6307,7 +6309,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, exceededMaxSpillLevel) { // Always trigger spilling. 
.injectSpill(false) .maxSpillLevel(0) - .spillDirectory(tempDirectory->path) + .spillDirectory(tempDirectory->path()) .referenceQuery( "SELECT t_k1, t_k2, t_v1, u_k1, u_k2, u_v1 FROM t, u WHERE t.t_k1 = u.u_k1") .config(core::QueryConfig::kSpillStartPartitionBit, "29") @@ -6378,7 +6380,7 @@ TEST_F(HashJoinTest, maxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kJoinSpillEnabled, true) @@ -6434,7 +6436,7 @@ TEST_F(HashJoinTest, onlyHashBuildMaxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kJoinSpillEnabled, true) @@ -6779,7 +6781,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, arbitrationTriggeredByEnsureJoinTableFit) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kJoinSpillEnabled, true) .config(core::QueryConfig::kSpillNumPartitionBits, 2) @@ -6850,7 +6852,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, reclaimDuringJoinTableBuild) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kJoinSpillEnabled, true) .config(core::QueryConfig::kSpillNumPartitionBits, 2) @@ -6957,7 +6959,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, joinBuildSpillError) { VELOX_ASSERT_THROW( AssertQueryBuilder(plan) 
.queryCtx(joinQueryCtx) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .copyResults(pool()), injectedErrorMsg); @@ -7132,7 +7134,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpill) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .probeKeys({"t_k1"}) .probeVectors(std::move(probeVectors)) .buildKeys({"u_k1"}) @@ -7188,7 +7190,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpillInMiddeOfLastOutputProcessing) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .probeKeys({"t_k1"}) .probeVectors(std::move(probeVectors)) .buildKeys({"u_k1"}) @@ -7261,7 +7263,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpillInMiddeOfOutputProcessing) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .probeKeys({"t_k1"}) .probeVectors(std::move(probeVectors)) .buildKeys({"u_k1"}) @@ -7319,7 +7321,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpillWhenOneOfProbeFinish) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(numDrivers, true, true) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .keyTypes({BIGINT()}) .probeVectors(32, 5) .buildVectors(32, 5) @@ -7364,7 +7366,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpillExceedLimit) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, 
duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .probeKeys({"t_k1"}) .probeVectors(std::move(probeVectors)) .buildKeys({"u_k1"}) @@ -7431,7 +7433,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, hashProbeSpillUnderNonReclaimableSection) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .keyTypes({BIGINT()}) .probeVectors(32, 5) .buildVectors(32, 5) @@ -7486,7 +7488,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, spillOutputWithRightSemiJoins) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .numDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .probeType(probeType_) .probeVectors(128, 3) .probeKeys({"t_k1"}) @@ -7523,13 +7525,14 @@ DEBUG_ONLY_TEST_F(HashJoinTest, spillCheckOnLeftSemiFilterWithDynamicFilters) { }); probeVectors.push_back(rowVector); tempFiles.push_back(TempFilePath::create()); - writeToFile(tempFiles.back()->path, rowVector); + writeToFile(tempFiles.back()->path(), rowVector); } auto makeInputSplits = [&](const core::PlanNodeId& nodeId) { return [&] { std::vector probeSplits; for (auto& file : tempFiles) { - probeSplits.push_back(exec::Split(makeHiveConnectorSplit(file->path))); + probeSplits.push_back( + exec::Split(makeHiveConnectorSplit(file->path()))); } SplitInput splits; splits.emplace(nodeId, probeSplits); @@ -7608,7 +7611,7 @@ DEBUG_ONLY_TEST_F(HashJoinTest, spillCheckOnLeftSemiFilterWithDynamicFilters) { HashJoinBuilder(*pool_, duckDbQueryRunner_, driverExecutor_.get()) .planNode(std::move(op)) .makeInputSplits(makeInputSplits(probeScanId)) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .injectSpill(false) .referenceQuery( 
"SELECT t.c0, t.c1 + 1 FROM t WHERE t.c0 IN (SELECT c0 FROM u)") diff --git a/velox/exec/tests/JoinFuzzer.cpp b/velox/exec/tests/JoinFuzzer.cpp index 45b4faf77510..170732172b11 100644 --- a/velox/exec/tests/JoinFuzzer.cpp +++ b/velox/exec/tests/JoinFuzzer.cpp @@ -432,7 +432,7 @@ RowVectorPtr JoinFuzzer::execute(const PlanWithSplits& plan, bool injectSpill) { spillDirectory = exec::test::TempDirectoryPath::create(); builder.config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true") - .spillDirectory(spillDirectory->path); + .spillDirectory(spillDirectory->path()); spillPct = 10; } @@ -982,7 +982,7 @@ void JoinFuzzer::verify(core::JoinType joinType) { const auto tableScanDir = exec::test::TempDirectoryPath::create(); addPlansWithTableScan( - tableScanDir->path, + tableScanDir->path(), joinType, nullAware, probeKeys, diff --git a/velox/exec/tests/LimitTest.cpp b/velox/exec/tests/LimitTest.cpp index 70249fb944d9..0edf86d72f58 100644 --- a/velox/exec/tests/LimitTest.cpp +++ b/velox/exec/tests/LimitTest.cpp @@ -79,7 +79,7 @@ TEST_F(LimitTest, limitOverLocalExchange) { {makeFlatVector(1'000, [](auto row) { return row; })}); auto file = TempFilePath::create(); - writeToFile(file->path, {data}); + writeToFile(file->path(), {data}); core::PlanNodeId scanNodeId; @@ -93,7 +93,7 @@ TEST_F(LimitTest, limitOverLocalExchange) { auto cursor = TaskCursor::create(params); cursor->task()->addSplit( - scanNodeId, exec::Split(makeHiveConnectorSplit(file->path))); + scanNodeId, exec::Split(makeHiveConnectorSplit(file->path()))); int32_t numRead = 0; while (cursor->moveNext()) { diff --git a/velox/exec/tests/LocalPartitionTest.cpp b/velox/exec/tests/LocalPartitionTest.cpp index 86549b636a5a..5de59909a372 100644 --- a/velox/exec/tests/LocalPartitionTest.cpp +++ b/velox/exec/tests/LocalPartitionTest.cpp @@ -43,7 +43,7 @@ class LocalPartitionTest : public HiveConnectorTestBase { const std::vector& vectors) { auto filePaths = 
makeFilePaths(vectors.size()); for (auto i = 0; i < vectors.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } return filePaths; } @@ -140,7 +140,7 @@ TEST_F(LocalPartitionTest, gather) { AssertQueryBuilder queryBuilder(op, duckDbQueryRunner_); for (auto i = 0; i < filePaths.size(); ++i) { queryBuilder.split( - scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path)); + scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path())); } task = queryBuilder.assertResults("SELECT 300, -71, 152"); @@ -186,7 +186,7 @@ TEST_F(LocalPartitionTest, partition) { queryBuilder.maxDrivers(2); for (auto i = 0; i < filePaths.size(); ++i) { queryBuilder.split( - scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path)); + scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path())); } auto task = @@ -267,7 +267,7 @@ TEST_F(LocalPartitionTest, maxBufferSizePartition) { queryBuilder.maxDrivers(2); for (auto i = 0; i < filePaths.size(); ++i) { queryBuilder.split( - scanNodeIds[i % 3], makeHiveConnectorSplit(filePaths[i]->path)); + scanNodeIds[i % 3], makeHiveConnectorSplit(filePaths[i]->path())); } queryBuilder.config( core::QueryConfig::kMaxLocalExchangeBufferSize, bufferSize); @@ -316,7 +316,7 @@ TEST_F(LocalPartitionTest, indicesBufferCapacity) { for (auto i = 0; i < filePaths.size(); ++i) { auto id = scanNodeIds[i % 3]; cursor->task()->addSplit( - id, Split(makeHiveConnectorSplit(filePaths[i]->path))); + id, Split(makeHiveConnectorSplit(filePaths[i]->path()))); cursor->task()->noMoreSplits(id); } int numRows = 0; @@ -450,7 +450,7 @@ TEST_F(LocalPartitionTest, multipleExchanges) { AssertQueryBuilder queryBuilder(op, duckDbQueryRunner_); for (auto i = 0; i < filePaths.size(); ++i) { queryBuilder.split( - scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path)); + scanNodeIds[i], makeHiveConnectorSplit(filePaths[i]->path())); } queryBuilder.maxDrivers(2).assertResults( diff --git 
a/velox/exec/tests/MergeJoinTest.cpp b/velox/exec/tests/MergeJoinTest.cpp index 4868419e83fd..bf76a2a054c3 100644 --- a/velox/exec/tests/MergeJoinTest.cpp +++ b/velox/exec/tests/MergeJoinTest.cpp @@ -465,11 +465,11 @@ TEST_F(MergeJoinTest, lazyVectors) { makeFlatVector(10'000, [](auto row) { return row % 31; })}); auto leftFile = TempFilePath::create(); - writeToFile(leftFile->path, leftVectors); + writeToFile(leftFile->path(), leftVectors); createDuckDbTable("t", {leftVectors}); auto rightFile = TempFilePath::create(); - writeToFile(rightFile->path, rightVectors); + writeToFile(rightFile->path(), rightVectors); createDuckDbTable("u", {rightVectors}); auto planNodeIdGenerator = std::make_shared(); @@ -492,8 +492,8 @@ TEST_F(MergeJoinTest, lazyVectors) { .planNode(); AssertQueryBuilder(op, duckDbQueryRunner_) - .split(rightScanId, makeHiveConnectorSplit(rightFile->path)) - .split(leftScanId, makeHiveConnectorSplit(leftFile->path)) + .split(rightScanId, makeHiveConnectorSplit(rightFile->path())) + .split(leftScanId, makeHiveConnectorSplit(leftFile->path())) .assertResults( "SELECT c0, rc0, c1, rc1, c2, c3 FROM t, u WHERE t.c0 = u.rc0 and c1 + rc1 < 30"); } diff --git a/velox/exec/tests/MultiFragmentTest.cpp b/velox/exec/tests/MultiFragmentTest.cpp index 110729da8850..dbf4e09fb4cd 100644 --- a/velox/exec/tests/MultiFragmentTest.cpp +++ b/velox/exec/tests/MultiFragmentTest.cpp @@ -106,11 +106,11 @@ class MultiFragmentTest : public HiveConnectorTestBase { auto split = exec::Split( std::make_shared( kHiveConnectorId, - "file:" + filePath->path, + "file:" + filePath->path(), facebook::velox::dwio::common::FileFormat::DWRF), -1); task->addSplit("0", std::move(split)); - VLOG(1) << filePath->path << "\n"; + VLOG(1) << filePath->path() << "\n"; } task->noMoreSplits("0"); } @@ -152,7 +152,7 @@ class MultiFragmentTest : public HiveConnectorTestBase { filePaths_ = makeFilePaths(filePathCount); vectors_ = makeVectors(filePaths_.size(), rowsPerVector); for (int i = 0; i < 
filePaths_.size(); i++) { - writeToFile(filePaths_[i]->path, vectors_[i]); + writeToFile(filePaths_[i]->path(), vectors_[i]); } createDuckDbTable(vectors_); } @@ -885,7 +885,7 @@ TEST_F(MultiFragmentTest, limit) { 1'000, [](auto row) { return row; }, nullEvery(7))}); auto file = TempFilePath::create(); - writeToFile(file->path, {data}); + writeToFile(file->path(), {data}); // Make leaf task: Values -> PartialLimit(10) -> Repartitioning(0). auto leafTaskId = makeTaskId("leaf", 0); @@ -899,7 +899,7 @@ TEST_F(MultiFragmentTest, limit) { leafTask->start(1); leafTask.get()->addSplit( - "0", exec::Split(makeHiveConnectorSplit(file->path))); + "0", exec::Split(makeHiveConnectorSplit(file->path()))); // Make final task: Exchange -> FinalLimit(10). auto plan = PlanBuilder() diff --git a/velox/exec/tests/OrderByTest.cpp b/velox/exec/tests/OrderByTest.cpp index a269ab2cb09b..926ebba3a721 100644 --- a/velox/exec/tests/OrderByTest.cpp +++ b/velox/exec/tests/OrderByTest.cpp @@ -205,7 +205,7 @@ class OrderByTest : public OperatorTestBase { CursorParameters params; params.planNode = planNode; params.queryCtx = queryCtx; - params.spillDirectory = spillDirectory->path; + params.spillDirectory = spillDirectory->path(); auto task = assertQueryOrdered(params, duckDbSql, sortingKeys); auto inputRows = toPlanStats(task->taskStats()).at(orderById).inputRows; const uint64_t peakSpillMemoryUsage = @@ -509,7 +509,7 @@ TEST_F(OrderByTest, spill) { auto spillDirectory = exec::test::TempDirectoryPath::create(); TestScopedSpillInjection scopedSpillInjection(100); auto task = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .assertResults(expectedResult); @@ -566,7 +566,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringInputProcessing) { for (const auto& testData : testSettings) { SCOPED_TRACE(testData.debugString()); - auto tempDirectory 
= exec::test::TempDirectoryPath::create(); + auto spillDirectory = exec::test::TempDirectoryPath::create(); auto queryCtx = std::make_shared(executor_.get()); queryCtx->testingOverrideMemoryPool(memory::memoryManager()->addRootPool( queryCtx->queryId(), kMaxBytes, memory::MemoryReclaimer::create())); @@ -626,7 +626,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringInputProcessing) { .orderBy({fmt::format("{} ASC NULLS LAST", "c0")}, false) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .maxDrivers(1) @@ -705,7 +705,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringReserve) { batches.push_back(fuzzer.fuzzRow(rowType)); } - auto tempDirectory = exec::test::TempDirectoryPath::create(); + auto spillDirectory = exec::test::TempDirectoryPath::create(); auto queryCtx = std::make_shared(executor_.get()); queryCtx->testingOverrideMemoryPool(memory::memoryManager()->addRootPool( queryCtx->queryId(), kMaxBytes, memory::MemoryReclaimer::create())); @@ -765,7 +765,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringReserve) { .orderBy({fmt::format("{} ASC NULLS LAST", "c0")}, false) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .maxDrivers(1) @@ -818,7 +818,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringAllocation) { const std::vector enableSpillings = {false, true}; for (const auto enableSpilling : enableSpillings) { SCOPED_TRACE(fmt::format("enableSpilling {}", enableSpilling)); - auto tempDirectory = exec::test::TempDirectoryPath::create(); + auto spillDirectory = exec::test::TempDirectoryPath::create(); auto queryCtx = std::make_shared(executor_.get()); queryCtx->testingOverrideMemoryPool( 
memory::memoryManager()->addRootPool(queryCtx->queryId(), kMaxBytes)); @@ -883,7 +883,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringAllocation) { .orderBy({fmt::format("{} ASC NULLS LAST", "c0")}, false) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .maxDrivers(1) @@ -948,7 +948,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringOutputProcessing) { const std::vector enableSpillings = {false, true}; for (const auto enableSpilling : enableSpillings) { SCOPED_TRACE(fmt::format("enableSpilling {}", enableSpilling)); - auto tempDirectory = exec::test::TempDirectoryPath::create(); + auto spillDirectory = exec::test::TempDirectoryPath::create(); auto queryCtx = std::make_shared(executor_.get()); queryCtx->testingOverrideMemoryPool(memory::memoryManager()->addRootPool( queryCtx->queryId(), kMaxBytes, memory::MemoryReclaimer::create())); @@ -1000,7 +1000,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimDuringOutputProcessing) { .orderBy({fmt::format("{} ASC NULLS LAST", "c0")}, false) .planNode()) .queryCtx(queryCtx) - .spillDirectory(tempDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .maxDrivers(1) @@ -1226,7 +1226,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, spillWithNoMoreOutput) { auto spillDirectory = exec::test::TempDirectoryPath::create(); auto task = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) // Set output buffer size to extreme large to read all the @@ -1272,7 +1272,7 @@ TEST_F(OrderByTest, maxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + 
.spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) @@ -1341,7 +1341,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimFromOrderBy) { std::thread orderByThread([&]() { auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .queryCtx(orderByQueryCtx) @@ -1409,7 +1409,7 @@ DEBUG_ONLY_TEST_F(OrderByTest, reclaimFromEmptyOrderBy) { std::thread orderByThread([&]() { auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kOrderBySpillEnabled, true) .queryCtx(orderByQueryCtx) diff --git a/velox/exec/tests/PrintPlanWithStatsTest.cpp b/velox/exec/tests/PrintPlanWithStatsTest.cpp index 39f1bfdd44b1..61a1a310607c 100644 --- a/velox/exec/tests/PrintPlanWithStatsTest.cpp +++ b/velox/exec/tests/PrintPlanWithStatsTest.cpp @@ -89,7 +89,7 @@ TEST_F(PrintPlanWithStatsTest, innerJoinWithTableScan) { makeFlatVector(numRowsProbe, [](auto row) { return row; }), }); leftVectors.push_back(rowVector); - writeToFile(leftFiles[i]->path, rowVector); + writeToFile(leftFiles[i]->path(), rowVector); } auto probeType = ROW({"c0", "c1"}, {INTEGER(), BIGINT()}); @@ -237,7 +237,7 @@ TEST_F(PrintPlanWithStatsTest, partialAggregateWithTableScan) { SCOPED_TRACE(fmt::format("numPrefetchSplit {}", numPrefetchSplit)); asyncDataCache_->clear(); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); auto op = PlanBuilder() diff --git a/velox/exec/tests/RowNumberTest.cpp b/velox/exec/tests/RowNumberTest.cpp index 6f76dbf2bd8f..df4f0b025f94 100644 --- a/velox/exec/tests/RowNumberTest.cpp +++ 
b/velox/exec/tests/RowNumberTest.cpp @@ -211,7 +211,7 @@ TEST_F(RowNumberTest, spill) { core::PlanNodeId rowNumberPlanNodeId; auto task = AssertQueryBuilder(duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kRowNumberSpillEnabled, true) .config( @@ -273,7 +273,7 @@ TEST_F(RowNumberTest, maxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kRowNumberSpillEnabled, true) @@ -315,11 +315,11 @@ TEST_F(RowNumberTest, memoryUsage) { std::shared_ptr task; TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, spillEnableConfig) .config(core::QueryConfig::kRowNumberSpillEnabled, spillEnableConfig) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .copyResults(pool_.get(), task); if (spillEnable) { diff --git a/velox/exec/tests/SortBufferTest.cpp b/velox/exec/tests/SortBufferTest.cpp index 771b16cad06d..b5158136c096 100644 --- a/velox/exec/tests/SortBufferTest.cpp +++ b/velox/exec/tests/SortBufferTest.cpp @@ -286,7 +286,7 @@ TEST_F(SortBufferTest, batchOutput) { SCOPED_TRACE(testData.debugString()); auto spillDirectory = exec::test::TempDirectoryPath::create(); auto spillConfig = common::SpillConfig( - [&]() -> const std::string& { return spillDirectory->path; }, + [&]() -> const std::string& { return spillDirectory->path(); }, [&](uint64_t) {}, "0.0.0", 1000, @@ -380,7 +380,7 @@ TEST_F(SortBufferTest, spill) { auto spillableReservationGrowthPct = testData.memoryReservationFailure ? 
100000 : 100; auto spillConfig = common::SpillConfig( - [&]() -> const std::string& { return spillDirectory->path; }, + [&]() -> const std::string& { return spillDirectory->path(); }, [&](uint64_t) {}, "0.0.0", 1000, @@ -454,7 +454,7 @@ TEST_F(SortBufferTest, emptySpill) { for (bool hasPostSpillData : {false, true}) { SCOPED_TRACE(fmt::format("hasPostSpillData {}", hasPostSpillData)); auto spillDirectory = exec::test::TempDirectoryPath::create(); - auto spillConfig = getSpillConfig(spillDirectory->path); + auto spillConfig = getSpillConfig(spillDirectory->path()); folly::Synchronized spillStats; auto sortBuffer = std::make_unique( inputType_, diff --git a/velox/exec/tests/SpillTest.cpp b/velox/exec/tests/SpillTest.cpp index 35f1eaaf1ca2..4ac61f9099a7 100644 --- a/velox/exec/tests/SpillTest.cpp +++ b/velox/exec/tests/SpillTest.cpp @@ -157,7 +157,7 @@ class SpillTest : public ::testing::TestWithParam, // partitions produce an ascending sequence of integers without gaps. spillStats_.wlock()->reset(); state_ = std::make_unique( - [&]() -> const std::string& { return tempDir_->path; }, + [&]() -> const std::string { return tempDir_->path(); }, updateSpilledBytesCb_, fileNamePrefix_, numPartitions, @@ -337,7 +337,7 @@ class SpillTest : public ::testing::TestWithParam, ASSERT_EQ(expectedNumSpilledFiles, spilledFileSet.size()); // Verify the spilled file exist on file system. std::shared_ptr fs = - filesystems::getFileSystem(tempDir_->path, nullptr); + filesystems::getFileSystem(tempDir_->path(), nullptr); uint64_t totalFileBytes{0}; for (const auto& spilledFile : spilledFileSet) { auto readFile = fs->openFileForRead(spilledFile); @@ -467,7 +467,7 @@ TEST_P(SpillTest, spillTimestamp) { // read back. 
auto tempDirectory = exec::test::TempDirectoryPath::create(); std::vector emptyCompareFlags; - const std::string spillPath = tempDirectory->path + "/test"; + const std::string spillPath = tempDirectory->path() + "/test"; std::vector timeValues = { Timestamp{0, 0}, Timestamp{12, 0}, @@ -478,7 +478,7 @@ TEST_P(SpillTest, spillTimestamp) { Timestamp{Timestamp::kMinSeconds, 0}}; SpillState state( - [&]() -> const std::string& { return tempDirectory->path; }, + [&]() -> const std::string { return tempDirectory->path(); }, updateSpilledBytesCb_, "test", 1, @@ -774,13 +774,13 @@ SpillFiles makeFakeSpillFiles(int32_t numFiles) { static uint32_t fakeFileId{0}; SpillFiles files; files.reserve(numFiles); - const std::string filePathPrefix = tempDir->path + "/Spill"; + const std::string filePathPrefix = tempDir->path() + "/Spill"; for (int32_t i = 0; i < numFiles; ++i) { const auto fileId = fakeFileId; files.push_back( {fileId, ROW({"k1", "k2"}, {BIGINT(), BIGINT()}), - tempDir->path + "/Spill_" + std::to_string(fileId), + tempDir->path() + "/Spill_" + std::to_string(fileId), 1024, 1, std::vector({}), diff --git a/velox/exec/tests/SpillerBenchmarkBase.cpp b/velox/exec/tests/SpillerBenchmarkBase.cpp index bd4783ed9831..ec7c6aa69d3d 100644 --- a/velox/exec/tests/SpillerBenchmarkBase.cpp +++ b/velox/exec/tests/SpillerBenchmarkBase.cpp @@ -109,7 +109,7 @@ void SpillerBenchmarkBase::setUp() { if (FLAGS_spiller_benchmark_path.empty()) { tempDir_ = exec::test::TempDirectoryPath::create(); - spillDir_ = tempDir_->path; + spillDir_ = tempDir_->path(); } else { spillDir_ = FLAGS_spiller_benchmark_path; } diff --git a/velox/exec/tests/SpillerTest.cpp b/velox/exec/tests/SpillerTest.cpp index 5a98684c4c68..f078fe388890 100644 --- a/velox/exec/tests/SpillerTest.cpp +++ b/velox/exec/tests/SpillerTest.cpp @@ -160,7 +160,7 @@ class SpillerTest : public exec::test::RowContainerTestBase { RowContainerTestBase::SetUp(); rng_.seed(1); tempDirPath_ = exec::test::TempDirectoryPath::create(); - fs_ = 
filesystems::getFileSystem(tempDirPath_->path, nullptr); + fs_ = filesystems::getFileSystem(tempDirPath_->path(), nullptr); containerType_ = ROW({ {"bool_val", BOOLEAN()}, {"tiny_val", TINYINT()}, @@ -520,10 +520,11 @@ class SpillerTest : public exec::test::RowContainerTestBase { bool makeError, uint64_t maxSpillRunRows = 0) { static const std::string kBadSpillDirPath = "/bad/path"; - common::GetSpillDirectoryPathCB badSpillDirCb = - [&]() -> const std::string& { return kBadSpillDirPath; }; + common::GetSpillDirectoryPathCB badSpillDirCb = [&]() -> const std::string { + return kBadSpillDirPath; + }; common::GetSpillDirectoryPathCB tempSpillDirCb = - [&]() -> const std::string& { return tempDirPath_->path; }; + [&]() -> const std::string { return tempDirPath_->path(); }; stats_.clear(); spillStats_ = folly::Synchronized(); diff --git a/velox/exec/tests/TableScanTest.cpp b/velox/exec/tests/TableScanTest.cpp index fc261aef119a..28ca54062d6d 100644 --- a/velox/exec/tests/TableScanTest.cpp +++ b/velox/exec/tests/TableScanTest.cpp @@ -235,7 +235,7 @@ class TableScanTest : public virtual HiveConnectorTestBase { TEST_F(TableScanTest, allColumns) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); auto plan = tableScanNode(); @@ -261,7 +261,7 @@ TEST_F(TableScanTest, connectorStats) { for (size_t i = 0; i < 99; i++) { auto vectors = makeVectors(10, 10); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); auto plan = tableScanNode(); assertQuery(plan, {filePath}, "SELECT * FROM tmp"); @@ -274,7 +274,7 @@ TEST_F(TableScanTest, connectorStats) { TEST_F(TableScanTest, columnAliases) { auto vectors = makeVectors(1, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); 
createDuckDbTable(vectors); std::string tableName = "t"; @@ -315,14 +315,14 @@ TEST_F(TableScanTest, columnAliases) { TEST_F(TableScanTest, partitionKeyAlias) { auto vectors = makeVectors(1, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); ColumnHandleMap assignments = { {"a", regularColumn("c0", BIGINT())}, {"ds_alias", partitionKey("ds", VARCHAR())}}; - auto split = HiveConnectorSplitBuilder(filePath->path) + auto split = HiveConnectorSplitBuilder(filePath->path()) .partitionKey("ds", "2021-12-02") .build(); @@ -340,7 +340,7 @@ TEST_F(TableScanTest, partitionKeyAlias) { TEST_F(TableScanTest, columnPruning) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); auto op = tableScanNode(ROW({"c0"}, {BIGINT()})); @@ -377,7 +377,7 @@ TEST_F(TableScanTest, timestamp) { })}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); createDuckDbTable({rowVector}); auto dataColumns = ROW({"c0", "c1"}, {BIGINT(), TIMESTAMP()}); @@ -460,7 +460,7 @@ DEBUG_ONLY_TEST_F(TableScanTest, timeLimitInGetOutput) { for (auto i = 0; i < numFiles; ++i) { filePaths.emplace_back(TempFilePath::create()); const auto& vec = (i % 3 == 0) ? 
rowVector : rowVectorNoNulls; - writeToFile(filePaths.back()->path, vec); + writeToFile(filePaths.back()->path(), vec); vectorsForDuckDb.emplace_back(vec); } createDuckDbTable(vectorsForDuckDb); @@ -515,7 +515,7 @@ TEST_F(TableScanTest, subfieldPruningRowType) { auto rowType = ROW({"e"}, {columnType}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); std::vector requiredSubfields; requiredSubfields.emplace_back("e.c"); std::unordered_map> @@ -532,7 +532,7 @@ TEST_F(TableScanTest, subfieldPruningRowType) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), 10'000); auto rows = result->as(); @@ -570,7 +570,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterSubfieldsMissing) { auto rowType = ROW({"e"}, {columnType}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); std::vector requiredSubfields; requiredSubfields.emplace_back("e.c"); std::unordered_map> @@ -589,7 +589,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterSubfieldsMissing) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); auto rows = result->as(); @@ -609,7 +609,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterRootFieldMissing) { auto rowType = ROW({"d", "e"}, {BIGINT(), columnType}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); 
std::unordered_map> assignments; assignments["d"] = std::make_shared( @@ -622,7 +622,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterRootFieldMissing) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); auto rows = result->as(); ASSERT_TRUE(rows); @@ -646,7 +646,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterStruct) { auto rowType = ROW({"c", "d"}, {structType, BIGINT()}); auto vectors = makeVectors(3, 10, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); enum { kNoOutput = 0, kWholeColumn = 1, kSubfieldOnly = 2 }; for (int outputColumn = kNoOutput; outputColumn <= kSubfieldOnly; ++outputColumn) { @@ -686,7 +686,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterStruct) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); int expectedSize = 0; std::vector> ranges; @@ -732,7 +732,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterMap) { {"a", "b"}, {makeFlatVector(10, folly::identity), mapVector}); auto rowType = asRowType(vector->type()); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {vector}); + writeToFile(filePath->path(), {vector}); enum { kNoOutput = 0, kWholeColumn = 1, kSubfieldOnly = 2 }; for (int outputColumn = kNoOutput; outputColumn <= kSubfieldOnly; ++outputColumn) { @@ -772,7 +772,7 @@ TEST_F(TableScanTest, subfieldPruningRemainingFilterMap) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = 
AssertQueryBuilder(op).split(split).copyResults(pool()); auto expected = vector; auto a = vector->as()->childAt(0); @@ -834,7 +834,7 @@ TEST_F(TableScanTest, subfieldPruningMapType) { } auto rowType = asRowType(vectors[0]->type()); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); std::vector requiredSubfields; requiredSubfields.emplace_back("c[0]"); requiredSubfields.emplace_back("c[2]"); @@ -853,7 +853,7 @@ TEST_F(TableScanTest, subfieldPruningMapType) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), vectors.size() * kSize); auto rows = result->as(); @@ -909,7 +909,7 @@ TEST_F(TableScanTest, subfieldPruningArrayType) { } auto rowType = asRowType(vectors[0]->type()); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); std::vector requiredSubfields; requiredSubfields.emplace_back("c[3]"); std::unordered_map> @@ -926,7 +926,7 @@ TEST_F(TableScanTest, subfieldPruningArrayType) { .assignments(assignments) .endTableScan() .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), vectors.size() * kSize); auto rows = result->as(); @@ -974,7 +974,7 @@ TEST_F(TableScanTest, missingColumns) { size, [&](auto row) { return row * 0.1 + i * size; }), })); } - writeToFile(filePaths[i]->path, {rows.back()}); + writeToFile(filePaths[i]->path(), {rows.back()}); } // For duckdb ensure we have nulls for the missing column. 
@@ -1104,7 +1104,7 @@ TEST_F(TableScanTest, constDictLazy) { [](auto row) { return row * 0.1; })}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); createDuckDbTable({rowVector}); @@ -1147,14 +1147,14 @@ TEST_F(TableScanTest, constDictLazy) { TEST_F(TableScanTest, count) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); auto cursor = TaskCursor::create(params); - cursor->task()->addSplit("0", makeHiveSplit(filePath->path)); + cursor->task()->addSplit("0", makeHiveSplit(filePath->path())); cursor->task()->noMoreSplits("0"); int32_t numRead = 0; @@ -1186,7 +1186,7 @@ TEST_F(TableScanTest, batchSize) { auto vector = makeVectors(1, numRows, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vector); + writeToFile(filePath->path(), vector); createDuckDbTable(vector); @@ -1246,7 +1246,7 @@ TEST_F(TableScanTest, batchSize) { TEST_F(TableScanTest, sequentialSplitNoDoubleRead) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); @@ -1255,10 +1255,10 @@ TEST_F(TableScanTest, sequentialSplitNoDoubleRead) { // Add the same split with the same sequence id twice. The second should be // ignored. 
EXPECT_TRUE(cursor->task()->addSplitWithSequence( - "0", makeHiveSplit(filePath->path), 0)); + "0", makeHiveSplit(filePath->path()), 0)); cursor->task()->setMaxSplitSequenceId("0", 0); EXPECT_FALSE(cursor->task()->addSplitWithSequence( - "0", makeHiveSplit(filePath->path), 0)); + "0", makeHiveSplit(filePath->path()), 0)); cursor->task()->noMoreSplits("0"); int32_t numRead = 0; @@ -1276,7 +1276,7 @@ TEST_F(TableScanTest, sequentialSplitNoDoubleRead) { TEST_F(TableScanTest, outOfOrderSplits) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); @@ -1285,9 +1285,9 @@ TEST_F(TableScanTest, outOfOrderSplits) { // Add splits out of order (1, 0). Both of them should be processed. EXPECT_TRUE(cursor->task()->addSplitWithSequence( - "0", makeHiveSplit(filePath->path), 1)); + "0", makeHiveSplit(filePath->path()), 1)); EXPECT_TRUE(cursor->task()->addSplitWithSequence( - "0", makeHiveSplit(filePath->path), 0)); + "0", makeHiveSplit(filePath->path()), 0)); cursor->task()->setMaxSplitSequenceId("0", 1); cursor->task()->noMoreSplits("0"); @@ -1306,7 +1306,7 @@ TEST_F(TableScanTest, outOfOrderSplits) { TEST_F(TableScanTest, splitDoubleRead) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); CursorParameters params; params.planNode = tableScanNode(ROW({}, {})); @@ -1315,8 +1315,8 @@ TEST_F(TableScanTest, splitDoubleRead) { auto cursor = TaskCursor::create(params); // Add the same split twice - we should read twice the size. 
- cursor->task()->addSplit("0", makeHiveSplit(filePath->path)); - cursor->task()->addSplit("0", makeHiveSplit(filePath->path)); + cursor->task()->addSplit("0", makeHiveSplit(filePath->path())); + cursor->task()->addSplit("0", makeHiveSplit(filePath->path())); cursor->task()->noMoreSplits("0"); int32_t numRead = 0; @@ -1337,7 +1337,7 @@ TEST_F(TableScanTest, multipleSplits) { auto filePaths = makeFilePaths(100); auto vectors = makeVectors(100, 100); for (int32_t i = 0; i < vectors.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -1356,7 +1356,7 @@ TEST_F(TableScanTest, waitForSplit) { auto filePaths = makeFilePaths(10); auto vectors = makeVectors(10, 1'000); for (int32_t i = 0; i < vectors.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -1365,7 +1365,7 @@ TEST_F(TableScanTest, waitForSplit) { tableScanNode(), [&](Task* task) { if (fileIndex < filePaths.size()) { - task->addSplit("0", makeHiveSplit(filePaths[fileIndex++]->path)); + task->addSplit("0", makeHiveSplit(filePaths[fileIndex++]->path())); } if (fileIndex == filePaths.size()) { task->noMoreSplits("0"); @@ -1381,7 +1381,7 @@ DEBUG_ONLY_TEST_F(TableScanTest, tableScanSplitsAndWeights) { const auto filePaths = makeFilePaths(numSplits); auto vectors = makeVectors(numSplits, 100); for (auto i = 0; i < numSplits; i++) { - writeToFile(filePaths.at(i)->path, vectors.at(i)); + writeToFile(filePaths.at(i)->path(), vectors.at(i)); } // Set the table scan operators wait twice: @@ -1462,7 +1462,7 @@ DEBUG_ONLY_TEST_F(TableScanTest, tableScanSplitsAndWeights) { for (auto fileIndex = 0; fileIndex < numSplits; ++fileIndex) { const int64_t splitWeight = fileIndex * 10 + 1; totalSplitWeights += splitWeight; - auto split = makeHiveSplit(filePaths.at(fileIndex)->path, splitWeight); + auto split = 
makeHiveSplit(filePaths.at(fileIndex)->path(), splitWeight); task->addSplit(scanNodeId, std::move(split)); } task->noMoreSplits(scanNodeId); @@ -1524,18 +1524,19 @@ DEBUG_ONLY_TEST_F(TableScanTest, tableScanSplitsAndWeights) { TEST_F(TableScanTest, splitOffsetAndLength) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); assertQuery( tableScanNode(), makeHiveConnectorSplit( - filePath->path, 0, fs::file_size(filePath->path) / 2), + filePath->path(), 0, fs::file_size(filePath->path()) / 2), "SELECT * FROM tmp"); assertQuery( tableScanNode(), - makeHiveConnectorSplit(filePath->path, fs::file_size(filePath->path) / 2), + makeHiveConnectorSplit( + filePath->path(), fs::file_size(filePath->path()) / 2), "SELECT * FROM tmp LIMIT 0"); } @@ -1579,7 +1580,7 @@ TEST_F(TableScanTest, emptyFile) { try { assertQuery( tableScanNode(), - makeHiveConnectorSplit(filePath->path), + makeHiveConnectorSplit(filePath->path()), "SELECT * FROM tmp"); ASSERT_FALSE(true) << "Function should throw."; } catch (const VeloxException& e) { @@ -1592,8 +1593,8 @@ TEST_F(TableScanTest, preloadEmptySplit) { auto emptyVector = makeVectors(1, 0, rowType); auto vector = makeVectors(1, 1'000, rowType); auto filePaths = makeFilePaths(2); - writeToFile(filePaths[0]->path, vector[0]); - writeToFile(filePaths[1]->path, emptyVector[0]); + writeToFile(filePaths[0]->path(), vector[0]); + writeToFile(filePaths[1]->path(), emptyVector[0]); createDuckDbTable(vector); auto op = tableScanNode(rowType); assertQuery(op, filePaths, "SELECT * FROM tmp", 1); @@ -1603,82 +1604,82 @@ TEST_F(TableScanTest, partitionedTableVarcharKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - 
testPartitionedTable(filePath->path, VARCHAR(), "2020-11-01"); + testPartitionedTable(filePath->path(), VARCHAR(), "2020-11-01"); } TEST_F(TableScanTest, partitionedTableBigIntKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, BIGINT(), "123456789123456789"); + testPartitionedTable(filePath->path(), BIGINT(), "123456789123456789"); } TEST_F(TableScanTest, partitionedTableIntegerKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, INTEGER(), "123456789"); + testPartitionedTable(filePath->path(), INTEGER(), "123456789"); } TEST_F(TableScanTest, partitionedTableSmallIntKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, SMALLINT(), "1"); + testPartitionedTable(filePath->path(), SMALLINT(), "1"); } TEST_F(TableScanTest, partitionedTableTinyIntKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, TINYINT(), "1"); + testPartitionedTable(filePath->path(), TINYINT(), "1"); } TEST_F(TableScanTest, partitionedTableBooleanKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = 
makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, BOOLEAN(), "0"); + testPartitionedTable(filePath->path(), BOOLEAN(), "0"); } TEST_F(TableScanTest, partitionedTableRealKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, REAL(), "3.5"); + testPartitionedTable(filePath->path(), REAL(), "3.5"); } TEST_F(TableScanTest, partitionedTableDoubleKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, DOUBLE(), "3.5"); + testPartitionedTable(filePath->path(), DOUBLE(), "3.5"); } TEST_F(TableScanTest, partitionedTableDateKey) { auto rowType = ROW({"c0", "c1"}, {BIGINT(), DOUBLE()}); auto vectors = makeVectors(10, 1'000, rowType); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); - testPartitionedTable(filePath->path, DATE(), "2023-10-27"); + testPartitionedTable(filePath->path(), DATE(), "2023-10-27"); } std::vector toStringViews(const std::vector& values) { @@ -1699,7 +1700,7 @@ TEST_F(TableScanTest, statsBasedSkippingBool) { makeFlatVector( size, [](auto row) { return (row / 10'000) % 2 == 0; })}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); auto assertQuery = [&](const std::string& filter) { @@ -1725,7 +1726,7 @@ TEST_F(TableScanTest, 
statsBasedSkippingDouble) { auto rowVector = makeRowVector({makeFlatVector( size, [](auto row) { return (double)(row + 0.0001); })}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // c0 <= -1.05 -> whole file should be skipped based on stats @@ -1765,7 +1766,7 @@ TEST_F(TableScanTest, statsBasedSkippingFloat) { auto rowVector = makeRowVector({makeFlatVector( size, [](auto row) { return (float)(row + 0.0001); })}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // c0 <= -1.05 -> whole file should be skipped based on stats @@ -1828,7 +1829,7 @@ TEST_F(TableScanTest, statsBasedSkipping) { } })}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // c0 <= -1 -> whole file should be skipped based on stats @@ -1954,7 +1955,7 @@ TEST_F(TableScanTest, statsBasedSkippingConstants) { return fruitViews[row / 10'000]; })}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); auto assertQuery = [&](const std::string& filter) { @@ -2003,7 +2004,7 @@ TEST_F(TableScanTest, statsBasedSkippingNulls) { [](auto row) { return row >= 11'111; }); auto rowVector = makeRowVector({noNulls, someNulls}); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // c0 IS NULL - whole file should be skipped based on stats @@ -2067,7 +2068,7 @@ TEST_F(TableScanTest, statsBasedSkippingWithoutDecompression) { auto rowVector = makeRowVector({makeFlatVector(strings)}); auto filePaths = makeFilePaths(1); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // Skip 1st row group. 
@@ -2112,7 +2113,7 @@ TEST_F(TableScanTest, filterBasedSkippingWithoutDecompression) { auto rowType = asRowType(rowVector->type()); auto filePaths = makeFilePaths(1); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); auto assertQuery = [&](const std::string& remainingFilter) { @@ -2161,7 +2162,7 @@ TEST_F(TableScanTest, statsBasedSkippingNumerics) { size, [](auto row) { return row % 11 == 0; }, nullEvery(23))}); auto filePaths = makeFilePaths(1); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({rowVector}); // Skip whole file. @@ -2224,7 +2225,7 @@ TEST_F(TableScanTest, statsBasedSkippingComplexTypes) { nullEvery(11))}); auto filePaths = makeFilePaths(1); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); // TODO Figure out how to create DuckDB tables with columns of complex types // For now, using 1st element of the array and map element for key zero. 
createDuckDbTable({makeRowVector( @@ -2296,7 +2297,7 @@ TEST_F(TableScanTest, statsBasedAndRegularSkippingComplexTypes) { }); auto filePaths = makeFilePaths(1); - writeToFile(filePaths[0]->path, rowVector); + writeToFile(filePaths[0]->path(), rowVector); createDuckDbTable({makeRowVector({ makeFlatVector(size, [](auto row) { return row; }), @@ -2332,7 +2333,7 @@ TEST_F(TableScanTest, filterPushdown) { auto filePaths = makeFilePaths(10); auto vectors = makeVectors(10, 1'000, rowType); for (int32_t i = 0; i < vectors.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -2412,7 +2413,7 @@ TEST_F(TableScanTest, path) { auto rowType = ROW({"a"}, {BIGINT()}); auto filePath = makeFilePaths(1)[0]; auto vector = makeVectors(1, 1'000, rowType)[0]; - writeToFile(filePath->path, vector); + writeToFile(filePath->path(), vector); createDuckDbTable({vector}); static const char* kPath = "$path"; @@ -2420,7 +2421,7 @@ TEST_F(TableScanTest, path) { auto assignments = allRegularColumns(rowType); assignments[kPath] = synthesizedColumn(kPath, VARCHAR()); - auto pathValue = fmt::format("file:{}", filePath->path); + auto pathValue = fmt::format("file:{}", filePath->path()); auto typeWithPath = ROW({kPath, "a"}, {VARCHAR(), BIGINT()}); auto op = PlanBuilder() .startTableScan() @@ -2459,7 +2460,7 @@ TEST_F(TableScanTest, fileSizeAndModifiedTime) { auto rowType = ROW({"a"}, {BIGINT()}); auto filePath = makeFilePaths(1)[0]; auto vector = makeVectors(1, 10, rowType)[0]; - writeToFile(filePath->path, vector); + writeToFile(filePath->path(), vector); createDuckDbTable({vector}); static const char* kSize = "$file_size"; @@ -2545,10 +2546,10 @@ TEST_F(TableScanTest, bucket) { {makeFlatVector(size, [&](auto /*row*/) { return bucket; }), makeFlatVector( size, [&](auto row) { return bucket + row; })}); - writeToFile(filePaths[i]->path, rowVector); + writeToFile(filePaths[i]->path(), rowVector); 
rowVectors.emplace_back(rowVector); - splits.emplace_back(HiveConnectorSplitBuilder(filePaths[i]->path) + splits.emplace_back(HiveConnectorSplitBuilder(filePaths[i]->path()) .tableBucketNumber(bucket) .build()); } @@ -2574,7 +2575,7 @@ TEST_F(TableScanTest, bucket) { for (int i = 0; i < buckets.size(); ++i) { int bucketValue = buckets[i]; - auto hsplit = HiveConnectorSplitBuilder(filePaths[i]->path) + auto hsplit = HiveConnectorSplitBuilder(filePaths[i]->path()) .tableBucketNumber(bucketValue) .build(); @@ -2594,7 +2595,7 @@ TEST_F(TableScanTest, bucket) { // Filter on bucket column, but don't project it out auto rowTypes = ROW({"c0", "c1"}, {INTEGER(), BIGINT()}); - hsplit = HiveConnectorSplitBuilder(filePaths[i]->path) + hsplit = HiveConnectorSplitBuilder(filePaths[i]->path()) .tableBucketNumber(bucketValue) .build(); op = PlanBuilder() @@ -2628,7 +2629,7 @@ TEST_F(TableScanTest, integerNotEqualFilter) { size, [](auto row) { return row % 210; }, nullEvery(11))}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, rowVector); + writeToFile(filePath->path(), rowVector); createDuckDbTable({rowVector}); assertQuery( @@ -2663,7 +2664,7 @@ TEST_F(TableScanTest, integerNotEqualFilter) { TEST_F(TableScanTest, floatingPointNotEqualFilter) { auto vectors = makeVectors(1, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); auto outputType = ROW({"c4"}, {DOUBLE()}); @@ -2701,7 +2702,7 @@ TEST_F(TableScanTest, stringNotEqualFilter) { })}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, rowVector); + writeToFile(filePath->path(), rowVector); createDuckDbTable({rowVector}); assertQuery( @@ -2732,7 +2733,7 @@ TEST_F(TableScanTest, arrayIsNullFilter) { [](vector_size_t, vector_size_t j) { return j; }, isNullAt); vectors[i] = makeRowVector({"c0"}, {c0}); - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), 
vectors[i]); } createDuckDbTable(vectors); auto rowType = asRowType(vectors[0]->type()); @@ -2765,7 +2766,7 @@ TEST_F(TableScanTest, mapIsNullFilter) { [](vector_size_t j) { return 2 * j; }, isNullAt); vectors[i] = makeRowVector({"c0"}, {c0}); - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); auto rowType = asRowType(vectors[0]->type()); @@ -2788,7 +2789,7 @@ TEST_F(TableScanTest, remainingFilter) { auto filePaths = makeFilePaths(10); auto vectors = makeVectors(10, 1'000, rowType); for (int32_t i = 0; i < vectors.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -2885,7 +2886,7 @@ TEST_F(TableScanTest, remainingFilterSkippedStrides) { nullptr, c->size(), std::vector({c, c})); - writeToFile(filePaths[j]->path, vectors[j]); + writeToFile(filePaths[j]->path(), vectors[j]); } createDuckDbTable(vectors); core::PlanNodeId tableScanNodeId; @@ -2907,11 +2908,11 @@ TEST_F(TableScanTest, skipStridesForParentNulls) { auto a = makeRowVector({"b"}, {b}, [](auto i) { return i % 2 == 0; }); auto vector = makeRowVector({"a"}, {a}); auto file = TempFilePath::create(); - writeToFile(file->path, {vector}); + writeToFile(file->path(), {vector}); auto plan = PlanBuilder() .tableScan(asRowType(vector->type()), {"a.b IS NULL"}) .planNode(); - auto split = makeHiveConnectorSplit(file->path); + auto split = makeHiveConnectorSplit(file->path()); auto result = AssertQueryBuilder(plan).split(split).copyResults(pool()); ASSERT_EQ(result->size(), 5000); } @@ -2934,10 +2935,10 @@ TEST_F(TableScanTest, randomSample) { for (int j = 0; j < 100; ++j) { vectors.push_back(rows); } - writeToFile(file->path, vectors, writeConfig); + writeToFile(file->path(), vectors, writeConfig); numTotalRows += rows->size() * vectors.size(); } else { - writeToFile(file->path, {rows}, writeConfig); + writeToFile(file->path(), {rows}, writeConfig); 
numTotalRows += rows->size(); } files.push_back(file); @@ -2947,7 +2948,7 @@ TEST_F(TableScanTest, randomSample) { PlanBuilder().tableScan(rowType, {}, "rand() < 0.01").planNode(); auto cursor = TaskCursor::create(params); for (auto& file : files) { - cursor->task()->addSplit("0", makeHiveSplit(file->path)); + cursor->task()->addSplit("0", makeHiveSplit(file->path())); } cursor->task()->noMoreSplits("0"); int numRows = 0; @@ -2997,7 +2998,7 @@ TEST_F(TableScanTest, remainingFilterConstantResult) { }; auto filePath = TempFilePath::create(); - writeToFile(filePath->path, data); + writeToFile(filePath->path(), data); createDuckDbTable(data); auto rowType = asRowType(data[0]->type()); @@ -3016,7 +3017,7 @@ TEST_F(TableScanTest, remainingFilterConstantResult) { TEST_F(TableScanTest, aggregationPushdown) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); // Get the number of values processed via aggregation pushdown into scan. @@ -3135,7 +3136,7 @@ TEST_F(TableScanTest, aggregationPushdown) { TEST_F(TableScanTest, bitwiseAggregationPushdown) { auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); auto op = PlanBuilder() @@ -3181,7 +3182,7 @@ TEST_F(TableScanTest, structLazy) { [](auto row) { return row * 0.1; })})}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); // Exclude struct columns as DuckDB doesn't support complex types yet. 
createDuckDbTable( @@ -3203,8 +3204,8 @@ TEST_F(TableScanTest, interleaveLazyEager) { makeRowVector({makeFlatVector(kSize, folly::identity)})}); auto rows = makeRowVector({column}); auto rowType = asRowType(rows->type()); - auto lazyFile = TempFilePath::create(); - writeToFile(lazyFile->path, {rows}); + auto Lazyfile = TempFilePath::create(); + writeToFile(Lazyfile->path(), {rows}); auto rowsWithNulls = makeVectors(1, kSize, rowType); int numNonNull = 0; for (int i = 0; i < kSize; ++i) { @@ -3216,7 +3217,7 @@ TEST_F(TableScanTest, interleaveLazyEager) { numNonNull += !c0c0->isNullAt(i); } auto eagerFile = TempFilePath::create(); - writeToFile(eagerFile->path, rowsWithNulls); + writeToFile(eagerFile->path(), rowsWithNulls); ColumnHandleMap assignments = {{"c0", regularColumn("c0", column->type())}}; CursorParameters params; @@ -3228,9 +3229,9 @@ TEST_F(TableScanTest, interleaveLazyEager) { .endTableScan() .planNode(); auto cursor = TaskCursor::create(params); - cursor->task()->addSplit("0", makeHiveSplit(lazyFile->path)); - cursor->task()->addSplit("0", makeHiveSplit(eagerFile->path)); - cursor->task()->addSplit("0", makeHiveSplit(lazyFile->path)); + cursor->task()->addSplit("0", makeHiveSplit(Lazyfile->path())); + cursor->task()->addSplit("0", makeHiveSplit(eagerFile->path())); + cursor->task()->addSplit("0", makeHiveSplit(Lazyfile->path())); cursor->task()->noMoreSplits("0"); for (int i = 0; i < 3; ++i) { ASSERT_TRUE(cursor->moveNext()); @@ -3247,7 +3248,7 @@ TEST_F(TableScanTest, lazyVectorAccessTwiceWithDifferentRows) { }); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {data}); + writeToFile(filePath->path(), {data}); createDuckDbTable({data}); auto plan = @@ -3293,7 +3294,7 @@ TEST_F(TableScanTest, structInArrayOrMap) { innerRow)}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); // Exclude struct columns as DuckDB doesn't support complex types yet. 
createDuckDbTable( @@ -3313,7 +3314,7 @@ TEST_F(TableScanTest, addSplitsToFailedTask) { {makeFlatVector(12'000, [](auto row) { return row % 5; })}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {data}); + writeToFile(filePath->path(), {data}); core::PlanNodeId scanNodeId; exec::test::CursorParameters params; @@ -3324,15 +3325,15 @@ TEST_F(TableScanTest, addSplitsToFailedTask) { .planNode(); auto cursor = exec::test::TaskCursor::create(params); - cursor->task()->addSplit(scanNodeId, makeHiveSplit(filePath->path)); + cursor->task()->addSplit(scanNodeId, makeHiveSplit(filePath->path())); EXPECT_THROW(while (cursor->moveNext()){}, VeloxUserError); // Verify that splits can be added to the task ever after task has failed. // In this case these splits will be ignored. - cursor->task()->addSplit(scanNodeId, makeHiveSplit(filePath->path)); + cursor->task()->addSplit(scanNodeId, makeHiveSplit(filePath->path())); cursor->task()->addSplitWithSequence( - scanNodeId, makeHiveSplit(filePath->path), 20L); + scanNodeId, makeHiveSplit(filePath->path()), 20L); cursor->task()->setMaxSplitSequenceId(scanNodeId, 20L); } @@ -3341,7 +3342,7 @@ TEST_F(TableScanTest, errorInLoadLazy) { VELOX_CHECK_NOT_NULL(cache); auto vectors = makeVectors(10, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); std::atomic counter = 0; cache->setVerifyHook([&](const cache::AsyncDataCacheEntry&) { @@ -3368,7 +3369,7 @@ TEST_F(TableScanTest, errorInLoadLazy) { assertQuery(planNode, {filePath}, ""); FAIL() << "Excepted exception"; } catch (VeloxException& ex) { - EXPECT_TRUE(ex.context().find(filePath->path, 0) != std::string::npos) + EXPECT_TRUE(ex.context().find(filePath->path(), 0) != std::string::npos) << ex.context(); } } @@ -3381,7 +3382,7 @@ TEST_F(TableScanTest, parallelPrepare) { {makeFlatVector(10, [](auto row) { return row % 5; })}); auto filePath = TempFilePath::create(); - 
writeToFile(filePath->path, {data}); + writeToFile(filePath->path(), {data}); auto plan = exec::test::PlanBuilder(pool_.get()) .tableScan(ROW({"c0"}, {INTEGER()}), {}, kLargeRemainingFilter) @@ -3390,7 +3391,7 @@ TEST_F(TableScanTest, parallelPrepare) { std::vector splits; for (auto i = 0; i < kNumParallel; ++i) { - splits.push_back(makeHiveSplit(filePath->path)); + splits.push_back(makeHiveSplit(filePath->path())); } AssertQueryBuilder(plan) .config( @@ -3418,7 +3419,7 @@ TEST_F(TableScanTest, dictionaryMemo) { auto rows = makeRowVector({"a", "b"}, {dict, makeRowVector({"c"}, {dict})}); auto rowType = asRowType(rows->type()); auto file = TempFilePath::create(); - writeToFile(file->path, {rows}); + writeToFile(file->path(), {rows}); auto plan = PlanBuilder() .tableScan(rowType, {}, "a like '%m'") .project({"length(b.c)"}) @@ -3436,7 +3437,7 @@ TEST_F(TableScanTest, dictionaryMemo) { })); #endif auto result = AssertQueryBuilder(plan) - .splits({makeHiveSplit(file->path)}) + .splits({makeHiveSplit(file->path())}) .copyResults(pool_.get()); ASSERT_EQ(result->size(), 50); #ifndef NDEBUG @@ -3449,12 +3450,12 @@ TEST_F(TableScanTest, reuseRowVector) { auto data = makeRowVector({iota, makeRowVector({iota})}); auto rowType = asRowType(data->type()); auto file = TempFilePath::create(); - writeToFile(file->path, {data}); + writeToFile(file->path(), {data}); auto plan = PlanBuilder() .tableScan(rowType, {}, "c0 < 5") .project({"c1.c0"}) .planNode(); - auto split = HiveConnectorSplitBuilder(file->path).build(); + auto split = HiveConnectorSplitBuilder(file->path()).build(); auto expected = makeRowVector( {makeFlatVector(10, [](auto i) { return i % 5; })}); AssertQueryBuilder(plan).splits({split, split}).assertResults(expected); @@ -3466,12 +3467,12 @@ TEST_F(TableScanTest, readMissingFields) { auto iota = makeFlatVector(size, folly::identity); auto rowVector = makeRowVector({makeRowVector({iota, iota}), iota}); auto filePath = TempFilePath::create(); - 
writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); // Create a row type with additional fields not present in the file. auto rowType = makeRowType( {makeRowType({BIGINT(), BIGINT(), BIGINT(), BIGINT()}), BIGINT()}); auto op = PlanBuilder().tableScan(rowType).planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto nulls = makeNullConstant(TypeKind::BIGINT, size); auto expected = makeRowVector({makeRowVector({iota, iota, nulls, nulls}), iota}); @@ -3483,10 +3484,10 @@ TEST_F(TableScanTest, readExtraFields) { auto iota = makeFlatVector(size, folly::identity); auto rowVector = makeRowVector({makeRowVector({iota, iota}), iota}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); auto rowType = makeRowType({makeRowType({BIGINT()}), BIGINT()}); auto op = PlanBuilder().tableScan(rowType).planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto nulls = makeNullConstant(TypeKind::BIGINT, size); auto expected = makeRowVector({makeRowVector({iota}), iota}); AssertQueryBuilder(op).split(split).assertResults(expected); @@ -3502,7 +3503,7 @@ TEST_F(TableScanTest, readMissingFieldsFilesVary) { })}); auto missingFieldsFilePath = TempFilePath::create(); - writeToFile(missingFieldsFilePath->path, {rowVectorMissingFields}); + writeToFile(missingFieldsFilePath->path(), {rowVectorMissingFields}); auto rowVectorWithAllFields = makeRowVector({makeRowVector({ makeFlatVector(size, [](auto row) { return row; }), @@ -3512,19 +3513,20 @@ TEST_F(TableScanTest, readMissingFieldsFilesVary) { })}); auto allFieldsFilePath = TempFilePath::create(); - writeToFile(allFieldsFilePath->path, {rowVectorWithAllFields}); + writeToFile(allFieldsFilePath->path(), {rowVectorWithAllFields}); auto op = PlanBuilder() 
.tableScan(asRowType(rowVectorWithAllFields->type())) .project({"c0.c0", "c0.c1", "c0.c2", "c0.c3"}) .planNode(); - auto result = AssertQueryBuilder(op) - .split(makeHiveConnectorSplit(missingFieldsFilePath->path)) - .split(makeHiveConnectorSplit(allFieldsFilePath->path)) - .split(makeHiveConnectorSplit(missingFieldsFilePath->path)) - .split(makeHiveConnectorSplit(allFieldsFilePath->path)) - .copyResults(pool()); + auto result = + AssertQueryBuilder(op) + .split(makeHiveConnectorSplit(missingFieldsFilePath->path())) + .split(makeHiveConnectorSplit(allFieldsFilePath->path())) + .split(makeHiveConnectorSplit(missingFieldsFilePath->path())) + .split(makeHiveConnectorSplit(allFieldsFilePath->path())) + .copyResults(pool()); ASSERT_EQ(result->size(), size * 4); auto rows = result->as(); @@ -3583,7 +3585,7 @@ TEST_F(TableScanTest, readMissingFieldsInArray) { auto arrayVector = makeArrayVector(offsets, rowVector); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {makeRowVector({arrayVector})}); + writeToFile(filePath->path(), {makeRowVector({arrayVector})}); // Create a row type with additional fields not present in the file. 
auto rowType = makeRowType( {ARRAY(makeRowType({BIGINT(), BIGINT(), BIGINT(), BIGINT()}))}); @@ -3594,7 +3596,7 @@ TEST_F(TableScanTest, readMissingFieldsInArray) { .project({"c0[1].c0", "c0[2].c1", "c0[3].c2", "c0[4].c3"}) .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), size); @@ -3640,7 +3642,7 @@ TEST_F(TableScanTest, readMissingFieldsInMap) { auto arrayVector = makeArrayVector(offsets, valuesVector); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {makeRowVector({mapVector, arrayVector})}); + writeToFile(filePath->path(), {makeRowVector({mapVector, arrayVector})}); // Create a row type with additional fields in the structure not present in // the file ('c' and 'd') and with all columns having different names than in @@ -3666,7 +3668,7 @@ TEST_F(TableScanTest, readMissingFieldsInMap) { "a2[4].d"}) .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), size); @@ -3785,7 +3787,7 @@ TEST_F(TableScanTest, tableScanProjections) { }); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); auto testQueryRow = [&](const std::vector& projections) { std::vector cols; @@ -3796,7 +3798,7 @@ TEST_F(TableScanTest, tableScanProjections) { std::move(cols), std::vector(projections.size(), BIGINT())); auto op = PlanBuilder().tableScan(scanRowType).planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), size); @@ -3857,7 +3859,7 @@ TEST_F(TableScanTest, readMissingFieldsWithMoreColumns) { 
})}); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {rowVector}); + writeToFile(filePath->path(), {rowVector}); // Create a row type with additional fields in the structure not present in // the file ('c' and 'd') and with all columns having different names than in @@ -3877,7 +3879,7 @@ TEST_F(TableScanTest, readMissingFieldsWithMoreColumns) { .project({"st1.a", "st1.b", "st1.c", "st1.d", "i2", "d3", "b4", "c4"}) .planNode(); - auto split = makeHiveConnectorSplit(filePath->path); + auto split = makeHiveConnectorSplit(filePath->path()); auto result = AssertQueryBuilder(op).split(split).copyResults(pool()); ASSERT_EQ(result->size(), size); @@ -3992,14 +3994,14 @@ TEST_F(TableScanTest, readMissingFieldsWithMoreColumns) { TEST_F(TableScanTest, varbinaryPartitionKey) { auto vectors = makeVectors(1, 1'000); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); createDuckDbTable(vectors); ColumnHandleMap assignments = { {"a", regularColumn("c0", BIGINT())}, {"ds_alias", partitionKey("ds", VARBINARY())}}; - auto split = HiveConnectorSplitBuilder(filePath->path) + auto split = HiveConnectorSplitBuilder(filePath->path()) .partitionKey("ds", "2021-12-02") .build(); @@ -4029,12 +4031,13 @@ TEST_F(TableScanTest, timestampPartitionKey) { }); auto vectors = makeVectors(1, 1); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, vectors); + writeToFile(filePath->path(), vectors); ColumnHandleMap assignments = {{"t", partitionKey("t", TIMESTAMP())}}; std::vector> splits; for (auto& t : inputs) { - splits.push_back( - HiveConnectorSplitBuilder(filePath->path).partitionKey("t", t).build()); + splits.push_back(HiveConnectorSplitBuilder(filePath->path()) + .partitionKey("t", t) + .build()); } auto plan = PlanBuilder() .startTableScan() diff --git a/velox/exec/tests/TableWriteTest.cpp b/velox/exec/tests/TableWriteTest.cpp index 5e688f793fda..73aa4cd169b3 100644 --- 
a/velox/exec/tests/TableWriteTest.cpp +++ b/velox/exec/tests/TableWriteTest.cpp @@ -255,7 +255,7 @@ class TableWriteTest : public HiveConnectorTestBase { bool spillEnabled = false) { std::vector splits; for (const auto& filePath : filePaths) { - splits.push_back(exec::Split(makeHiveConnectorSplit(filePath->path))); + splits.push_back(exec::Split(makeHiveConnectorSplit(filePath->path()))); } if (!spillEnabled) { return AssertQueryBuilder(plan, duckDbQueryRunner_) @@ -274,7 +274,7 @@ class TableWriteTest : public HiveConnectorTestBase { const auto spillDirectory = exec::test::TempDirectoryPath::create(); TestScopedSpillInjection scopedSpillInjection(100); return AssertQueryBuilder(plan, duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .maxDrivers( 2 * std::max(kNumTableWriterCount, kNumPartitionedTableWriterCount)) .config( @@ -312,7 +312,7 @@ class TableWriteTest : public HiveConnectorTestBase { const auto spillDirectory = exec::test::TempDirectoryPath::create(); TestScopedSpillInjection scopedSpillInjection(100); return AssertQueryBuilder(plan, duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .maxDrivers( 2 * std::max(kNumTableWriterCount, kNumPartitionedTableWriterCount)) .config( @@ -345,7 +345,7 @@ class TableWriteTest : public HiveConnectorTestBase { const auto spillDirectory = exec::test::TempDirectoryPath::create(); TestScopedSpillInjection scopedSpillInjection(100); return AssertQueryBuilder(plan, duckDbQueryRunner_) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .maxDrivers( 2 * std::max(kNumTableWriterCount, kNumPartitionedTableWriterCount)) .config( @@ -403,7 +403,7 @@ class TableWriteTest : public HiveConnectorTestBase { std::vector> makeHiveConnectorSplits( const std::shared_ptr& directoryPath) { - return makeHiveConnectorSplits(directoryPath->path); + return makeHiveConnectorSplits(directoryPath->path()); } 
std::vector> @@ -1052,18 +1052,18 @@ TEST_F(BasicTableWriteTest, roundTrip) { }); auto sourceFilePath = TempFilePath::create(); - writeToFile(sourceFilePath->path, data); + writeToFile(sourceFilePath->path(), data); auto targetDirectoryPath = TempDirectoryPath::create(); auto rowType = asRowType(data->type()); auto plan = PlanBuilder() .tableScan(rowType) - .tableWrite(targetDirectoryPath->path) + .tableWrite(targetDirectoryPath->path()) .planNode(); auto results = AssertQueryBuilder(plan) - .split(makeHiveConnectorSplit(sourceFilePath->path)) + .split(makeHiveConnectorSplit(sourceFilePath->path())) .copyResults(pool()); ASSERT_EQ(2, results->size()); @@ -1093,7 +1093,7 @@ TEST_F(BasicTableWriteTest, roundTrip) { auto copy = AssertQueryBuilder(plan) .split(makeHiveConnectorSplit(fmt::format( - "{}/{}", targetDirectoryPath->path, writeFileName))) + "{}/{}", targetDirectoryPath->path(), writeFileName))) .copyResults(pool()); assertEqualResults({data}, {copy}); } @@ -1478,7 +1478,7 @@ TEST_P(AllTableWriterTest, scanFilterProjectWrite) { auto filePaths = makeFilePaths(5); auto vectors = makeVectors(filePaths.size(), 500); for (int i = 0; i < filePaths.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -1498,7 +1498,7 @@ TEST_P(AllTableWriterTest, scanFilterProjectWrite) { auto plan = createInsertPlan( project, outputType, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1518,13 +1518,13 @@ TEST_P(AllTableWriterTest, scanFilterProjectWrite) { PlanBuilder().tableScan(newOutputType).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT c3, c5, c2 + c3, substr(c5, 1, 1) FROM tmp WHERE c2 <> 0"); - verifyTableWriterOutput(outputDirectory->path, newOutputType, false); + verifyTableWriterOutput(outputDirectory->path(), newOutputType, false); } else { assertQuery( PlanBuilder().tableScan(outputType).planNode(), 
makeHiveConnectorSplits(outputDirectory), "SELECT c0, c1, c3, c5, c2 + c3, substr(c5, 1, 1) FROM tmp WHERE c2 <> 0"); - verifyTableWriterOutput(outputDirectory->path, outputType, false); + verifyTableWriterOutput(outputDirectory->path(), outputType, false); } } @@ -1532,7 +1532,7 @@ TEST_P(AllTableWriterTest, renameAndReorderColumns) { auto filePaths = makeFilePaths(5); auto vectors = makeVectors(filePaths.size(), 500); for (int i = 0; i < filePaths.size(); ++i) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -1564,7 +1564,7 @@ TEST_P(AllTableWriterTest, renameAndReorderColumns) { PlanBuilder().tableScan(rowType_), inputRowType, tableSchema_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1581,14 +1581,14 @@ TEST_P(AllTableWriterTest, renameAndReorderColumns) { makeHiveConnectorSplits(outputDirectory), "SELECT c2, c5, c4, c3 FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, newOutputType, false); + verifyTableWriterOutput(outputDirectory->path(), newOutputType, false); } else { HiveConnectorTestBase::assertQuery( PlanBuilder().tableScan(tableSchema_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT c2, c5, c4, c1, c0, c3 FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, tableSchema_, false); + verifyTableWriterOutput(outputDirectory->path(), tableSchema_, false); } } @@ -1597,7 +1597,7 @@ TEST_P(AllTableWriterTest, directReadWrite) { auto filePaths = makeFilePaths(5); auto vectors = makeVectors(filePaths.size(), 200); for (int i = 0; i < filePaths.size(); i++) { - writeToFile(filePaths[i]->path, vectors[i]); + writeToFile(filePaths[i]->path(), vectors[i]); } createDuckDbTable(vectors); @@ -1606,7 +1606,7 @@ TEST_P(AllTableWriterTest, directReadWrite) { auto plan = createInsertPlan( PlanBuilder().tableScan(rowType_), rowType_, - outputDirectory->path, + outputDirectory->path(), 
partitionedBy_, bucketProperty_, compressionKind_, @@ -1627,14 +1627,14 @@ TEST_P(AllTableWriterTest, directReadWrite) { makeHiveConnectorSplits(outputDirectory), "SELECT c2, c3, c4, c5 FROM tmp"); rowType_ = newOutputType; - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } else { assertQuery( PlanBuilder().tableScan(rowType_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT * FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } } @@ -1651,7 +1651,7 @@ TEST_P(AllTableWriterTest, constantVectors) { auto op = createInsertPlan( PlanBuilder().values({vector}), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1668,14 +1668,14 @@ TEST_P(AllTableWriterTest, constantVectors) { makeHiveConnectorSplits(outputDirectory), "SELECT c2, c3, c4, c5 FROM tmp"); rowType_ = newOutputType; - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } else { assertQuery( PlanBuilder().tableScan(rowType_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT * FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } } @@ -1685,7 +1685,7 @@ TEST_P(AllTableWriterTest, emptyInput) { auto op = createInsertPlan( PlanBuilder().values({vector}), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1710,7 +1710,7 @@ TEST_P(AllTableWriterTest, commitStrategies) { auto plan = createInsertPlan( PlanBuilder().values(vectors), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1729,14 +1729,14 @@ TEST_P(AllTableWriterTest, commitStrategies) { "SELECT c2, c3, c4, c5 FROM tmp"); auto 
originalRowType = rowType_; rowType_ = newOutputType; - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); rowType_ = originalRowType; } else { assertQuery( PlanBuilder().tableScan(rowType_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT * FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } } // Test kNoCommit commit strategy writing to non-temporary files. @@ -1747,7 +1747,7 @@ TEST_P(AllTableWriterTest, commitStrategies) { auto plan = createInsertPlan( PlanBuilder().values(vectors), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -1765,13 +1765,13 @@ TEST_P(AllTableWriterTest, commitStrategies) { makeHiveConnectorSplits(outputDirectory), "SELECT c2, c3, c4, c5 FROM tmp"); rowType_ = newOutputType; - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } else { assertQuery( PlanBuilder().tableScan(rowType_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT * FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } } } @@ -1834,14 +1834,14 @@ TEST_P(PartitionedTableWriterTest, specialPartitionName) { auto inputFilePaths = makeFilePaths(numBatches); for (int i = 0; i < numBatches; i++) { - writeToFile(inputFilePaths[i]->path, vectors[i]); + writeToFile(inputFilePaths[i]->path(), vectors[i]); } auto outputDirectory = TempDirectoryPath::create(); auto plan = createInsertPlan( PlanBuilder().tableScan(rowType), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -1852,7 +1852,7 @@ TEST_P(PartitionedTableWriterTest, specialPartitionName) { auto task = assertQuery(plan, inputFilePaths, "SELECT count(*) FROM tmp"); std::set 
actualPartitionDirectories = - getLeafSubdirectories(outputDirectory->path); + getLeafSubdirectories(outputDirectory->path()); std::set expectedPartitionDirectories; const std::vector expectedCharsAfterEscape = { @@ -1876,7 +1876,7 @@ TEST_P(PartitionedTableWriterTest, specialPartitionName) { auto partitionName = fmt::format( "p0={}/p1=str_{}{}", i, i, expectedCharsAfterEscape.at(i % 15)); expectedPartitionDirectories.emplace( - fs::path(outputDirectory->path) / partitionName); + fs::path(outputDirectory->path()) / partitionName); } EXPECT_EQ(actualPartitionDirectories, expectedPartitionDirectories); } @@ -1920,14 +1920,14 @@ TEST_P(PartitionedTableWriterTest, multiplePartitions) { auto inputFilePaths = makeFilePaths(numBatches); for (int i = 0; i < numBatches; i++) { - writeToFile(inputFilePaths[i]->path, vectors[i]); + writeToFile(inputFilePaths[i]->path(), vectors[i]); } auto outputDirectory = TempDirectoryPath::create(); auto plan = createInsertPlan( PlanBuilder().tableScan(rowType), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -1939,7 +1939,7 @@ TEST_P(PartitionedTableWriterTest, multiplePartitions) { // Verify that there is one partition directory for each partition. 
std::set actualPartitionDirectories = - getLeafSubdirectories(outputDirectory->path); + getLeafSubdirectories(outputDirectory->path()); std::set expectedPartitionDirectories; std::set partitionNames; @@ -1947,7 +1947,7 @@ TEST_P(PartitionedTableWriterTest, multiplePartitions) { auto partitionName = fmt::format("p0={}/p1=str_{}", i, i); partitionNames.emplace(partitionName); expectedPartitionDirectories.emplace( - fs::path(outputDirectory->path) / partitionName); + fs::path(outputDirectory->path()) / partitionName); } EXPECT_EQ(actualPartitionDirectories, expectedPartitionDirectories); @@ -2000,7 +2000,7 @@ TEST_P(PartitionedTableWriterTest, singlePartition) { auto inputFilePaths = makeFilePaths(numBatches); for (int i = 0; i < numBatches; i++) { - writeToFile(inputFilePaths[i]->path, vectors[i]); + writeToFile(inputFilePaths[i]->path(), vectors[i]); } auto outputDirectory = TempDirectoryPath::create(); @@ -2008,7 +2008,7 @@ TEST_P(PartitionedTableWriterTest, singlePartition) { auto plan = createInsertPlan( PlanBuilder().tableScan(rowType), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -2020,13 +2020,13 @@ TEST_P(PartitionedTableWriterTest, singlePartition) { plan, inputFilePaths, "SELECT count(*) FROM tmp"); std::set partitionDirectories = - getLeafSubdirectories(outputDirectory->path); + getLeafSubdirectories(outputDirectory->path()); // Verify only a single partition directory is created. ASSERT_EQ(partitionDirectories.size(), 1); EXPECT_EQ( *partitionDirectories.begin(), - fs::path(outputDirectory->path) / "p0=365"); + fs::path(outputDirectory->path()) / "p0=365"); // Verify all data is written to the single partition directory. 
auto newOutputType = getNonPartitionsColumns(partitionKeys, rowType); @@ -2068,7 +2068,7 @@ TEST_P(PartitionedWithoutBucketTableWriterTest, fromSinglePartitionToMultiple) { auto plan = createInsertPlan( PlanBuilder().values(vectors), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, nullptr, compressionKind_, @@ -2124,7 +2124,7 @@ TEST_P(PartitionedTableWriterTest, maxPartitions) { auto plan = createInsertPlan( PlanBuilder().values({vector}), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -2160,7 +2160,7 @@ TEST_P(AllTableWriterTest, writeNoFile) { auto plan = createInsertPlan( PlanBuilder().tableScan(rowType_).filter("false"), rowType_, - outputDirectory->path); + outputDirectory->path()); auto execute = [&](const std::shared_ptr& plan, std::shared_ptr queryCtx) { @@ -2171,7 +2171,7 @@ TEST_P(AllTableWriterTest, writeNoFile) { }; execute(plan, std::make_shared(executor_.get())); - ASSERT_TRUE(fs::is_empty(outputDirectory->path)); + ASSERT_TRUE(fs::is_empty(outputDirectory->path())); } TEST_P(UnpartitionedTableWriterTest, differentCompression) { @@ -2193,7 +2193,7 @@ TEST_P(UnpartitionedTableWriterTest, differentCompression) { createInsertPlan( PlanBuilder().values(input), rowType_, - outputDirectory->path, + outputDirectory->path(), {}, nullptr, compressionKind, @@ -2205,7 +2205,7 @@ TEST_P(UnpartitionedTableWriterTest, differentCompression) { auto plan = createInsertPlan( PlanBuilder().values(input), rowType_, - outputDirectory->path, + outputDirectory->path(), {}, nullptr, compressionKind, @@ -2285,7 +2285,7 @@ TEST_P(UnpartitionedTableWriterTest, runtimeStatsCheck) { auto plan = createInsertPlan( PlanBuilder().values(vectors), rowType, - outputDirectory->path, + outputDirectory->path(), {}, nullptr, compressionKind_, @@ -2345,7 +2345,7 @@ TEST_P(UnpartitionedTableWriterTest, immutableSettings) { auto plan = createInsertPlan( PlanBuilder().values(input), rowType_, - 
outputDirectory->path, + outputDirectory->path(), {}, nullptr, CompressionKind_NONE, @@ -2399,7 +2399,7 @@ TEST_P(BucketedTableOnlyWriteTest, bucketCountLimit) { auto plan = createInsertPlan( PlanBuilder().values({input}), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -2428,14 +2428,14 @@ TEST_P(BucketedTableOnlyWriteTest, bucketCountLimit) { "SELECT c2, c3, c4, c5 FROM tmp"); auto originalRowType = rowType_; rowType_ = newOutputType; - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); rowType_ = originalRowType; } else { assertQuery( PlanBuilder().tableScan(rowType_).planNode(), makeHiveConnectorSplits(outputDirectory), "SELECT * FROM tmp"); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } } } @@ -2458,7 +2458,7 @@ TEST_P(BucketedTableOnlyWriteTest, mismatchedBucketTypes) { auto plan = createInsertPlan( PlanBuilder().values({input}), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -2486,7 +2486,7 @@ TEST_P(AllTableWriterTest, tableWriteOutputCheck) { auto plan = createInsertPlan( PlanBuilder().values({input}), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -2524,8 +2524,8 @@ TEST_P(AllTableWriterTest, tableWriteOutputCheck) { ASSERT_FALSE(fragmentVector->isNullAt(i)); folly::dynamic obj = folly::parseJson(fragmentVector->valueAt(i)); if (testMode_ == TestMode::kUnpartitioned) { - ASSERT_EQ(obj["targetPath"], outputDirectory->path); - ASSERT_EQ(obj["writePath"], outputDirectory->path); + ASSERT_EQ(obj["targetPath"], outputDirectory->path()); + ASSERT_EQ(obj["writePath"], outputDirectory->path()); } else { std::string partitionDirRe; for (const auto& partitionBy : partitionedBy_) { @@ -2533,11 +2533,11 @@ 
TEST_P(AllTableWriterTest, tableWriteOutputCheck) { } ASSERT_TRUE(RE2::FullMatch( obj["targetPath"].asString(), - fmt::format("{}{}", outputDirectory->path, partitionDirRe))) + fmt::format("{}{}", outputDirectory->path(), partitionDirRe))) << obj["targetPath"].asString(); ASSERT_TRUE(RE2::FullMatch( obj["writePath"].asString(), - fmt::format("{}{}", outputDirectory->path, partitionDirRe))) + fmt::format("{}{}", outputDirectory->path(), partitionDirRe))) << obj["writePath"].asString(); } numRows += obj["rowCount"].asInt(); @@ -2583,7 +2583,7 @@ TEST_P(AllTableWriterTest, tableWriteOutputCheck) { ASSERT_GT(writeFiles.size(), 0); ASSERT_LE(writeFiles.size(), numTableWriterCount_); } - auto diskFiles = listAllFiles(outputDirectory->path); + auto diskFiles = listAllFiles(outputDirectory->path()); std::sort(diskFiles.begin(), diskFiles.end()); std::sort(writeFiles.begin(), writeFiles.end()); ASSERT_EQ(diskFiles, writeFiles) @@ -2760,7 +2760,7 @@ TEST_P(AllTableWriterTest, columnStatsDataTypes) { rowType_->children(), partitionedBy_, nullptr, - makeLocationHandle(outputDirectory->path))), + makeLocationHandle(outputDirectory->path()))), false, CommitStrategy::kNoCommit)) .planNode(); @@ -2849,7 +2849,7 @@ TEST_P(AllTableWriterTest, columnStats) { rowType_->children(), partitionedBy_, bucketProperty_, - makeLocationHandle(outputDirectory->path))), + makeLocationHandle(outputDirectory->path()))), false, commitStrategy_)) .planNode(); @@ -2948,7 +2948,7 @@ TEST_P(AllTableWriterTest, columnStatsWithTableWriteMerge) { rowType_->children(), partitionedBy_, bucketProperty_, - makeLocationHandle(outputDirectory->path))), + makeLocationHandle(outputDirectory->path()))), false, commitStrategy_)); @@ -3040,7 +3040,7 @@ TEST_P(AllTableWriterTest, tableWriterStats) { auto inputFilePaths = makeFilePaths(numBatches); for (int i = 0; i < numBatches; i++) { - writeToFile(inputFilePaths[i]->path, vectors[i]); + writeToFile(inputFilePaths[i]->path(), vectors[i]); } auto outputDirectory = 
TempDirectoryPath::create(); @@ -3048,7 +3048,7 @@ TEST_P(AllTableWriterTest, tableWriterStats) { auto plan = createInsertPlan( PlanBuilder().tableScan(rowType), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -3132,7 +3132,7 @@ DEBUG_ONLY_TEST_P( auto op = createInsertPlan( PlanBuilder().values(vectors), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -3181,7 +3181,7 @@ DEBUG_ONLY_TEST_P(UnpartitionedTableWriterTest, dataSinkAbortError) { auto outputDirectory = TempDirectoryPath::create(); auto plan = PlanBuilder() .values({vector}) - .tableWrite(outputDirectory->path, fileFormat_) + .tableWrite(outputDirectory->path(), fileFormat_) .planNode(); VELOX_ASSERT_THROW( AssertQueryBuilder(plan).copyResults(pool()), "inject writer error"); @@ -3199,7 +3199,7 @@ TEST_P(BucketSortOnlyTableWriterTest, sortWriterSpill) { auto op = createInsertPlan( PlanBuilder().values(vectors), rowType_, - outputDirectory->path, + outputDirectory->path(), partitionedBy_, bucketProperty_, compressionKind_, @@ -3212,9 +3212,9 @@ TEST_P(BucketSortOnlyTableWriterTest, sortWriterSpill) { assertQueryWithWriterConfigs(op, fmt::format("SELECT {}", 5 * 500), true); if (partitionedBy_.size() > 0) { rowType_ = getNonPartitionsColumns(partitionedBy_, rowType_); - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } else { - verifyTableWriterOutput(outputDirectory->path, rowType_); + verifyTableWriterOutput(outputDirectory->path(), rowType_); } const auto updatedSpillStats = globalSpillStats(); @@ -3294,7 +3294,7 @@ DEBUG_ONLY_TEST_P(BucketSortOnlyTableWriterTest, outputBatchRows) { auto plan = createInsertPlan( PlanBuilder().values({vectors}), rowType, - outputDirectory->path, + outputDirectory->path(), partitionKeys, bucketProperty_, compressionKind_, @@ -3432,7 +3432,7 @@ 
DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, reclaimFromTableWriter) { auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .capturePlanNodeId(tableWriteNodeId) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( @@ -3445,7 +3445,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, reclaimFromTableWriter) { AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, writerSpillEnabled) .config( core::QueryConfig::kWriterSpillEnabled, writerSpillEnabled) @@ -3543,7 +3543,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, reclaimFromSortTableWriter) { auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path, {"c0"}, 4, {"c1"}, {"c2"}) + .tableWrite(outputDirectory->path(), {"c0"}, 4, {"c1"}, {"c2"}) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( {}, @@ -3554,7 +3554,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, reclaimFromSortTableWriter) { AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, writerSpillEnabled) .config(core::QueryConfig::kWriterSpillEnabled, writerSpillEnabled) // Set 0 file writer flush threshold to always trigger flush in test. 
@@ -3639,7 +3639,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, writerFlushThreshold) { auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( {}, @@ -3650,7 +3650,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, writerFlushThreshold) { AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) .config( @@ -3715,7 +3715,7 @@ DEBUG_ONLY_TEST_F( auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( {}, @@ -3726,7 +3726,7 @@ DEBUG_ONLY_TEST_F( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) // Set file writer flush threshold of zero to always trigger flush in @@ -3807,7 +3807,7 @@ DEBUG_ONLY_TEST_F( auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( {}, @@ -3818,7 +3818,7 @@ DEBUG_ONLY_TEST_F( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) // Set 0 file writer flush threshold to always trigger flush in test. 
@@ -3895,7 +3895,7 @@ DEBUG_ONLY_TEST_F( auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path, {"c0"}, 4, {"c1"}, {"c2"}) + .tableWrite(outputDirectory->path(), {"c0"}, 4, {"c1"}, {"c2"}) .project({TableWriteTraits::rowCountColumnName()}) .singleAggregation( {}, @@ -3907,7 +3907,7 @@ DEBUG_ONLY_TEST_F( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kWriterSpillEnabled, "true") // Set file writer flush threshold of zero to always trigger flush in @@ -3978,13 +3978,13 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, tableFileWriteError) { const auto outputDirectory = TempDirectoryPath::create(); auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .planNode(); VELOX_ASSERT_THROW( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) // Set 0 file writer flush threshold to always reclaim memory from @@ -4067,13 +4067,13 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, tableWriteSpillUseMoreMemory) { const auto outputDirectory = TempDirectoryPath::create(); auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .planNode(); VELOX_ASSERT_THROW( AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) // Set 0 file writer flush threshold to always trigger flush in test. 
@@ -4157,7 +4157,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, tableWriteReclaimOnClose) { auto writerPlan = PlanBuilder() .values(vectors) - .tableWrite(outputDirectory->path) + .tableWrite(outputDirectory->path()) .singleAggregation( {}, {fmt::format("sum({})", TableWriteTraits::rowCountColumnName())}) @@ -4166,7 +4166,7 @@ DEBUG_ONLY_TEST_F(TableWriterArbitrationTest, tableWriteReclaimOnClose) { AssertQueryBuilder(duckDbQueryRunner_) .queryCtx(queryCtx) .maxDrivers(1) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kWriterSpillEnabled, true) // Set 0 file writer flush threshold to always trigger flush in test. diff --git a/velox/exec/tests/TaskTest.cpp b/velox/exec/tests/TaskTest.cpp index dc049e032420..d92dbdd684eb 100644 --- a/velox/exec/tests/TaskTest.cpp +++ b/velox/exec/tests/TaskTest.cpp @@ -762,7 +762,7 @@ TEST_F(TaskTest, singleThreadedExecution) { // Project + Aggregation over TableScan. 
auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {data, data}); + writeToFile(filePath->path(), {data, data}); core::PlanNodeId scanId; plan = PlanBuilder() @@ -774,7 +774,7 @@ TEST_F(TaskTest, singleThreadedExecution) { { auto [task, results] = - executeSingleThreaded(plan, {{scanId, {filePath->path}}}); + executeSingleThreaded(plan, {{scanId, {filePath->path()}}}); assertEqualResults({expectedResult}, results); } @@ -791,7 +791,7 @@ TEST_F(TaskTest, singleThreadedHashJoin) { makeFlatVector({10, 20, 30, 40}), }); auto leftPath = TempFilePath::create(); - writeToFile(leftPath->path, {left}); + writeToFile(leftPath->path(), {left}); auto right = makeRowVector( {"u_c0"}, @@ -799,7 +799,7 @@ TEST_F(TaskTest, singleThreadedHashJoin) { makeFlatVector({0, 1, 3, 5}), }); auto rightPath = TempFilePath::create(); - writeToFile(rightPath->path, {right}); + writeToFile(rightPath->path(), {right}); auto planNodeIdGenerator = std::make_shared(); core::PlanNodeId leftScanId; @@ -827,7 +827,7 @@ TEST_F(TaskTest, singleThreadedHashJoin) { { auto [task, results] = executeSingleThreaded( plan, - {{leftScanId, {leftPath->path}}, {rightScanId, {rightPath->path}}}); + {{leftScanId, {leftPath->path()}}, {rightScanId, {rightPath->path()}}}); assertEqualResults({expectedResult}, results); } } @@ -835,11 +835,11 @@ TEST_F(TaskTest, singleThreadedHashJoin) { TEST_F(TaskTest, singleThreadedCrossJoin) { auto left = makeRowVector({"t_c0"}, {makeFlatVector({1, 2, 3})}); auto leftPath = TempFilePath::create(); - writeToFile(leftPath->path, {left}); + writeToFile(leftPath->path(), {left}); auto right = makeRowVector({"u_c0"}, {makeFlatVector({10, 20})}); auto rightPath = TempFilePath::create(); - writeToFile(rightPath->path, {right}); + writeToFile(rightPath->path(), {right}); auto planNodeIdGenerator = std::make_shared(); core::PlanNodeId leftScanId; @@ -864,7 +864,7 @@ TEST_F(TaskTest, singleThreadedCrossJoin) { { auto [task, results] = executeSingleThreaded( plan, - 
{{leftScanId, {leftPath->path}}, {rightScanId, {rightPath->path}}}); + {{leftScanId, {leftPath->path()}}, {rightScanId, {rightPath->path()}}}); assertEqualResults({expectedResult}, results); } } @@ -1344,7 +1344,7 @@ DEBUG_ONLY_TEST_F(TaskTest, driverCounters) { makeFlatVector(1'000, [](auto row) { return row; }), }); auto filePath = TempFilePath::create(); - writeToFile(filePath->path, {data, data}); + writeToFile(filePath->path(), {data, data}); core::PlanNodeId scanNodeId; auto plan = PlanBuilder() @@ -1429,7 +1429,7 @@ DEBUG_ONLY_TEST_F(TaskTest, driverCounters) { // Now add a split, finalize splits and wait for the task to finish. auto split = exec::Split(makeHiveConnectorSplit( - filePath->path, 0, std::numeric_limits::max(), 1)); + filePath->path(), 0, std::numeric_limits::max(), 1)); task->addSplit(scanNodeId, std::move(split)); task->noMoreSplits(scanNodeId); taskThread.join(); @@ -1495,7 +1495,7 @@ TEST_F(TaskTest, spillDirectoryLifecycleManagement) { std::shared_ptr task = cursor->task(); auto rootTempDir = exec::test::TempDirectoryPath::create(); auto tmpDirectoryPath = - rootTempDir->path + "/spillDirectoryLifecycleManagement"; + rootTempDir->path() + "/spillDirectoryLifecycleManagement"; task->setSpillDirectory(tmpDirectoryPath, false); TestScopedSpillInjection scopedSpillInjection(100); @@ -1551,7 +1551,7 @@ TEST_F(TaskTest, spillDirNotCreated) { auto cursor = TaskCursor::create(params); auto* task = cursor->task().get(); auto rootTempDir = exec::test::TempDirectoryPath::create(); - auto tmpDirectoryPath = rootTempDir->path + "/spillDirNotCreated"; + auto tmpDirectoryPath = rootTempDir->path() + "/spillDirNotCreated"; task->setSpillDirectory(tmpDirectoryPath, false); while (cursor->moveNext()) { diff --git a/velox/exec/tests/TopNRowNumberTest.cpp b/velox/exec/tests/TopNRowNumberTest.cpp index bbc9bcd1c61a..7dd58fe0d024 100644 --- a/velox/exec/tests/TopNRowNumberTest.cpp +++ b/velox/exec/tests/TopNRowNumberTest.cpp @@ -139,7 +139,7 @@ 
TEST_F(TopNRowNumberTest, largeOutput) { .config(core::QueryConfig::kPreferredOutputBatchBytes, "1024") .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kTopNRowNumberSpillEnabled, "true") - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .assertResults(sql); auto taskStats = exec::toPlanStats(task->taskStats()); @@ -223,7 +223,7 @@ TEST_F(TopNRowNumberTest, manyPartitions) { fmt::format("{}", outputBatchBytes)) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kTopNRowNumberSpillEnabled, "true") - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .assertResults(sql); auto taskStats = exec::toPlanStats(task->taskStats()); @@ -359,7 +359,7 @@ TEST_F(TopNRowNumberTest, maxSpillBytes) { try { TestScopedSpillInjection scopedSpillInjection(100); AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .queryCtx(queryCtx) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kTopNRowNumberSpillEnabled, "true") diff --git a/velox/exec/tests/WindowTest.cpp b/velox/exec/tests/WindowTest.cpp index cfc45ee5afd6..c5d07cd63266 100644 --- a/velox/exec/tests/WindowTest.cpp +++ b/velox/exec/tests/WindowTest.cpp @@ -66,7 +66,7 @@ TEST_F(WindowTest, spill) { .config(core::QueryConfig::kPreferredOutputBatchBytes, "1024") .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kWindowSpillEnabled, "true") - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .assertResults( "SELECT *, row_number() over (partition by p order by s) FROM tmp"); diff --git a/velox/exec/tests/utils/ArbitratorTestUtil.cpp b/velox/exec/tests/utils/ArbitratorTestUtil.cpp index 58c5d0af4b25..d5311e7dc66e 100644 --- a/velox/exec/tests/utils/ArbitratorTestUtil.cpp +++ b/velox/exec/tests/utils/ArbitratorTestUtil.cpp @@ -94,7 +94,7 @@ QueryTestResult runHashJoinTask( if 
(enableSpilling) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kJoinSpillEnabled, true) .config(core::QueryConfig::kSpillStartPartitionBit, "29") @@ -137,7 +137,7 @@ QueryTestResult runAggregateTask( const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true") .queryCtx(queryCtx) @@ -179,7 +179,7 @@ QueryTestResult runOrderByTask( if (enableSpilling) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kOrderBySpillEnabled, "true") .queryCtx(queryCtx) @@ -221,7 +221,7 @@ QueryTestResult runRowNumberTask( if (enableSpilling) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kRowNumberSpillEnabled, "true") .queryCtx(queryCtx) @@ -264,7 +264,7 @@ QueryTestResult runTopNTask( const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kTopNRowNumberSpillEnabled, "true") .queryCtx(queryCtx) @@ -306,12 +306,12 @@ QueryTestResult runWriteTask( const RowVectorPtr& expectedResult) { 
QueryTestResult result; const auto outputDirectory = TempDirectoryPath::create(); - auto plan = writePlan(vectors, outputDirectory->path, result.planNodeId); + auto plan = writePlan(vectors, outputDirectory->path(), result.planNodeId); if (enableSpilling) { const auto spillDirectory = exec::test::TempDirectoryPath::create(); result.data = AssertQueryBuilder(plan) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "false") .config(core::QueryConfig::kWriterSpillEnabled, "true") diff --git a/velox/exec/tests/utils/HiveConnectorTestBase.cpp b/velox/exec/tests/utils/HiveConnectorTestBase.cpp index c3c6ccb2a166..cbc6ebd74a4e 100644 --- a/velox/exec/tests/utils/HiveConnectorTestBase.cpp +++ b/velox/exec/tests/utils/HiveConnectorTestBase.cpp @@ -172,7 +172,7 @@ HiveConnectorTestBase::makeHiveConnectorSplits( std::vector> splits; for (auto filePath : filePaths) { splits.push_back(makeHiveConnectorSplit( - filePath->path, + filePath->path(), filePath->fileSize(), filePath->fileModifiedTime(), 0, diff --git a/velox/exec/tests/utils/TempDirectoryPath.cpp b/velox/exec/tests/utils/TempDirectoryPath.cpp index b34815a0cd5a..74fffa9241be 100644 --- a/velox/exec/tests/utils/TempDirectoryPath.cpp +++ b/velox/exec/tests/utils/TempDirectoryPath.cpp @@ -20,21 +20,26 @@ namespace facebook::velox::exec::test { -std::shared_ptr TempDirectoryPath::create() { - struct SharedTempDirectoryPath : public TempDirectoryPath { - SharedTempDirectoryPath() : TempDirectoryPath() {} - }; - return std::make_shared(); +std::shared_ptr TempDirectoryPath::create(bool injectFault) { + auto* tempDirPath = new TempDirectoryPath(injectFault); + return std::shared_ptr(tempDirPath); } TempDirectoryPath::~TempDirectoryPath() { - LOG(INFO) << "TempDirectoryPath:: removing all files from " << path; + LOG(INFO) << "TempDirectoryPath:: removing all files from " << path_; try { - 
boost::filesystem::remove_all(path.c_str()); + boost::filesystem::remove_all(path_.c_str()); } catch (...) { LOG(WARNING) << "TempDirectoryPath:: destructor failed while calling boost::filesystem::remove_all"; } } +std::string TempDirectoryPath::createTempDirectory() { + char path[] = "/tmp/velox_test_XXXXXX"; + const char* tempDirectoryPath = ::mkdtemp(path); + VELOX_CHECK_NOT_NULL(tempDirectoryPath, "Cannot open temp directory"); + return tempDirectoryPath; +} + } // namespace facebook::velox::exec::test diff --git a/velox/exec/tests/utils/TempDirectoryPath.h b/velox/exec/tests/utils/TempDirectoryPath.h index 31d8d5dbea16..c61bd6d84c3f 100644 --- a/velox/exec/tests/utils/TempDirectoryPath.h +++ b/velox/exec/tests/utils/TempDirectoryPath.h @@ -24,27 +24,35 @@ namespace facebook::velox::exec::test { -// It manages the lifetime of a temporary directory. +/// Manages the lifetime of a temporary directory. class TempDirectoryPath { public: - static std::shared_ptr create(); + /// If 'enableFaultInjection' is true, we enable fault injection on the + /// created file directory. + static std::shared_ptr create( + bool enableFaultInjection = false); virtual ~TempDirectoryPath(); - const std::string path; - TempDirectoryPath(const TempDirectoryPath&) = delete; TempDirectoryPath& operator=(const TempDirectoryPath&) = delete; - TempDirectoryPath() : path(createTempDirectory()) {} - - static std::string createTempDirectory() { - char path[] = "/tmp/velox_test_XXXXXX"; - const char* tempDirectoryPath = mkdtemp(path); - if (tempDirectoryPath == nullptr) { - throw std::logic_error("Cannot open temp directory"); - } - return tempDirectoryPath; + /// If fault injection is enabled, the returned file path has the faulty + /// file system prefix scheme. The velox fs then opens the directory through + /// the faulty file system. The actual file operation might either fail or + /// delegate to the actual file. + const std::string path() const { + return enableFaultInjection_ ? 
fmt::format("faulty:{}", path_) : path_; } + + private: + static std::string createTempDirectory(); + + explicit TempDirectoryPath(bool enableFaultInjection) + : path_(createTempDirectory()), + enableFaultInjection_(enableFaultInjection) {} + + const std::string path_; + const bool enableFaultInjection_{false}; }; } // namespace facebook::velox::exec::test diff --git a/velox/exec/tests/utils/TempFilePath.cpp b/velox/exec/tests/utils/TempFilePath.cpp index 7c5cbe7370c1..7097539b9a1a 100644 --- a/velox/exec/tests/utils/TempFilePath.cpp +++ b/velox/exec/tests/utils/TempFilePath.cpp @@ -18,11 +18,22 @@ namespace facebook::velox::exec::test { -std::shared_ptr TempFilePath::create() { - struct SharedTempFilePath : public TempFilePath { - SharedTempFilePath() : TempFilePath() {} - }; - return std::make_shared(); +TempFilePath::~TempFilePath() { + ::unlink(path_.c_str()); + ::close(fd_); } +std::shared_ptr TempFilePath::create(bool enableFaultInjection) { + auto* tempFilePath = new TempFilePath(enableFaultInjection); + return std::shared_ptr(tempFilePath); +} + +std::string TempFilePath::createTempFile(TempFilePath* tempFilePath) { + char path[] = "/tmp/velox_test_XXXXXX"; + tempFilePath->fd_ = ::mkstemp(path); + if (tempFilePath->fd_ == -1) { + VELOX_FAIL("Cannot open temp file: {}", folly::errnoStr(errno)); + } + return path; +} } // namespace facebook::velox::exec::test diff --git a/velox/exec/tests/utils/TempFilePath.h b/velox/exec/tests/utils/TempFilePath.h index d993795f1e3a..2cc86582687c 100644 --- a/velox/exec/tests/utils/TempFilePath.h +++ b/velox/exec/tests/utils/TempFilePath.h @@ -26,23 +26,21 @@ namespace facebook::velox::exec::test { -// It manages the lifetime of a temporary file. +/// Manages the lifetime of a temporary file. class TempFilePath { public: - static std::shared_ptr create(); + /// If 'enableFaultInjection' is true, we enable fault injection on the + /// created file. 
+ static std::shared_ptr create( + bool enableFaultInjection = false); - virtual ~TempFilePath() { - unlink(path.c_str()); - close(fd); - } - - const std::string path; + ~TempFilePath(); TempFilePath(const TempFilePath&) = delete; TempFilePath& operator=(const TempFilePath&) = delete; void append(std::string data) { - std::ofstream file(path, std::ios_base::app); + std::ofstream file(path_, std::ios_base::app); file << data; file.flush(); file.close(); @@ -50,31 +48,37 @@ class TempFilePath { const int64_t fileSize() { struct stat st; - stat(path.data(), &st); + ::stat(path_.data(), &st); return st.st_size; } - const int64_t fileModifiedTime() { + int64_t fileModifiedTime() { struct stat st; - stat(path.data(), &st); + ::stat(path_.data(), &st); return st.st_mtime; } + /// If fault injection is enabled, the returned file path has the faulty + /// file system prefix scheme. The velox fs then opens the file through the + /// faulty file system. The actual file operation might either fail or + /// delegate to the actual file. + std::string path() const { + return enableFaultInjection_ ? 
fmt::format("faulty:{}", path_) : path_; + } + private: - int fd; + static std::string createTempFile(TempFilePath* tempFilePath); - TempFilePath() : path(createTempFile(this)) { - VELOX_CHECK_NE(fd, -1); + TempFilePath(bool enableFaultInjection) + : enableFaultInjection_(enableFaultInjection), + path_(createTempFile(this)) { + VELOX_CHECK_NE(fd_, -1); } - static std::string createTempFile(TempFilePath* tempFilePath) { - char path[] = "/tmp/velox_test_XXXXXX"; - tempFilePath->fd = mkstemp(path); - if (tempFilePath->fd == -1) { - throw std::logic_error("Cannot open temp file"); - } - return path; - } + const bool enableFaultInjection_; + const std::string path_; + + int fd_; }; } // namespace facebook::velox::exec::test diff --git a/velox/expression/tests/ExprTest.cpp b/velox/expression/tests/ExprTest.cpp index c4df5aa14678..00a6a7f45ada 100644 --- a/velox/expression/tests/ExprTest.cpp +++ b/velox/expression/tests/ExprTest.cpp @@ -2415,7 +2415,7 @@ TEST_P(ParameterizedExprTest, exceptionContext) { // Enable saving vector and expression SQL for system errors only. auto tempDirectory = exec::test::TempDirectoryPath::create(); FLAGS_velox_save_input_on_expression_system_failure_path = - tempDirectory->path; + tempDirectory->path(); try { evaluate("runtime_error(c0) + c1", data); @@ -2449,7 +2449,7 @@ TEST_P(ParameterizedExprTest, exceptionContext) { } // Enable saving vector and expression SQL for all errors. 
- FLAGS_velox_save_input_on_expression_any_failure_path = tempDirectory->path; + FLAGS_velox_save_input_on_expression_any_failure_path = tempDirectory->path(); FLAGS_velox_save_input_on_expression_system_failure_path = ""; try { diff --git a/velox/expression/tests/ExpressionRunnerUnitTest.cpp b/velox/expression/tests/ExpressionRunnerUnitTest.cpp index 845325636d48..25f0d3f7095c 100644 --- a/velox/expression/tests/ExpressionRunnerUnitTest.cpp +++ b/velox/expression/tests/ExpressionRunnerUnitTest.cpp @@ -49,8 +49,10 @@ TEST_F(ExpressionRunnerUnitTest, run) { auto inputFile = exec::test::TempFilePath::create(); auto sqlFile = exec::test::TempFilePath::create(); auto resultFile = exec::test::TempFilePath::create(); - const char* inputPath = inputFile->path.data(); - const char* resultPath = resultFile->path.data(); + const auto inputPathStr = inputFile->path(); + const char* inputPath = inputPathStr.data(); + const auto resultPathStr = resultFile->path(); + const char* resultPath = resultPathStr.data(); const int vectorSize = 100; VectorMaker vectorMaker(pool_.get()); @@ -108,8 +110,10 @@ TEST_F(ExpressionRunnerUnitTest, persistAndReproComplexSql) { // Emulate a reproduce from complex constant SQL auto sqlFile = exec::test::TempFilePath::create(); auto complexConstantsFile = exec::test::TempFilePath::create(); - auto sqlPath = sqlFile->path.c_str(); - auto complexConstantsPath = complexConstantsFile->path.c_str(); + const auto complexConstantsFilePathStr = complexConstantsFile->path(); + auto sqlPathStr = sqlFile->path(); + auto sqlPath = sqlPathStr.c_str(); + auto complexConstantsPath = complexConstantsFilePathStr.c_str(); // Write to file.. 
saveStringToFile(complexConstantsSql, sqlPath); diff --git a/velox/expression/tests/ExpressionVerifierUnitTest.cpp b/velox/expression/tests/ExpressionVerifierUnitTest.cpp index ff63876c786c..7fca398d25fb 100644 --- a/velox/expression/tests/ExpressionVerifierUnitTest.cpp +++ b/velox/expression/tests/ExpressionVerifierUnitTest.cpp @@ -88,7 +88,7 @@ class ExpressionVerifierUnitTest : public testing::Test, public VectorTestBase { TEST_F(ExpressionVerifierUnitTest, persistReproInfo) { filesystems::registerLocalFileSystem(); auto reproFolder = exec::test::TempDirectoryPath::create(); - const auto reproPath = reproFolder->path; + const auto reproPath = reproFolder->path(); auto localFs = filesystems::getFileSystem(reproPath, nullptr); ExpressionVerifierOptions options{false, reproPath.c_str(), false}; diff --git a/velox/functions/lib/aggregates/tests/utils/AggregationTestBase.cpp b/velox/functions/lib/aggregates/tests/utils/AggregationTestBase.cpp index 45776cb5f1a6..7cdc5e93151a 100644 --- a/velox/functions/lib/aggregates/tests/utils/AggregationTestBase.cpp +++ b/velox/functions/lib/aggregates/tests/utils/AggregationTestBase.cpp @@ -404,7 +404,7 @@ void AggregationTestBase::testAggregationsWithCompanion( queryBuilder.configs(config) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .maxDrivers(4); exec::TestScopedSpillInjection scopedSpillInjection(100); @@ -647,10 +647,10 @@ void AggregationTestBase::testReadFromFiles( for (auto& vector : inputs) { auto file = exec::test::TempFilePath::create(); - writeToFile(file->path, vector, writerPool.get()); + writeToFile(file->path(), vector, writerPool.get()); files.push_back(file); splits.emplace_back(std::make_shared( - kHiveConnectorId, file->path, dwio::common::FileFormat::DWRF)); + kHiveConnectorId, file->path(), dwio::common::FileFormat::DWRF)); } // No need to test streaming as the 
streaming test generates its own inputs, // so it would be the same as the original test. @@ -666,7 +666,7 @@ void AggregationTestBase::testReadFromFiles( } for (const auto& file : files) { - remove(file->path.c_str()); + remove(file->path().c_str()); } } @@ -843,7 +843,7 @@ void AggregationTestBase::testAggregationsImpl( queryBuilder.configs(config) .config(core::QueryConfig::kSpillEnabled, true) .config(core::QueryConfig::kAggregationSpillEnabled, true) - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .maxDrivers(4); exec::TestScopedSpillInjection scopedSpillInjection(100); @@ -945,7 +945,7 @@ void AggregationTestBase::testAggregationsImpl( queryBuilder.configs(config) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true") - .spillDirectory(spillDirectory->path); + .spillDirectory(spillDirectory->path()); TestScopedSpillInjection scopedSpillInjection(100); auto task = assertResults(queryBuilder); diff --git a/velox/functions/prestosql/aggregates/tests/ArbitraryTest.cpp b/velox/functions/prestosql/aggregates/tests/ArbitraryTest.cpp index 4ae59387b1e2..d5b609130288 100644 --- a/velox/functions/prestosql/aggregates/tests/ArbitraryTest.cpp +++ b/velox/functions/prestosql/aggregates/tests/ArbitraryTest.cpp @@ -402,7 +402,7 @@ TEST_F(ArbitraryTest, spilling) { exec::TestScopedSpillInjection scopedSpillInjection(100); spillDirectory = exec::test::TempDirectoryPath::create(); - builder.spillDirectory(spillDirectory->path) + builder.spillDirectory(spillDirectory->path()) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true"); diff --git a/velox/functions/sparksql/aggregates/tests/FirstAggregateTest.cpp b/velox/functions/sparksql/aggregates/tests/FirstAggregateTest.cpp index 2088d088644b..712a81bb2c01 100644 --- a/velox/functions/sparksql/aggregates/tests/FirstAggregateTest.cpp +++ 
b/velox/functions/sparksql/aggregates/tests/FirstAggregateTest.cpp @@ -539,7 +539,7 @@ TEST_F(FirstAggregateTest, spillingAndSorting) { results = AssertQueryBuilder(plan) .config(core::QueryConfig::kSpillEnabled, "true") .config(core::QueryConfig::kAggregationSpillEnabled, "true") - .spillDirectory(spillDirectory->path) + .spillDirectory(spillDirectory->path()) .copyResults(pool()); exec::test::assertEqualResults({expected}, {results}); } diff --git a/velox/vector/tests/VectorSaverTest.cpp b/velox/vector/tests/VectorSaverTest.cpp index e2735201ce33..c06acc4140fd 100644 --- a/velox/vector/tests/VectorSaverTest.cpp +++ b/velox/vector/tests/VectorSaverTest.cpp @@ -120,11 +120,11 @@ class VectorSaverTest : public testing::Test, public VectorTestBase { VectorPtr takeRoundTrip(const VectorPtr& vector) { auto path = exec::test::TempFilePath::create(); - std::ofstream outputFile(path->path, std::ofstream::binary); + std::ofstream outputFile(path->path(), std::ofstream::binary); saveVector(*vector, outputFile); outputFile.close(); - std::ifstream inputFile(path->path, std::ifstream::binary); + std::ifstream inputFile(path->path(), std::ifstream::binary); auto copy = restoreVector(inputFile, pool()); inputFile.close(); return copy; @@ -139,11 +139,11 @@ class VectorSaverTest : public testing::Test, public VectorTestBase { void testTypeRoundTrip(const TypePtr& type) { auto path = exec::test::TempFilePath::create(); - std::ofstream outputFile(path->path, std::ofstream::binary); + std::ofstream outputFile(path->path(), std::ofstream::binary); saveType(type, outputFile); outputFile.close(); - std::ifstream inputFile(path->path, std::ifstream::binary); + std::ifstream inputFile(path->path(), std::ifstream::binary); auto copy = restoreType(inputFile); inputFile.close(); @@ -625,8 +625,8 @@ TEST_F(VectorSaverTest, LazyVector) { TEST_F(VectorSaverTest, stdVector) { std::vector intVector = {1, 2, 3, 4, 5}; auto path = exec::test::TempFilePath::create(); - 
saveStdVectorToFile(intVector, path->path.c_str()); - auto copy = restoreStdVectorFromFile(path->path.c_str()); + saveStdVectorToFile(intVector, path->path().c_str()); + auto copy = restoreStdVectorFromFile(path->path().c_str()); ASSERT_EQ(intVector, copy); } @@ -661,7 +661,8 @@ TEST_F(VectorSaverTest, exceptionContext) { }; VectorPtr data = makeFlatVector({1, 2, 3, 4, 5}); - VectorSaverInfo info{tempDirectory.get()->path.c_str(), data.get()}; + const auto path = tempDirectory->path(); + VectorSaverInfo info{path.c_str(), data.get()}; { ExceptionContextSetter context({messageFunction, &info}); try {