Back out "Allow disabling VectorPool in QueryConfig" (facebookincubator#6783)

Summary: Pull Request resolved: facebookincubator#6783

Reviewed By: xiaoxmeng

Differential Revision: D49711124

fbshipit-source-id: d149f3f38c4e217d32450bb433180b623f0ff4cb
kagamiori authored and facebook-github-bot committed Sep 28, 2023
1 parent ab72f0d commit ca9ce55
Showing 7 changed files with 15 additions and 107 deletions.
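For context, the backed-out change let a query disable vector recycling through the vector_pool_enabled query config (default true) and plumbed that flag from QueryConfig through ExecCtx into VectorPool. Below is a minimal sketch of that usage, modeled on the removed TaskTest::vectorPoolConfig test further down; the QueryCtx constructor form is the one that test used and may differ in other Velox versions.

#include <memory>
#include <string>
#include <unordered_map>

#include "velox/core/QueryCtx.h"

using namespace facebook::velox;

// Sketch: build a QueryCtx that turns vector recycling off for one query.
// After this back-out the flag no longer exists and VectorPool is always on.
std::shared_ptr<core::QueryCtx> makeQueryCtxWithoutVectorPool() {
  std::unordered_map<std::string, std::string> configData{
      {"vector_pool_enabled", "false"}}; // was core::QueryConfig::kVectorPoolEnabled
  // Constructor call copied from the removed test: (executor, config values).
  return std::make_shared<core::QueryCtx>(nullptr, std::move(configData));
}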
7 changes: 0 additions & 7 deletions velox/core/QueryConfig.h
@@ -260,9 +260,6 @@ class QueryConfig {
static constexpr const char* kMinTableRowsForParallelJoinBuild =
"min_table_rows_for_parallel_join_build";

/// If true, allow VectorPool to cache vectors for possible reuse.
static constexpr const char* kVectorPoolEnabled = "vector_pool_enabled";

uint64_t maxPartialAggregationMemoryUsage() const {
static constexpr uint64_t kDefault = 1L << 24;
return get<uint64_t>(kMaxPartialAggregationMemory, kDefault);
@@ -533,10 +530,6 @@ class QueryConfig {
return get<uint32_t>(kMinTableRowsForParallelJoinBuild, 1'000);
}

bool vectorPoolEnabled() const {
return get<bool>(kVectorPoolEnabled, true);
}

template <typename T>
T get(const std::string& key, const T& defaultValue) const {
return config_->get<T>(key, defaultValue);
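Both hunks above follow QueryConfig's usual property pattern: a string key constant paired with a typed accessor that reads the key with a default via get<T>(key, defaultValue). The removal drops the kVectorPoolEnabled / vectorPoolEnabled() pair the original change had added. A self-contained sketch of that pattern follows; the Config class here is illustrative, not Velox's.

#include <cassert>
#include <sstream>
#include <string>
#include <unordered_map>

// Illustrative stand-in showing the key-constant-plus-typed-getter pattern.
class Config {
 public:
  // Mirrors the kVectorPoolEnabled / vectorPoolEnabled() pair removed above.
  static constexpr const char* kVectorPoolEnabled = "vector_pool_enabled";

  bool vectorPoolEnabled() const {
    return get<bool>(kVectorPoolEnabled, true);
  }

  void set(const std::string& key, const std::string& value) {
    values_[key] = value;
  }

 private:
  template <typename T>
  T get(const std::string& key, const T& defaultValue) const {
    auto it = values_.find(key);
    if (it == values_.end()) {
      return defaultValue;
    }
    T parsed{};
    std::istringstream in(it->second);
    in >> std::boolalpha >> parsed;
    return parsed;
  }

  std::unordered_map<std::string, std::string> values_;
};

int main() {
  Config config;
  assert(config.vectorPoolEnabled()); // default applies: true
  config.set(Config::kVectorPoolEnabled, "false");
  assert(!config.vectorPoolEnabled()); // explicit value overrides the default
  return 0;
}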
6 changes: 1 addition & 5 deletions velox/core/QueryCtx.h
@@ -145,11 +145,7 @@ class QueryCtx {
class ExecCtx {
public:
ExecCtx(memory::MemoryPool* pool, QueryCtx* queryCtx)
: pool_(pool),
queryCtx_(queryCtx),
vectorPool_{
pool,
queryCtx ? queryCtx->queryConfig().vectorPoolEnabled() : true} {}
: pool_(pool), queryCtx_(queryCtx), vectorPool_{pool} {}

velox::memory::MemoryPool* pool() const {
return pool_;
4 changes: 0 additions & 4 deletions velox/docs/configs.rst
@@ -81,10 +81,6 @@ Generic Configuration
- integer
- 1000
- The minimum number of table rows that can trigger the parallel hash join table build.
* - vector_pool_enabled
- bool
- true
- Whether to allow VectorPool to cache vectors for possible reuse during a query execution.

.. _expression-evaluation-conf:

39 changes: 0 additions & 39 deletions velox/exec/tests/TaskTest.cpp
@@ -1305,43 +1305,4 @@ TEST_F(TaskTest, driverCreationMemoryAllocationCheck) {
}
}
}

TEST_F(TaskTest, vectorPoolConfig) {
auto testConfig = [&](bool enableVectorPool) {
std::unordered_map<std::string, std::string> configData(
{{core::QueryConfig::kVectorPoolEnabled,
enableVectorPool ? "true" : "false"}});
auto queryCtx =
std::make_shared<core::QueryCtx>(nullptr, std::move(configData));
const core::QueryConfig& config = queryCtx->queryConfig();
ASSERT_EQ(config.vectorPoolEnabled(), enableVectorPool);

auto planFragment =
PlanBuilder()
.values({makeRowVector(
{BaseVector::createNullConstant(BIGINT(), 5, pool())})})
.project({"c0 + 1"})
.planFragment();
auto task = exec::Task::create("", planFragment, 0, queryCtx);

task->next();
task->testingVisitDrivers([&enableVectorPool](Driver* driver) {
auto operators = driver->operators();
for (const auto op : operators) {
ASSERT_EQ(
op->testingOperatorCtx()->execCtx()->vectorPool().enabled(),
enableVectorPool);
}
});

RowVectorPtr result = nullptr;
do {
result = task->next();
} while (result != nullptr);
ASSERT_TRUE(waitForTaskCompletion(task.get()));
};

testConfig(true);
testConfig(false);
}
} // namespace facebook::velox::exec::test
5 changes: 1 addition & 4 deletions velox/vector/VectorPool.cpp
@@ -51,16 +51,13 @@ FOLLY_ALWAYS_INLINE int32_t toCacheIndex(const TypePtr& type) {

VectorPtr VectorPool::get(const TypePtr& type, vector_size_t size) {
auto cacheIndex = toCacheIndex(type);
if (enabled_ && cacheIndex >= 0 && size <= kMaxRecycleSize) {
if (cacheIndex >= 0 && size <= kMaxRecycleSize) {
return vectors_[cacheIndex].pop(type, size, *pool_);
}
return BaseVector::create(type, size, pool_);
}

bool VectorPool::release(VectorPtr& vector) {
if (!enabled_) {
return false;
}
if (FOLLY_UNLIKELY(vector == nullptr)) {
return false;
}
37 changes: 13 additions & 24 deletions velox/vector/VectorPool.h
@@ -21,49 +21,33 @@
namespace facebook::velox {

/// A thread-level cache of pre-allocated flat vectors of different types.
/// Keeps up to 10 recyclable vectors of each type. If VectorPool is enabled, a
/// vector is recyclable if it is flat and recursively singly-referenced. Only
/// singleton built-in types are supported. Decimal types, fixed-size array
/// Keeps up to 10 recyclable vectors of each type. A vector is
/// recyclable if it is flat and recursively singly-referenced.
/// Only singleton built-in types are supported. Decimal types, fixed-size array
/// type, complex and custom types are not supported. Calling 'get' for an
/// unsupported type already returns a newly allocated vector. Calling 'release'
/// for an unsupported type is a no-op. Calling 'release' when VectorPool is not
/// enabled is a no-op too.
/// for an unsupported type is a no-op.
class VectorPool {
public:
explicit VectorPool(memory::MemoryPool* pool, bool enabled = true)
: enabled_{enabled}, pool_{pool} {}
explicit VectorPool(memory::MemoryPool* pool) : pool_{pool} {}

/// Gets a possibly recycled vector of 'type and 'size'. Allocates from
/// 'pool_' if no pre-allocated vector, type is a complex type, or VectorPool
/// is not enabled.
/// 'pool_' if no pre-allocated vector or type is a complex type.
VectorPtr get(const TypePtr& type, vector_size_t size);

/// Moves vector into 'this' if it is flat, recursively singly referenced and
/// there is space. The function returns true if VectorPool is enabled,
/// 'vector' is not null, and has been returned back to this pool, otherwise
/// returns false.
/// there is space. The function returns true if 'vector' is not null and has
/// been returned back to this pool, otherwise returns false.
bool release(VectorPtr& vector);

size_t release(std::vector<VectorPtr>& vectors);

bool enabled() const {
return enabled_;
}

private:
/// Max number of elements for a vector to be recyclable. The larger
/// the batch the less the win from recycling.
static constexpr vector_size_t kMaxRecycleSize = 64 * 1024;

static constexpr int32_t kNumPerType = 10;

static constexpr int32_t kNumCachedVectorTypes =
static_cast<int32_t>(TypeKind::HUGEINT) + 1;

const bool enabled_;

memory::MemoryPool* const pool_;

struct TypePool {
int32_t size{0};
std::array<VectorPtr, kNumPerType> vectors;
@@ -76,6 +60,11 @@ class VectorPool {
memory::MemoryPool& pool);
};

memory::MemoryPool* const pool_;

static constexpr int32_t kNumCachedVectorTypes =
static_cast<int32_t>(TypeKind::HUGEINT) + 1;

/// Caches of pre-allocated vectors indexed by typeKind.
std::array<TypePool, kNumCachedVectorTypes> vectors_;
};
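With the back-out, the pool is always active, so the contract in the class comment above is unconditional: get() serves sizes up to kMaxRecycleSize from the per-type cache when possible, and release() takes a vector back only if it is flat, recursively singly-referenced, and the cache for its type has space. A rough usage sketch against the API declared above, assuming the caller already has a valid memory::MemoryPool*:

#include "velox/vector/VectorPool.h"

using namespace facebook::velox;

// Sketch: round-trip a flat BIGINT vector through the per-thread pool.
void roundTrip(memory::MemoryPool* memoryPool) {
  VectorPool vectorPool(memoryPool);

  // The per-type cache starts empty, so the first get() allocates from 'memoryPool'.
  VectorPtr vector = vectorPool.get(BIGINT(), 1'000);

  // Accepted only if 'vector' is flat, recursively singly-referenced, and the
  // cache for this type has space; on success the vector is moved into the
  // pool and 'vector' is left null.
  bool recycled = vectorPool.release(vector);

  // A later get() for the same type can reuse the cached allocation instead
  // of hitting the memory pool again.
  VectorPtr reused = vectorPool.get(BIGINT(), 1'000);
  (void)recycled;
  (void)reused;
}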
24 changes: 0 additions & 24 deletions velox/vector/tests/VectorPoolTest.cpp
@@ -146,28 +146,4 @@ TEST_F(VectorPoolTest, customTypes) {
ASSERT_EQ(1'000, vector->size());
ASSERT_TRUE(isJsonType(vector->type()));
}

TEST_F(VectorPoolTest, vectorPoolConfig) {
auto testConfig = [&](bool enableVectorPool) {
VectorPool vectorPool(pool(), enableVectorPool);
ASSERT_EQ(vectorPool.enabled(), enableVectorPool);

for (auto i = 0; i < 10; ++i) {
if (!enableVectorPool) {
ASSERT_EQ(pool()->currentBytes(), 0);
}
VectorPtr vector =
makeFlatVector<int64_t>(1000, [](auto row) { return row; });
ASSERT_EQ(vectorPool.release(vector), enableVectorPool);
}
if (enableVectorPool) {
ASSERT_GT(pool()->currentBytes(), 0);
} else {
ASSERT_EQ(pool()->currentBytes(), 0);
}
};

testConfig(true);
testConfig(false);
}
} // namespace facebook::velox::test
