diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index b8d0256d50..81d9744d78 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1240,6 +1240,19 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, return true; } +template +void CacheAllocator::unlinkItemForEviction(Item& it) { + XDCHECK(it.isMarkedForEviction()); + XDCHECK_EQ(0, it.getRefCount()); + accessContainer_->remove(it); + removeFromMMContainer(it); + + // Since we managed to mark the item for eviction we must be the only + // owner of the item. + const auto ref = it.unmarkForEviction(); + XDCHECK_EQ(0, ref); +} + template typename CacheAllocator::Item* CacheAllocator::findEviction(PoolId pid, ClassId cid) { @@ -1248,76 +1261,109 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // Keep searching for a candidate until we were able to evict it // or until the search limit has been exhausted unsigned int searchTries = 0; - auto itr = mmContainer.getEvictionIterator(); - while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries) && - itr) { - ++searchTries; - (*stats_.evictionAttempts)[pid][cid].inc(); - - Item* toRecycle = itr.get(); - - Item* candidate = - toRecycle->isChainedItem() - ? 
&toRecycle->asChainedItem().getParentItem(compressor_) - : toRecycle; - - // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0 || !candidate->markMoving()) { - ++itr; + while (config_.evictionSearchTries == 0 || + config_.evictionSearchTries > searchTries) { + Item* toRecycle = nullptr; + Item* candidate = nullptr; + typename NvmCacheT::PutToken token; + + mmContainer.withEvictionIterator([this, pid, cid, &candidate, &toRecycle, + &searchTries, &mmContainer, + &token](auto&& itr) { + if (!itr) { + ++searchTries; + (*stats_.evictionAttempts)[pid][cid].inc(); + return; + } + + while ((config_.evictionSearchTries == 0 || + config_.evictionSearchTries > searchTries) && + itr) { + ++searchTries; + (*stats_.evictionAttempts)[pid][cid].inc(); + + auto* toRecycle_ = itr.get(); + auto* candidate_ = + toRecycle_->isChainedItem() + ? &toRecycle_->asChainedItem().getParentItem(compressor_) + : toRecycle_; + + const bool evictToNvmCache = shouldWriteToNvmCache(*candidate_); + auto token_ = evictToNvmCache + ? nvmCache_->createPutToken(candidate_->getKey()) + : typename NvmCacheT::PutToken{}; + + if (evictToNvmCache && !token_.isValid()) { + stats_.evictFailConcurrentFill.inc(); + ++itr; + continue; + } + + auto markedForEviction = candidate_->markForEviction(); + if (!markedForEviction) { + if (candidate_->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); + } + ++itr; + continue; + } + + XDCHECK(candidate_->isMarkedForEviction()); + // markForEviction to make sure no other thread is evicting the item + // nor holding a handle to that item + toRecycle = toRecycle_; + candidate = candidate_; + token = std::move(token_); + + // Check if parent changed for chained items - if yes, we cannot + // remove the child from the mmContainer as we will not be evicting + // it. We could abort right here, but we need to cleanup in case + // unmarkForEviction() returns 0 - so just go through normal path. 
+ if (!toRecycle_->isChainedItem() || + &toRecycle->asChainedItem().getParentItem(compressor_) == + candidate) { + mmContainer.remove(itr); + } + return; + } + }); + + if (!toRecycle) { continue; } - // for chained items, the ownership of the parent can change. We try to - // evict what we think as parent and see if the eviction of parent - // recycles the child we intend to. - bool evictionSuccessful = false; - { - auto toReleaseHandle = - itr->isChainedItem() - ? advanceIteratorAndTryEvictChainedItem(itr) - : advanceIteratorAndTryEvictRegularItem(mmContainer, itr); - evictionSuccessful = toReleaseHandle != nullptr; - // destroy toReleaseHandle. The item won't be released to allocator - // since we marked for eviction. - } - - const auto ref = candidate->unmarkMoving(); - if (ref == 0u) { - // Invalidate iterator since later on we may use this mmContainer - // again, which cannot be done unless we drop this iterator - itr.destroy(); - - // recycle the item. it's safe to do so, even if toReleaseHandle was - // NULL. If `ref` == 0 then it means that we are the last holder of - // that item. - if (candidate->hasChainedItem()) { - (*stats_.chainedItemEvictions)[pid][cid].inc(); - } else { - (*stats_.regularItemEvictions)[pid][cid].inc(); - } + XDCHECK(toRecycle); + XDCHECK(candidate); - if (auto eventTracker = getEventTracker()) { - eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(), - AllocatorApiResult::EVICTED, candidate->getSize(), - candidate->getConfiguredTTL().count()); - } + unlinkItemForEviction(*candidate); - // check if by releasing the item we intend to, we actually - // recycle the candidate. - if (ReleaseRes::kRecycled == - releaseBackToAllocator(*candidate, RemoveContext::kEviction, - /* isNascent */ false, toRecycle)) { - return toRecycle; - } + if (token.isValid() && shouldWriteToNvmCacheExclusive(*candidate)) { + nvmCache_->put(*candidate, std::move(token)); + } + + // recycle the item. 
it's safe to do so, even if toReleaseHandle was + // NULL. If `ref` == 0 then it means that we are the last holder of + // that item. + if (candidate->hasChainedItem()) { + (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { - XDCHECK(!evictionSuccessful); + (*stats_.regularItemEvictions)[pid][cid].inc(); } - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); + if (auto eventTracker = getEventTracker()) { + eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(), + AllocatorApiResult::EVICTED, candidate->getSize(), + candidate->getConfiguredTTL().count()); + } + + // check if by releasing the item we intend to, we actually + // recycle the candidate. + auto ret = releaseBackToAllocator(*candidate, RemoveContext::kEviction, + /* isNascent */ false, toRecycle); + if (ret == ReleaseRes::kRecycled) { + return toRecycle; } } return nullptr; @@ -1461,7 +1507,7 @@ bool CacheAllocator::pushToNvmCacheFromRamForTesting( if (handle && nvmCache_ && shouldWriteToNvmCache(*handle) && shouldWriteToNvmCacheExclusive(*handle)) { - nvmCache_->put(handle, nvmCache_->createPutToken(handle->getKey())); + nvmCache_->put(*handle, nvmCache_->createPutToken(handle->getKey())); return true; } return false; @@ -2660,126 +2706,6 @@ void CacheAllocator::evictForSlabRelease( } } -template -typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictRegularItem( - MMContainer& mmContainer, EvictionIterator& itr) { - // we should flush this to nvmcache if it is not already present in nvmcache - // and the item is not expired. - Item& item = *itr; - const bool evictToNvmCache = shouldWriteToNvmCache(item); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) - : typename NvmCacheT::PutToken{}; - - // record the in-flight eviciton. If not, we move on to next item to avoid - // stalling eviction. 
- if (evictToNvmCache && !token.isValid()) { - ++itr; - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // If there are other accessors, we should abort. Acquire a handle here since - // if we remove the item from both access containers and mm containers - // below, we will need a handle to ensure proper cleanup in case we end up - // not evicting this item - auto evictHandle = accessContainer_->removeIf(item, &itemExclusivePredicate); - if (!evictHandle) { - ++itr; - stats_.evictFailAC.inc(); - return evictHandle; - } - - mmContainer.remove(itr); - XDCHECK_EQ(reinterpret_cast(evictHandle.get()), - reinterpret_cast(&item)); - XDCHECK(!evictHandle->isInMMContainer()); - XDCHECK(!evictHandle->isAccessible()); - - // Invalidate iterator since later on if we are not evicting this - // item, we may need to rely on the handle we created above to ensure - // proper cleanup if the item's raw refcount has dropped to 0. - // And since this item may be a parent item that has some child items - // in this very same mmContainer, we need to make sure we drop this - // exclusive iterator so we can gain access to it when we're cleaning - // up the child items - itr.destroy(); - - // Ensure that there are no accessors after removing from the access - // container - XDCHECK(evictHandle->getRefCount() == 1); - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { - XDCHECK(token.isValid()); - nvmCache_->put(evictHandle, std::move(token)); - } - return evictHandle; -} - -template -typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictChainedItem( - EvictionIterator& itr) { - XDCHECK(itr->isChainedItem()); - - ChainedItem* candidate = &itr->asChainedItem(); - ++itr; - - // The parent could change at any point through transferChain. However, if - // that happens, we would realize that the releaseBackToAllocator return - // kNotRecycled and we would try another chained item, leading to transient - // failure. 
- auto& parent = candidate->getParentItem(compressor_); - - const bool evictToNvmCache = shouldWriteToNvmCache(parent); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey()) - : typename NvmCacheT::PutToken{}; - - // if token is invalid, return. iterator is already advanced. - if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // check if the parent exists in the hashtable and refcount is drained. - auto parentHandle = - accessContainer_->removeIf(parent, &itemExclusivePredicate); - if (!parentHandle) { - stats_.evictFailParentAC.inc(); - return parentHandle; - } - - // Invalidate iterator since later on we may use the mmContainer - // associated with this iterator which cannot be done unless we - // drop this iterator - // - // This must be done once we know the parent is not nullptr. - // Since we can very well be the last holder of this parent item, - // which may have a chained item that is linked in this MM container. - itr.destroy(); - - // Ensure we have the correct parent and we're the only user of the - // parent, then free it from access container. Otherwise, we abort - XDCHECK_EQ(reinterpret_cast(&parent), - reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(1u, parent.getRefCount()); - - removeFromMMContainer(*parentHandle); - - XDCHECK(!parent.isInMMContainer()); - XDCHECK(!parent.isAccessible()); - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { - XDCHECK(token.isValid()); - XDCHECK(parentHandle->hasChainedItem()); - nvmCache_->put(parentHandle, std::move(token)); - } - - return parentHandle; -} - template typename CacheAllocator::WriteHandle CacheAllocator::evictNormalItemForSlabRelease(Item& item) { @@ -2812,7 +2738,7 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) { // now that we are the only handle and we actually removed something from // the RAM cache, we enqueue it to nvmcache. 
if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { - nvmCache_->put(handle, std::move(token)); + nvmCache_->put(*handle, std::move(token)); } return handle; @@ -2913,7 +2839,7 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // the RAM cache, we enqueue it to nvmcache. if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { DCHECK(parentHandle->hasChainedItem()); - nvmCache_->put(parentHandle, std::move(token)); + nvmCache_->put(*parentHandle, std::move(token)); } return parentHandle; diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 32d44c1388..c5e5bf2b1c 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1652,6 +1652,12 @@ class CacheAllocator : public CacheBase { bool removeFromNvm = true, bool recordApiEvent = true); + // Must be called by the thread which called markForEviction and + // succeeded. After this call, the item is unlinked from Access and + // MM Containers. The item is no longer marked as exclusive and its + // ref count is 0 - it's available for recycling. + void unlinkItemForEviction(Item& it); + // Implementation to find a suitable eviction from the container. The // two parameters together identify a single container. // @@ -1662,25 +1668,6 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::LockedIterator; - // Advance the current iterator and try to evict a regular item - // - // @param mmContainer the container to look for evictions. - // @param itr iterator holding the item - // - // @return valid handle to regular item on success. This will be the last - // handle to the item. On failure an empty handle. 
- WriteHandle advanceIteratorAndTryEvictRegularItem(MMContainer& mmContainer, - EvictionIterator& itr); - - // Advance the current iterator and try to evict a chained item - // Iterator may also be reset during the course of this function - // - // @param itr iterator holding the item - // - // @return valid handle to the parent item on success. This will be the last - // handle to the item - WriteHandle advanceIteratorAndTryEvictChainedItem(EvictionIterator& itr); - // Deserializer CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object diff --git a/cachelib/allocator/nvmcache/NvmCache-inl.h b/cachelib/allocator/nvmcache/NvmCache-inl.h index b79712e607..3cef6b7ad0 100644 --- a/cachelib/allocator/nvmcache/NvmCache-inl.h +++ b/cachelib/allocator/nvmcache/NvmCache-inl.h @@ -460,19 +460,18 @@ uint32_t NvmCache::getStorageSizeInNvm(const Item& it) { } template -std::unique_ptr NvmCache::makeNvmItem(const WriteHandle& hdl) { - const auto& item = *hdl; +std::unique_ptr NvmCache::makeNvmItem(const Item& item) { auto poolId = cache_.getAllocInfo((void*)(&item)).poolId; if (item.isChainedItem()) { throw std::invalid_argument(folly::sformat( - "Chained item can not be flushed separately {}", hdl->toString())); + "Chained item can not be flushed separately {}", item.toString())); } auto chainedItemRange = - CacheAPIWrapperForNvm::viewAsChainedAllocsRange(cache_, *hdl); + CacheAPIWrapperForNvm::viewAsChainedAllocsRange(cache_, item); if (config_.encodeCb && !config_.encodeCb(EncodeDecodeContext{ - *(hdl.getInternal()), chainedItemRange})) { + const_cast(item), chainedItemRange})) { return nullptr; } @@ -496,12 +495,10 @@ std::unique_ptr NvmCache::makeNvmItem(const WriteHandle& hdl) { } template -void NvmCache::put(WriteHandle& hdl, PutToken token) { +void NvmCache::put(Item& item, PutToken token) { util::LatencyTracker tracker(stats().nvmInsertLatency_); - HashedKey hk{hdl->getKey()}; + HashedKey hk{item.getKey()}; - XDCHECK(hdl); - auto& 
item = *hdl; // for regular items that can only write to nvmcache upon eviction, we // should not be recording a write for an nvmclean item unless it is marked // as evicted from nvmcache. @@ -526,7 +523,7 @@ void NvmCache::put(WriteHandle& hdl, PutToken token) { return; } - auto nvmItem = makeNvmItem(hdl); + auto nvmItem = makeNvmItem(item); if (!nvmItem) { stats().numNvmPutEncodeFailure.inc(); return; diff --git a/cachelib/allocator/nvmcache/NvmCache.h b/cachelib/allocator/nvmcache/NvmCache.h index c9f5c753f0..9a5e96c1fa 100644 --- a/cachelib/allocator/nvmcache/NvmCache.h +++ b/cachelib/allocator/nvmcache/NvmCache.h @@ -158,11 +158,11 @@ class NvmCache { PutToken createPutToken(folly::StringPiece key); // store the given item in navy - // @param hdl handle to cache item. should not be null + // @param item reference to cache item // @param token the put token for the item. this must have been // obtained before enqueueing the put to maintain // consistency - void put(WriteHandle& hdl, PutToken token); + void put(Item& item, PutToken token); // returns the current state of whether nvmcache is enabled or not. nvmcache // can be disabled if the backend implementation ends up in a corrupt state @@ -286,7 +286,7 @@ class NvmCache { // returns true if there is tombstone entry for the key. bool hasTombStone(HashedKey hk); - std::unique_ptr makeNvmItem(const WriteHandle& handle); + std::unique_ptr makeNvmItem(const Item& item); // wrap an item into a blob for writing into navy. 
Blob makeBlob(const Item& it); diff --git a/cachelib/allocator/nvmcache/tests/NvmCacheTests.cpp b/cachelib/allocator/nvmcache/tests/NvmCacheTests.cpp index ec74c51980..04ea539884 100644 --- a/cachelib/allocator/nvmcache/tests/NvmCacheTests.cpp +++ b/cachelib/allocator/nvmcache/tests/NvmCacheTests.cpp @@ -2072,7 +2072,7 @@ TEST_F(NvmCacheTest, testEvictCB) { auto handle = cache.allocate(pid, key, 100); ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Recycled); @@ -2093,7 +2093,7 @@ TEST_F(NvmCacheTest, testEvictCB) { ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); cache.insertOrReplace(handle); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Recycled); @@ -2112,7 +2112,7 @@ TEST_F(NvmCacheTest, testEvictCB) { std::memcpy(handle->getMemory(), val.data(), val.size()); cache.insertOrReplace(handle); handle->markNvmClean(); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Recycled); @@ -2128,7 +2128,7 @@ TEST_F(NvmCacheTest, testEvictCB) { auto handle = cache.allocate(pid, key, 100); ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Removed); @@ -2147,7 +2147,7 @@ TEST_F(NvmCacheTest, testEvictCB) { ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); 
cache.insertOrReplace(handle); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Removed); @@ -2166,7 +2166,7 @@ TEST_F(NvmCacheTest, testEvictCB) { std::memcpy(handle->getMemory(), val.data(), val.size()); cache.insertOrReplace(handle); handle->markNvmClean(); - auto buf = toIOBuf(makeNvmItem(handle)); + auto buf = toIOBuf(makeNvmItem(*handle)); evictCB(HashedKey{key.data()}, navy::BufferView(buf.length(), buf.data()), navy::DestructorEvent::Removed); @@ -2228,7 +2228,7 @@ TEST_F(NvmCacheTest, testCreateItemAsIOBuf) { ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); - auto dipper = makeNvmItem(handle); + auto dipper = makeNvmItem(*handle); auto iobuf = createItemAsIOBuf(key, *dipper); verifyItemInIOBuf(key, handle, iobuf.get()); @@ -2240,7 +2240,7 @@ TEST_F(NvmCacheTest, testCreateItemAsIOBuf) { ASSERT_NE(nullptr, handle.get()); std::memcpy(handle->getMemory(), val.data(), val.size()); - auto dipper = makeNvmItem(handle); + auto dipper = makeNvmItem(*handle); auto iobuf = createItemAsIOBuf(key, *dipper); verifyItemInIOBuf(key, handle, iobuf.get()); @@ -2269,7 +2269,7 @@ TEST_F(NvmCacheTest, testCreateItemAsIOBufChained) { cache.addChainedItem(handle, std::move(chainedIt)); } - auto dipper = makeNvmItem(handle); + auto dipper = makeNvmItem(*handle); auto iobuf = createItemAsIOBuf(key, *dipper); verifyItemInIOBuf(key, handle, iobuf.get()); diff --git a/cachelib/allocator/nvmcache/tests/NvmTestBase.h b/cachelib/allocator/nvmcache/tests/NvmTestBase.h index fd88875fa9..6f7fcc328c 100644 --- a/cachelib/allocator/nvmcache/tests/NvmTestBase.h +++ b/cachelib/allocator/nvmcache/tests/NvmTestBase.h @@ -108,7 +108,7 @@ class NvmCacheTest : public testing::Test { void pushToNvmCacheFromRamForTesting(WriteHandle& handle) { auto nvmCache = getNvmCache(); if (nvmCache) { - nvmCache->put(handle, 
nvmCache->createPutToken(handle->getKey())); + nvmCache->put(*handle, nvmCache->createPutToken(handle->getKey())); } } @@ -126,8 +126,8 @@ class NvmCacheTest : public testing::Test { return cache_ ? cache_->nvmCache_.get() : nullptr; } - std::unique_ptr makeNvmItem(const WriteHandle& handle) { - return getNvmCache()->makeNvmItem(handle); + std::unique_ptr makeNvmItem(const Item& item) { + return getNvmCache()->makeNvmItem(item); } std::unique_ptr createItemAsIOBuf(folly::StringPiece key, diff --git a/cachelib/cachebench/runner/CacheStressor.h b/cachelib/cachebench/runner/CacheStressor.h index d2433a734e..6091a716dc 100644 --- a/cachelib/cachebench/runner/CacheStressor.h +++ b/cachelib/cachebench/runner/CacheStressor.h @@ -299,8 +299,11 @@ class CacheStressor : public Stressor { SCOPE_EXIT { throttleFn(); }; // detect refcount leaks when run in debug mode. #ifndef NDEBUG - auto checkCnt = [](int cnt) { - if (cnt != 0) { + auto checkCnt = [useCombinedLockForIterators = + config_.useCombinedLockForIterators](int cnt) { + // if useCombinedLockForIterators is set handle count can be modified + // by a different thread + if (!useCombinedLockForIterators && cnt != 0) { throw std::runtime_error(folly::sformat("Refcount leak {}", cnt)); } }; diff --git a/cachelib/cachebench/util/Config.cpp b/cachelib/cachebench/util/Config.cpp index c50cb4b6a5..bcda0abf5c 100644 --- a/cachelib/cachebench/util/Config.cpp +++ b/cachelib/cachebench/util/Config.cpp @@ -67,6 +67,8 @@ StressorConfig::StressorConfig(const folly::dynamic& configJson) { JSONSetVal(configJson, checkNvmCacheWarmUp); + JSONSetVal(configJson, useCombinedLockForIterators); + if (configJson.count("poolDistributions")) { for (auto& it : configJson["poolDistributions"]) { poolDistributions.push_back(DistributionConfig(it, configPath)); @@ -88,7 +90,7 @@ StressorConfig::StressorConfig(const folly::dynamic& configJson) { // If you added new fields to the configuration, update the JSONSetVal // to make them available for the 
json configs and increment the size // below - checkCorrectSize(); + checkCorrectSize(); } bool StressorConfig::usesChainedItems() const { diff --git a/cachelib/cachebench/util/Config.h b/cachelib/cachebench/util/Config.h index 8530250cd2..6be7fa755f 100644 --- a/cachelib/cachebench/util/Config.h +++ b/cachelib/cachebench/util/Config.h @@ -267,6 +267,8 @@ struct StressorConfig : public JSONConfig { // By default, timestamps are in milliseconds. uint64_t timestampFactor{1000}; + bool useCombinedLockForIterators{false}; + // admission policy for cache. std::shared_ptr admPolicy{};