From 1c23ee0e193c2fdc970f891eee1dbe1d4ff0990c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Chor=C4=85=C5=BCewicz?= Date: Tue, 12 Apr 2022 07:42:13 -0400 Subject: [PATCH 1/9] Shorten critical section in findEviction Remove the item from mmContainer and drop the lock before attempting eviction. --- cachelib/allocator/CacheAllocator-inl.h | 58 ++++++------------------- cachelib/allocator/CacheAllocator.h | 14 +++--- 2 files changed, 20 insertions(+), 52 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c6db0b6d78..9a072809da 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1233,13 +1233,15 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { ++searchTries; Item* candidate = itr.get(); + mmContainer.remove(itr); + itr.destroy(); + // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = - itr->isChainedItem() - ? advanceIteratorAndTryEvictChainedItem(itr) - : advanceIteratorAndTryEvictRegularItem(mmContainer, itr); + auto toReleaseHandle = candidate->isChainedItem() + ? tryEvictChainedItem(*candidate) + : tryEvictRegularItem(mmContainer, *candidate); if (toReleaseHandle) { if (toReleaseHandle->hasChainedItem()) { @@ -1254,9 +1256,6 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), toReleaseHandle->getConfiguredTTL().count()); } - // Invalidate iterator since later on we may use this mmContainer - // again, which cannot be done unless we drop this iterator - itr.destroy(); // we must be the last handle and for chained items, this will be // the parent. 
@@ -1281,11 +1280,9 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } } - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); - } + // Insert item back to the mmContainer if eviction failed. + mmContainer.add(*candidate); + itr.resetToBegin(); } return nullptr; } @@ -1340,11 +1337,10 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( template typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictRegularItem( - MMContainer& mmContainer, EvictionIterator& itr) { +CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, + Item& item) { // we should flush this to nvmcache if it is not already present in nvmcache // and the item is not expired. - Item& item = *itr; const bool evictToNvmCache = shouldWriteToNvmCache(item); auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) @@ -1352,7 +1348,6 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( // record the in-flight eviciton. If not, we move on to next item to avoid // stalling eviction. if (evictToNvmCache && !token.isValid()) { - ++itr; stats_.evictFailConcurrentFill.inc(); return WriteHandle{}; } @@ -1364,12 +1359,10 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); if (!evictHandle) { - ++itr; stats_.evictFailAC.inc(); return evictHandle; } - mmContainer.remove(itr); XDCHECK_EQ(reinterpret_cast(evictHandle.get()), reinterpret_cast(&item)); XDCHECK(!evictHandle->isInMMContainer()); @@ -1384,15 +1377,6 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( return WriteHandle{}; } - // Invalidate iterator since later on if we are not evicting this - // item, we may need to rely on the handle we created above to ensure - // proper cleanup if the item's raw refcount has dropped to 0. 
- // And since this item may be a parent item that has some child items - // in this very same mmContainer, we need to make sure we drop this - // exclusive iterator so we can gain access to it when we're cleaning - // up the child items - itr.destroy(); - // Ensure that there are no accessors after removing from the access // container XDCHECK(evictHandle->getRefCount() == 1); @@ -1406,12 +1390,10 @@ CacheAllocator::advanceIteratorAndTryEvictRegularItem( template typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictChainedItem( - EvictionIterator& itr) { - XDCHECK(itr->isChainedItem()); +CacheAllocator::tryEvictChainedItem(Item& item) { + XDCHECK(item.isChainedItem()); - ChainedItem* candidate = &itr->asChainedItem(); - ++itr; + ChainedItem* candidate = &item.asChainedItem(); // The parent could change at any point through transferChain. However, if // that happens, we would realize that the releaseBackToAllocator return @@ -1438,23 +1420,11 @@ CacheAllocator::advanceIteratorAndTryEvictChainedItem( return parentHandle; } - // Invalidate iterator since later on we may use the mmContainer - // associated with this iterator which cannot be done unless we - // drop this iterator - // - // This must be done once we know the parent is not nullptr. - // Since we can very well be the last holder of this parent item, - // which may have a chained item that is linked in this MM container. - itr.destroy(); - // Ensure we have the correct parent and we're the only user of the // parent, then free it from access container. 
Otherwise, we abort XDCHECK_EQ(reinterpret_cast(&parent), reinterpret_cast(parentHandle.get())); XDCHECK_EQ(1u, parent.getRefCount()); - - removeFromMMContainer(*parentHandle); - XDCHECK(!parent.isInMMContainer()); XDCHECK(!parent.isAccessible()); diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 89086ddfe0..19b8fb15ed 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1609,24 +1609,22 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Advance the current iterator and try to evict a regular item + // Try to evict a regular item. // // @param mmContainer the container to look for evictions. - // @param itr iterator holding the item + // @param item item to evict // // @return valid handle to regular item on success. This will be the last // handle to the item. On failure an empty handle. - WriteHandle advanceIteratorAndTryEvictRegularItem(MMContainer& mmContainer, - EvictionIterator& itr); + WriteHandle tryEvictRegularItem(MMContainer& mmContainer, Item& item); - // Advance the current iterator and try to evict a chained item - // Iterator may also be reset during the course of this function + // Try to evict a chained item. // - // @param itr iterator holding the item + // @param item item to evict // // @return valid handle to the parent item on success. 
This will be the last // handle to the item - WriteHandle advanceIteratorAndTryEvictChainedItem(EvictionIterator& itr); + WriteHandle tryEvictChainedItem(Item& item); // Deserializer CacheAllocatorMetadata and verify the version // From 55e234dfac5d94cb7f00173025af1b955f4c7f8d Mon Sep 17 00:00:00 2001 From: igchor Date: Wed, 4 May 2022 15:27:44 -0400 Subject: [PATCH 2/9] Increment refcount under lock --- cachelib/allocator/CacheAllocator-inl.h | 38 +++++++++++++++++++------ cachelib/allocator/CacheAllocator.h | 4 +-- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 9a072809da..128853a4d5 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1233,15 +1233,23 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { ++searchTries; Item* candidate = itr.get(); - mmContainer.remove(itr); + + // make sure no other thead is evicting the item + if (candidate->getRefCount() != 0) { + ++itr; + continue; + } + + incRef(*candidate); itr.destroy(); // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. auto toReleaseHandle = candidate->isChainedItem() - ? tryEvictChainedItem(*candidate) + ? tryEvictChainedItem(mmContainer, *candidate) : tryEvictRegularItem(mmContainer, *candidate); + // XXX: fix chained item if (toReleaseHandle) { if (toReleaseHandle->hasChainedItem()) { @@ -1260,7 +1268,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // we must be the last handle and for chained items, this will be // the parent. 
XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem()); - XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); + XDCHECK_EQ(2u, toReleaseHandle->getRefCount()); // We manually release the item here because we don't want to // invoke the Item Handle's destructor which will be decrementing @@ -1268,6 +1276,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { auto& itemToRelease = *toReleaseHandle.release(); // Decrementing the refcount because we want to recycle the item + decRef(itemToRelease); const auto ref = decRef(itemToRelease); XDCHECK_EQ(0u, ref); @@ -1280,9 +1289,13 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } } - // Insert item back to the mmContainer if eviction failed. - mmContainer.add(*candidate); - itr.resetToBegin(); + // If we destroyed the itr to possibly evict and failed, we restart + // from the beginning again + if (!itr) { + itr.resetToBegin(); + for (int i = 0; i < searchTries; i++) + ++itr; + } } return nullptr; } @@ -1349,6 +1362,7 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // stalling eviction. if (evictToNvmCache && !token.isValid()) { stats_.evictFailConcurrentFill.inc(); + decRef(item); return WriteHandle{}; } @@ -1360,9 +1374,12 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, if (!evictHandle) { stats_.evictFailAC.inc(); + decRef(item); return evictHandle; } + auto removed = mmContainer.remove(item); + XDCHECK(removed); XDCHECK_EQ(reinterpret_cast(evictHandle.get()), reinterpret_cast(&item)); XDCHECK(!evictHandle->isInMMContainer()); @@ -1374,12 +1391,13 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // is set. Iterator was already advance by the remove call above. 
if (evictHandle->isMoving()) { stats_.evictFailMove.inc(); + decRef(item); return WriteHandle{}; } // Ensure that there are no accessors after removing from the access // container - XDCHECK(evictHandle->getRefCount() == 1); + XDCHECK(evictHandle->getRefCount() == 2); if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { XDCHECK(token.isValid()); @@ -1390,7 +1408,7 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, template typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictChainedItem(Item& item) { +CacheAllocator::tryEvictChainedItem(MMContainer& mmContainer, Item& item) { XDCHECK(item.isChainedItem()); ChainedItem* candidate = &item.asChainedItem(); @@ -1422,9 +1440,11 @@ CacheAllocator::tryEvictChainedItem(Item& item) { // Ensure we have the correct parent and we're the only user of the // parent, then free it from access container. Otherwise, we abort + auto removed = mmContainer.remove(item); + XDCHECK(removed); XDCHECK_EQ(reinterpret_cast(&parent), reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(1u, parent.getRefCount()); + XDCHECK_EQ(2u, parent.getRefCount()); // XXX? XDCHECK(!parent.isInMMContainer()); XDCHECK(!parent.isAccessible()); diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 19b8fb15ed..417d70b363 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1624,7 +1624,7 @@ class CacheAllocator : public CacheBase { // // @return valid handle to the parent item on success. 
This will be the last // handle to the item - WriteHandle tryEvictChainedItem(Item& item); + WriteHandle tryEvictChainedItem(MMContainer& mmContainer, Item& item); // Deserializer CacheAllocatorMetadata and verify the version // @@ -1877,7 +1877,7 @@ class CacheAllocator : public CacheBase { } static bool itemEvictionPredicate(const Item& item) { - return item.getRefCount() == 0 && !item.isMoving(); + return item.getRefCount() == 1 && !item.isMoving(); } static bool itemExpiryPredicate(const Item& item) { From 4a34f75c89c7fc2e29d409abce48f11845482e47 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Thu, 2 Jun 2022 04:40:48 -0400 Subject: [PATCH 3/9] Increment item refCount under AC lock to avoid races with remove/replace with predicate. Also, refact tryEvictRegularItem and tryEvictChainedItem into a single function. --- cachelib/allocator/CacheAllocator-inl.h | 156 +++++++--------------- cachelib/allocator/CacheAllocator.h | 18 +-- cachelib/allocator/ChainedHashTable-inl.h | 15 +++ cachelib/allocator/ChainedHashTable.h | 11 ++ 4 files changed, 80 insertions(+), 120 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 128853a4d5..ec559a0f71 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1235,24 +1235,29 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { Item* candidate = itr.get(); // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0) { + if (candidate->getRefCount() != 0 || candidate->isMoving()) { ++itr; continue; } - - incRef(*candidate); + + if (candidate->isChainedItem()) { + candidate = &candidate->asChainedItem().getParentItem(compressor_); + } + + auto toReleaseHandle = accessContainer_->find(*candidate); + if (!toReleaseHandle || !toReleaseHandle.isReady()) { + ++itr; + continue; + } + itr.destroy(); // for chained items, the ownership of the parent can change. 
We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = candidate->isChainedItem() - ? tryEvictChainedItem(mmContainer, *candidate) - : tryEvictRegularItem(mmContainer, *candidate); - // XXX: fix chained item - - if (toReleaseHandle) { - if (toReleaseHandle->hasChainedItem()) { + bool evicted = tryEvictItem(mmContainer, std::move(toReleaseHandle)); + if (evicted) { + if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { (*stats_.regularItemEvictions)[pid][cid].inc(); @@ -1265,37 +1270,20 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { toReleaseHandle->getConfiguredTTL().count()); } - // we must be the last handle and for chained items, this will be - // the parent. - XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem()); - XDCHECK_EQ(2u, toReleaseHandle->getRefCount()); - - // We manually release the item here because we don't want to - // invoke the Item Handle's destructor which will be decrementing - // an already zero refcount, which will throw exception - auto& itemToRelease = *toReleaseHandle.release(); - // Decrementing the refcount because we want to recycle the item - decRef(itemToRelease); - const auto ref = decRef(itemToRelease); + const auto ref = decRef(*candidate); XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. 
if (ReleaseRes::kRecycled == - releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, + releaseBackToAllocator(*candidate, RemoveContext::kEviction, /* isNascent */ false, candidate)) { return candidate; } } - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); - for (int i = 0; i < searchTries; i++) - ++itr; - } + itr.resetToBegin(); } return nullptr; } @@ -1349,9 +1337,10 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( } template -typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, - Item& item) { +bool CacheAllocator::tryEvictItem(MMContainer& mmContainer, + WriteHandle&& handle) { + auto& item = *handle; + // we should flush this to nvmcache if it is not already present in nvmcache // and the item is not expired. const bool evictToNvmCache = shouldWriteToNvmCache(item); @@ -1361,21 +1350,27 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // record the in-flight eviciton. If not, we move on to next item to avoid // stalling eviction. if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - decRef(item); - return WriteHandle{}; + if (item.isChainedItem()) + stats_.evictFailConcurrentFill.inc(); + else + stats_.evictFailConcurrentFill.inc(); + handle.reset(); + return false; } - // If there are other accessors, we should abort. Acquire a handle here since - // if we remove the item from both access containers and mm containers - // below, we will need a handle to ensure proper cleanup in case we end up - // not evicting this item + // If there are other accessors, we should abort. We should be the only + // handle owner. auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); + /* We got new handle so it's safe to get rid of the old one. 
*/ + handle.reset(); + if (!evictHandle) { - stats_.evictFailAC.inc(); - decRef(item); - return evictHandle; + if (item.isChainedItem()) + stats_.evictFailParentAC.inc(); + else + stats_.evictFailAC.inc(); + return false; } auto removed = mmContainer.remove(item); @@ -1388,81 +1383,28 @@ CacheAllocator::tryEvictRegularItem(MMContainer& mmContainer, // If the item is now marked as moving, that means its corresponding slab is // being released right now. So, we look for the next item that is eligible // for eviction. It is safe to destroy the handle here since the moving bit - // is set. Iterator was already advance by the remove call above. + // is set. if (evictHandle->isMoving()) { - stats_.evictFailMove.inc(); - decRef(item); - return WriteHandle{}; + if (item.isChainedItem()) + stats_.evictFailParentMove.inc(); + else + stats_.evictFailMove.inc(); + return false; } // Ensure that there are no accessors after removing from the access - // container - XDCHECK(evictHandle->getRefCount() == 2); + // container. + XDCHECK(evictHandle->getRefCount() == 1); if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { XDCHECK(token.isValid()); nvmCache_->put(evictHandle, std::move(token)); } - return evictHandle; -} - -template -typename CacheAllocator::WriteHandle -CacheAllocator::tryEvictChainedItem(MMContainer& mmContainer, Item& item) { - XDCHECK(item.isChainedItem()); - - ChainedItem* candidate = &item.asChainedItem(); - - // The parent could change at any point through transferChain. However, if - // that happens, we would realize that the releaseBackToAllocator return - // kNotRecycled and we would try another chained item, leading to transient - // failure. - auto& parent = candidate->getParentItem(compressor_); - - const bool evictToNvmCache = shouldWriteToNvmCache(parent); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey()) - : typename NvmCacheT::PutToken{}; - - // if token is invalid, return. iterator is already advanced. 
- if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // check if the parent exists in the hashtable and refcount is drained. - auto parentHandle = - accessContainer_->removeIf(parent, &itemEvictionPredicate); - if (!parentHandle) { - stats_.evictFailParentAC.inc(); - return parentHandle; - } - // Ensure we have the correct parent and we're the only user of the - // parent, then free it from access container. Otherwise, we abort - auto removed = mmContainer.remove(item); - XDCHECK(removed); - XDCHECK_EQ(reinterpret_cast(&parent), - reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(2u, parent.getRefCount()); // XXX? - XDCHECK(!parent.isInMMContainer()); - XDCHECK(!parent.isAccessible()); - - // We need to make sure the parent is not marked as moving - // and we're the only holder of the parent item. Safe to destroy the handle - // here since moving bit is set. - if (parentHandle->isMoving()) { - stats_.evictFailParentMove.inc(); - return WriteHandle{}; - } - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { - XDCHECK(token.isValid()); - XDCHECK(parentHandle->hasChainedItem()); - nvmCache_->put(parentHandle, std::move(token)); - } + /* Release the handle, item will be reused by the called. */ + evictHandle.release(); - return parentHandle; + return true; } template diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 417d70b363..e7126b6c09 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1609,22 +1609,14 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Try to evict a regular item. + // Try to evict an item. // // @param mmContainer the container to look for evictions. - // @param item item to evict + // @param handle handle to item to evict. eviction will fail if + // this is not an owning handle. 
// - // @return valid handle to regular item on success. This will be the last - // handle to the item. On failure an empty handle. - WriteHandle tryEvictRegularItem(MMContainer& mmContainer, Item& item); - - // Try to evict a chained item. - // - // @param item item to evict - // - // @return valid handle to the parent item on success. This will be the last - // handle to the item - WriteHandle tryEvictChainedItem(MMContainer& mmContainer, Item& item); + // @return whether eviction succeeded + bool tryEvictItem(MMContainer& mmContainer, WriteHandle&& handle); // Deserializer CacheAllocatorMetadata and verify the version // diff --git a/cachelib/allocator/ChainedHashTable-inl.h b/cachelib/allocator/ChainedHashTable-inl.h index 8a8e68ded1..86d37b7bb4 100644 --- a/cachelib/allocator/ChainedHashTable-inl.h +++ b/cachelib/allocator/ChainedHashTable-inl.h @@ -450,6 +450,21 @@ typename T::Handle ChainedHashTable::Container::removeIf( } } +template T::*HookPtr, + typename LockT> +typename T::Handle ChainedHashTable::Container::find( + T& node) const { + const auto bucket = ht_.getBucket(node.getKey()); + auto l = locks_.lockShared(bucket); + + if (!node.isAccessible()) { + return {}; + } + + return handleMaker_(&node); +} + template T::*HookPtr, typename LockT> diff --git a/cachelib/allocator/ChainedHashTable.h b/cachelib/allocator/ChainedHashTable.h index f3c87bfa7a..aa686661dd 100644 --- a/cachelib/allocator/ChainedHashTable.h +++ b/cachelib/allocator/ChainedHashTable.h @@ -496,6 +496,17 @@ class ChainedHashTable { // creating this item handle. Handle find(Key key) const; + // returns a handle to specified node. + // + // @param node requested node + // + // @return handle to the node if find was sucessfull. returns a + // null handle if the node was not in the container. + // + // @throw std::overflow_error is the maximum item refcount is execeeded by + // creating this item handle. 
+ Handle find(T& node) const; // for saving the state of the hash table // // precondition: serialization must happen without any reader or writer From 28019e95edea65bc9524b8a4385aa783fc6c5766 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Thu, 9 Jun 2022 06:01:11 +0000 Subject: [PATCH 4/9] Use moving bit for synchronization in findEviction moving bit is used to give exclusive right to evict the item to a particular thread. Originally, there was an assumption that whoever marked the item as moving will try to free it until it succeeds. Since we don't want to do that in findEviction (potentially can take a long time) we need to make sure that unmarking is safe. This patch checks the flags after unmarking (atomically) and if ref is zero it also recycles the item. This is needed as there might be some concurrent thread releasing the item (and decrementing ref count). If moving bit is set, that thread would not free the memory back to allocator, resulting in memory leak on unmarkMoving(). 
--- cachelib/allocator/CacheAllocator-inl.h | 139 ++++++++-------------- cachelib/allocator/CacheAllocator.h | 15 +-- cachelib/allocator/CacheItem-inl.h | 4 +- cachelib/allocator/CacheItem.h | 2 +- cachelib/allocator/ChainedHashTable-inl.h | 15 --- cachelib/allocator/ChainedHashTable.h | 11 -- cachelib/allocator/Refcount.h | 13 +- 7 files changed, 60 insertions(+), 139 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index ec559a0f71..f7b976e92b 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1232,20 +1232,15 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { itr) { ++searchTries; - Item* candidate = itr.get(); + Item* toRecycle = itr.get(); - // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0 || candidate->isMoving()) { - ++itr; - continue; - } - - if (candidate->isChainedItem()) { - candidate = &candidate->asChainedItem().getParentItem(compressor_); - } + Item* candidate = + toRecycle->isChainedItem() + ? &toRecycle->asChainedItem().getParentItem(compressor_) + : toRecycle; - auto toReleaseHandle = accessContainer_->find(*candidate); - if (!toReleaseHandle || !toReleaseHandle.isReady()) { + // make sure no other thead is evicting the item + if (candidate->getRefCount() != 0 || !candidate->markMoving()) { ++itr; continue; } @@ -1255,8 +1250,11 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. 
- bool evicted = tryEvictItem(mmContainer, std::move(toReleaseHandle)); - if (evicted) { + auto toReleaseHandle = + evictNormalItem(*candidate, true /* skipIfTokenInvalid */); + auto ref = candidate->unmarkMoving(); + + if (toReleaseHandle || ref == 0u) { if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { @@ -1269,17 +1267,43 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), toReleaseHandle->getConfiguredTTL().count()); } + } else { + if (candidate->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); + } + } + + if (toReleaseHandle) { + XDCHECK(toReleaseHandle.get() == candidate); + XDCHECK(toRecycle == candidate || toRecycle->isChainedItem()); + XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); + + // We manually release the item here because we don't want to + // invoke the Item Handle's destructor which will be decrementing + // an already zero refcount, which will throw exception + auto& itemToRelease = *toReleaseHandle.release(); // Decrementing the refcount because we want to recycle the item - const auto ref = decRef(*candidate); + ref = decRef(itemToRelease); XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. + if (ReleaseRes::kRecycled == + releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, + /* isNascent */ false, toRecycle)) { + return toRecycle; + } + } else if (ref == 0u) { + // it's safe to recycle the item here as there are no more + // references and the item could not been marked as moving + // by other thread since it's detached from MMContainer. 
if (ReleaseRes::kRecycled == releaseBackToAllocator(*candidate, RemoveContext::kEviction, - /* isNascent */ false, candidate)) { - return candidate; + /* isNascent */ false, toRecycle)) { + return toRecycle; } } @@ -1336,77 +1360,6 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( return true; } -template -bool CacheAllocator::tryEvictItem(MMContainer& mmContainer, - WriteHandle&& handle) { - auto& item = *handle; - - // we should flush this to nvmcache if it is not already present in nvmcache - // and the item is not expired. - const bool evictToNvmCache = shouldWriteToNvmCache(item); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) - : typename NvmCacheT::PutToken{}; - // record the in-flight eviciton. If not, we move on to next item to avoid - // stalling eviction. - if (evictToNvmCache && !token.isValid()) { - if (item.isChainedItem()) - stats_.evictFailConcurrentFill.inc(); - else - stats_.evictFailConcurrentFill.inc(); - handle.reset(); - return false; - } - - // If there are other accessors, we should abort. We should be the only - // handle owner. - auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); - - /* We got new handle so it's safe to get rid of the old one. */ - handle.reset(); - - if (!evictHandle) { - if (item.isChainedItem()) - stats_.evictFailParentAC.inc(); - else - stats_.evictFailAC.inc(); - return false; - } - - auto removed = mmContainer.remove(item); - XDCHECK(removed); - XDCHECK_EQ(reinterpret_cast(evictHandle.get()), - reinterpret_cast(&item)); - XDCHECK(!evictHandle->isInMMContainer()); - XDCHECK(!evictHandle->isAccessible()); - - // If the item is now marked as moving, that means its corresponding slab is - // being released right now. So, we look for the next item that is eligible - // for eviction. It is safe to destroy the handle here since the moving bit - // is set. 
- if (evictHandle->isMoving()) { - if (item.isChainedItem()) - stats_.evictFailParentMove.inc(); - else - stats_.evictFailMove.inc(); - return false; - } - - // Ensure that there are no accessors after removing from the access - // container. - XDCHECK(evictHandle->getRefCount() == 1); - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { - XDCHECK(token.isValid()); - nvmCache_->put(evictHandle, std::move(token)); - } - - /* Release the handle, item will be reused by the called. */ - evictHandle.release(); - - return true; -} - template typename CacheAllocator::RemoveRes CacheAllocator::remove(typename Item::Key key) { @@ -2596,7 +2549,7 @@ void CacheAllocator::evictForSlabRelease( auto owningHandle = item.isChainedItem() ? evictChainedItemForSlabRelease(item.asChainedItem()) - : evictNormalItemForSlabRelease(item); + : evictNormalItem(item); // we managed to evict the corresponding owner of the item and have the // last handle for the owner. @@ -2653,7 +2606,8 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItemForSlabRelease(Item& item) { +CacheAllocator::evictNormalItem(Item& item, + bool skipIfTokenInvalid) { XDCHECK(item.isMoving()); if (item.isOnlyMoving()) { @@ -2666,6 +2620,11 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) { auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) : typename NvmCacheT::PutToken{}; + if (skipIfTokenInvalid && evictToNvmCache && !token.isValid()) { + stats_.evictFailConcurrentFill.inc(); + return WriteHandle{}; + } + // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot // be freed as long as we have the moving bit set. 
diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index e7126b6c09..17c6374b01 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1609,15 +1609,6 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Try to evict an item. - // - // @param mmContainer the container to look for evictions. - // @param handle handle to item to evict. eviction will fail if - // this is not an owning handle. - // - // @return whether eviction succeeded - bool tryEvictItem(MMContainer& mmContainer, WriteHandle&& handle); - // Deserializer CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object @@ -1735,7 +1726,7 @@ class CacheAllocator : public CacheBase { // // @return last handle for corresponding to item on success. empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItemForSlabRelease(Item& item); + WriteHandle evictNormalItem(Item& item, bool skipIfTokenInvalid = false); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted @@ -1868,10 +1859,6 @@ class CacheAllocator : public CacheBase { return item.getRefCount() == 0; } - static bool itemEvictionPredicate(const Item& item) { - return item.getRefCount() == 1 && !item.isMoving(); - } - static bool itemExpiryPredicate(const Item& item) { return item.getRefCount() == 1 && item.isExpired(); } diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h index a1c2456af5..878bf36f11 100644 --- a/cachelib/allocator/CacheItem-inl.h +++ b/cachelib/allocator/CacheItem-inl.h @@ -219,8 +219,8 @@ bool CacheItem::markMoving() noexcept { } template -void CacheItem::unmarkMoving() noexcept { - ref_.unmarkMoving(); +RefcountWithFlags::Value CacheItem::unmarkMoving() noexcept { + return ref_.unmarkMoving(); } template diff --git a/cachelib/allocator/CacheItem.h 
b/cachelib/allocator/CacheItem.h index 87c8b8a19e..96dd50df66 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -357,7 +357,7 @@ class CACHELIB_PACKED_ATTR CacheItem { * Unmarking moving does not depend on `isInMMContainer` */ bool markMoving() noexcept; - void unmarkMoving() noexcept; + RefcountWithFlags::Value unmarkMoving() noexcept; bool isMoving() const noexcept; bool isOnlyMoving() const noexcept; diff --git a/cachelib/allocator/ChainedHashTable-inl.h b/cachelib/allocator/ChainedHashTable-inl.h index 86d37b7bb4..8a8e68ded1 100644 --- a/cachelib/allocator/ChainedHashTable-inl.h +++ b/cachelib/allocator/ChainedHashTable-inl.h @@ -450,21 +450,6 @@ typename T::Handle ChainedHashTable::Container::removeIf( } } -template T::*HookPtr, - typename LockT> -typename T::Handle ChainedHashTable::Container::find( - T& node) const { - const auto bucket = ht_.getBucket(node.getKey()); - auto l = locks_.lockShared(bucket); - - if (!node.isAccessible()) { - return {}; - } - - return handleMaker_(&node); -} - template T::*HookPtr, typename LockT> diff --git a/cachelib/allocator/ChainedHashTable.h b/cachelib/allocator/ChainedHashTable.h index aa686661dd..f3c87bfa7a 100644 --- a/cachelib/allocator/ChainedHashTable.h +++ b/cachelib/allocator/ChainedHashTable.h @@ -496,17 +496,6 @@ class ChainedHashTable { // creating this item handle. Handle find(Key key) const; - // returns a handle to specified node. - // - // @param node requested node - // - // @return handle to the node if find was sucessfull. returns a - // null handle if the node was not in the container. - // - // @throw std::overflow_error is the maximum item refcount is execeeded by - // creating this item handle. 
- Handle find(T& node) const; - // for saving the state of the hash table // // precondition: serialization must happen without any reader or writer diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 631e1695f9..8328019f27 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -247,10 +247,10 @@ class FOLLY_PACK_ATTR RefcountWithFlags { /** * The following four functions are used to track whether or not * an item is currently in the process of being moved. This happens during a - * slab rebalance or resize operation. + * slab rebalance or resize operation or during eviction. * - * An item can only be marked moving when `isInMMContainer` returns true. - * This operation is atomic. + * An item can only be marked moving when `isInMMContainer` returns true and + * the item is not yet marked as moving. This operation is atomic. * * User can also query if an item "isOnlyMoving". This returns true only * if the refcount is 0 and only the moving bit is set. @@ -267,7 +267,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED); while (true) { const bool flagSet = curValue & conditionBitMask; - if (!flagSet) { + const bool alreadyMoving = curValue & bitMask; + if (!flagSet || alreadyMoving) { return false; } @@ -286,9 +287,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { } } } - void unmarkMoving() noexcept { + Value unmarkMoving() noexcept { Value bitMask = ~getAdminRef(); - __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL); + return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask; } bool isMoving() const noexcept { return getRaw() & getAdminRef(); } bool isOnlyMoving() const noexcept { From 437041ffbc1b7adcade62684af2729e10e659a76 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 13 Jun 2022 10:20:36 -0400 Subject: [PATCH 5/9] Execute findEviction critical section under MMContainer combined_lock. 
--- cachelib/allocator/CacheAllocator-inl.h | 48 ++++++++++++++++--------- cachelib/allocator/MM2Q-inl.h | 21 +++++------ cachelib/allocator/MM2Q.h | 5 +++ cachelib/allocator/MMLru-inl.h | 8 +++++ cachelib/allocator/MMLru.h | 5 +++ cachelib/allocator/MMTinyLFU-inl.h | 7 ++++ cachelib/allocator/MMTinyLFU.h | 5 +++ 7 files changed, 70 insertions(+), 29 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index f7b976e92b..2ec382dcd7 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1226,26 +1226,42 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // Keep searching for a candidate until we were able to evict it // or until the search limit has been exhausted unsigned int searchTries = 0; - auto itr = mmContainer.getEvictionIterator(); while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries) && - itr) { + config_.evictionSearchTries > searchTries)) { ++searchTries; - Item* toRecycle = itr.get(); - - Item* candidate = - toRecycle->isChainedItem() - ? &toRecycle->asChainedItem().getParentItem(compressor_) - : toRecycle; - - // make sure no other thead is evicting the item - if (candidate->getRefCount() != 0 || !candidate->markMoving()) { - ++itr; + Item* toRecycle = nullptr; + Item* candidate = nullptr; + + mmContainer.withEvictionIterator( + [this, &candidate, &toRecycle, &searchTries](auto&& itr) { + while ((config_.evictionSearchTries == 0 || + config_.evictionSearchTries > searchTries) && + itr) { + ++searchTries; + + auto* toRecycle_ = itr.get(); + auto* candidate_ = + toRecycle_->isChainedItem() + ? 
&toRecycle_->asChainedItem().getParentItem(compressor_) + : toRecycle_; + + // make sure no other thead is evicting the item + if (candidate_->getRefCount() == 0 && candidate_->markMoving()) { + toRecycle = toRecycle_; + candidate = candidate_; + return; + } + + ++itr; + } + }); + + if (!toRecycle) continue; - } - itr.destroy(); + XDCHECK(toRecycle); + XDCHECK(candidate); // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent @@ -1306,8 +1322,6 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { return toRecycle; } } - - itr.resetToBegin(); } return nullptr; } diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 1d2482d45b..493e050c09 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -250,22 +250,19 @@ MM2Q::Container::getEvictionIterator() const noexcept { // arbitrary amount of time outside a lambda-friendly piece of code (eg. they // can return the iterator from functions, pass it to functions, etc) // - // it would be theoretically possible to refactor this interface into - // something like the following to allow combining - // - // mm2q.withEvictionIterator([&](auto iterator) { - // // user code - // }); - // - // at the time of writing it is unclear if the gains from combining are - // reasonable justification for the codemod required to achieve combinability - // as we don't expect this critical section to be the hotspot in user code. - // This is however subject to change at some time in the future as and when - // this assertion becomes false. 
+ // to get advantage of combining, use withEvictionIterator LockHolder l(*lruMutex_); return Iterator{std::move(l), lru_.rbegin()}; } +template T::*HookPtr> +template +void MM2Q::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { + fun(Iterator{LockHolder{}, lru_.rbegin()}); + }); +} + template T::*HookPtr> void MM2Q::Container::removeLocked(T& node, bool doRebalance) noexcept { diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index 886dffdddd..31073965ce 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -447,6 +447,11 @@ class MM2Q { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // get the current config as a copy Config getConfig() const; diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 5a66161ae0..53ed1b5f59 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -218,6 +218,14 @@ MMLru::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), lru_.rbegin()}; } +template T::*HookPtr> +template +void MMLru::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { + fun(Iterator{LockHolder{}, lru_.rbegin()}); + }); +} + template T::*HookPtr> void MMLru::Container::ensureNotInsertionPoint(T& node) noexcept { // If we are removing the insertion point node, grow tail before we remove diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index f89438dd3d..0ba27db3a4 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -332,6 +332,11 @@ class MMLru { // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. 
Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // get copy of current config Config getConfig() const; diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index de06902482..d9e7da8a7d 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -220,6 +220,13 @@ MMTinyLFU::Container::getEvictionIterator() const noexcept { return Iterator{std::move(l), *this}; } +template T::*HookPtr> +template +void MMTinyLFU::Container::withEvictionIterator(F&& fun) { + LockHolder l(lruMutex_); + fun(Iterator{LockHolder{}, *this}); +} + template T::*HookPtr> void MMTinyLFU::Container::removeLocked(T& node) noexcept { if (isTiny(node)) { diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 14d5ae6906..40886d53af 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -491,6 +491,11 @@ class MMTinyLFU { // Obtain an iterator that start from the tail and can be used // to search for evictions. This iterator holds a lock to this // container and only one such iterator can exist at a time Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); + // for saving the state of the lru // // precondition: serialization must happen without any reader or writer From c5306ec73ba00bfb61f80a1ade898a3d307e24b1 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 20 Jun 2022 09:09:26 -0400 Subject: [PATCH 6/9] Remove skipIfTokenInvalid. Checking token validity should not be needed right after creating it.
--- cachelib/allocator/CacheAllocator-inl.h | 11 ++--------- cachelib/allocator/CacheAllocator.h | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 2ec382dcd7..c4a6b43720 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1266,8 +1266,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = - evictNormalItem(*candidate, true /* skipIfTokenInvalid */); + auto toReleaseHandle = evictNormalItem(*candidate); auto ref = candidate->unmarkMoving(); if (toReleaseHandle || ref == 0u) { @@ -2620,8 +2619,7 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItem(Item& item, - bool skipIfTokenInvalid) { +CacheAllocator::evictNormalItem(Item& item) { XDCHECK(item.isMoving()); if (item.isOnlyMoving()) { @@ -2634,11 +2632,6 @@ CacheAllocator::evictNormalItem(Item& item, auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) : typename NvmCacheT::PutToken{}; - if (skipIfTokenInvalid && evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot // be freed as long as we have the moving bit set. diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 17c6374b01..9fcf18b6f1 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1726,7 +1726,7 @@ class CacheAllocator : public CacheBase { // // @return last handle for corresponding to item on success. 
empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItem(Item& item, bool skipIfTokenInvalid = false); + WriteHandle evictNormalItem(Item& item); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted From cbbbbe396274fde6aea3e616324ba16a46cd371c Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Mon, 20 Jun 2022 09:23:39 -0400 Subject: [PATCH 7/9] Unconditionally destroy toReleaseHandle in findEviction, and rely only on refcount being zero when releasing items back to allocator. --- cachelib/allocator/CacheAllocator-inl.h | 56 +++++++++---------------- 1 file changed, 19 insertions(+), 37 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c4a6b43720..889501f53e 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1266,10 +1266,17 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = evictNormalItem(*candidate); - auto ref = candidate->unmarkMoving(); + { + auto toReleaseHandle = evictNormalItem(*candidate); + // destroy toReleaseHandle. The item won't be released to allocator + // since we marked it as moving. + } + const auto ref = candidate->unmarkMoving(); - if (toReleaseHandle || ref == 0u) { + if (ref == 0u) { + // recycle the item. it's safe to do so, even if toReleaseHandle was + // NULL. If `ref` == 0 then it means that we are the last holder of + // that item.
if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { @@ -1277,49 +1284,24 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { } if (auto eventTracker = getEventTracker()) { - eventTracker->record( - AllocatorApiEvent::DRAM_EVICT, toReleaseHandle->getKey(), - AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), - toReleaseHandle->getConfiguredTTL().count()); + eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(), + AllocatorApiResult::EVICTED, candidate->getSize(), + candidate->getConfiguredTTL().count()); } - } else { - if (candidate->hasChainedItem()) { - stats_.evictFailParentAC.inc(); - } else { - stats_.evictFailAC.inc(); - } - } - - if (toReleaseHandle) { - XDCHECK(toReleaseHandle.get() == candidate); - XDCHECK(toRecycle == candidate || toRecycle->isChainedItem()); - XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); - - // We manually release the item here because we don't want to - // invoke the Item Handle's destructor which will be decrementing - // an already zero refcount, which will throw exception - auto& itemToRelease = *toReleaseHandle.release(); - - // Decrementing the refcount because we want to recycle the item - ref = decRef(itemToRelease); - XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. - if (ReleaseRes::kRecycled == - releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, - /* isNascent */ false, toRecycle)) { - return toRecycle; - } - } else if (ref == 0u) { - // it's safe to recycle the item here as there are no more - // references and the item could not been marked as moving - // by other thread since it's detached from MMContainer. 
if (ReleaseRes::kRecycled == releaseBackToAllocator(*candidate, RemoveContext::kEviction, /* isNascent */ false, toRecycle)) { return toRecycle; } + } else { + if (candidate->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); + } } } return nullptr; From ac5609711c93c0e9c44685f472bb27e5f7ccc97e Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Fri, 24 Jun 2022 08:35:04 -0400 Subject: [PATCH 8/9] Remove getEvictionIterator and use withEvictionIterator everywhere --- cachelib/allocator/CacheAllocator-inl.h | 14 ++-- cachelib/allocator/MM2Q-inl.h | 22 +----- cachelib/allocator/MM2Q.h | 48 +------------ cachelib/allocator/MMLru-inl.h | 14 +--- cachelib/allocator/MMLru.h | 49 +------------ cachelib/allocator/MMTinyLFU-inl.h | 15 ++-- cachelib/allocator/MMTinyLFU.h | 25 +------ cachelib/allocator/tests/BaseAllocatorTest.h | 30 +++++--- cachelib/allocator/tests/MM2QTest.cpp | 71 +++++++++++-------- cachelib/allocator/tests/MMLruTest.cpp | 48 ++++++++----- cachelib/allocator/tests/MMTinyLFUTest.cpp | 31 ++++---- cachelib/allocator/tests/MMTypeTest.h | 54 ++++++++------ cachelib/benchmarks/MMTypeBench.h | 9 +-- .../RAM_cache_indexing_and_eviction.md | 7 +- 14 files changed, 168 insertions(+), 269 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 889501f53e..ecc0f4acd4 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -1790,13 +1790,13 @@ std::vector CacheAllocator::dumpEvictionIterator( std::vector content; auto& mm = *mmContainers_[pid][cid]; - auto evictItr = mm.getEvictionIterator(); - size_t i = 0; - while (evictItr && i < numItems) { - content.push_back(evictItr->toString()); - ++evictItr; - ++i; - } + + mm.withEvictionIterator([&content, numItems](auto&& itr) { + while (itr && content.size() < numItems) { + content.push_back(itr->toString()); + ++itr; + } + }); return content; } diff --git 
a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 493e050c09..094f08fdc2 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -240,26 +240,11 @@ bool MM2Q::Container::add(T& node) noexcept { }); } -template T::*HookPtr> -typename MM2Q::Container::Iterator -MM2Q::Container::getEvictionIterator() const noexcept { - // we cannot use combined critical sections with folly::DistributedMutex here - // because the lock is held for the lifetime of the eviction iterator. In - // other words, the abstraction of the iterator just does not lend itself well - // to combinable critical sections as the user can hold the lock for an - // arbitrary amount of time outside a lambda-friendly piece of code (eg. they - // can return the iterator from functions, pass it to functions, etc) - // - // to get advantage of combining, use withEvictionIterator - LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; -} - template T::*HookPtr> template void MM2Q::Container::withEvictionIterator(F&& fun) { lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{LockHolder{}, lru_.rbegin()}); + fun(Iterator{lru_.rbegin()}); }); } @@ -457,10 +442,5 @@ void MM2Q::Container::reconfigureLocked(const Time& currTime) { lruRefreshTime_.store(lruRefreshTime, std::memory_order_relaxed); } -// Iterator Context Implementation -template T::*HookPtr> -MM2Q::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index 31073965ce..492ffedea9 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -347,48 +347,7 @@ class MM2Q { Container(const Container&) = delete; Container& operator=(const Container&) = delete; - // context for iterating the MM container. 
At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. - class Iterator : public LruList::Iterator { - public: - // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; - - Iterator(Iterator&&) noexcept = default; - - // 1. Invalidate this iterator - // 2. Unlock - void destroy() { - LruList::Iterator::reset(); - if (l_.owns_lock()) { - l_.unlock(); - } - } - - // Reset this iterator to the beginning - void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } - LruList::Iterator::resetToBegin(); - } - - private: - // private because it's easy to misuse and cause deadlock for MM2Q - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; - - // only the container can create iterators - friend Container; - - // lock protecting the validity of the iterator - LockHolder l_; - }; + using Iterator = typename LruList::Iterator; // records the information that the node was accessed. This could bump up // the node to the head of the lru depending on the time when the node was @@ -442,11 +401,6 @@ class MM2Q { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets // iterator passed as parameter. 
template diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 53ed1b5f59..6c3fe4c66f 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -211,18 +211,11 @@ bool MMLru::Container::add(T& node) noexcept { }); } -template T::*HookPtr> -typename MMLru::Container::Iterator -MMLru::Container::getEvictionIterator() const noexcept { - LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; -} - template T::*HookPtr> template void MMLru::Container::withEvictionIterator(F&& fun) { lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{LockHolder{}, lru_.rbegin()}); + fun(Iterator{lru_.rbegin()}); }); } @@ -366,10 +359,5 @@ void MMLru::Container::reconfigureLocked(const Time& currTime) { lruRefreshTime_.store(lruRefreshTime, std::memory_order_relaxed); } -// Iterator Context Implementation -template T::*HookPtr> -MMLru::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index 0ba27db3a4..f16fea568a 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -234,48 +234,7 @@ class MMLru { Container(const Container&) = delete; Container& operator=(const Container&) = delete; - // context for iterating the MM container. At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. - class Iterator : public LruList::Iterator { - public: - // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; - - Iterator(Iterator&&) noexcept = default; - - // 1. Invalidate this iterator - // 2. 
Unlock - void destroy() { - LruList::Iterator::reset(); - if (l_.owns_lock()) { - l_.unlock(); - } - } - - // Reset this iterator to the beginning - void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } - LruList::Iterator::resetToBegin(); - } - - private: - // private because it's easy to misuse and cause deadlock for MMLru - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; - - // only the container can create iterators - friend Container; - - // lock protecting the validity of the iterator - LockHolder l_; - }; + using Iterator = typename LruList::Iterator; // records the information that the node was accessed. This could bump up // the node to the head of the lru depending on the time when the node was @@ -307,7 +266,6 @@ class MMLru { // state of node is unchanged. bool remove(T& node) noexcept; - using Iterator = Iterator; // same as the above but uses an iterator context. The iterator is updated // on removal of the corresponding node to point to the next node. The // iterator context holds the lock on the lru. @@ -327,11 +285,6 @@ class MMLru { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets // iterator passed as parameter. 
template diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index d9e7da8a7d..4ee95a057a 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -213,18 +213,11 @@ bool MMTinyLFU::Container::add(T& node) noexcept { return true; } -template T::*HookPtr> -typename MMTinyLFU::Container::Iterator -MMTinyLFU::Container::getEvictionIterator() const noexcept { - LockHolder l(lruMutex_); - return Iterator{std::move(l), *this}; -} - template T::*HookPtr> template void MMTinyLFU::Container::withEvictionIterator(F&& fun) { LockHolder l(lruMutex_); - fun(Iterator{LockHolder{}, *this}); + fun(Iterator{*this}); } template T::*HookPtr> @@ -357,10 +350,10 @@ void MMTinyLFU::Container::reconfigureLocked(const Time& currTime) { // Iterator Context Implementation template T::*HookPtr> MMTinyLFU::Container::Iterator::Iterator( - LockHolder l, const Container& c) noexcept + const Container& c) noexcept : c_(c), tIter_(c.lru_.getList(LruType::Tiny).rbegin()), - mIter_(c.lru_.getList(LruType::Main).rbegin()), - l_(std::move(l)) {} + mIter_(c.lru_.getList(LruType::Main).rbegin()) + {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 40886d53af..87362c169a 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -356,10 +356,7 @@ class MMTinyLFU { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // context for iterating the MM container. At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. + // context for iterating the MM container. 
class Iterator { public: using ListIterator = typename LruList::DListIterator; @@ -367,6 +364,7 @@ class MMTinyLFU { Iterator(const Iterator&) = delete; Iterator& operator=(const Iterator&) = delete; Iterator(Iterator&&) noexcept = default; + Iterator& operator=(Iterator&&) noexcept = default; Iterator& operator++() noexcept { ++getIter(); @@ -405,26 +403,16 @@ class MMTinyLFU { // 2. Unlock void destroy() { reset(); - if (l_.owns_lock()) { - l_.unlock(); - } } // Reset this iterator to the beginning void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } tIter_.resetToBegin(); mIter_.resetToBegin(); } private: - // private because it's easy to misuse and cause deadlock for MMTinyLFU - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - explicit Iterator(LockHolder l, const Container& c) noexcept; + explicit Iterator(const Container& c) noexcept; const ListIterator& getIter() const noexcept { return evictTiny() ? tIter_ : mIter_; @@ -456,8 +444,6 @@ class MMTinyLFU { // Tiny and main cache iterators ListIterator tIter_; ListIterator mIter_; - // lock protecting the validity of the iterator - LockHolder l_; }; Config getConfig() const; @@ -486,11 +472,6 @@ class MMTinyLFU { // Returns the eviction age stats. See CacheStats.h for details EvictionAgeStat getEvictionAgeStat(uint64_t projectedLength) const noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; - // Execute provided function under container lock. Function gets // iterator passed as parameter. 
template diff --git a/cachelib/allocator/tests/BaseAllocatorTest.h b/cachelib/allocator/tests/BaseAllocatorTest.h index ce5c1f3efb..ce4bb2977e 100644 --- a/cachelib/allocator/tests/BaseAllocatorTest.h +++ b/cachelib/allocator/tests/BaseAllocatorTest.h @@ -4113,15 +4113,16 @@ class BaseAllocatorTest : public AllocatorTest { // Check that item is in the expected container. bool findItem(AllocatorT& allocator, typename AllocatorT::Item* item) { auto& container = allocator.getMMContainer(*item); - auto itr = container.getEvictionIterator(); bool found = false; - while (itr) { - if (itr.get() == item) { - found = true; - break; + container.withEvictionIterator([&found, &item](auto&& itr) { + while (itr) { + if (itr.get() == item) { + found = true; + break; + } + ++itr; } - ++itr; - } + }); return found; } @@ -5509,8 +5510,12 @@ class BaseAllocatorTest : public AllocatorTest { ASSERT_TRUE(big->isInMMContainer()); auto& mmContainer = alloc.getMMContainer(*big); - auto itr = mmContainer.getEvictionIterator(); - ASSERT_EQ(big.get(), &(*itr)); + + typename AllocatorT::Item* evictionCandidate = nullptr; + mmContainer.withEvictionIterator( + [&evictionCandidate](auto&& itr) { evictionCandidate = itr.get(); }); + + ASSERT_EQ(big.get(), evictionCandidate); alloc.remove("hello"); } @@ -5524,8 +5529,11 @@ class BaseAllocatorTest : public AllocatorTest { ASSERT_TRUE(small2->isInMMContainer()); auto& mmContainer = alloc.getMMContainer(*small2); - auto itr = mmContainer.getEvictionIterator(); - ASSERT_EQ(small2.get(), &(*itr)); + + typename AllocatorT::Item* evictionCandidate = nullptr; + mmContainer.withEvictionIterator( + [&evictionCandidate](auto&& itr) { evictionCandidate = itr.get(); }); + ASSERT_EQ(small2.get(), evictionCandidate); alloc.remove("hello"); } diff --git a/cachelib/allocator/tests/MM2QTest.cpp b/cachelib/allocator/tests/MM2QTest.cpp index daf846e6bc..89d757b5ad 100644 --- a/cachelib/allocator/tests/MM2QTest.cpp +++ b/cachelib/allocator/tests/MM2QTest.cpp @@ -43,17 
+43,20 @@ TEST_F(MM2QTest, RemoveWithSmallQueues) { // trying to remove through iterator should work as expected. // no need of iter++ since remove will do that. - for (auto iter = c.getEvictionIterator(); iter;) { - auto& node = *iter; - ASSERT_TRUE(node.isInMMContainer()); - - // this will move the iter. - c.remove(iter); - ASSERT_FALSE(node.isInMMContainer()); - if (iter) { - ASSERT_NE((*iter).getId(), node.getId()); + c.withEvictionIterator([&c](auto&& iter) { + while (iter) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. + c.remove(iter); + + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } } - } + }); ASSERT_EQ(c.getStats().size, 0); for (const auto& node : nodes) { @@ -92,9 +95,12 @@ TEST_F(MM2QTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& it) { + while (it) { + nodeOrderPrev.push_back(it->getId()); + ++it; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -119,9 +125,12 @@ TEST_F(MM2QTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. 
std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& it) { + while (it) { + nodeOrderCurr.push_back(it->getId()); + ++it; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -209,13 +218,14 @@ TEST_F(MM2QTest, RecordAccessWrites) { template void MMTypeTest::testIterate(std::vector>& nodes, Container& c) { - auto it2q = c.getEvictionIterator(); auto it = nodes.begin(); - while (it2q) { - ASSERT_EQ(it2q->getId(), (*it)->getId()); - ++it2q; - ++it; - } + c.withEvictionIterator([&it](auto&& it2q) { + while (it2q) { + ASSERT_EQ(it2q->getId(), (*it)->getId()); + ++it2q; + ++it; + } + }); } template @@ -223,14 +233,15 @@ void MMTypeTest::testMatch(std::string expected, MMTypeTest::Container& c) { int index = -1; std::string actual; - auto it2q = c.getEvictionIterator(); - while (it2q) { - ++index; - actual += folly::stringPrintf( - "%d:%s, ", it2q->getId(), - (c.isHot(*it2q) ? "H" : (c.isCold(*it2q) ? "C" : "W"))); - ++it2q; - } + c.withEvictionIterator([&index, &actual, &c](auto&& it2q) { + while (it2q) { + ++index; + actual += folly::stringPrintf( + "%d:%s, ", it2q->getId(), + (c.isHot(*it2q) ? "H" : (c.isCold(*it2q) ? 
"C" : "W"))); + ++it2q; + } + }); ASSERT_EQ(expected, actual); } diff --git a/cachelib/allocator/tests/MMLruTest.cpp b/cachelib/allocator/tests/MMLruTest.cpp index bb63618668..c63c25e8d6 100644 --- a/cachelib/allocator/tests/MMLruTest.cpp +++ b/cachelib/allocator/tests/MMLruTest.cpp @@ -59,9 +59,12 @@ TEST_F(MMLruTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& itr) { + while (itr) { + nodeOrderPrev.push_back(itr->getId()); + ++itr; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -86,9 +89,12 @@ TEST_F(MMLruTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& itr) { + while (itr) { + nodeOrderCurr.push_back(itr->getId()); + ++itr; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -180,14 +186,16 @@ TEST_F(MMLruTest, InsertionPointBasic) { } auto checkLruConfig = [&](Container& container, std::vector order) { - auto it = container.getEvictionIterator(); int i = 0; - while (it) { - ASSERT_LT(i, order.size()); - EXPECT_EQ(order[i], it->getId()); - i++; - ++it; - } + container.withEvictionIterator([&i, &order](auto&& it) { + while (it) { + ASSERT_LT(i, order.size()); + EXPECT_EQ(order[i], it->getId()); + i++; + ++it; + } + }); + ASSERT_EQ(i, order.size()); }; @@ -379,13 +387,15 @@ TEST_F(MMLruTest, InsertionPointStress) { auto getTailCount = [&]() { size_t ntail = 0; - auto it = c.getEvictionIterator(); - while (it) { - if (it->isTail()) { - ntail++; + c.withEvictionIterator([&ntail](auto&& it) { + while (it) { + if (it->isTail()) { + ntail++; + } + ++it; } - ++it; - } + }); + return ntail; }; diff --git 
a/cachelib/allocator/tests/MMTinyLFUTest.cpp b/cachelib/allocator/tests/MMTinyLFUTest.cpp index 6200db55c5..1be8bb6ec7 100644 --- a/cachelib/allocator/tests/MMTinyLFUTest.cpp +++ b/cachelib/allocator/tests/MMTinyLFUTest.cpp @@ -59,9 +59,12 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& itr) { + while (itr) { + nodeOrderPrev.push_back(itr->getId()); + ++itr; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -86,9 +89,12 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& itr) { + while (itr) { + nodeOrderCurr.push_back(itr->getId()); + ++itr; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -170,13 +176,14 @@ TEST_F(MMTinyLFUTest, TinyLFUBasic) { auto checkTlfuConfig = [&](Container& container, std::string expected, std::string context) { - auto it = container.getEvictionIterator(); std::string actual; - while (it) { - actual += folly::stringPrintf("%s:%s, ", it->getKey().str().c_str(), - (container.isTiny(*it) ? "T" : "M")); - ++it; - } + container.withEvictionIterator([&container, &actual](auto&& it) { + while (it) { + actual += folly::stringPrintf("%s:%s, ", it->getKey().str().c_str(), + (container.isTiny(*it) ? 
"T" : "M")); + ++it; + } + }); ASSERT_EQ(expected, actual) << context; }; diff --git a/cachelib/allocator/tests/MMTypeTest.h b/cachelib/allocator/tests/MMTypeTest.h index 5c421cf4c1..ba5faeb2af 100644 --- a/cachelib/allocator/tests/MMTypeTest.h +++ b/cachelib/allocator/tests/MMTypeTest.h @@ -182,9 +182,12 @@ void MMTypeTest::testAddBasic( } std::set foundNodes; - for (auto itr = c.getEvictionIterator(); itr; ++itr) { - foundNodes.insert(itr->getId()); - } + c.withEvictionIterator([&foundNodes](auto&& itr) { + while (itr) { + foundNodes.insert(itr->getId()); + ++itr; + } + }); EXPECT_EQ(foundNodes.size(), c.getStats().size); EXPECT_EQ(foundNodes.size(), c.size()); } @@ -229,26 +232,31 @@ void MMTypeTest::testRemoveBasic(Config config) { } std::set foundNodes; - for (auto itr = c.getEvictionIterator(); itr; ++itr) { - foundNodes.insert(itr->getId()); - } + c.withEvictionIterator([&foundNodes](auto&& itr) { + while (itr) { + foundNodes.insert(itr->getId()); + ++itr; + } + }); for (const auto& node : removedNodes) { ASSERT_EQ(foundNodes.find(node->getId()), foundNodes.end()); } // trying to remove through iterator should work as expected as well. // no need of iter++ since remove will do that. - for (auto iter = c.getEvictionIterator(); iter;) { - auto& node = *iter; - ASSERT_TRUE(node.isInMMContainer()); - - // this will move the iter. - c.remove(iter); - ASSERT_FALSE(node.isInMMContainer()); - if (iter) { - ASSERT_NE((*iter).getId(), node.getId()); + c.withEvictionIterator([&foundNodes, &c](auto&& iter) { + while (iter) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. 
+ c.remove(iter); + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } } - } + }); EXPECT_EQ(c.getStats().size, 0); EXPECT_EQ(c.size(), 0); @@ -322,12 +330,16 @@ void MMTypeTest::testSerializationBasic(Config config) { for (auto& node : nodes) { bool foundNode = false; - for (auto it = c2.getEvictionIterator(); it; ++it) { - if (&*node == &*it) { - foundNode = true; - break; + c2.withEvictionIterator([&foundNode, &node](auto&& it) { + while (it) { + if (&*node == &*it) { + foundNode = true; + break; + } + ++it; } - } + }); + ASSERT_TRUE(foundNode); foundNode = false; } diff --git a/cachelib/benchmarks/MMTypeBench.h b/cachelib/benchmarks/MMTypeBench.h index a5dc6280d0..6362ff732b 100644 --- a/cachelib/benchmarks/MMTypeBench.h +++ b/cachelib/benchmarks/MMTypeBench.h @@ -203,10 +203,11 @@ void MMTypeBench::benchRemoveIterator(unsigned int numNodes) { // // no need of iter++ since remove will do that. for (unsigned int deleted = 0; deleted < numNodes; deleted++) { - auto iter = c->getEvictionIterator(); - if (iter) { - c->remove(iter); - } + c->withEvictionIterator([this](auto&& iter) { + if (iter) { + c->remove(iter); + } + }); } } diff --git a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md index e23217e36c..7a39fa00f8 100644 --- a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md +++ b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md @@ -109,9 +109,10 @@ It has the following major API functions: is removed form the cache (evicted or explicitly removed by the client) (`bool CacheAllocator::removeFromMMContainer(Item& item)` in `cachelib/allocator/CacheAllocator-inl.h`). -* `getEvictionIterator`: Return an iterator of items to be evicted. This is - called when the cache allocator is looking for eviction. 
Usually the first item - that can be evicted (no active handles, not moving, etc) is used (see +* `withEvictionIterator`: Executes callback with eviction iterator passed as a + parameter.This is called when the cache allocator is looking for eviction. + Usually the first item that can be evicted (no active handles, not moving, + etc) is used (see `CacheAllocator::findEviction(PoolId pid, ClassId cid)` in `cachelib/allocator/CacheAllocator-inl.h`). From 215d8220f339dcc4f200a44495b6bc5f572ca1f2 Mon Sep 17 00:00:00 2001 From: Igor Chorazewicz Date: Fri, 24 Jun 2022 13:18:53 +0000 Subject: [PATCH 9/9] Rename moving bit to exclusive --- cachelib/allocator/CacheAllocator-inl.h | 77 +++++++++++------------ cachelib/allocator/CacheAllocator.h | 14 ++--- cachelib/allocator/CacheItem-inl.h | 33 +++++----- cachelib/allocator/CacheItem.h | 32 +++++----- cachelib/allocator/MM2Q-inl.h | 4 +- cachelib/allocator/MMLru-inl.h | 4 +- cachelib/allocator/MMTinyLFU-inl.h | 3 +- cachelib/allocator/MMTinyLFU.h | 4 +- cachelib/allocator/Refcount.h | 49 +++++++-------- cachelib/allocator/tests/ItemTest.cpp | 4 +- cachelib/allocator/tests/RefCountTest.cpp | 28 ++++----- 11 files changed, 118 insertions(+), 134 deletions(-) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index ecc0f4acd4..a9cd7bc3b0 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -819,20 +819,20 @@ CacheAllocator::releaseBackToAllocator(Item& it, removeFromMMContainer(*head); - // If this chained item is marked as moving, we will not free it. - // We must capture the moving state before we do the decRef when + // If this chained item is marked as exclusive, we will not free it. + // We must capture the exclusive state before we do the decRef when // we know the item must still be valid - const bool wasMoving = head->isMoving(); + const bool wasExclusive = head->isExclusive(); // Decref and check if we were the last reference. 
Now if the item - // was marked moving, after decRef, it will be free to be released + // was marked exclusive, after decRef, it will be free to be released // by slab release thread const auto childRef = head->decRef(); - // If the item is already moving and we already decremented the + // If the item is already exclusive and we already decremented the // refcount, we don't need to free this item. We'll let the slab // release thread take care of that - if (!wasMoving) { + if (!wasExclusive) { if (childRef != 0) { throw std::runtime_error(folly::sformat( "chained item refcount is not zero. We cannot proceed! " @@ -840,7 +840,7 @@ CacheAllocator::releaseBackToAllocator(Item& it, childRef, head->toString())); } - // Item is not moving and refcount is 0, we can proceed to + // Item is not exclusive and refcount is 0, we can proceed to // free it or recylce the memory if (head == toRecycle) { XDCHECK(ReleaseRes::kReleased != res); @@ -1116,7 +1116,8 @@ bool CacheAllocator::moveRegularItem(Item& oldItem, // this item through an API such as remove(itemHandle). In this case, // it is unsafe to replace the old item with a new one, so we should // also abort. 
- if (!accessContainer_->replaceIf(oldItem, *newItemHdl, itemMovingPredicate)) { + if (!accessContainer_->replaceIf(oldItem, *newItemHdl, + itemExlusivePredicate)) { return false; } @@ -1165,7 +1166,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // This item has been unlinked from its parent and we're the only // owner of it, so we're done here - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1196,7 +1197,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // In case someone else had removed this chained item from its parent by now // So we check again to see if the it has been unlinked from its parent - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1212,7 +1213,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // parent's chain and the MMContainer. auto oldItemHandle = replaceChainedItemLocked(oldItem, std::move(newItemHdl), *parentHandle); - XDCHECK(oldItemHandle->isMoving()); + XDCHECK(oldItemHandle->isExclusive()); XDCHECK(!oldItemHandle->isInMMContainer()); return true; @@ -1247,7 +1248,7 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { : toRecycle_; // make sure no other thead is evicting the item - if (candidate_->getRefCount() == 0 && candidate_->markMoving()) { + if (candidate_->getRefCount() == 0 && candidate_->markExclusive()) { toRecycle = toRecycle_; candidate = candidate_; return; @@ -1269,9 +1270,9 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { { auto toReleaseHandle = evictNormalItem(*candidate); // destroy toReleseHandle. The item won't be release to allocator - // since we marked it as moving. + // since we marked it as exclusive. } - const auto ref = candidate->unmarkMoving(); + const auto ref = candidate->unmarkExclusive(); if (ref == 0u) { // recycle the item. 
it's safe to do so, even if toReleaseHandle was @@ -2304,7 +2305,7 @@ void CacheAllocator::releaseSlabImpl( // Need to mark an item for release before proceeding // If we can't mark as moving, it means the item is already freed const bool isAlreadyFreed = - !markMovingForSlabRelease(releaseContext, alloc, throttler); + !markExclusiveForSlabRelease(releaseContext, alloc, throttler); if (isAlreadyFreed) { continue; } @@ -2349,8 +2350,8 @@ bool CacheAllocator::moveForSlabRelease( stats_.numMoveAttempts.inc(); // Nothing to move and the key is likely also bogus for chained items. - if (oldItem.isOnlyMoving()) { - oldItem.unmarkMoving(); + if (oldItem.isOnlyExclusive()) { + oldItem.unmarkExclusive(); const auto res = releaseBackToAllocator(oldItem, RemoveContext::kNormal, false); XDCHECK(res == ReleaseRes::kReleased); @@ -2389,7 +2390,7 @@ bool CacheAllocator::moveForSlabRelease( // that's identical to this one to replace it. Here we just need to wait // until all users have dropped the item handles before we can proceed. startTime = util::getCurrentTimeSec(); - while (!oldItem.isOnlyMoving()) { + while (!oldItem.isOnlyExclusive()) { throttleWith(throttler, [&] { XLOGF(WARN, "Spent {} seconds, slab release still waiting for refcount to " @@ -2444,7 +2445,7 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) { } // Set up the destination for the move. Since oldChainedItem would have - // the moving bit set, it won't be picked for eviction. + // the exclusive bit set, it won't be picked for eviction. auto newItemHdl = allocateChainedItemInternal(parentHandle, oldChainedItem.getSize()); if (!newItemHdl) { @@ -2496,7 +2497,7 @@ bool CacheAllocator::tryMovingForSlabRelease( // item is still valid. 
const std::string parentKey = oldItem.asChainedItem().getParentItem(compressor_).getKey().str(); - if (oldItem.isOnlyMoving()) { + if (oldItem.isOnlyExclusive()) { // If chained item no longer has a refcount, its parent is already // being released, so we abort this try to moving. return false; @@ -2527,11 +2528,11 @@ void CacheAllocator::evictForSlabRelease( while (true) { stats_.numEvictionAttempts.inc(); - // if the item is already in a state where only the moving bit is set, - // nothing needs to be done. We simply need to unmark moving bit and free + // if the item is already in a state where only the exclusive bit is set, + // nothing needs to be done. We simply need to unmark exclusive bit and free // the item. - if (item.isOnlyMoving()) { - item.unmarkMoving(); + if (item.isOnlyExclusive()) { + item.unmarkExclusive(); const auto res = releaseBackToAllocator(item, RemoveContext::kNormal, false); XDCHECK(ReleaseRes::kReleased == res); @@ -2561,10 +2562,8 @@ void CacheAllocator::evictForSlabRelease( stats_.numEvictionSuccesses.inc(); - // we have the last handle. no longer need to hold on to the moving bit - item.unmarkMoving(); - - XDCHECK(owningHandle->isExclusive()); + // we have the last handle. 
no longer need to hold on to the exclusive bit + item.unmarkExclusive(); // manually decrement the refcount to call releaseBackToAllocator const auto ref = decRef(*owningHandle); @@ -2576,7 +2575,7 @@ void CacheAllocator::evictForSlabRelease( } if (shutDownInProgress_) { - item.unmarkMoving(); + item.unmarkExclusive(); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while trying to evict" @@ -2602,9 +2601,9 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle CacheAllocator::evictNormalItem(Item& item) { - XDCHECK(item.isMoving()); + XDCHECK(item.isExclusive()); - if (item.isOnlyMoving()) { + if (item.isOnlyExclusive()) { return WriteHandle{}; } @@ -2616,7 +2615,7 @@ CacheAllocator::evictNormalItem(Item& item) { // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot - // be freed as long as we have the moving bit set. + // be freed as long as we have the exclusive bit set. auto handle = accessContainer_->removeIf(item, std::move(predicate)); if (!handle) { @@ -2640,7 +2639,7 @@ CacheAllocator::evictNormalItem(Item& item) { template typename CacheAllocator::WriteHandle CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { - XDCHECK(child.isMoving()); + XDCHECK(child.isExclusive()); // We have the child marked as moving, but dont know anything about the // state of the parent. Unlike the case of regular eviction where we are @@ -2662,7 +2661,7 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // check if the child is still in mmContainer and the expected parent is // valid under the chained item lock. 
if (expectedParent.getKey() != parentKey || !child.isInMMContainer() || - child.isOnlyMoving() || + child.isOnlyExclusive() || &expectedParent != &child.getParentItem(compressor_) || !expectedParent.isAccessible() || !expectedParent.hasChainedItem()) { return {}; @@ -2717,14 +2716,14 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // In case someone else had removed this chained item from its parent by now // So we check again to see if it has been unlinked from its parent - if (!child.isInMMContainer() || child.isOnlyMoving()) { + if (!child.isInMMContainer() || child.isOnlyExclusive()) { return {}; } // check after removing from the MMContainer that the parent is still not // being marked as moving. If parent is moving, it will release the child // item and we will wait for that. - if (parentHandle->isMoving()) { + if (parentHandle->isExclusive()) { return {}; } @@ -2757,7 +2756,7 @@ bool CacheAllocator::removeIfExpired(const ReadHandle& handle) { } template -bool CacheAllocator::markMovingForSlabRelease( +bool CacheAllocator::markExclusiveForSlabRelease( const SlabReleaseContext& ctx, void* alloc, util::Throttler& throttler) { // MemoryAllocator::processAllocForRelease will execute the callback // if the item is not already free. 
So there are three outcomes here: @@ -2776,7 +2775,7 @@ bool CacheAllocator::markMovingForSlabRelease( // Since this callback is executed, the item is not yet freed itemFreed = false; Item* item = static_cast(memory); - if (item->markMoving()) { + if (item->markExclusive()) { markedMoving = true; } }; @@ -2798,7 +2797,7 @@ bool CacheAllocator::markMovingForSlabRelease( itemFreed = true; if (shutDownInProgress_) { - XDCHECK(!static_cast(alloc)->isMoving()); + XDCHECK(!static_cast(alloc)->isExclusive()); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while still trying to mark" diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 9fcf18b6f1..1dd33d2d3b 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1455,7 +1455,7 @@ class CacheAllocator : public CacheBase { FOLLY_ALWAYS_INLINE WriteHandle findFastImpl(Key key, AccessMode mode); // Moves a regular item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. // @@ -1478,7 +1478,7 @@ class CacheAllocator : public CacheBase { folly::IOBuf convertToIOBufT(Handle& handle); // Moves a chained item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. 
// @@ -1684,9 +1684,9 @@ class CacheAllocator : public CacheBase { // @return true when successfully marked as moving, // fasle when this item has already been freed - bool markMovingForSlabRelease(const SlabReleaseContext& ctx, - void* alloc, - util::Throttler& throttler); + bool markExclusiveForSlabRelease(const SlabReleaseContext& ctx, + void* alloc, + util::Throttler& throttler); // "Move" (by copying) the content in this item to another memory // location by invoking the move callback. @@ -1855,7 +1855,7 @@ class CacheAllocator : public CacheBase { std::optional saveNvmCache(); void saveRamCache(); - static bool itemMovingPredicate(const Item& item) { + static bool itemExlusivePredicate(const Item& item) { return item.getRefCount() == 0; } @@ -1864,7 +1864,7 @@ class CacheAllocator : public CacheBase { } static bool parentEvictForSlabReleasePredicate(const Item& item) { - return item.getRefCount() == 1 && !item.isMoving(); + return item.getRefCount() == 1 && !item.isExclusive(); } std::unique_ptr createDeserializer(); diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h index 878bf36f11..274e7c141a 100644 --- a/cachelib/allocator/CacheItem-inl.h +++ b/cachelib/allocator/CacheItem-inl.h @@ -141,12 +141,13 @@ std::string CacheItem::toString() const { return folly::sformat( "item: " "memory={}:raw-ref={}:size={}:key={}:hex-key={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime=" + "{}:" "expTime={}:updateTime={}:isNvmClean={}:isNvmEvicted={}:hasChainedItem=" "{}", this, getRefCountAndFlagsRaw(), getSize(), folly::humanify(getKey().str()), folly::hexlify(getKey()), - isInMMContainer(), isAccessible(), isMoving(), getRefCount(), + isInMMContainer(), isAccessible(), isExclusive(), getRefCount(), getCreationTime(), getExpiryTime(), getLastAccessTime(), isNvmClean(), isNvmEvicted(), hasChainedItem()); } @@ -178,11 +179,6 @@ bool 
CacheItem::isDrained() const noexcept { return ref_.isDrained(); } -template -bool CacheItem::isExclusive() const noexcept { - return ref_.isExclusive(); -} - template void CacheItem::markAccessible() noexcept { ref_.markAccessible(); @@ -214,23 +210,23 @@ bool CacheItem::isInMMContainer() const noexcept { } template -bool CacheItem::markMoving() noexcept { - return ref_.markMoving(); +bool CacheItem::markExclusive() noexcept { + return ref_.markExclusive(); } template -RefcountWithFlags::Value CacheItem::unmarkMoving() noexcept { - return ref_.unmarkMoving(); +RefcountWithFlags::Value CacheItem::unmarkExclusive() noexcept { + return ref_.unmarkExclusive(); } template -bool CacheItem::isMoving() const noexcept { - return ref_.isMoving(); +bool CacheItem::isExclusive() const noexcept { + return ref_.isExclusive(); } template -bool CacheItem::isOnlyMoving() const noexcept { - return ref_.isOnlyMoving(); +bool CacheItem::isOnlyExclusive() const noexcept { + return ref_.isOnlyExclusive(); } template @@ -332,7 +328,7 @@ bool CacheItem::updateExpiryTime(uint32_t expiryTimeSecs) noexcept { // check for moving to make sure we are not updating the expiry time while at // the same time re-allocating the item with the old state of the expiry time // in moveRegularItem(). 
See D6852328 - if (isMoving() || !isInMMContainer() || isChainedItem()) { + if (isExclusive() || !isInMMContainer() || isChainedItem()) { return false; } // attempt to atomically update the value of expiryTime @@ -448,10 +444,11 @@ std::string CacheChainedItem::toString() const { return folly::sformat( "chained item: " "memory={}:raw-ref={}:size={}:parent-compressed-ptr={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime={}" + ":" "expTime={}:updateTime={}", this, Item::getRefCountAndFlagsRaw(), Item::getSize(), cPtr.getRaw(), - Item::isInMMContainer(), Item::isAccessible(), Item::isMoving(), + Item::isInMMContainer(), Item::isAccessible(), Item::isExclusive(), Item::getRefCount(), Item::getCreationTime(), Item::getExpiryTime(), Item::getLastAccessTime()); } diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h index 96dd50df66..1a723ffafe 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -243,23 +243,23 @@ class CACHELIB_PACKED_ATTR CacheItem { * * This API will only succeed when an item is a regular item, and user * has already inserted it into the cache (via @insert or @insertOrReplace). - * In addition, the item cannot be in a "moving" state. + * In addition, the item cannot be in a "exclusive" state. * * @param expiryTime the expiryTime value to update to * * @return boolean indicating whether expiry time was successfully updated - * false when item is not linked in cache, or in moving state, or a + * false when item is not linked in cache, or in exclusive state, or a * chained item */ bool updateExpiryTime(uint32_t expiryTimeSecs) noexcept; // Same as @updateExpiryTime, but sets expiry time to @ttl seconds from now. // It has the same restrictions as @updateExpiryTime. An item must be a - // regular item and is part of the cache and NOT in the moving state. 
+ // regular item and is part of the cache and NOT in the exclusive state. // // @param ttl TTL (from now) // @return boolean indicating whether expiry time was successfully updated - // false when item is not linked in cache, or in moving state, or a + // false when item is not linked in cache, or in exclusive state, or a // chained item bool extendTTL(std::chrono::seconds ttl) noexcept; @@ -317,13 +317,9 @@ class CACHELIB_PACKED_ATTR CacheItem { // Whether or not an item is completely drained of all references including // the internal ones. This means there is no access refcount bits and zero // admin bits set. I.e. refcount is 0 and the item is not linked, accessible, - // nor moving + // nor exclusive bool isDrained() const noexcept; - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept; - /** * The following three functions correspond to the state of the allocation * in the memory management container. This is protected by the @@ -346,20 +342,20 @@ class CACHELIB_PACKED_ATTR CacheItem { /** * The following two functions corresond to whether or not an item is * currently in the process of being moved. This happens during a slab - * rebalance or resize operation. + * rebalance, eviction or resize operation. * - * An item can only be marked moving when `isInMMContainer` returns true. + * An item can only be marked exclusive when `isInMMContainer` returns true. * This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. 
* - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept; - RefcountWithFlags::Value unmarkMoving() noexcept; - bool isMoving() const noexcept; - bool isOnlyMoving() const noexcept; + bool markExclusive() noexcept; + RefcountWithFlags::Value unmarkExclusive() noexcept; + bool isExclusive() const noexcept; + bool isOnlyExclusive() const noexcept; /** * Item cannot be marked both chained allocation and diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 094f08fdc2..62924c59e4 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -243,9 +243,7 @@ bool MM2Q::Container::add(T& node) noexcept { template T::*HookPtr> template void MM2Q::Container::withEvictionIterator(F&& fun) { - lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{lru_.rbegin()}); - }); + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); } template T::*HookPtr> diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 6c3fe4c66f..6c44a1ac5b 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -214,9 +214,7 @@ bool MMLru::Container::add(T& node) noexcept { template T::*HookPtr> template void MMLru::Container::withEvictionIterator(F&& fun) { - lruMutex_->lock_combine([this, &fun]() { - fun(Iterator{lru_.rbegin()}); - }); + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); } template T::*HookPtr> diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index 4ee95a057a..df1136b2ff 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -353,7 +353,6 @@ MMTinyLFU::Container::Iterator::Iterator( const Container& c) noexcept : c_(c), tIter_(c.lru_.getList(LruType::Tiny).rbegin()), - mIter_(c.lru_.getList(LruType::Main).rbegin()) - {} + mIter_(c.lru_.getList(LruType::Main).rbegin()) {} } // 
namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 87362c169a..8767ebc72b 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -401,9 +401,7 @@ class MMTinyLFU { // 1. Invalidate this iterator // 2. Unlock - void destroy() { - reset(); - } + void destroy() { reset(); } // Reset this iterator to the beginning void resetToBegin() { diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 8328019f27..08f3d0888a 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -82,7 +82,7 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // this flag indicates the allocation is being moved elsewhere // (can be triggered by a resize or reblanace operation) - kMoving, + kExclusive, }; /** @@ -119,7 +119,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // Unused. This is just to indciate the maximum number of flags kFlagMax, }; - static_assert(static_cast(kMMFlag0) > static_cast(kMoving), + static_assert(static_cast(kMMFlag0) > + static_cast(kExclusive), "Flags and control bits cannot overlap in bit range."); static_assert(kFlagMax <= NumBits::value, "Too many flags."); @@ -249,16 +250,16 @@ class FOLLY_PACK_ATTR RefcountWithFlags { * an item is currently in the process of being moved. This happens during a * slab rebalance or resize operation or during eviction. * - * An item can only be marked moving when `isInMMContainer` returns true and - * the item is not yet marked as moving. This operation is atomic. + * An item can only be marked exclusive when `isInMMContainer` returns true + * and the item is not yet marked as exclusive. This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. 
 * - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept { - Value bitMask = getAdminRef(); + bool markExclusive() noexcept { + Value bitMask = getAdminRef(); Value conditionBitMask = getAdminRef(); Value* const refPtr = &refCount_; @@ -267,8 +268,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED); while (true) { const bool flagSet = curValue & conditionBitMask; - const bool alreadyMoving = curValue & bitMask; - if (!flagSet || alreadyMoving) { + const bool alreadyExclusive = curValue & bitMask; + if (!flagSet || alreadyExclusive) { return false; } @@ -287,21 +288,23 @@ class FOLLY_PACK_ATTR RefcountWithFlags { } } } - Value unmarkMoving() noexcept { - Value bitMask = ~getAdminRef(); + Value unmarkExclusive() noexcept { + Value bitMask = ~getAdminRef(); return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask; } - bool isMoving() const noexcept { return getRaw() & getAdminRef(); } - bool isOnlyMoving() const noexcept { - // An item is only moving when its refcount is zero and only the moving bit - // among all the control bits is set. This indicates an item is already on - // its way out of cache and does not need to be moved. + bool isExclusive() const noexcept { + return getRaw() & getAdminRef(); + } + bool isOnlyExclusive() const noexcept { + // An item is only exclusive when its refcount is zero and only the exclusive
 // bit among all the control bits is set. This indicates an item is already
 // on its way out of cache and does not need to be moved. 
auto ref = getRefWithAccessAndAdmin(); - bool anyOtherBitSet = ref & ~getAdminRef(); + bool anyOtherBitSet = ref & ~getAdminRef(); if (anyOtherBitSet) { return false; } - return ref & getAdminRef(); + return ref & getAdminRef(); } /** @@ -331,13 +334,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { bool isNvmEvicted() const noexcept { return isFlagSet(); } // Whether or not an item is completely drained of access - // Refcount is 0 and the item is not linked, accessible, nor moving + // Refcount is 0 and the item is not linked, accessible, nor exclusive bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; } - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept { return getRefWithAccessAndAdmin() == 1; } - /** * Functions to set, unset and check flag presence. This is exposed * for external components to manipulate flags. E.g. MMContainer will diff --git a/cachelib/allocator/tests/ItemTest.cpp b/cachelib/allocator/tests/ItemTest.cpp index 45bc529a00..edece3fb58 100644 --- a/cachelib/allocator/tests/ItemTest.cpp +++ b/cachelib/allocator/tests/ItemTest.cpp @@ -83,10 +83,10 @@ TEST(ItemTest, ExpiryTime) { EXPECT_EQ(tenMins, item->getConfiguredTTL()); // Test that writes fail while the item is moving - item->markMoving(); + item->markExclusive(); result = item->updateExpiryTime(0); EXPECT_FALSE(result); - item->unmarkMoving(); + item->unmarkExclusive(); // Test that writes fail while the item is not in an MMContainer item->unmarkInMMContainer(); diff --git a/cachelib/allocator/tests/RefCountTest.cpp b/cachelib/allocator/tests/RefCountTest.cpp index 7d789a85d8..153f4a94a4 100644 --- a/cachelib/allocator/tests/RefCountTest.cpp +++ b/cachelib/allocator/tests/RefCountTest.cpp @@ -81,7 +81,7 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - 
ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -89,7 +89,7 @@ void RefCountTest::testBasic() { ref.markInMMContainer(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -111,7 +111,7 @@ void RefCountTest::testBasic() { // Bumping up access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(RefcountWithFlags::kAccessRefMask, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -128,7 +128,7 @@ void RefCountTest::testBasic() { // Bumping down access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -136,7 +136,7 @@ void RefCountTest::testBasic() { ref.template unSetFlag(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -145,28 +145,28 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); // conditionally set flags - ASSERT_FALSE((ref.markMoving())); + 
ASSERT_FALSE((ref.markExclusive())); ref.markInMMContainer(); - ASSERT_TRUE((ref.markMoving())); - ASSERT_FALSE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.markExclusive())); + ASSERT_FALSE((ref.isOnlyExclusive())); ref.unmarkInMMContainer(); ref.template setFlag(); - // Have no other admin refcount but with a flag still means "isOnlyMoving" - ASSERT_TRUE((ref.isOnlyMoving())); + // Have no other admin refcount but with a flag still means "isOnlyExclusive" + ASSERT_TRUE((ref.isOnlyExclusive())); - // Set some flags and verify that "isOnlyMoving" does not care about flags + // Set some flags and verify that "isOnlyExclusive" does not care about flags ref.markIsChainedItem(); ASSERT_TRUE(ref.isChainedItem()); - ASSERT_TRUE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.isOnlyExclusive())); ref.unmarkIsChainedItem(); ASSERT_FALSE(ref.isChainedItem()); - ASSERT_TRUE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.isOnlyExclusive())); } } // namespace