diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index c6db0b6d78..a9cd7bc3b0 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -819,20 +819,20 @@ CacheAllocator::releaseBackToAllocator(Item& it, removeFromMMContainer(*head); - // If this chained item is marked as moving, we will not free it. - // We must capture the moving state before we do the decRef when + // If this chained item is marked as exclusive, we will not free it. + // We must capture the exclusive state before we do the decRef when // we know the item must still be valid - const bool wasMoving = head->isMoving(); + const bool wasExclusive = head->isExclusive(); // Decref and check if we were the last reference. Now if the item - // was marked moving, after decRef, it will be free to be released + // was marked exclusive, after decRef, it will be free to be released // by slab release thread const auto childRef = head->decRef(); - // If the item is already moving and we already decremented the + // If the item is already exclusive and we already decremented the // refcount, we don't need to free this item. We'll let the slab // release thread take care of that - if (!wasMoving) { + if (!wasExclusive) { if (childRef != 0) { throw std::runtime_error(folly::sformat( "chained item refcount is not zero. We cannot proceed! " @@ -840,7 +840,7 @@ CacheAllocator::releaseBackToAllocator(Item& it, childRef, head->toString())); } - // Item is not moving and refcount is 0, we can proceed to + // Item is not exclusive and refcount is 0, we can proceed to // free it or recycle the memory if (head == toRecycle) { XDCHECK(ReleaseRes::kReleased != res); @@ -1116,7 +1116,8 @@ bool CacheAllocator::moveRegularItem(Item& oldItem, // this item through an API such as remove(itemHandle). In this case, // it is unsafe to replace the old item with a new one, so we should // also abort. - if (!accessContainer_->replaceIf(oldItem, *newItemHdl, itemMovingPredicate)) { + if (!accessContainer_->replaceIf(oldItem, *newItemHdl, + itemExclusivePredicate)) { return false; } @@ -1165,7 +1166,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // This item has been unlinked from its parent and we're the only // owner of it, so we're done here - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1196,7 +1197,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // In case someone else had removed this chained item from its parent by now // So we check again to see if it has been unlinked from its parent - if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) { + if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) { return false; } @@ -1212,7 +1213,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem, // parent's chain and the MMContainer.
auto oldItemHandle = replaceChainedItemLocked(oldItem, std::move(newItemHdl), *parentHandle); - XDCHECK(oldItemHandle->isMoving()); + XDCHECK(oldItemHandle->isExclusive()); XDCHECK(!oldItemHandle->isInMMContainer()); return true; @@ -1226,65 +1227,82 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) { // Keep searching for a candidate until we were able to evict it // or until the search limit has been exhausted unsigned int searchTries = 0; - auto itr = mmContainer.getEvictionIterator(); while ((config_.evictionSearchTries == 0 || - config_.evictionSearchTries > searchTries) && - itr) { + config_.evictionSearchTries > searchTries)) { ++searchTries; - Item* candidate = itr.get(); + Item* toRecycle = nullptr; + Item* candidate = nullptr; + + mmContainer.withEvictionIterator( + [this, &candidate, &toRecycle, &searchTries](auto&& itr) { + while ((config_.evictionSearchTries == 0 || + config_.evictionSearchTries > searchTries) && + itr) { + ++searchTries; + + auto* toRecycle_ = itr.get(); + auto* candidate_ = + toRecycle_->isChainedItem() + ? &toRecycle_->asChainedItem().getParentItem(compressor_) + : toRecycle_; + + // make sure no other thread is evicting the item + if (candidate_->getRefCount() == 0 && candidate_->markExclusive()) { + toRecycle = toRecycle_; + candidate = candidate_; + return; + } + + ++itr; + } + }); + + if (!toRecycle) + continue; + + XDCHECK(toRecycle); + XDCHECK(candidate); + // for chained items, the ownership of the parent can change. We try to // evict what we think as parent and see if the eviction of parent // recycles the child we intend to. - auto toReleaseHandle = - itr->isChainedItem() - ? advanceIteratorAndTryEvictChainedItem(itr) : advanceIteratorAndTryEvictRegularItem(mmContainer, itr); + { + auto toReleaseHandle = evictNormalItem(*candidate); + // destroy toReleaseHandle. The item won't be released to the allocator + // since we marked it as exclusive. + } + const auto ref = candidate->unmarkExclusive(); - if (toReleaseHandle) { - if (toReleaseHandle->hasChainedItem()) { + if (ref == 0u) { + // Recycle the item. It's safe to do so, even if toReleaseHandle was + // NULL. If `ref` == 0 then it means that we are the last holder of + // that item. + if (candidate->hasChainedItem()) { (*stats_.chainedItemEvictions)[pid][cid].inc(); } else { (*stats_.regularItemEvictions)[pid][cid].inc(); } if (auto eventTracker = getEventTracker()) { - eventTracker->record( - AllocatorApiEvent::DRAM_EVICT, toReleaseHandle->getKey(), - AllocatorApiResult::EVICTED, toReleaseHandle->getSize(), - toReleaseHandle->getConfiguredTTL().count()); + eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(), + AllocatorApiResult::EVICTED, candidate->getSize(), + candidate->getConfiguredTTL().count()); } - // Invalidate iterator since later on we may use this mmContainer - // again, which cannot be done unless we drop this iterator - itr.destroy(); - - // we must be the last handle and for chained items, this will be - // the parent.
- XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem()); - XDCHECK_EQ(1u, toReleaseHandle->getRefCount()); - - // We manually release the item here because we don't want to - // invoke the Item Handle's destructor which will be decrementing - // an already zero refcount, which will throw exception - auto& itemToRelease = *toReleaseHandle.release(); - - // Decrementing the refcount because we want to recycle the item - const auto ref = decRef(itemToRelease); - XDCHECK_EQ(0u, ref); // check if by releasing the item we intend to, we actually // recycle the candidate. if (ReleaseRes::kRecycled == - releaseBackToAllocator(itemToRelease, RemoveContext::kEviction, - /* isNascent */ false, candidate)) { - return candidate; + releaseBackToAllocator(*candidate, RemoveContext::kEviction, + /* isNascent */ false, toRecycle)) { + return toRecycle; + } + } else { + if (candidate->hasChainedItem()) { + stats_.evictFailParentAC.inc(); + } else { + stats_.evictFailAC.inc(); } - } - - // If we destroyed the itr to possibly evict and failed, we restart - // from the beginning again - if (!itr) { - itr.resetToBegin(); } } return nullptr; @@ -1338,143 +1356,6 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive( return true; } -template -typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictRegularItem( - MMContainer& mmContainer, EvictionIterator& itr) { - // we should flush this to nvmcache if it is not already present in nvmcache - // and the item is not expired. - Item& item = *itr; - const bool evictToNvmCache = shouldWriteToNvmCache(item); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey()) - : typename NvmCacheT::PutToken{}; - // record the in-flight eviciton. If not, we move on to next item to avoid - // stalling eviction. - if (evictToNvmCache && !token.isValid()) { - ++itr; - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // If there are other accessors, we should abort. Acquire a handle here since - // if we remove the item from both access containers and mm containers - // below, we will need a handle to ensure proper cleanup in case we end up - // not evicting this item - auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate); - - if (!evictHandle) { - ++itr; - stats_.evictFailAC.inc(); - return evictHandle; - } - - mmContainer.remove(itr); - XDCHECK_EQ(reinterpret_cast(evictHandle.get()), - reinterpret_cast(&item)); - XDCHECK(!evictHandle->isInMMContainer()); - XDCHECK(!evictHandle->isAccessible()); - - // If the item is now marked as moving, that means its corresponding slab is - // being released right now. So, we look for the next item that is eligible - // for eviction. It is safe to destroy the handle here since the moving bit - // is set. Iterator was already advance by the remove call above. - if (evictHandle->isMoving()) { - stats_.evictFailMove.inc(); - return WriteHandle{}; - } - - // Invalidate iterator since later on if we are not evicting this - // item, we may need to rely on the handle we created above to ensure - // proper cleanup if the item's raw refcount has dropped to 0. 
- // And since this item may be a parent item that has some child items - // in this very same mmContainer, we need to make sure we drop this - // exclusive iterator so we can gain access to it when we're cleaning - // up the child items - itr.destroy(); - - // Ensure that there are no accessors after removing from the access - // container - XDCHECK(evictHandle->getRefCount() == 1); - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) { - XDCHECK(token.isValid()); - nvmCache_->put(evictHandle, std::move(token)); - } - return evictHandle; -} - -template -typename CacheAllocator::WriteHandle -CacheAllocator::advanceIteratorAndTryEvictChainedItem( - EvictionIterator& itr) { - XDCHECK(itr->isChainedItem()); - - ChainedItem* candidate = &itr->asChainedItem(); - ++itr; - - // The parent could change at any point through transferChain. However, if - // that happens, we would realize that the releaseBackToAllocator return - // kNotRecycled and we would try another chained item, leading to transient - // failure. - auto& parent = candidate->getParentItem(compressor_); - - const bool evictToNvmCache = shouldWriteToNvmCache(parent); - - auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey()) - : typename NvmCacheT::PutToken{}; - - // if token is invalid, return. iterator is already advanced. - if (evictToNvmCache && !token.isValid()) { - stats_.evictFailConcurrentFill.inc(); - return WriteHandle{}; - } - - // check if the parent exists in the hashtable and refcount is drained. - auto parentHandle = - accessContainer_->removeIf(parent, &itemEvictionPredicate); - if (!parentHandle) { - stats_.evictFailParentAC.inc(); - return parentHandle; - } - - // Invalidate iterator since later on we may use the mmContainer - // associated with this iterator which cannot be done unless we - // drop this iterator - // - // This must be done once we know the parent is not nullptr. - // Since we can very well be the last holder of this parent item, - // which may have a chained item that is linked in this MM container. - itr.destroy(); - - // Ensure we have the correct parent and we're the only user of the - // parent, then free it from access container. Otherwise, we abort - XDCHECK_EQ(reinterpret_cast(&parent), - reinterpret_cast(parentHandle.get())); - XDCHECK_EQ(1u, parent.getRefCount()); - - removeFromMMContainer(*parentHandle); - - XDCHECK(!parent.isInMMContainer()); - XDCHECK(!parent.isAccessible()); - - // We need to make sure the parent is not marked as moving - // and we're the only holder of the parent item. Safe to destroy the handle - // here since moving bit is set. 
- if (parentHandle->isMoving()) { - stats_.evictFailParentMove.inc(); - return WriteHandle{}; - } - - if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) { - XDCHECK(token.isValid()); - XDCHECK(parentHandle->hasChainedItem()); - nvmCache_->put(parentHandle, std::move(token)); - } - - return parentHandle; -} - template typename CacheAllocator::RemoveRes CacheAllocator::remove(typename Item::Key key) { @@ -1910,13 +1791,13 @@ std::vector CacheAllocator::dumpEvictionIterator( std::vector content; auto& mm = *mmContainers_[pid][cid]; - auto evictItr = mm.getEvictionIterator(); - size_t i = 0; - while (evictItr && i < numItems) { - content.push_back(evictItr->toString()); - ++evictItr; - ++i; - } + + mm.withEvictionIterator([&content, numItems](auto&& itr) { + while (itr && content.size() < numItems) { + content.push_back(itr->toString()); + ++itr; + } + }); return content; } @@ -2424,7 +2305,7 @@ void CacheAllocator::releaseSlabImpl( // Need to mark an item for release before proceeding // If we can't mark as moving, it means the item is already freed const bool isAlreadyFreed = - !markMovingForSlabRelease(releaseContext, alloc, throttler); + !markExclusiveForSlabRelease(releaseContext, alloc, throttler); if (isAlreadyFreed) { continue; } @@ -2469,8 +2350,8 @@ bool CacheAllocator::moveForSlabRelease( stats_.numMoveAttempts.inc(); // Nothing to move and the key is likely also bogus for chained items. - if (oldItem.isOnlyMoving()) { - oldItem.unmarkMoving(); + if (oldItem.isOnlyExclusive()) { + oldItem.unmarkExclusive(); const auto res = releaseBackToAllocator(oldItem, RemoveContext::kNormal, false); XDCHECK(res == ReleaseRes::kReleased); @@ -2509,7 +2390,7 @@ bool CacheAllocator::moveForSlabRelease( // that's identical to this one to replace it. Here we just need to wait // until all users have dropped the item handles before we can proceed. startTime = util::getCurrentTimeSec(); - while (!oldItem.isOnlyMoving()) { + while (!oldItem.isOnlyExclusive()) { throttleWith(throttler, [&] { XLOGF(WARN, "Spent {} seconds, slab release still waiting for refcount to " @@ -2564,7 +2445,7 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) { } // Set up the destination for the move. Since oldChainedItem would have - // the moving bit set, it won't be picked for eviction. + // the exclusive bit set, it won't be picked for eviction. auto newItemHdl = allocateChainedItemInternal(parentHandle, oldChainedItem.getSize()); if (!newItemHdl) { @@ -2616,7 +2497,7 @@ bool CacheAllocator::tryMovingForSlabRelease( // item is still valid. const std::string parentKey = oldItem.asChainedItem().getParentItem(compressor_).getKey().str(); - if (oldItem.isOnlyMoving()) { + if (oldItem.isOnlyExclusive()) { // If chained item no longer has a refcount, its parent is already // being released, so we abort this try to moving. return false; @@ -2647,11 +2528,11 @@ void CacheAllocator::evictForSlabRelease( while (true) { stats_.numEvictionAttempts.inc(); - // if the item is already in a state where only the moving bit is set, - // nothing needs to be done. We simply need to unmark moving bit and free + // if the item is already in a state where only the exclusive bit is set, + // nothing needs to be done. We simply need to unmark exclusive bit and free // the item. 
- if (item.isOnlyMoving()) { - item.unmarkMoving(); + if (item.isOnlyExclusive()) { + item.unmarkExclusive(); const auto res = releaseBackToAllocator(item, RemoveContext::kNormal, false); XDCHECK(ReleaseRes::kReleased == res); @@ -2664,7 +2545,7 @@ void CacheAllocator::evictForSlabRelease( auto owningHandle = item.isChainedItem() ? evictChainedItemForSlabRelease(item.asChainedItem()) - : evictNormalItemForSlabRelease(item); + : evictNormalItem(item); // we managed to evict the corresponding owner of the item and have the // last handle for the owner. @@ -2681,10 +2562,8 @@ void CacheAllocator::evictForSlabRelease( stats_.numEvictionSuccesses.inc(); - // we have the last handle. no longer need to hold on to the moving bit - item.unmarkMoving(); - - XDCHECK(owningHandle->isExclusive()); + // we have the last handle. no longer need to hold on to the exclusive bit + item.unmarkExclusive(); // manually decrement the refcount to call releaseBackToAllocator const auto ref = decRef(*owningHandle); @@ -2696,7 +2575,7 @@ void CacheAllocator::evictForSlabRelease( } if (shutDownInProgress_) { - item.unmarkMoving(); + item.unmarkExclusive(); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while trying to evict" @@ -2721,10 +2600,10 @@ void CacheAllocator::evictForSlabRelease( template typename CacheAllocator::WriteHandle -CacheAllocator::evictNormalItemForSlabRelease(Item& item) { - XDCHECK(item.isMoving()); +CacheAllocator::evictNormalItem(Item& item) { + XDCHECK(item.isExclusive()); - if (item.isOnlyMoving()) { + if (item.isOnlyExclusive()) { return WriteHandle{}; } @@ -2736,7 +2615,7 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) { // We remove the item from both access and mm containers. It doesn't matter // if someone else calls remove on the item at this moment, the item cannot - // be freed as long as we have the moving bit set. + // be freed as long as we have the exclusive bit set. auto handle = accessContainer_->removeIf(item, std::move(predicate)); if (!handle) { @@ -2760,7 +2639,7 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) { template typename CacheAllocator::WriteHandle CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { - XDCHECK(child.isMoving()); + XDCHECK(child.isExclusive()); // We have the child marked as moving, but dont know anything about the // state of the parent. Unlike the case of regular eviction where we are @@ -2782,7 +2661,7 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // check if the child is still in mmContainer and the expected parent is // valid under the chained item lock. if (expectedParent.getKey() != parentKey || !child.isInMMContainer() || - child.isOnlyMoving() || + child.isOnlyExclusive() || &expectedParent != &child.getParentItem(compressor_) || !expectedParent.isAccessible() || !expectedParent.hasChainedItem()) { return {}; @@ -2837,14 +2716,14 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) { // In case someone else had removed this chained item from its parent by now // So we check again to see if it has been unlinked from its parent - if (!child.isInMMContainer() || child.isOnlyMoving()) { + if (!child.isInMMContainer() || child.isOnlyExclusive()) { return {}; } // check after removing from the MMContainer that the parent is still not // being marked as moving. If parent is moving, it will release the child // item and we will wait for that. 
- if (parentHandle->isMoving()) { + if (parentHandle->isExclusive()) { return {}; } @@ -2877,7 +2756,7 @@ bool CacheAllocator::removeIfExpired(const ReadHandle& handle) { } template -bool CacheAllocator::markMovingForSlabRelease( +bool CacheAllocator::markExclusiveForSlabRelease( const SlabReleaseContext& ctx, void* alloc, util::Throttler& throttler) { // MemoryAllocator::processAllocForRelease will execute the callback // if the item is not already free. So there are three outcomes here: @@ -2896,7 +2775,7 @@ bool CacheAllocator::markMovingForSlabRelease( // Since this callback is executed, the item is not yet freed itemFreed = false; Item* item = static_cast(memory); - if (item->markMoving()) { + if (item->markExclusive()) { markedMoving = true; } }; @@ -2918,7 +2797,7 @@ bool CacheAllocator::markMovingForSlabRelease( itemFreed = true; if (shutDownInProgress_) { - XDCHECK(!static_cast(alloc)->isMoving()); + XDCHECK(!static_cast(alloc)->isExclusive()); allocator_->abortSlabRelease(ctx); throw exception::SlabReleaseAborted( folly::sformat("Slab Release aborted while still trying to mark" diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 89086ddfe0..1dd33d2d3b 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1455,7 +1455,7 @@ class CacheAllocator : public CacheBase { FOLLY_ALWAYS_INLINE WriteHandle findFastImpl(Key key, AccessMode mode); // Moves a regular item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. // @@ -1478,7 +1478,7 @@ class CacheAllocator : public CacheBase { folly::IOBuf convertToIOBufT(Handle& handle); // Moves a chained item to a different slab. This should only be used during - // slab release after the item's moving bit has been set. The user supplied + // slab release after the item's exclusive bit has been set. The user supplied // callback is responsible for copying the contents and fixing the semantics // of chained item. // @@ -1609,25 +1609,6 @@ class CacheAllocator : public CacheBase { using EvictionIterator = typename MMContainer::Iterator; - // Advance the current iterator and try to evict a regular item - // - // @param mmContainer the container to look for evictions. - // @param itr iterator holding the item - // - // @return valid handle to regular item on success. This will be the last - // handle to the item. On failure an empty handle. - WriteHandle advanceIteratorAndTryEvictRegularItem(MMContainer& mmContainer, - EvictionIterator& itr); - - // Advance the current iterator and try to evict a chained item - // Iterator may also be reset during the course of this function - // - // @param itr iterator holding the item - // - // @return valid handle to the parent item on success. 
This will be the last - // handle to the item - WriteHandle advanceIteratorAndTryEvictChainedItem(EvictionIterator& itr); - // Deserialize CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object @@ -1703,9 +1684,9 @@ class CacheAllocator : public CacheBase { // @return true when successfully marked as moving, // false when this item has already been freed - bool markMovingForSlabRelease(const SlabReleaseContext& ctx, - void* alloc, - util::Throttler& throttler); + bool markExclusiveForSlabRelease(const SlabReleaseContext& ctx, + void* alloc, + util::Throttler& throttler); // "Move" (by copying) the content in this item to another memory // location by invoking the move callback. @@ -1745,7 +1726,7 @@ class CacheAllocator : public CacheBase { // // @return last handle corresponding to item on success. empty handle on // failure. caller can retry if needed. - WriteHandle evictNormalItemForSlabRelease(Item& item); + WriteHandle evictNormalItem(Item& item); // Helper function to evict a child item for slab release // As a side effect, the parent item is also evicted @@ -1874,20 +1855,16 @@ class CacheAllocator : public CacheBase { std::optional saveNvmCache(); void saveRamCache(); - static bool itemMovingPredicate(const Item& item) { + static bool itemExclusivePredicate(const Item& item) { return item.getRefCount() == 0; } - static bool itemEvictionPredicate(const Item& item) { - return item.getRefCount() == 0 && !item.isMoving(); - } - static bool itemExpiryPredicate(const Item& item) { return item.getRefCount() == 1 && item.isExpired(); } static bool parentEvictForSlabReleasePredicate(const Item& item) { - return item.getRefCount() == 1 && !item.isMoving(); + return item.getRefCount() == 1 && !item.isExclusive(); } std::unique_ptr createDeserializer(); diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h index a1c2456af5..274e7c141a 100644 --- a/cachelib/allocator/CacheItem-inl.h +++ b/cachelib/allocator/CacheItem-inl.h @@ -141,12 +141,13 @@ std::string CacheItem::toString() const { return folly::sformat( "item: " "memory={}:raw-ref={}:size={}:key={}:hex-key={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime=" + "{}:" "expTime={}:updateTime={}:isNvmClean={}:isNvmEvicted={}:hasChainedItem=" "{}", this, getRefCountAndFlagsRaw(), getSize(), folly::humanify(getKey().str()), folly::hexlify(getKey()), - isInMMContainer(), isAccessible(), isMoving(), getRefCount(), + isInMMContainer(), isAccessible(), isExclusive(), getRefCount(), getCreationTime(), getExpiryTime(), getLastAccessTime(), isNvmClean(), isNvmEvicted(), hasChainedItem()); } @@ -178,11 +179,6 @@ bool CacheItem::isDrained() const noexcept { return ref_.isDrained(); } -template -bool CacheItem::isExclusive() const noexcept { - return ref_.isExclusive(); -} - template void CacheItem::markAccessible() noexcept { ref_.markAccessible(); @@ -214,23 +210,23 @@ bool CacheItem::isInMMContainer() const noexcept { } template -bool CacheItem::markMoving() noexcept { - return ref_.markMoving(); +bool CacheItem::markExclusive() noexcept { + return ref_.markExclusive(); } template -void CacheItem::unmarkMoving() noexcept { - ref_.unmarkMoving(); +RefcountWithFlags::Value CacheItem::unmarkExclusive() noexcept { + return ref_.unmarkExclusive(); } template -bool CacheItem::isMoving() const noexcept { - return ref_.isMoving(); +bool CacheItem::isExclusive() const noexcept { + return
ref_.isExclusive(); } template -bool CacheItem::isOnlyMoving() const noexcept { - return ref_.isOnlyMoving(); +bool CacheItem::isOnlyExclusive() const noexcept { + return ref_.isOnlyExclusive(); } template @@ -332,7 +328,7 @@ bool CacheItem::updateExpiryTime(uint32_t expiryTimeSecs) noexcept { // check for moving to make sure we are not updating the expiry time while at // the same time re-allocating the item with the old state of the expiry time // in moveRegularItem(). See D6852328 - if (isMoving() || !isInMMContainer() || isChainedItem()) { + if (isExclusive() || !isInMMContainer() || isChainedItem()) { return false; } // attempt to atomically update the value of expiryTime @@ -448,10 +444,11 @@ std::string CacheChainedItem::toString() const { return folly::sformat( "chained item: " "memory={}:raw-ref={}:size={}:parent-compressed-ptr={}:" - "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:" + "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime={}" + ":" "expTime={}:updateTime={}", this, Item::getRefCountAndFlagsRaw(), Item::getSize(), cPtr.getRaw(), - Item::isInMMContainer(), Item::isAccessible(), Item::isMoving(), + Item::isInMMContainer(), Item::isAccessible(), Item::isExclusive(), Item::getRefCount(), Item::getCreationTime(), Item::getExpiryTime(), Item::getLastAccessTime()); } diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h index 87c8b8a19e..1a723ffafe 100644 --- a/cachelib/allocator/CacheItem.h +++ b/cachelib/allocator/CacheItem.h @@ -243,23 +243,23 @@ class CACHELIB_PACKED_ATTR CacheItem { * * This API will only succeed when an item is a regular item, and user * has already inserted it into the cache (via @insert or @insertOrReplace). - * In addition, the item cannot be in a "moving" state. + * In addition, the item cannot be in an "exclusive" state. * * @param expiryTime the expiryTime value to update to * * @return boolean indicating whether expiry time was successfully updated - * false when item is not linked in cache, or in moving state, or a + * false when item is not linked in cache, or in exclusive state, or a * chained item */ bool updateExpiryTime(uint32_t expiryTimeSecs) noexcept; // Same as @updateExpiryTime, but sets expiry time to @ttl seconds from now. // It has the same restrictions as @updateExpiryTime. An item must be a - // regular item and is part of the cache and NOT in the moving state. + // regular item and is part of the cache and NOT in the exclusive state. // // @param ttl TTL (from now) // @return boolean indicating whether expiry time was successfully updated - // false when item is not linked in cache, or in moving state, or a + // false when item is not linked in cache, or in exclusive state, or a // chained item bool extendTTL(std::chrono::seconds ttl) noexcept; @@ -317,13 +317,9 @@ class CACHELIB_PACKED_ATTR CacheItem { // Whether or not an item is completely drained of all references including // the internal ones. This means there is no access refcount bits and zero // admin bits set. I.e. refcount is 0 and the item is not linked, accessible, - // nor moving + // nor exclusive bool isDrained() const noexcept; - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept; - /** * The following three functions correspond to the state of the allocation * in the memory management container.
This is protected by the @@ -346,20 +342,20 @@ class CACHELIB_PACKED_ATTR CacheItem { /** * The following two functions corresond to whether or not an item is * currently in the process of being moved. This happens during a slab - * rebalance or resize operation. + * rebalance, eviction or resize operation. * - * An item can only be marked moving when `isInMMContainer` returns true. + * An item can only be marked exclusive when `isInMMContainer` returns true. * This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. * - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept; - void unmarkMoving() noexcept; - bool isMoving() const noexcept; - bool isOnlyMoving() const noexcept; + bool markExclusive() noexcept; + RefcountWithFlags::Value unmarkExclusive() noexcept; + bool isExclusive() const noexcept; + bool isOnlyExclusive() const noexcept; /** * Item cannot be marked both chained allocation and diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index 1d2482d45b..62924c59e4 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -241,29 +241,9 @@ bool MM2Q::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MM2Q::Container::Iterator -MM2Q::Container::getEvictionIterator() const noexcept { - // we cannot use combined critical sections with folly::DistributedMutex here - // because the lock is held for the lifetime of the eviction iterator. In - // other words, the abstraction of the iterator just does not lend itself well - // to combinable critical sections as the user can hold the lock for an - // arbitrary amount of time outside a lambda-friendly piece of code (eg. they - // can return the iterator from functions, pass it to functions, etc) - // - // it would be theoretically possible to refactor this interface into - // something like the following to allow combining - // - // mm2q.withEvictionIterator([&](auto iterator) { - // // user code - // }); - // - // at the time of writing it is unclear if the gains from combining are - // reasonable justification for the codemod required to achieve combinability - // as we don't expect this critical section to be the hotspot in user code. - // This is however subject to change at some time in the future as and when - // this assertion becomes false. 
- LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; +template +void MM2Q::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); } template T::*HookPtr> @@ -460,10 +440,5 @@ void MM2Q::Container::reconfigureLocked(const Time& currTime) { lruRefreshTime_.store(lruRefreshTime, std::memory_order_relaxed); } -// Iterator Context Implementation -template T::*HookPtr> -MM2Q::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index 886dffdddd..492ffedea9 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -347,48 +347,7 @@ class MM2Q { Container(const Container&) = delete; Container& operator=(const Container&) = delete; - // context for iterating the MM container. At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. - class Iterator : public LruList::Iterator { - public: - // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; - - Iterator(Iterator&&) noexcept = default; - - // 1. Invalidate this iterator - // 2. Unlock - void destroy() { - LruList::Iterator::reset(); - if (l_.owns_lock()) { - l_.unlock(); - } - } - - // Reset this iterator to the beginning - void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } - LruList::Iterator::resetToBegin(); - } - - private: - // private because it's easy to misuse and cause deadlock for MM2Q - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; - - // only the container can create iterators - friend Container; - - // lock protecting the validity of the iterator - LockHolder l_; - }; + using Iterator = typename LruList::Iterator; // records the information that the node was accessed. This could bump up // the node to the head of the lru depending on the time when the node was @@ -442,10 +401,10 @@ class MM2Q { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. 
+ template + void withEvictionIterator(F&& f); // get the current config as a copy Config getConfig() const; diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 5a66161ae0..6c44a1ac5b 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -212,10 +212,9 @@ bool MMLru::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MMLru::Container::Iterator -MMLru::Container::getEvictionIterator() const noexcept { - LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; +template +void MMLru::Container::withEvictionIterator(F&& fun) { + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); } template T::*HookPtr> @@ -358,10 +357,5 @@ void MMLru::Container::reconfigureLocked(const Time& currTime) { lruRefreshTime_.store(lruRefreshTime, std::memory_order_relaxed); } -// Iterator Context Implementation -template T::*HookPtr> -MMLru::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index f89438dd3d..f16fea568a 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -234,48 +234,7 @@ class MMLru { Container(const Container&) = delete; Container& operator=(const Container&) = delete; - // context for iterating the MM container. At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. - class Iterator : public LruList::Iterator { - public: - // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; - - Iterator(Iterator&&) noexcept = default; - - // 1. Invalidate this iterator - // 2. Unlock - void destroy() { - LruList::Iterator::reset(); - if (l_.owns_lock()) { - l_.unlock(); - } - } - - // Reset this iterator to the beginning - void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } - LruList::Iterator::resetToBegin(); - } - - private: - // private because it's easy to misuse and cause deadlock for MMLru - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; - - // only the container can create iterators - friend Container; - - // lock protecting the validity of the iterator - LockHolder l_; - }; + using Iterator = typename LruList::Iterator; // records the information that the node was accessed. This could bump up // the node to the head of the lru depending on the time when the node was @@ -307,7 +266,6 @@ class MMLru { // state of node is unchanged. bool remove(T& node) noexcept; - using Iterator = Iterator; // same as the above but uses an iterator context. The iterator is updated // on removal of the corresponding node to point to the next node. The // iterator context holds the lock on the lru. @@ -327,10 +285,10 @@ class MMLru { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. 
This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); // get copy of current config Config getConfig() const; diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index de06902482..df1136b2ff 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -214,10 +214,10 @@ bool MMTinyLFU::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MMTinyLFU::Container::Iterator -MMTinyLFU::Container::getEvictionIterator() const noexcept { +template +void MMTinyLFU::Container::withEvictionIterator(F&& fun) { LockHolder l(lruMutex_); - return Iterator{std::move(l), *this}; + fun(Iterator{*this}); } template T::*HookPtr> @@ -350,10 +350,9 @@ void MMTinyLFU::Container::reconfigureLocked(const Time& currTime) { // Iterator Context Implementation template T::*HookPtr> MMTinyLFU::Container::Iterator::Iterator( - LockHolder l, const Container& c) noexcept + const Container& c) noexcept : c_(c), tIter_(c.lru_.getList(LruType::Tiny).rbegin()), - mIter_(c.lru_.getList(LruType::Main).rbegin()), - l_(std::move(l)) {} + mIter_(c.lru_.getList(LruType::Main).rbegin()) {} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h index 14d5ae6906..8767ebc72b 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -356,10 +356,7 @@ class MMTinyLFU { // source node already existed. bool replace(T& oldNode, T& newNode) noexcept; - // context for iterating the MM container. At any given point of time, - // there can be only one iterator active since we need to lock the LRU for - // iteration. we can support multiple iterators at same time, by using a - // shared ptr in the context for the lock holder in the future. + // context for iterating the MM container. class Iterator { public: using ListIterator = typename LruList::DListIterator; @@ -367,6 +364,7 @@ class MMTinyLFU { Iterator(const Iterator&) = delete; Iterator& operator=(const Iterator&) = delete; Iterator(Iterator&&) noexcept = default; + Iterator& operator=(Iterator&&) noexcept = default; Iterator& operator++() noexcept { ++getIter(); @@ -403,28 +401,16 @@ class MMTinyLFU { // 1. Invalidate this iterator // 2. Unlock - void destroy() { - reset(); - if (l_.owns_lock()) { - l_.unlock(); - } - } + void destroy() { reset(); } // Reset this iterator to the beginning void resetToBegin() { - if (!l_.owns_lock()) { - l_.lock(); - } tIter_.resetToBegin(); mIter_.resetToBegin(); } private: - // private because it's easy to misuse and cause deadlock for MMTinyLFU - Iterator& operator=(Iterator&&) noexcept = default; - - // create an lru iterator with the lock being held. - explicit Iterator(LockHolder l, const Container& c) noexcept; + explicit Iterator(const Container& c) noexcept; const ListIterator& getIter() const noexcept { return evictTiny() ? tIter_ : mIter_; @@ -456,8 +442,6 @@ class MMTinyLFU { // Tiny and main cache iterators ListIterator tIter_; ListIterator mIter_; - // lock protecting the validity of the iterator - LockHolder l_; }; Config getConfig() const; @@ -486,10 +470,10 @@ class MMTinyLFU { // Returns the eviction age stats. 
See CacheStats.h for details EvictionAgeStat getEvictionAgeStat(uint64_t projectedLength) const noexcept; - // Obtain an iterator that start from the tail and can be used - // to search for evictions. This iterator holds a lock to this - // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); // for saving the state of the lru // diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h index 631e1695f9..08f3d0888a 100644 --- a/cachelib/allocator/Refcount.h +++ b/cachelib/allocator/Refcount.h @@ -82,7 +82,7 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // this flag indicates the allocation is being moved elsewhere // (can be triggered by a resize or rebalance operation) - kMoving, + kExclusive, }; /** @@ -119,7 +119,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { // Unused. This is just to indicate the maximum number of flags kFlagMax, }; - static_assert(static_cast(kMMFlag0) > static_cast(kMoving), + static_assert(static_cast(kMMFlag0) > + static_cast(kExclusive), "Flags and control bits cannot overlap in bit range."); static_assert(kFlagMax <= NumBits::value, "Too many flags."); @@ -247,18 +248,18 @@ class FOLLY_PACK_ATTR RefcountWithFlags { /** * The following four functions are used to track whether or not * an item is currently in the process of being moved. This happens during a - * slab rebalance or resize operation. + * slab rebalance or resize operation or during eviction. * - * An item can only be marked moving when `isInMMContainer` returns true. - * This operation is atomic. + * An item can only be marked exclusive when `isInMMContainer` returns true + * and the item is not yet marked as exclusive. This operation is atomic. * - * User can also query if an item "isOnlyMoving". This returns true only - * if the refcount is 0 and only the moving bit is set. + * User can also query if an item "isOnlyExclusive". This returns true only + * if the refcount is 0 and only the exclusive bit is set. * - * Unmarking moving does not depend on `isInMMContainer` + * Unmarking exclusive does not depend on `isInMMContainer` */ - bool markMoving() noexcept { - Value bitMask = getAdminRef(); + bool markExclusive() noexcept { + Value bitMask = getAdminRef(); Value conditionBitMask = getAdminRef(); Value* const refPtr = &refCount_; @@ -267,7 +268,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags { Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED); while (true) { const bool flagSet = curValue & conditionBitMask; - if (!flagSet) { + const bool alreadyExclusive = curValue & bitMask; + if (!flagSet || alreadyExclusive) { return false; } @@ -286,21 +288,23 @@ class FOLLY_PACK_ATTR RefcountWithFlags { } } } - void unmarkMoving() noexcept { - Value bitMask = ~getAdminRef(); - __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL); + Value unmarkExclusive() noexcept { + Value bitMask = ~getAdminRef(); + return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask; + } + bool isExclusive() const noexcept { + return getRaw() & getAdminRef(); } - bool isMoving() const noexcept { return getRaw() & getAdminRef(); } - bool isOnlyMoving() const noexcept { - // An item is only moving when its refcount is zero and only the moving bit - // among all the control bits is set. This indicates an item is already on - // its way out of cache and does not need to be moved.
+ bool isOnlyExclusive() const noexcept { + // An item is only exclusive when its refcount is zero and only the exclusive + // bit among all the control bits is set. This indicates an item is already + // on its way out of cache and does not need to be moved. auto ref = getRefWithAccessAndAdmin(); - bool anyOtherBitSet = ref & ~getAdminRef(); + bool anyOtherBitSet = ref & ~getAdminRef(); if (anyOtherBitSet) { return false; } - return ref & getAdminRef(); + return ref & getAdminRef(); } /** @@ -330,13 +334,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags { bool isNvmEvicted() const noexcept { return isFlagSet(); } // Whether or not an item is completely drained of access - // Refcount is 0 and the item is not linked, accessible, nor moving + // Refcount is 0 and the item is not linked, accessible, nor exclusive bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; } - // Whether or not we hold the last exclusive access to this item - // Refcount is 1 and the item is not linked, accessible, nor moving - bool isExclusive() const noexcept { return getRefWithAccessAndAdmin() == 1; } - /** * Functions to set, unset and check flag presence. This is exposed * for external components to manipulate flags. E.g. MMContainer will diff --git a/cachelib/allocator/tests/BaseAllocatorTest.h b/cachelib/allocator/tests/BaseAllocatorTest.h index ce5c1f3efb..ce4bb2977e 100644 --- a/cachelib/allocator/tests/BaseAllocatorTest.h +++ b/cachelib/allocator/tests/BaseAllocatorTest.h @@ -4113,15 +4113,16 @@ class BaseAllocatorTest : public AllocatorTest { // Check that item is in the expected container. bool findItem(AllocatorT& allocator, typename AllocatorT::Item* item) { auto& container = allocator.getMMContainer(*item); - auto itr = container.getEvictionIterator(); bool found = false; - while (itr) { - if (itr.get() == item) { - found = true; - break; + container.withEvictionIterator([&found, &item](auto&& itr) { + while (itr) { + if (itr.get() == item) { + found = true; + break; + } + ++itr; } - ++itr; - } + }); return found; } @@ -5509,8 +5510,12 @@ class BaseAllocatorTest : public AllocatorTest { ASSERT_TRUE(big->isInMMContainer()); auto& mmContainer = alloc.getMMContainer(*big); - auto itr = mmContainer.getEvictionIterator(); - ASSERT_EQ(big.get(), &(*itr)); + + typename AllocatorT::Item* evictionCandidate = nullptr; + mmContainer.withEvictionIterator( + [&evictionCandidate](auto&& itr) { evictionCandidate = itr.get(); }); + + ASSERT_EQ(big.get(), evictionCandidate); alloc.remove("hello"); } @@ -5524,8 +5529,11 @@ class BaseAllocatorTest : public AllocatorTest { ASSERT_TRUE(small2->isInMMContainer()); auto& mmContainer = alloc.getMMContainer(*small2); - auto itr = mmContainer.getEvictionIterator(); - ASSERT_EQ(small2.get(), &(*itr)); + + typename AllocatorT::Item* evictionCandidate = nullptr; + mmContainer.withEvictionIterator( + [&evictionCandidate](auto&& itr) { evictionCandidate = itr.get(); }); + ASSERT_EQ(small2.get(), evictionCandidate); alloc.remove("hello"); } diff --git a/cachelib/allocator/tests/ItemTest.cpp b/cachelib/allocator/tests/ItemTest.cpp index 45bc529a00..edece3fb58 100644 --- a/cachelib/allocator/tests/ItemTest.cpp +++ b/cachelib/allocator/tests/ItemTest.cpp @@ -83,10 +83,10 @@ TEST(ItemTest, ExpiryTime) { EXPECT_EQ(tenMins, item->getConfiguredTTL()); // Test that writes fail while the item is moving - item->markMoving(); + item->markExclusive(); result = item->updateExpiryTime(0); EXPECT_FALSE(result); - item->unmarkMoving(); + item->unmarkExclusive(); // Test that writes
fail while the item is not in an MMContainer item->unmarkInMMContainer(); diff --git a/cachelib/allocator/tests/MM2QTest.cpp b/cachelib/allocator/tests/MM2QTest.cpp index daf846e6bc..89d757b5ad 100644 --- a/cachelib/allocator/tests/MM2QTest.cpp +++ b/cachelib/allocator/tests/MM2QTest.cpp @@ -43,17 +43,20 @@ TEST_F(MM2QTest, RemoveWithSmallQueues) { // trying to remove through iterator should work as expected. // no need of iter++ since remove will do that. - for (auto iter = c.getEvictionIterator(); iter;) { - auto& node = *iter; - ASSERT_TRUE(node.isInMMContainer()); - - // this will move the iter. - c.remove(iter); - ASSERT_FALSE(node.isInMMContainer()); - if (iter) { - ASSERT_NE((*iter).getId(), node.getId()); + c.withEvictionIterator([&c](auto&& iter) { + while (iter) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. + c.remove(iter); + + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } } - } + }); ASSERT_EQ(c.getStats().size, 0); for (const auto& node : nodes) { @@ -92,9 +95,12 @@ TEST_F(MM2QTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& it) { + while (it) { + nodeOrderPrev.push_back(it->getId()); + ++it; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -119,9 +125,12 @@ TEST_F(MM2QTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& it) { + while (it) { + nodeOrderCurr.push_back(it->getId()); + ++it; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -209,13 +218,14 @@ TEST_F(MM2QTest, RecordAccessWrites) { template void MMTypeTest::testIterate(std::vector>& nodes, Container& c) { - auto it2q = c.getEvictionIterator(); auto it = nodes.begin(); - while (it2q) { - ASSERT_EQ(it2q->getId(), (*it)->getId()); - ++it2q; - ++it; - } + c.withEvictionIterator([&it](auto&& it2q) { + while (it2q) { + ASSERT_EQ(it2q->getId(), (*it)->getId()); + ++it2q; + ++it; + } + }); } template @@ -223,14 +233,15 @@ void MMTypeTest::testMatch(std::string expected, MMTypeTest::Container& c) { int index = -1; std::string actual; - auto it2q = c.getEvictionIterator(); - while (it2q) { - ++index; - actual += folly::stringPrintf( - "%d:%s, ", it2q->getId(), - (c.isHot(*it2q) ? "H" : (c.isCold(*it2q) ? "C" : "W"))); - ++it2q; - } + c.withEvictionIterator([&index, &actual, &c](auto&& it2q) { + while (it2q) { + ++index; + actual += folly::stringPrintf( + "%d:%s, ", it2q->getId(), + (c.isHot(*it2q) ? "H" : (c.isCold(*it2q) ? 
"C" : "W"))); + ++it2q; + } + }); ASSERT_EQ(expected, actual); } diff --git a/cachelib/allocator/tests/MMLruTest.cpp b/cachelib/allocator/tests/MMLruTest.cpp index bb63618668..c63c25e8d6 100644 --- a/cachelib/allocator/tests/MMLruTest.cpp +++ b/cachelib/allocator/tests/MMLruTest.cpp @@ -59,9 +59,12 @@ TEST_F(MMLruTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& itr) { + while (itr) { + nodeOrderPrev.push_back(itr->getId()); + ++itr; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -86,9 +89,12 @@ TEST_F(MMLruTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& itr) { + while (itr) { + nodeOrderCurr.push_back(itr->getId()); + ++itr; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -180,14 +186,16 @@ TEST_F(MMLruTest, InsertionPointBasic) { } auto checkLruConfig = [&](Container& container, std::vector order) { - auto it = container.getEvictionIterator(); int i = 0; - while (it) { - ASSERT_LT(i, order.size()); - EXPECT_EQ(order[i], it->getId()); - i++; - ++it; - } + container.withEvictionIterator([&i, &order](auto&& it) { + while (it) { + ASSERT_LT(i, order.size()); + EXPECT_EQ(order[i], it->getId()); + i++; + ++it; + } + }); + ASSERT_EQ(i, order.size()); }; @@ -379,13 +387,15 @@ TEST_F(MMLruTest, InsertionPointStress) { auto getTailCount = [&]() { size_t ntail = 0; - auto it = c.getEvictionIterator(); - while (it) { - if (it->isTail()) { - ntail++; + c.withEvictionIterator([&ntail](auto&& it) { + while (it) { + if (it->isTail()) { + ntail++; + } + ++it; } - ++it; - } + }); + return ntail; }; diff --git a/cachelib/allocator/tests/MMTinyLFUTest.cpp b/cachelib/allocator/tests/MMTinyLFUTest.cpp index 6200db55c5..1be8bb6ec7 100644 --- a/cachelib/allocator/tests/MMTinyLFUTest.cpp +++ b/cachelib/allocator/tests/MMTinyLFUTest.cpp @@ -59,9 +59,12 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { } std::vector nodeOrderPrev; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderPrev.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderPrev](auto&& itr) { + while (itr) { + nodeOrderPrev.push_back(itr->getId()); + ++itr; + } + }); int nAccess = 1000; std::set accessedNodes; @@ -86,9 +89,12 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { // after a random set of recordAccess, test the order of the nodes in the // lru. std::vector nodeOrderCurr; - for (auto itr = c_.getEvictionIterator(); itr; ++itr) { - nodeOrderCurr.push_back(itr->getId()); - } + c_.withEvictionIterator([&nodeOrderCurr](auto&& itr) { + while (itr) { + nodeOrderCurr.push_back(itr->getId()); + ++itr; + } + }); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -170,13 +176,14 @@ TEST_F(MMTinyLFUTest, TinyLFUBasic) { auto checkTlfuConfig = [&](Container& container, std::string expected, std::string context) { - auto it = container.getEvictionIterator(); std::string actual; - while (it) { - actual += folly::stringPrintf("%s:%s, ", it->getKey().str().c_str(), - (container.isTiny(*it) ? 
"T" : "M")); - ++it; - } + container.withEvictionIterator([&container, &actual](auto&& it) { + while (it) { + actual += folly::stringPrintf("%s:%s, ", it->getKey().str().c_str(), + (container.isTiny(*it) ? "T" : "M")); + ++it; + } + }); ASSERT_EQ(expected, actual) << context; }; diff --git a/cachelib/allocator/tests/MMTypeTest.h b/cachelib/allocator/tests/MMTypeTest.h index 5c421cf4c1..ba5faeb2af 100644 --- a/cachelib/allocator/tests/MMTypeTest.h +++ b/cachelib/allocator/tests/MMTypeTest.h @@ -182,9 +182,12 @@ void MMTypeTest::testAddBasic( } std::set foundNodes; - for (auto itr = c.getEvictionIterator(); itr; ++itr) { - foundNodes.insert(itr->getId()); - } + c.withEvictionIterator([&foundNodes](auto&& itr) { + while (itr) { + foundNodes.insert(itr->getId()); + ++itr; + } + }); EXPECT_EQ(foundNodes.size(), c.getStats().size); EXPECT_EQ(foundNodes.size(), c.size()); } @@ -229,26 +232,31 @@ void MMTypeTest::testRemoveBasic(Config config) { } std::set foundNodes; - for (auto itr = c.getEvictionIterator(); itr; ++itr) { - foundNodes.insert(itr->getId()); - } + c.withEvictionIterator([&foundNodes](auto&& itr) { + while (itr) { + foundNodes.insert(itr->getId()); + ++itr; + } + }); for (const auto& node : removedNodes) { ASSERT_EQ(foundNodes.find(node->getId()), foundNodes.end()); } // trying to remove through iterator should work as expected as well. // no need of iter++ since remove will do that. - for (auto iter = c.getEvictionIterator(); iter;) { - auto& node = *iter; - ASSERT_TRUE(node.isInMMContainer()); - - // this will move the iter. - c.remove(iter); - ASSERT_FALSE(node.isInMMContainer()); - if (iter) { - ASSERT_NE((*iter).getId(), node.getId()); + c.withEvictionIterator([&foundNodes, &c](auto&& iter) { + while (iter) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. 
+ c.remove(iter); + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } } - } + }); EXPECT_EQ(c.getStats().size, 0); EXPECT_EQ(c.size(), 0); @@ -322,12 +330,16 @@ void MMTypeTest::testSerializationBasic(Config config) { for (auto& node : nodes) { bool foundNode = false; - for (auto it = c2.getEvictionIterator(); it; ++it) { - if (&*node == &*it) { - foundNode = true; - break; + c2.withEvictionIterator([&foundNode, &node](auto&& it) { + while (it) { + if (&*node == &*it) { + foundNode = true; + break; + } + ++it; } - } + }); + ASSERT_TRUE(foundNode); foundNode = false; } diff --git a/cachelib/allocator/tests/RefCountTest.cpp b/cachelib/allocator/tests/RefCountTest.cpp index 7d789a85d8..153f4a94a4 100644 --- a/cachelib/allocator/tests/RefCountTest.cpp +++ b/cachelib/allocator/tests/RefCountTest.cpp @@ -81,7 +81,7 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -89,7 +89,7 @@ void RefCountTest::testBasic() { ref.markInMMContainer(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -111,7 +111,7 @@ void RefCountTest::testBasic() { // Bumping up access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(RefcountWithFlags::kAccessRefMask, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -128,7 +128,7 @@ void RefCountTest::testBasic() { // Bumping down access ref shouldn't affect admin ref and flags ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_TRUE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -136,7 +136,7 @@ void RefCountTest::testBasic() { ref.template unSetFlag(); ASSERT_TRUE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); @@ -145,28 +145,28 @@ void RefCountTest::testBasic() { ASSERT_EQ(0, ref.getRaw()); ASSERT_FALSE(ref.isInMMContainer()); ASSERT_FALSE(ref.isAccessible()); - ASSERT_FALSE(ref.isMoving()); + ASSERT_FALSE(ref.isExclusive()); ASSERT_EQ(0, ref.getAccessRef()); ASSERT_FALSE(ref.template isFlagSet()); ASSERT_FALSE(ref.template isFlagSet()); // conditionally set flags - ASSERT_FALSE((ref.markMoving())); + ASSERT_FALSE((ref.markExclusive())); ref.markInMMContainer(); - ASSERT_TRUE((ref.markMoving())); - ASSERT_FALSE((ref.isOnlyMoving())); + ASSERT_TRUE((ref.markExclusive())); + ASSERT_FALSE((ref.isOnlyExclusive())); ref.unmarkInMMContainer(); ref.template setFlag(); - // Have no other admin refcount but with a flag still means "isOnlyMoving" - ASSERT_TRUE((ref.isOnlyMoving())); + // Have no other admin refcount but with a flag still means "isOnlyExclusive" + ASSERT_TRUE((ref.isOnlyExclusive())); - // Set some flags and verify that "isOnlyMoving" does not care about flags + // 
Set some flags and verify that "isOnlyExclusive" does not care about flags ref.markIsChainedItem(); ASSERT_TRUE(ref.isChainedItem()); ASSERT_TRUE((ref.isOnlyExclusive())); ref.unmarkIsChainedItem(); ASSERT_FALSE(ref.isChainedItem()); ASSERT_TRUE((ref.isOnlyExclusive())); } } // namespace diff --git a/cachelib/benchmarks/MMTypeBench.h b/cachelib/benchmarks/MMTypeBench.h index a5dc6280d0..6362ff732b 100644 --- a/cachelib/benchmarks/MMTypeBench.h +++ b/cachelib/benchmarks/MMTypeBench.h @@ -203,10 +203,11 @@ void MMTypeBench::benchRemoveIterator(unsigned int numNodes) { // // no need of iter++ since remove will do that. for (unsigned int deleted = 0; deleted < numNodes; deleted++) { - auto iter = c->getEvictionIterator(); - if (iter) { - c->remove(iter); - } + c->withEvictionIterator([this](auto&& iter) { + if (iter) { + c->remove(iter); + } + }); } } diff --git a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md index e23217e36c..7a39fa00f8 100644 --- a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md +++ b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md @@ -109,9 +109,10 @@ It has the following major API functions: is removed from the cache (evicted or explicitly removed by the client) (`bool CacheAllocator::removeFromMMContainer(Item& item)` in `cachelib/allocator/CacheAllocator-inl.h`). -* `getEvictionIterator`: Return an iterator of items to be evicted. This is - called when the cache allocator is looking for eviction. Usually the first item - that can be evicted (no active handles, not moving, etc) is used (see +* `withEvictionIterator`: Executes a callback with the eviction iterator passed + as a parameter. This is called when the cache allocator is looking for + eviction. Usually the first item that can be evicted (no active handles, not + marked exclusive, etc.) is used (see `CacheAllocator::findEviction(PoolId pid, ClassId cid)` in `cachelib/allocator/CacheAllocator-inl.h`).
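+
+  A minimal usage sketch of the new API follows. The `mmContainer`, `Item`,
+  and `candidate` names here are illustrative stand-ins; the in-tree call
+  site is `CacheAllocator::findEviction`. Note that the callback runs while
+  the container lock is held, so it should only walk the iterator and record
+  a candidate rather than call back into the container:
+
+  ```cpp
+  Item* candidate = nullptr;
+  mmContainer.withEvictionIterator([&candidate](auto&& itr) {
+    while (itr) {
+      // markExclusive() succeeds only if the item is still linked in the
+      // container and not already marked exclusive, so a successful mark
+      // means no other thread can evict or free this item underneath us.
+      if (itr->getRefCount() == 0 && itr->markExclusive()) {
+        candidate = itr.get();
+        return;
+      }
+      ++itr;
+    }
+  });
+  // The caller later drops the mark via candidate->unmarkExclusive(); a
+  // return value of 0 means it holds the last reference and may recycle
+  // the memory.
+  ```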