diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h
index e725a70089..58cf99593b 100644
--- a/cachelib/allocator/CacheAllocator-inl.h
+++ b/cachelib/allocator/CacheAllocator-inl.h
@@ -813,20 +813,20 @@ CacheAllocator::releaseBackToAllocator(Item& it,
       removeFromMMContainer(*head);
 
-      // If this chained item is marked as moving, we will not free it.
-      // We must capture the moving state before we do the decRef when
+      // If this chained item is marked as exclusive, we will not free it.
+      // We must capture the exclusive state before we do the decRef when
       // we know the item must still be valid
-      const bool wasMoving = head->isMoving();
+      const bool wasExclusive = head->isExclusive();
 
       // Decref and check if we were the last reference. Now if the item
-      // was marked moving, after decRef, it will be free to be released
+      // was marked exclusive, after decRef, it will be free to be released
       // by slab release thread
       const auto childRef = head->decRef();
 
-      // If the item is already moving and we already decremented the
+      // If the item is already exclusive and we already decremented the
       // refcount, we don't need to free this item. We'll let the slab
       // release thread take care of that
-      if (!wasMoving) {
+      if (!wasExclusive) {
         if (childRef != 0) {
           throw std::runtime_error(folly::sformat(
               "chained item refcount is not zero. We cannot proceed! "
@@ -834,7 +834,7 @@ CacheAllocator::releaseBackToAllocator(Item& it,
               childRef, head->toString()));
         }
 
-        // Item is not moving and refcount is 0, we can proceed to
+        // Item is not exclusive and refcount is 0, we can proceed to
        // free it or recylce the memory
        if (head == toRecycle) {
          XDCHECK(ReleaseRes::kReleased != res);
@@ -1110,7 +1110,8 @@ bool CacheAllocator::moveRegularItem(Item& oldItem,
   // this item through an API such as remove(itemHandle). In this case,
   // it is unsafe to replace the old item with a new one, so we should
   // also abort.
-  if (!accessContainer_->replaceIf(oldItem, *newItemHdl, itemMovingPredicate)) {
+  if (!accessContainer_->replaceIf(oldItem, *newItemHdl,
+                                   itemExclusivePredicate)) {
     return false;
   }
 
@@ -1159,7 +1160,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem,
 
   // This item has been unlinked from its parent and we're the only
   // owner of it, so we're done here
-  if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) {
+  if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) {
     return false;
   }
 
@@ -1190,7 +1191,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem,
 
   // In case someone else had removed this chained item from its parent by now
   // So we check again to see if the it has been unlinked from its parent
-  if (!oldItem.isInMMContainer() || oldItem.isOnlyMoving()) {
+  if (!oldItem.isInMMContainer() || oldItem.isOnlyExclusive()) {
     return false;
   }
 
@@ -1206,7 +1207,7 @@ bool CacheAllocator::moveChainedItem(ChainedItem& oldItem,
   // parent's chain and the MMContainer.
   auto oldItemHandle =
       replaceChainedItemLocked(oldItem, std::move(newItemHdl), *parentHandle);
-  XDCHECK(oldItemHandle->isMoving());
+  XDCHECK(oldItemHandle->isExclusive());
   XDCHECK(!oldItemHandle->isInMMContainer());
 
   return true;
@@ -1227,53 +1228,63 @@ CacheAllocator::findEviction(PoolId pid, ClassId cid) {
     ++searchTries;
     (*stats_.evictionAttempts)[pid][cid].inc();
 
-    Item* candidate = itr.get();
+    Item* toRecycle = itr.get();
+
+    Item* candidate =
+        toRecycle->isChainedItem()
+            ? &toRecycle->asChainedItem().getParentItem(compressor_)
+            : toRecycle;
+
+    // make sure no other thread is evicting the item
+    if (candidate->getRefCount() != 0 || !candidate->markExclusive()) {
+      ++itr;
+      continue;
+    }
+
     // for chained items, the ownership of the parent can change. We try to
     // evict what we think as parent and see if the eviction of parent
     // recycles the child we intend to.
-    auto toReleaseHandle =
-        itr->isChainedItem()
-            ? advanceIteratorAndTryEvictChainedItem(itr)
-            : advanceIteratorAndTryEvictRegularItem(mmContainer, itr);
+    bool evictionSuccessful = false;
+    {
+      auto toReleaseHandle =
+          itr->isChainedItem()
+              ? advanceIteratorAndTryEvictChainedItem(itr)
+              : advanceIteratorAndTryEvictRegularItem(mmContainer, itr);
+      evictionSuccessful = toReleaseHandle != nullptr;
+      // destroy toReleaseHandle. The item won't be released to allocator
+      // since we marked it as exclusive.
+    }
 
-    if (toReleaseHandle) {
-      if (toReleaseHandle->hasChainedItem()) {
+    const auto ref = candidate->unmarkExclusive();
+    if (ref == 0u) {
+      // Invalidate iterator since later on we may use this mmContainer
+      // again, which cannot be done unless we drop this iterator
+      itr.destroy();
+
+      // recycle the item. it's safe to do so, even if toReleaseHandle was
+      // NULL. If `ref` == 0 then it means that we are the last holder of
+      // that item.
+      if (candidate->hasChainedItem()) {
         (*stats_.chainedItemEvictions)[pid][cid].inc();
       } else {
         (*stats_.regularItemEvictions)[pid][cid].inc();
       }
 
       if (auto eventTracker = getEventTracker()) {
-        eventTracker->record(
-            AllocatorApiEvent::DRAM_EVICT, toReleaseHandle->getKey(),
-            AllocatorApiResult::EVICTED, toReleaseHandle->getSize(),
-            toReleaseHandle->getConfiguredTTL().count());
+        eventTracker->record(AllocatorApiEvent::DRAM_EVICT, candidate->getKey(),
+                             AllocatorApiResult::EVICTED, candidate->getSize(),
+                             candidate->getConfiguredTTL().count());
       }
 
-      // Invalidate iterator since later on we may use this mmContainer
-      // again, which cannot be done unless we drop this iterator
-      itr.destroy();
-
-      // we must be the last handle and for chained items, this will be
-      // the parent.
-      XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem());
-      XDCHECK_EQ(1u, toReleaseHandle->getRefCount());
-
-      // We manually release the item here because we don't want to
-      // invoke the Item Handle's destructor which will be decrementing
-      // an already zero refcount, which will throw exception
-      auto& itemToRelease = *toReleaseHandle.release();
-
-      // Decrementing the refcount because we want to recycle the item
-      const auto ref = decRef(itemToRelease);
-      XDCHECK_EQ(0u, ref);
 
       // check if by releasing the item we intend to, we actually
       // recycle the candidate.
       if (ReleaseRes::kRecycled ==
-          releaseBackToAllocator(itemToRelease, RemoveContext::kEviction,
-                                 /* isNascent */ false, candidate)) {
-        return candidate;
+          releaseBackToAllocator(*candidate, RemoveContext::kEviction,
+                                 /* isNascent */ false, toRecycle)) {
+        return toRecycle;
       }
+    } else {
+      XDCHECK(!evictionSuccessful);
     }
 
     // If we destroyed the itr to possibly evict and failed, we restart
@@ -1333,143 +1344,6 @@ bool CacheAllocator::shouldWriteToNvmCacheExclusive(
   return true;
 }
 
-template
-typename CacheAllocator::WriteHandle
-CacheAllocator::advanceIteratorAndTryEvictRegularItem(
-    MMContainer& mmContainer, EvictionIterator& itr) {
-  // we should flush this to nvmcache if it is not already present in nvmcache
-  // and the item is not expired.
-  Item& item = *itr;
-  const bool evictToNvmCache = shouldWriteToNvmCache(item);
-
-  auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey())
-                               : typename NvmCacheT::PutToken{};
-  // record the in-flight eviciton. If not, we move on to next item to avoid
-  // stalling eviction.
-  if (evictToNvmCache && !token.isValid()) {
-    ++itr;
-    stats_.evictFailConcurrentFill.inc();
-    return WriteHandle{};
-  }
-
-  // If there are other accessors, we should abort. Acquire a handle here since
-  // if we remove the item from both access containers and mm containers
-  // below, we will need a handle to ensure proper cleanup in case we end up
-  // not evicting this item
-  auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate);
-
-  if (!evictHandle) {
-    ++itr;
-    stats_.evictFailAC.inc();
-    return evictHandle;
-  }
-
-  mmContainer.remove(itr);
-  XDCHECK_EQ(reinterpret_cast(evictHandle.get()),
-             reinterpret_cast(&item));
-  XDCHECK(!evictHandle->isInMMContainer());
-  XDCHECK(!evictHandle->isAccessible());
-
-  // If the item is now marked as moving, that means its corresponding slab is
-  // being released right now. So, we look for the next item that is eligible
-  // for eviction. It is safe to destroy the handle here since the moving bit
-  // is set. Iterator was already advance by the remove call above.
-  if (evictHandle->isMoving()) {
-    stats_.evictFailMove.inc();
-    return WriteHandle{};
-  }
-
-  // Invalidate iterator since later on if we are not evicting this
-  // item, we may need to rely on the handle we created above to ensure
-  // proper cleanup if the item's raw refcount has dropped to 0.
-  // And since this item may be a parent item that has some child items
-  // in this very same mmContainer, we need to make sure we drop this
-  // exclusive iterator so we can gain access to it when we're cleaning
-  // up the child items
-  itr.destroy();
-
-  // Ensure that there are no accessors after removing from the access
-  // container
-  XDCHECK(evictHandle->getRefCount() == 1);
-
-  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) {
-    XDCHECK(token.isValid());
-    nvmCache_->put(evictHandle, std::move(token));
-  }
-  return evictHandle;
-}
-
-template
-typename CacheAllocator::WriteHandle
-CacheAllocator::advanceIteratorAndTryEvictChainedItem(
-    EvictionIterator& itr) {
-  XDCHECK(itr->isChainedItem());
-
-  ChainedItem* candidate = &itr->asChainedItem();
-  ++itr;
-
-  // The parent could change at any point through transferChain. However, if
-  // that happens, we would realize that the releaseBackToAllocator return
-  // kNotRecycled and we would try another chained item, leading to transient
-  // failure.
-  auto& parent = candidate->getParentItem(compressor_);
-
-  const bool evictToNvmCache = shouldWriteToNvmCache(parent);
-
-  auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey())
-                               : typename NvmCacheT::PutToken{};
-
-  // if token is invalid, return. iterator is already advanced.
-  if (evictToNvmCache && !token.isValid()) {
-    stats_.evictFailConcurrentFill.inc();
-    return WriteHandle{};
-  }
-
-  // check if the parent exists in the hashtable and refcount is drained.
-  auto parentHandle =
-      accessContainer_->removeIf(parent, &itemEvictionPredicate);
-  if (!parentHandle) {
-    stats_.evictFailParentAC.inc();
-    return parentHandle;
-  }
-
-  // Invalidate iterator since later on we may use the mmContainer
-  // associated with this iterator which cannot be done unless we
-  // drop this iterator
-  //
-  // This must be done once we know the parent is not nullptr.
-  // Since we can very well be the last holder of this parent item,
-  // which may have a chained item that is linked in this MM container.
-  itr.destroy();
-
-  // Ensure we have the correct parent and we're the only user of the
-  // parent, then free it from access container. Otherwise, we abort
-  XDCHECK_EQ(reinterpret_cast(&parent),
-             reinterpret_cast(parentHandle.get()));
-  XDCHECK_EQ(1u, parent.getRefCount());
-
-  removeFromMMContainer(*parentHandle);
-
-  XDCHECK(!parent.isInMMContainer());
-  XDCHECK(!parent.isAccessible());
-
-  // We need to make sure the parent is not marked as moving
-  // and we're the only holder of the parent item. Safe to destroy the handle
-  // here since moving bit is set.
-  if (parentHandle->isMoving()) {
-    stats_.evictFailParentMove.inc();
-    return WriteHandle{};
-  }
-
-  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) {
-    XDCHECK(token.isValid());
-    XDCHECK(parentHandle->hasChainedItem());
-    nvmCache_->put(parentHandle, std::move(token));
-  }
-
-  return parentHandle;
-}
-
 template
 typename CacheAllocator::RemoveRes
 CacheAllocator::remove(typename Item::Key key) {
@@ -2468,7 +2342,7 @@ void CacheAllocator::releaseSlabImpl(
     // Need to mark an item for release before proceeding
     // If we can't mark as moving, it means the item is already freed
     const bool isAlreadyFreed =
-        !markMovingForSlabRelease(releaseContext, alloc, throttler);
+        !markExclusiveForSlabRelease(releaseContext, alloc, throttler);
     if (isAlreadyFreed) {
       continue;
     }
@@ -2513,8 +2387,8 @@ bool CacheAllocator::moveForSlabRelease(
   stats_.numMoveAttempts.inc();
 
   // Nothing to move and the key is likely also bogus for chained items.
-  if (oldItem.isOnlyMoving()) {
-    oldItem.unmarkMoving();
+  if (oldItem.isOnlyExclusive()) {
+    oldItem.unmarkExclusive();
     const auto res =
         releaseBackToAllocator(oldItem, RemoveContext::kNormal, false);
     XDCHECK(res == ReleaseRes::kReleased);
@@ -2553,7 +2427,7 @@ bool CacheAllocator::moveForSlabRelease(
   // that's identical to this one to replace it. Here we just need to wait
   // until all users have dropped the item handles before we can proceed.
   startTime = util::getCurrentTimeSec();
-  while (!oldItem.isOnlyMoving()) {
+  while (!oldItem.isOnlyExclusive()) {
     throttleWith(throttler, [&] {
       XLOGF(WARN,
             "Spent {} seconds, slab release still waiting for refcount to "
@@ -2608,7 +2482,7 @@ CacheAllocator::allocateNewItemForOldItem(const Item& oldItem) {
   }
 
   // Set up the destination for the move. Since oldChainedItem would have
-  // the moving bit set, it won't be picked for eviction.
+  // the exclusive bit set, it won't be picked for eviction.
   auto newItemHdl =
       allocateChainedItemInternal(parentHandle, oldChainedItem.getSize());
   if (!newItemHdl) {
@@ -2660,7 +2534,7 @@ bool CacheAllocator::tryMovingForSlabRelease(
     // item is still valid.
    const std::string parentKey =
        oldItem.asChainedItem().getParentItem(compressor_).getKey().str();
-    if (oldItem.isOnlyMoving()) {
+    if (oldItem.isOnlyExclusive()) {
      // If chained item no longer has a refcount, its parent is already
      // being released, so we abort this try to moving.
      return false;
@@ -2691,11 +2565,11 @@ void CacheAllocator::evictForSlabRelease(
   while (true) {
     stats_.numEvictionAttempts.inc();
 
-    // if the item is already in a state where only the moving bit is set,
-    // nothing needs to be done. We simply need to unmark moving bit and free
+    // if the item is already in a state where only the exclusive bit is set,
+    // nothing needs to be done. We simply need to unmark exclusive bit and free
     // the item.
-    if (item.isOnlyMoving()) {
-      item.unmarkMoving();
+    if (item.isOnlyExclusive()) {
+      item.unmarkExclusive();
       const auto res =
           releaseBackToAllocator(item, RemoveContext::kNormal, false);
       XDCHECK(ReleaseRes::kReleased == res);
@@ -2725,10 +2599,8 @@ void CacheAllocator::evictForSlabRelease(
 
       stats_.numEvictionSuccesses.inc();
 
-      // we have the last handle. no longer need to hold on to the moving bit
-      item.unmarkMoving();
-
-      XDCHECK(owningHandle->isExclusive());
+      // we have the last handle. no longer need to hold on to the exclusive bit
+      item.unmarkExclusive();
 
       // manually decrement the refcount to call releaseBackToAllocator
       const auto ref = decRef(*owningHandle);
@@ -2740,7 +2612,7 @@ void CacheAllocator::evictForSlabRelease(
     }
 
     if (shutDownInProgress_) {
-      item.unmarkMoving();
+      item.unmarkExclusive();
       allocator_->abortSlabRelease(ctx);
       throw exception::SlabReleaseAborted(
           folly::sformat("Slab Release aborted while trying to evict"
@@ -2763,12 +2635,132 @@ void CacheAllocator::evictForSlabRelease(
   }
 }
 
+template
+typename CacheAllocator::WriteHandle
+CacheAllocator::advanceIteratorAndTryEvictRegularItem(
+    MMContainer& mmContainer, EvictionIterator& itr) {
+  // we should flush this to nvmcache if it is not already present in nvmcache
+  // and the item is not expired.
+  Item& item = *itr;
+  const bool evictToNvmCache = shouldWriteToNvmCache(item);
+
+  auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey())
+                               : typename NvmCacheT::PutToken{};
+
+  // record the in-flight eviction. If not, we move on to next item to avoid
+  // stalling eviction.
+  if (evictToNvmCache && !token.isValid()) {
+    ++itr;
+    stats_.evictFailConcurrentFill.inc();
+    return WriteHandle{};
+  }
+
+  // If there are other accessors, we should abort. Acquire a handle here since
+  // if we remove the item from both access containers and mm containers
+  // below, we will need a handle to ensure proper cleanup in case we end up
+  // not evicting this item
+  auto evictHandle = accessContainer_->removeIf(item, &itemExclusivePredicate);
+  if (!evictHandle) {
+    ++itr;
+    stats_.evictFailAC.inc();
+    return evictHandle;
+  }
+
+  mmContainer.remove(itr);
+  XDCHECK_EQ(reinterpret_cast(evictHandle.get()),
+             reinterpret_cast(&item));
+  XDCHECK(!evictHandle->isInMMContainer());
+  XDCHECK(!evictHandle->isAccessible());
+
+  // Invalidate iterator since later on if we are not evicting this
+  // item, we may need to rely on the handle we created above to ensure
+  // proper cleanup if the item's raw refcount has dropped to 0.
+  // And since this item may be a parent item that has some child items
+  // in this very same mmContainer, we need to make sure we drop this
+  // exclusive iterator so we can gain access to it when we're cleaning
+  // up the child items
+  itr.destroy();
+
+  // Ensure that there are no accessors after removing from the access
+  // container
+  XDCHECK(evictHandle->getRefCount() == 1);
+
+  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) {
+    XDCHECK(token.isValid());
+    nvmCache_->put(evictHandle, std::move(token));
+  }
+  return evictHandle;
+}
+
+template
+typename CacheAllocator::WriteHandle
+CacheAllocator::advanceIteratorAndTryEvictChainedItem(
+    EvictionIterator& itr) {
+  XDCHECK(itr->isChainedItem());
+
+  ChainedItem* candidate = &itr->asChainedItem();
+  ++itr;
+
+  // The parent could change at any point through transferChain. However, if
+  // that happens, we would realize that the releaseBackToAllocator returns
+  // kNotRecycled and we would try another chained item, leading to transient
+  // failure.
+  auto& parent = candidate->getParentItem(compressor_);
+
+  const bool evictToNvmCache = shouldWriteToNvmCache(parent);
+
+  auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey())
+                               : typename NvmCacheT::PutToken{};
+
+  // if token is invalid, return. iterator is already advanced.
+  if (evictToNvmCache && !token.isValid()) {
+    stats_.evictFailConcurrentFill.inc();
+    return WriteHandle{};
+  }
+
+  // check if the parent exists in the hashtable and refcount is drained.
+  auto parentHandle =
+      accessContainer_->removeIf(parent, &itemExclusivePredicate);
+  if (!parentHandle) {
+    stats_.evictFailParentAC.inc();
+    return parentHandle;
+  }
+
+  // Invalidate iterator since later on we may use the mmContainer
+  // associated with this iterator which cannot be done unless we
+  // drop this iterator
+  //
+  // This must be done once we know the parent is not nullptr.
+  // Since we can very well be the last holder of this parent item,
+  // which may have a chained item that is linked in this MM container.
+  itr.destroy();
+
+  // Ensure we have the correct parent and we're the only user of the
+  // parent, then free it from access container. Otherwise, we abort
+  XDCHECK_EQ(reinterpret_cast(&parent),
+             reinterpret_cast(parentHandle.get()));
+  XDCHECK_EQ(1u, parent.getRefCount());
+
+  removeFromMMContainer(*parentHandle);
+
+  XDCHECK(!parent.isInMMContainer());
+  XDCHECK(!parent.isAccessible());
+
+  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) {
+    XDCHECK(token.isValid());
+    XDCHECK(parentHandle->hasChainedItem());
+    nvmCache_->put(parentHandle, std::move(token));
+  }
+
+  return parentHandle;
+}
+
 template
 typename CacheAllocator::WriteHandle
 CacheAllocator::evictNormalItemForSlabRelease(Item& item) {
-  XDCHECK(item.isMoving());
+  XDCHECK(item.isExclusive());
 
-  if (item.isOnlyMoving()) {
+  if (item.isOnlyExclusive()) {
     return WriteHandle{};
   }
 
@@ -2780,7 +2772,7 @@ CacheAllocator::evictNormalItemForSlabRelease(Item& item) {
 
   // We remove the item from both access and mm containers. It doesn't matter
   // if someone else calls remove on the item at this moment, the item cannot
-  // be freed as long as we have the moving bit set.
+  // be freed as long as we have the exclusive bit set.
   auto handle = accessContainer_->removeIf(item, std::move(predicate));
 
   if (!handle) {
@@ -2804,7 +2796,7 @@
 template
 typename CacheAllocator::WriteHandle
 CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) {
-  XDCHECK(child.isMoving());
+  XDCHECK(child.isExclusive());
 
   // We have the child marked as moving, but dont know anything about the
   // state of the parent. Unlike the case of regular eviction where we are
@@ -2826,7 +2818,7 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) {
   // check if the child is still in mmContainer and the expected parent is
   // valid under the chained item lock.
   if (expectedParent.getKey() != parentKey || !child.isInMMContainer() ||
-      child.isOnlyMoving() ||
+      child.isOnlyExclusive() ||
       &expectedParent != &child.getParentItem(compressor_) ||
       !expectedParent.isAccessible() || !expectedParent.hasChainedItem()) {
     return {};
   }
@@ -2881,14 +2873,14 @@ CacheAllocator::evictChainedItemForSlabRelease(ChainedItem& child) {
 
   // In case someone else had removed this chained item from its parent by now
   // So we check again to see if it has been unlinked from its parent
-  if (!child.isInMMContainer() || child.isOnlyMoving()) {
+  if (!child.isInMMContainer() || child.isOnlyExclusive()) {
     return {};
   }
 
   // check after removing from the MMContainer that the parent is still not
   // being marked as moving. If parent is moving, it will release the child
   // item and we will wait for that.
-  if (parentHandle->isMoving()) {
+  if (parentHandle->isExclusive()) {
     return {};
   }
 
@@ -2921,7 +2913,7 @@ bool CacheAllocator::removeIfExpired(const ReadHandle& handle) {
 }
 
 template
-bool CacheAllocator::markMovingForSlabRelease(
+bool CacheAllocator::markExclusiveForSlabRelease(
     const SlabReleaseContext& ctx, void* alloc, util::Throttler& throttler) {
   // MemoryAllocator::processAllocForRelease will execute the callback
   // if the item is not already free. So there are three outcomes here:
@@ -2940,7 +2932,7 @@ bool CacheAllocator::markExclusiveForSlabRelease(
     // Since this callback is executed, the item is not yet freed
     itemFreed = false;
     Item* item = static_cast(memory);
-    if (item->markMoving()) {
+    if (item->markExclusive()) {
       markedMoving = true;
     }
   };
@@ -2962,7 +2954,6 @@ bool CacheAllocator::markExclusiveForSlabRelease(
     itemFreed = true;
 
     if (shutDownInProgress_) {
-      XDCHECK(!static_cast(alloc)->isMoving());
       allocator_->abortSlabRelease(ctx);
       throw exception::SlabReleaseAborted(
           folly::sformat("Slab Release aborted while still trying to mark"
diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h
index 676abd7198..63065c7807 100644
--- a/cachelib/allocator/CacheAllocator.h
+++ b/cachelib/allocator/CacheAllocator.h
@@ -1507,7 +1507,7 @@ class CacheAllocator : public CacheBase {
   FOLLY_ALWAYS_INLINE WriteHandle findFastImpl(Key key, AccessMode mode);
 
   // Moves a regular item to a different slab. This should only be used during
-  // slab release after the item's moving bit has been set. The user supplied
+  // slab release after the item's exclusive bit has been set. The user supplied
   // callback is responsible for copying the contents and fixing the semantics
   // of chained item.
   //
@@ -1530,7 +1530,7 @@ class CacheAllocator : public CacheBase {
   folly::IOBuf convertToIOBufT(Handle& handle);
 
   // Moves a chained item to a different slab. This should only be used during
-  // slab release after the item's moving bit has been set. The user supplied
+  // slab release after the item's exclusive bit has been set. The user supplied
   // callback is responsible for copying the contents and fixing the semantics
   // of chained item.
   //
@@ -1755,9 +1755,9 @@ class CacheAllocator : public CacheBase {
 
   // @return true when successfully marked as moving,
   //         fasle when this item has already been freed
-  bool markMovingForSlabRelease(const SlabReleaseContext& ctx,
-                                void* alloc,
-                                util::Throttler& throttler);
+  bool markExclusiveForSlabRelease(const SlabReleaseContext& ctx,
+                                   void* alloc,
+                                   util::Throttler& throttler);
 
   // "Move" (by copying) the content in this item to another memory
   // location by invoking the move callback.
@@ -1926,20 +1926,16 @@ class CacheAllocator : public CacheBase {
   std::optional saveNvmCache();
   void saveRamCache();
 
-  static bool itemMovingPredicate(const Item& item) {
+  static bool itemExclusivePredicate(const Item& item) {
     return item.getRefCount() == 0;
   }
 
-  static bool itemEvictionPredicate(const Item& item) {
-    return item.getRefCount() == 0 && !item.isMoving();
-  }
-
   static bool itemExpiryPredicate(const Item& item) {
     return item.getRefCount() == 1 && item.isExpired();
   }
 
   static bool parentEvictForSlabReleasePredicate(const Item& item) {
-    return item.getRefCount() == 1 && !item.isMoving();
+    return item.getRefCount() == 1 && !item.isExclusive();
   }
 
   std::unique_ptr createDeserializer();
diff --git a/cachelib/allocator/CacheItem-inl.h b/cachelib/allocator/CacheItem-inl.h
index b293971b88..8bdf9a96fe 100644
--- a/cachelib/allocator/CacheItem-inl.h
+++ b/cachelib/allocator/CacheItem-inl.h
@@ -148,12 +148,13 @@ std::string CacheItem::toString() const {
     return folly::sformat(
         "item: "
         "memory={}:raw-ref={}:size={}:key={}:hex-key={}:"
-        "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:"
+        "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime="
+        "{}:"
         "expTime={}:updateTime={}:isNvmClean={}:isNvmEvicted={}:hasChainedItem="
         "{}",
         this, getRefCountAndFlagsRaw(), getSize(),
         folly::humanify(getKey().str()), folly::hexlify(getKey()),
-        isInMMContainer(), isAccessible(), isMoving(), getRefCount(),
+        isInMMContainer(), isAccessible(), isExclusive(), getRefCount(),
         getCreationTime(), getExpiryTime(), getLastAccessTime(), isNvmClean(),
         isNvmEvicted(), hasChainedItem());
 }
@@ -185,11 +186,6 @@ bool CacheItem::isDrained() const noexcept {
   return ref_.isDrained();
 }
 
-template
-bool CacheItem::isExclusive() const noexcept {
-  return ref_.isExclusive();
-}
-
 template
 void CacheItem::markAccessible() noexcept {
   ref_.markAccessible();
@@ -221,23 +217,23 @@ bool CacheItem::isInMMContainer() const noexcept {
 }
 
 template
-bool CacheItem::markMoving() noexcept {
-  return ref_.markMoving();
+bool CacheItem::markExclusive() noexcept {
+  return ref_.markExclusive();
 }
 
 template
-void CacheItem::unmarkMoving() noexcept {
-  ref_.unmarkMoving();
+RefcountWithFlags::Value CacheItem::unmarkExclusive() noexcept {
+  return ref_.unmarkExclusive();
 }
 
 template
-bool CacheItem::isMoving() const noexcept {
-  return ref_.isMoving();
+bool CacheItem::isExclusive() const noexcept {
+  return ref_.isExclusive();
 }
 
 template
-bool CacheItem::isOnlyMoving() const noexcept {
-  return ref_.isOnlyMoving();
+bool CacheItem::isOnlyExclusive() const noexcept {
+  return ref_.isOnlyExclusive();
 }
 
 template
@@ -339,7 +335,7 @@ bool CacheItem::updateExpiryTime(uint32_t expiryTimeSecs) noexcept {
   // check for moving to make sure we are not updating the expiry time while at
   // the same time re-allocating the item with the old state of the expiry time
   // in moveRegularItem(). See D6852328
-  if (isMoving() || !isInMMContainer() || isChainedItem()) {
+  if (isExclusive() || !isInMMContainer() || isChainedItem()) {
     return false;
   }
   // attempt to atomically update the value of expiryTime
@@ -455,10 +451,11 @@ std::string CacheChainedItem::toString() const {
   return folly::sformat(
       "chained item: "
       "memory={}:raw-ref={}:size={}:parent-compressed-ptr={}:"
-      "isInMMContainer={}:isAccessible={}:isMoving={}:references={}:ctime={}:"
+      "isInMMContainer={}:isAccessible={}:isExclusive={}:references={}:ctime={}"
+      ":"
      "expTime={}:updateTime={}",
      this, Item::getRefCountAndFlagsRaw(), Item::getSize(), cPtr.getRaw(),
-      Item::isInMMContainer(), Item::isAccessible(), Item::isMoving(),
+      Item::isInMMContainer(), Item::isAccessible(), Item::isExclusive(),
      Item::getRefCount(), Item::getCreationTime(), Item::getExpiryTime(),
      Item::getLastAccessTime());
 }
diff --git a/cachelib/allocator/CacheItem.h b/cachelib/allocator/CacheItem.h
index 0d699d8f97..c17ebb90d5 100644
--- a/cachelib/allocator/CacheItem.h
+++ b/cachelib/allocator/CacheItem.h
@@ -246,23 +246,23 @@ class CACHELIB_PACKED_ATTR CacheItem {
    *
    * This API will only succeed when an item is a regular item, and user
    * has already inserted it into the cache (via @insert or @insertOrReplace).
-   * In addition, the item cannot be in a "moving" state.
+   * In addition, the item cannot be in an "exclusive" state.
    *
    * @param expiryTime the expiryTime value to update to
    *
    * @return boolean indicating whether expiry time was successfully updated
-   *         false when item is not linked in cache, or in moving state, or a
+   *         false when item is not linked in cache, or in exclusive state, or a
    *         chained item
    */
   bool updateExpiryTime(uint32_t expiryTimeSecs) noexcept;
 
   // Same as @updateExpiryTime, but sets expiry time to @ttl seconds from now.
   // It has the same restrictions as @updateExpiryTime. An item must be a
-  // regular item and is part of the cache and NOT in the moving state.
+  // regular item and is part of the cache and NOT in the exclusive state.
   //
   // @param ttl TTL (from now)
   // @return boolean indicating whether expiry time was successfully updated
-  //         false when item is not linked in cache, or in moving state, or a
+  //         false when item is not linked in cache, or in exclusive state, or a
   //         chained item
   bool extendTTL(std::chrono::seconds ttl) noexcept;
 
@@ -320,13 +320,9 @@ class CACHELIB_PACKED_ATTR CacheItem {
   // Whether or not an item is completely drained of all references including
   // the internal ones. This means there is no access refcount bits and zero
   // admin bits set. I.e. refcount is 0 and the item is not linked, accessible,
-  // nor moving
+  // nor exclusive
   bool isDrained() const noexcept;
 
-  // Whether or not we hold the last exclusive access to this item
-  // Refcount is 1 and the item is not linked, accessible, nor moving
-  bool isExclusive() const noexcept;
-
   /**
    * The following three functions correspond to the state of the allocation
    * in the memory management container. This is protected by the
@@ -349,20 +345,22 @@ class CACHELIB_PACKED_ATTR CacheItem {
   /**
    * The following two functions corresond to whether or not an item is
    * currently in the process of being moved. This happens during a slab
-   * rebalance or resize operation.
+   * rebalance, eviction or resize operation.
    *
-   * An item can only be marked moving when `isInMMContainer` returns true.
+   * An item can only be marked exclusive when `isInMMContainer` returns true.
    * This operation is atomic.
   *
-   * User can also query if an item "isOnlyMoving". This returns true only
-   * if the refcount is 0 and only the moving bit is set.
+   * User can also query if an item "isOnlyExclusive". This returns true only
+   * if the refcount is 0 and only the exclusive bit is set.
   *
-   * Unmarking moving does not depend on `isInMMContainer`
+   * Unmarking exclusive does not depend on `isInMMContainer`.
+   * Unmarking exclusive will also return the refcount at the moment of
+   * unmarking.
   */
-  bool markMoving() noexcept;
-  void unmarkMoving() noexcept;
-  bool isMoving() const noexcept;
-  bool isOnlyMoving() const noexcept;
+  bool markExclusive() noexcept;
+  RefcountWithFlags::Value unmarkExclusive() noexcept;
+  bool isExclusive() const noexcept;
+  bool isOnlyExclusive() const noexcept;
 
   /**
    * Item cannot be marked both chained allocation and
diff --git a/cachelib/allocator/Refcount.h b/cachelib/allocator/Refcount.h
index 631e1695f9..3c37e2e098 100644
--- a/cachelib/allocator/Refcount.h
+++ b/cachelib/allocator/Refcount.h
@@ -82,7 +82,7 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
 
     // this flag indicates the allocation is being moved elsewhere
    // (can be triggered by a resize or reblanace operation)
-    kMoving,
+    kExclusive,
   };
 
   /**
@@ -119,7 +119,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
     // Unused. This is just to indciate the maximum number of flags
     kFlagMax,
   };
-  static_assert(static_cast(kMMFlag0) > static_cast(kMoving),
+  static_assert(static_cast(kMMFlag0) >
+                    static_cast(kExclusive),
                 "Flags and control bits cannot overlap in bit range.");
   static_assert(kFlagMax <= NumBits::value, "Too many flags.");
 
@@ -247,18 +248,20 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
   /**
    * The following four functions are used to track whether or not
    * an item is currently in the process of being moved. This happens during a
-   * slab rebalance or resize operation.
+   * slab rebalance or resize operation or during eviction.
    *
-   * An item can only be marked moving when `isInMMContainer` returns true.
-   * This operation is atomic.
+   * An item can only be marked exclusive when `isInMMContainer` returns true
+   * and the item is not yet marked as exclusive. This operation is atomic.
   *
-   * User can also query if an item "isOnlyMoving". This returns true only
-   * if the refcount is 0 and only the moving bit is set.
+   * User can also query if an item "isOnlyExclusive". This returns true only
+   * if the refcount is 0 and only the exclusive bit is set.
   *
-   * Unmarking moving does not depend on `isInMMContainer`
+   * Unmarking exclusive does not depend on `isInMMContainer`.
+   * Unmarking exclusive will also return the refcount at the moment of
+   * unmarking.
   */
-  bool markMoving() noexcept {
-    Value bitMask = getAdminRef();
+  bool markExclusive() noexcept {
+    Value bitMask = getAdminRef();
     Value conditionBitMask = getAdminRef();
 
     Value* const refPtr = &refCount_;
@@ -267,7 +270,8 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
     Value curValue = __atomic_load_n(refPtr, __ATOMIC_RELAXED);
     while (true) {
       const bool flagSet = curValue & conditionBitMask;
-      if (!flagSet) {
+      const bool alreadyExclusive = curValue & bitMask;
+      if (!flagSet || alreadyExclusive) {
         return false;
       }
 
@@ -286,21 +290,24 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
       }
     }
   }
-  void unmarkMoving() noexcept {
-    Value bitMask = ~getAdminRef();
-    __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL);
+  Value unmarkExclusive() noexcept {
+    Value bitMask = ~getAdminRef();
+    return __atomic_and_fetch(&refCount_, bitMask, __ATOMIC_ACQ_REL) & kRefMask;
+  }
+  bool isExclusive() const noexcept {
+    return getRaw() & getAdminRef();
   }
-  bool isMoving() const noexcept { return getRaw() & getAdminRef(); }
-  bool isOnlyMoving() const noexcept {
-    // An item is only moving when its refcount is zero and only the moving bit
-    // among all the control bits is set. This indicates an item is already on
-    // its way out of cache and does not need to be moved.
+  bool isOnlyExclusive() const noexcept {
+    // An item is only exclusive when its refcount is zero and only the
+    // exclusive bit among all the control bits is set. This indicates an item
+    // is exclusive to the current thread. No other thread is allowed to
+    // do anything with it.
     auto ref = getRefWithAccessAndAdmin();
-    bool anyOtherBitSet = ref & ~getAdminRef();
+    bool anyOtherBitSet = ref & ~getAdminRef();
     if (anyOtherBitSet) {
       return false;
     }
-    return ref & getAdminRef();
+    return ref & getAdminRef();
   }
 
   /**
@@ -330,13 +337,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
   bool isNvmEvicted() const noexcept { return isFlagSet(); }
 
   // Whether or not an item is completely drained of access
-  // Refcount is 0 and the item is not linked, accessible, nor moving
+  // Refcount is 0 and the item is not linked, accessible, nor exclusive
   bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; }
 
-  // Whether or not we hold the last exclusive access to this item
-  // Refcount is 1 and the item is not linked, accessible, nor moving
-  bool isExclusive() const noexcept { return getRefWithAccessAndAdmin() == 1; }
-
   /**
    * Functions to set, unset and check flag presence. This is exposed
    * for external components to manipulate flags. E.g. MMContainer will
diff --git a/cachelib/allocator/tests/ItemTest.cpp b/cachelib/allocator/tests/ItemTest.cpp
index 45bc529a00..edece3fb58 100644
--- a/cachelib/allocator/tests/ItemTest.cpp
+++ b/cachelib/allocator/tests/ItemTest.cpp
@@ -83,10 +83,10 @@ TEST(ItemTest, ExpiryTime) {
   EXPECT_EQ(tenMins, item->getConfiguredTTL());
 
   // Test that writes fail while the item is moving
-  item->markMoving();
+  item->markExclusive();
   result = item->updateExpiryTime(0);
   EXPECT_FALSE(result);
-  item->unmarkMoving();
+  item->unmarkExclusive();
 
   // Test that writes fail while the item is not in an MMContainer
   item->unmarkInMMContainer();
diff --git a/cachelib/allocator/tests/RefCountTest.cpp b/cachelib/allocator/tests/RefCountTest.cpp
index 7d789a85d8..153f4a94a4 100644
--- a/cachelib/allocator/tests/RefCountTest.cpp
+++ b/cachelib/allocator/tests/RefCountTest.cpp
@@ -81,7 +81,7 @@ void RefCountTest::testBasic() {
   ASSERT_EQ(0, ref.getRaw());
   ASSERT_FALSE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_FALSE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
 
@@ -89,7 +89,7 @@ void RefCountTest::testBasic() {
   ref.markInMMContainer();
   ASSERT_TRUE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_EQ(0, ref.getAccessRef());
   ASSERT_FALSE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
@@ -111,7 +111,7 @@ void RefCountTest::testBasic() {
   // Bumping up access ref shouldn't affect admin ref and flags
   ASSERT_TRUE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_EQ(RefcountWithFlags::kAccessRefMask, ref.getAccessRef());
   ASSERT_TRUE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
@@ -128,7 +128,7 @@ void RefCountTest::testBasic() {
   // Bumping down access ref shouldn't affect admin ref and flags
   ASSERT_TRUE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_EQ(0, ref.getAccessRef());
   ASSERT_TRUE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
@@ -136,7 +136,7 @@ void RefCountTest::testBasic() {
   ref.template unSetFlag();
   ASSERT_TRUE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_EQ(0, ref.getAccessRef());
   ASSERT_FALSE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
@@ -145,28 +145,28 @@ void RefCountTest::testBasic() {
   ASSERT_EQ(0, ref.getRaw());
   ASSERT_FALSE(ref.isInMMContainer());
   ASSERT_FALSE(ref.isAccessible());
-  ASSERT_FALSE(ref.isMoving());
+  ASSERT_FALSE(ref.isExclusive());
   ASSERT_EQ(0, ref.getAccessRef());
   ASSERT_FALSE(ref.template isFlagSet());
   ASSERT_FALSE(ref.template isFlagSet());
 
   // conditionally set flags
-  ASSERT_FALSE((ref.markMoving()));
+  ASSERT_FALSE((ref.markExclusive()));
   ref.markInMMContainer();
-  ASSERT_TRUE((ref.markMoving()));
-  ASSERT_FALSE((ref.isOnlyMoving()));
+  ASSERT_TRUE((ref.markExclusive()));
+  ASSERT_FALSE((ref.isOnlyExclusive()));
   ref.unmarkInMMContainer();
   ref.template setFlag();
-  // Have no other admin refcount but with a flag still means "isOnlyMoving"
-  ASSERT_TRUE((ref.isOnlyMoving()));
+  // Have no other admin refcount but with a flag still means "isOnlyExclusive"
+  ASSERT_TRUE((ref.isOnlyExclusive()));
 
-  // Set some flags and verify that "isOnlyMoving" does not care about flags
+  // Set some flags and verify that "isOnlyExclusive" does not care about flags
   ref.markIsChainedItem();
   ASSERT_TRUE(ref.isChainedItem());
-  ASSERT_TRUE((ref.isOnlyMoving()));
+  ASSERT_TRUE((ref.isOnlyExclusive()));
   ref.unmarkIsChainedItem();
   ASSERT_FALSE(ref.isChainedItem());
-  ASSERT_TRUE((ref.isOnlyMoving()));
+  ASSERT_TRUE((ref.isOnlyExclusive()));
 }
 } // namespace
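
Two refcount-level behaviors in this patch are worth calling out: markExclusive()
now fails when the exclusive bit is already set, and unmarkExclusive() returns the
refcount observed at the moment the bit is cleared. Neither is exercised by the
RefCountTest changes above. A minimal sketch of such a test, using only APIs that
appear in this diff (the test name testExclusiveBit and the local declaration of
ref are assumptions for illustration):

void RefCountTest::testExclusiveBit() {
  RefcountWithFlags ref;

  // An item can only be marked exclusive while it is linked in an MM container.
  ASSERT_FALSE(ref.markExclusive());

  ref.markInMMContainer();
  ASSERT_TRUE(ref.markExclusive());
  ASSERT_TRUE(ref.isExclusive());

  // New guard in markExclusive(): a second attempt on an already-exclusive
  // item fails, so an evictor and a slab release cannot both win the item.
  ASSERT_FALSE(ref.markExclusive());

  // unmarkExclusive() clears the bit and returns the refcount seen at that
  // moment. No access refs were taken here, so it must return 0 -- the
  // condition under which findEviction() may recycle the item.
  ASSERT_EQ(0, ref.unmarkExclusive());
  ASSERT_FALSE(ref.isExclusive());
}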
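
Stepping back from the mechanics, the new findEviction() flow in
CacheAllocator-inl.h reduces to a small atomic protocol: set the exclusive bit
only if no references are held and nobody else holds the bit, attempt the
eviction, then clear the bit and inspect the refcount returned by the unmark;
zero means the caller was the last holder and may recycle the memory directly.
A self-contained toy model of that protocol (every name here is invented for
illustration; the real implementation is the RefcountWithFlags code in this
diff):

#include <atomic>
#include <cassert>

struct ToyRef {
  static constexpr unsigned kExclusiveBit = 1u << 31;
  static constexpr unsigned kRefMask = kExclusiveBit - 1;
  std::atomic<unsigned> raw{0};

  // Set the exclusive bit iff no refs are held and the bit is clear.
  bool markExclusive() {
    unsigned cur = raw.load();
    while ((cur & kRefMask) == 0 && !(cur & kExclusiveBit)) {
      if (raw.compare_exchange_weak(cur, cur | kExclusiveBit)) {
        return true;
      }
    }
    return false;
  }

  // Clear the bit and report the refcount seen at the moment of clearing.
  unsigned unmarkExclusive() {
    return raw.fetch_and(~kExclusiveBit) & kRefMask;
  }
};

int main() {
  ToyRef ref;
  assert(ref.markExclusive());  // the evicting thread wins the item
  assert(!ref.markExclusive()); // a concurrent slab release loses
  // A returned refcount of 0 signals the item may be recycled.
  assert(ref.unmarkExclusive() == 0);
  return 0;
}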