diff --git a/cachelib/allocator/CMakeLists.txt b/cachelib/allocator/CMakeLists.txt index b64d48d86f..b00302086b 100644 --- a/cachelib/allocator/CMakeLists.txt +++ b/cachelib/allocator/CMakeLists.txt @@ -121,6 +121,7 @@ if (BUILD_TESTS) add_test (tests/MemoryTiersTest.cpp) add_test (tests/MultiAllocatorTest.cpp) add_test (tests/NvmAdmissionPolicyTest.cpp) + add_test (tests/CacheAllocatorConfigTest.cpp) add_test (nvmcache/tests/NvmItemTests.cpp) add_test (nvmcache/tests/InFlightPutsTest.cpp) add_test (nvmcache/tests/TombStoneTests.cpp) diff --git a/cachelib/allocator/CacheAllocator-inl.h b/cachelib/allocator/CacheAllocator-inl.h index 5c9b843bd1..2dc54aa5e2 100644 --- a/cachelib/allocator/CacheAllocator-inl.h +++ b/cachelib/allocator/CacheAllocator-inl.h @@ -202,10 +202,24 @@ ShmSegmentOpts CacheAllocator::createShmCacheOpts(TierId tid) { ShmSegmentOpts opts; opts.alignment = sizeof(Slab); opts.typeOpts = memoryTierConfigs[tid].getShmTypeOpts(); + if (auto *v = std::get_if(&opts.typeOpts)) { + v->usePosix = config_.usePosixShm; + } return opts; } +template +size_t CacheAllocator::memoryTierSize(TierId tid) const +{ + auto partitions = std::accumulate(memoryTierConfigs.begin(), memoryTierConfigs.end(), 0UL, + [](const size_t i, const MemoryTierCacheConfig& config){ + return i + config.getRatio(); + }); + + return memoryTierConfigs[tid].calculateTierSize(config_.getCacheSize(), partitions); +} + template std::unique_ptr CacheAllocator::createNewMemoryAllocator(TierId tid) { @@ -216,7 +230,8 @@ CacheAllocator::createNewMemoryAllocator(TierId tid) { config_.getCacheSize(), config_.slabMemoryBaseAddr, createShmCacheOpts(tid)) .addr, - memoryTierConfigs[tid].getSize()); + memoryTierSize(tid) + ); } template @@ -227,7 +242,7 @@ CacheAllocator::restoreMemoryAllocator(TierId tid) { shmManager_ ->attachShm(detail::kShmCacheName + std::to_string(tid), config_.slabMemoryBaseAddr, createShmCacheOpts(tid)).addr, - memoryTierConfigs[tid].getSize(), + memoryTierSize(tid), 
config_.disableFullCoredump); } @@ -2250,12 +2265,27 @@ PoolId CacheAllocator::addPool( folly::SharedMutex::WriteHolder w(poolsResizeAndRebalanceLock_); PoolId pid = 0; - auto tierConfigs = config_.getMemoryTierConfigs(); + std::vector tierPoolSizes; + const auto &tierConfigs = config_.getMemoryTierConfigs(); + size_t totalCacheSize = 0; + for (TierId tid = 0; tid < numTiers_; tid++) { - auto tierSizeRatio = static_cast( - tierConfigs[tid].getSize()) / config_.getCacheSize(); - auto tierPoolSize = static_cast(tierSizeRatio * size); - auto res = allocator_[tid]->addPool(name, tierPoolSize, allocSizes, ensureProvisionable); + totalCacheSize += allocator_[tid]->getMemorySize(); + } + + for (TierId tid = 0; tid < numTiers_; tid++) { + auto tierSizeRatio = + static_cast(allocator_[tid]->getMemorySize()) / totalCacheSize; + size_t tierPoolSize = static_cast(tierSizeRatio * size); + + tierPoolSizes.push_back(tierPoolSize); + } + + for (TierId tid = 0; tid < numTiers_; tid++) { + // TODO: what if we manage to add pool only in one tier? 
+ // we should probably remove that on failure + auto res = allocator_[tid]->addPool( + name, tierPoolSizes[tid], allocSizes, ensureProvisionable); XDCHECK(tid == 0 || res == pid); pid = res; } @@ -2418,6 +2448,16 @@ const std::string CacheAllocator::getCacheName() const { return config_.cacheName; } +template +size_t CacheAllocator::getPoolSize(PoolId poolId) const { + size_t poolSize = 0; + for (auto& allocator: allocator_) { + const auto& pool = allocator->getPool(poolId); + poolSize += pool.getPoolSize(); + } + return poolSize; +} + template PoolStats CacheAllocator::getPoolStats(PoolId poolId) const { const auto& pool = allocator_[currentTier()]->getPool(poolId); diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index fb342a6b71..e4444df3bf 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1045,6 +1045,9 @@ class CacheAllocator : public CacheBase { // get cache name const std::string getCacheName() const override final; + // combined pool size for all memory tiers + size_t getPoolSize(PoolId pid) const; + // pool stats by pool id PoolStats getPoolStats(PoolId pid) const override final; @@ -1578,6 +1581,8 @@ class CacheAllocator : public CacheBase { // handle to the item. On failure an empty handle. 
WriteHandle tryEvictToNextMemoryTier(Item& item); + size_t memoryTierSize(TierId tid) const; + // Deserializer CacheAllocatorMetadata and verify the version // // @param deserializer Deserializer object diff --git a/cachelib/allocator/CacheAllocatorConfig.h b/cachelib/allocator/CacheAllocatorConfig.h index 1d11b3ef14..ca51deb94c 100644 --- a/cachelib/allocator/CacheAllocatorConfig.h +++ b/cachelib/allocator/CacheAllocatorConfig.h @@ -28,6 +28,7 @@ #include "cachelib/allocator/MemoryTierCacheConfig.h" #include "cachelib/allocator/MM2Q.h" #include "cachelib/allocator/MemoryMonitor.h" +#include "cachelib/allocator/MemoryTierCacheConfig.h" #include "cachelib/allocator/NvmAdmissionPolicy.h" #include "cachelib/allocator/PoolOptimizeStrategy.h" #include "cachelib/allocator/RebalanceStrategy.h" @@ -205,15 +206,15 @@ class CacheAllocatorConfig { // cachePersistence(). CacheAllocatorConfig& usePosixForShm(); - // Configures cache memory tiers. Accepts vector of MemoryTierCacheConfig. - // Each vector element describes configuration for a single memory cache tier. - // @throw std::invalid_argument if: - // - the size of configs is 0 - // - memory tiers use both size and ratio parameters + // Configures cache memory tiers. Each tier represents a cache region inside + // byte-addressable memory such as DRAM, Pmem, CXLmem. + // Accepts vector of MemoryTierCacheConfig. Each vector element describes + // configuration for a single memory cache tier. Tier sizes are specified as + // ratios, the number of parts of total cache size each tier would occupy. CacheAllocatorConfig& configureMemoryTiers(const MemoryTierConfigs& configs); - // Return vector of memory tier configs. - MemoryTierConfigs getMemoryTierConfigs() const; + // Return reference to MemoryTierCacheConfigs. + const MemoryTierConfigs& getMemoryTierConfigs() const; // This turns on a background worker that periodically scans through the // access container and look for expired items and remove them. 
@@ -352,7 +353,7 @@ class CacheAllocatorConfig { const std::string& getCacheName() const noexcept { return cacheName; } - size_t getCacheSize() const noexcept; + size_t getCacheSize() const noexcept { return size; } bool isUsingPosixShm() const noexcept { return usePosixShm; } @@ -367,13 +368,19 @@ class CacheAllocatorConfig { bool validateStrategy( const std::shared_ptr& strategy) const; + // check that memory tier ratios are set properly + const CacheAllocatorConfig& validateMemoryTiers() const; + // @return a map representation of the configs std::map serialize() const; + // The max number of memory cache tiers + inline static const size_t kMaxCacheMemoryTiers = 2; + // Cache name for users to indentify their own cache. std::string cacheName{""}; - // Amount of memory for this cache instance + // Amount of memory for this cache instance (sum of all memory tiers' sizes) size_t size = 1 * 1024 * 1024 * 1024; // Directory for shared memory related metadata @@ -581,8 +588,6 @@ class CacheAllocatorConfig { friend CacheT; private: - void validateMemoryTiersWithSize(const MemoryTierConfigs&, size_t) const; - // Configuration for memory tiers. 
MemoryTierConfigs memoryTierConfigs{ {MemoryTierCacheConfig::fromShm().setRatio(1)} @@ -606,8 +611,6 @@ CacheAllocatorConfig& CacheAllocatorConfig::setCacheName( template CacheAllocatorConfig& CacheAllocatorConfig::setCacheSize(size_t _size) { - validateMemoryTiersWithSize(this->memoryTierConfigs, _size); - size = _size; constexpr size_t maxCacheSizeWithCoredump = 64'424'509'440; // 60GB if (size <= maxCacheSizeWithCoredump) { @@ -861,57 +864,24 @@ CacheAllocatorConfig& CacheAllocatorConfig::enableItemReaperInBackground( template CacheAllocatorConfig& CacheAllocatorConfig::configureMemoryTiers( - const MemoryTierConfigs& config) { - if (!config.size()) { - throw std::invalid_argument("There must be at least one memory tier."); + const MemoryTierConfigs& config) { + if (config.size() > kMaxCacheMemoryTiers) { + throw std::invalid_argument(folly::sformat( + "Too many memory tiers. The number of supported tiers is {}.", + kMaxCacheMemoryTiers)); } - - for (auto tier_config: config) { - auto tier_size = tier_config.getSize(); - auto tier_ratio = tier_config.getRatio(); - if ((!tier_size and !tier_ratio) || (tier_size and tier_ratio)) { - throw std::invalid_argument( - "For each memory tier either size or ratio must be set."); - } + if (!config.size()) { + throw std::invalid_argument( + "There must be at least one memory tier config."); } - - validateMemoryTiersWithSize(config, this->size); - memoryTierConfigs = config; - return *this; } template -typename CacheAllocatorConfig::MemoryTierConfigs +const typename CacheAllocatorConfig::MemoryTierConfigs& CacheAllocatorConfig::getMemoryTierConfigs() const { - MemoryTierConfigs config = memoryTierConfigs; - size_t sum_ratios = 0; - - for (auto &tier_config: config) { - if (auto *v = std::get_if(&tier_config.shmOpts)) { - v->usePosix = usePosixShm; - } - - sum_ratios += tier_config.getRatio(); - } - - if (sum_ratios == 0) - return config; - - // if ratios are used, size must be specified - XDCHECK(size); - - // Convert ratios 
to sizes, size must be non-zero - size_t sum_sizes = 0; - size_t partition_size = size / sum_ratios; - for (auto& tier_config: config) { - tier_config.setSize(partition_size * tier_config.getRatio()); - tier_config.setRatio(0); - sum_sizes += tier_config.getSize(); - } - - return config; + return memoryTierConfigs; } template @@ -1037,46 +1007,6 @@ CacheAllocatorConfig::setSkipPromoteChildrenWhenParentFailed() { return *this; } -template -size_t CacheAllocatorConfig::getCacheSize() const noexcept { - if (size) - return size; - - size_t sum_sizes = 0; - for (const auto &tier_config : getMemoryTierConfigs()) { - sum_sizes += tier_config.getSize(); - } - - return sum_sizes; -} - -template -void CacheAllocatorConfig::validateMemoryTiersWithSize( - const MemoryTierConfigs &config, size_t size) const { - size_t sum_ratios = 0; - size_t sum_sizes = 0; - - for (const auto &tier_config: config) { - sum_ratios += tier_config.getRatio(); - sum_sizes += tier_config.getSize(); - } - - if (sum_ratios && sum_sizes) { - throw std::invalid_argument("Cannot mix ratios and sizes."); - } else if (sum_sizes) { - if (size && sum_sizes != size) { - throw std::invalid_argument( - "Sum of tier sizes doesn't match total cache size. 
" - "Setting of cache total size is not required when per-tier " - "sizes are specified - it is calculated as sum of tier sizes."); - } - } else if (!sum_ratios && !sum_sizes) { - throw std::invalid_argument( - "Either sum of all memory tiers sizes or sum of all ratios " - "must be greater than 0."); - } -} - template const CacheAllocatorConfig& CacheAllocatorConfig::validate() const { // we can track tail hits only if MMType is MM2Q @@ -1101,23 +1031,7 @@ const CacheAllocatorConfig& CacheAllocatorConfig::validate() const { "It's not allowed to enable both RemoveCB and ItemDestructor."); } - size_t sum_ratios = 0; - for (auto tier_config: memoryTierConfigs) { - sum_ratios += tier_config.getRatio(); - } - - if (sum_ratios) { - if (!size) { - throw std::invalid_argument( - "Total cache size must be specified when size ratios are " - "used to specify memory tier sizes."); - } else if (size < sum_ratios) { - throw std::invalid_argument( - "Sum of all tier size ratios is greater than total cache size."); - } - } - - return *this; + return validateMemoryTiers(); } template @@ -1144,6 +1058,24 @@ bool CacheAllocatorConfig::validateStrategy( (type != PoolOptimizeStrategy::MarginalHits || trackTailHits); } +template +const CacheAllocatorConfig& CacheAllocatorConfig::validateMemoryTiers() + const { + size_t parts = 0; + for (const auto& tierConfig : memoryTierConfigs) { + if (!tierConfig.getRatio()) { + throw std::invalid_argument("Tier ratio must be an integer number >=1."); + } + parts += tierConfig.getRatio(); + } + + if (parts > size) { + throw std::invalid_argument( + "Sum of tier ratios must be less than total cache size."); + } + return *this; +} + template std::map CacheAllocatorConfig::serialize() const { std::map configMap; diff --git a/cachelib/allocator/MemoryTierCacheConfig.h b/cachelib/allocator/MemoryTierCacheConfig.h index 12fd2c91f0..482d9be105 100644 --- a/cachelib/allocator/MemoryTierCacheConfig.h +++ b/cachelib/allocator/MemoryTierCacheConfig.h @@ -40,42 
+40,46 @@ class MemoryTierCacheConfig { return config; } - // Specifies size of this memory tier. Sizes of tiers must be specified by - // either setting size explicitly or using ratio, mixing of the two is not supported. - MemoryTierCacheConfig& setSize(size_t _size) { - size = _size; - return *this; - } - // Specifies ratio of this memory tier to other tiers. Absolute size // of each tier can be calculated as: - // cacheSize * tierRatio / Sum of ratios for all tiers; the difference - // between total cache size and sum of all tier sizes resulted from - // round off error is accounted for when calculating the last tier's - // size to make the totals equal. - MemoryTierCacheConfig& setRatio(double _ratio) { + // cacheSize * tierRatio / Sum of ratios for all tiers. + MemoryTierCacheConfig& setRatio(size_t _ratio) { + if (!_ratio) { + throw std::invalid_argument("Tier ratio must be an integer number >=1."); + } ratio = _ratio; return *this; } size_t getRatio() const noexcept { return ratio; } - size_t getSize() const noexcept { return size; } - const ShmTypeOpts& getShmTypeOpts() const noexcept { return shmOpts; } - // Size of this memory tiers - size_t size{0}; + size_t calculateTierSize(size_t totalCacheSize, size_t partitionNum) const { + if (!partitionNum) { + throw std::invalid_argument( + "The total number of tier ratios must be an integer number >=1."); + } - // Ratio is a number of parts of the total cache size to be allocated for this tier. - // E.g. if X is a total cache size, Yi are ratios specified for memory tiers, - // then size of the i-th tier Xi = (X / (Y1 + Y2)) * Yi and X = sum(Xi) - size_t ratio{0}; + if (partitionNum > totalCacheSize) { + throw std::invalid_argument( + "Ratio must be less or equal to total cache size."); + } + + return static_cast(getRatio() * (static_cast(totalCacheSize) / partitionNum)); + } + +private: + // Ratio is a number of parts of the total cache size to be allocated for this + // tier. E.g. 
if X is a total cache size, Yi are ratios specified for memory + // tiers, and Y is the sum of all Yi, then size of the i-th tier + // Xi = (X / Y) * Yi. For example, to configure 2-tier cache where each + // tier is a half of the total cache size, set both tiers' ratios to 1. + size_t ratio{1}; // Options specific to shm type ShmTypeOpts shmOpts; -private: MemoryTierCacheConfig() = default; }; } // namespace cachelib diff --git a/cachelib/allocator/memory/SlabAllocator.cpp b/cachelib/allocator/memory/SlabAllocator.cpp index ee5e9e5485..f48fdd5cbc 100644 --- a/cachelib/allocator/memory/SlabAllocator.cpp +++ b/cachelib/allocator/memory/SlabAllocator.cpp @@ -40,7 +40,7 @@ using namespace facebook::cachelib; namespace { -size_t roundDownToSlabSize(size_t size) { return size - (size % sizeof(Slab)); } +static inline size_t roundDownToSlabSize(size_t size) { return size - (size % sizeof(Slab)); } } // namespace // definitions to avoid ODR violation. diff --git a/cachelib/allocator/tests/AllocatorMemoryTiersTest.cpp b/cachelib/allocator/tests/AllocatorMemoryTiersTest.cpp index b6db9ce168..90ef34be41 100644 --- a/cachelib/allocator/tests/AllocatorMemoryTiersTest.cpp +++ b/cachelib/allocator/tests/AllocatorMemoryTiersTest.cpp @@ -23,7 +23,9 @@ namespace tests { using LruAllocatorMemoryTiersTest = AllocatorMemoryTiersTest; // TODO(MEMORY_TIER): add more tests with different eviction policies -TEST_F(LruAllocatorMemoryTiersTest, MultiTiers) { this->testMultiTiers(); } +TEST_F(LruAllocatorMemoryTiersTest, MultiTiersInvalid) { this->testMultiTiersInvalid(); } +TEST_F(LruAllocatorMemoryTiersTest, MultiTiersValid) { this->testMultiTiersValid(); } +TEST_F(LruAllocatorMemoryTiersTest, MultiTiersValidMixed) { this->testMultiTiersValidMixed(); } } // end of namespace tests } // end of namespace cachelib diff --git a/cachelib/allocator/tests/AllocatorMemoryTiersTest.h b/cachelib/allocator/tests/AllocatorMemoryTiersTest.h index 8208c6b19f..dba8cfd2dd 100644 ---
a/cachelib/allocator/tests/AllocatorMemoryTiersTest.h +++ b/cachelib/allocator/tests/AllocatorMemoryTiersTest.h @@ -27,7 +27,7 @@ namespace tests { template class AllocatorMemoryTiersTest : public AllocatorTest { public: - void testMultiTiers() { + void testMultiTiersInvalid() { typename AllocatorT::Config config; config.setCacheSize(100 * Slab::kSize); config.configureMemoryTiers({ @@ -41,6 +41,48 @@ class AllocatorMemoryTiersTest : public AllocatorTest { ASSERT_THROW(std::make_unique(AllocatorT::SharedMemNew, config), std::invalid_argument); } + + void testMultiTiersValid() { + typename AllocatorT::Config config; + config.setCacheSize(100 * Slab::kSize); + config.enableCachePersistence("/tmp"); + config.usePosixForShm(); + config.configureMemoryTiers({ + MemoryTierCacheConfig::fromFile("/tmp/a" + std::to_string(::getpid())) + .setRatio(1), + MemoryTierCacheConfig::fromFile("/tmp/b" + std::to_string(::getpid())) + .setRatio(1) + }); + + auto alloc = std::make_unique(AllocatorT::SharedMemNew, config); + ASSERT(alloc != nullptr); + + auto pool = alloc->addPool("default", alloc->getCacheMemoryStats().cacheSize); + auto handle = alloc->allocate(pool, "key", std::string("value").size()); + ASSERT(handle != nullptr); + ASSERT_NO_THROW(alloc->insertOrReplace(handle)); + } + + void testMultiTiersValidMixed() { + typename AllocatorT::Config config; + config.setCacheSize(100 * Slab::kSize); + config.enableCachePersistence("/tmp"); + config.usePosixForShm(); + config.configureMemoryTiers({ + MemoryTierCacheConfig::fromShm() + .setRatio(1), + MemoryTierCacheConfig::fromFile("/tmp/b" + std::to_string(::getpid())) + .setRatio(1) + }); + + auto alloc = std::make_unique(AllocatorT::SharedMemNew, config); + ASSERT(alloc != nullptr); + + auto pool = alloc->addPool("default", alloc->getCacheMemoryStats().cacheSize); + auto handle = alloc->allocate(pool, "key", std::string("value").size()); + ASSERT(handle != nullptr); + ASSERT_NO_THROW(alloc->insertOrReplace(handle)); + } }; } // 
namespace tests } // namespace cachelib diff --git a/cachelib/allocator/tests/CacheAllocatorConfigTest.cpp b/cachelib/allocator/tests/CacheAllocatorConfigTest.cpp new file mode 100644 index 0000000000..cd4edc89c6 --- /dev/null +++ b/cachelib/allocator/tests/CacheAllocatorConfigTest.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cachelib/allocator/CacheAllocatorConfig.h" +#include "cachelib/allocator/MemoryTierCacheConfig.h" +#include "cachelib/allocator/tests/TestBase.h" + +namespace facebook { +namespace cachelib { + +namespace tests { + +using AllocatorT = LruAllocator; +using MemoryTierConfigs = CacheAllocatorConfig::MemoryTierConfigs; + +size_t defaultTotalSize = 1 * 1024LL * 1024LL * 1024LL; + +class CacheAllocatorConfigTest : public testing::Test {}; + +MemoryTierConfigs generateTierConfigs(size_t numTiers, + MemoryTierCacheConfig& config) { + return MemoryTierConfigs(numTiers, config); +} + +TEST_F(CacheAllocatorConfigTest, MultipleTier0Config) { + AllocatorT::Config config; + // Throws if vector of tier configs is empty + EXPECT_THROW(config.configureMemoryTiers(MemoryTierConfigs()), + std::invalid_argument); +} + +TEST_F(CacheAllocatorConfigTest, MultipleTier1Config) { + AllocatorT::Config config; + // Accepts single-tier configuration + config.setCacheSize(defaultTotalSize) + .configureMemoryTiers({MemoryTierCacheConfig::fromShm().setRatio(1)}); + 
config.validateMemoryTiers(); +} + +TEST_F(CacheAllocatorConfigTest, InvalidTierRatios) { + AllocatorT::Config config; + EXPECT_THROW(config.configureMemoryTiers(generateTierConfigs( + config.kMaxCacheMemoryTiers + 1, + MemoryTierCacheConfig::fromShm().setRatio(0))), + std::invalid_argument); +} + +TEST_F(CacheAllocatorConfigTest, TotalCacheSizeLessThanRatios) { + AllocatorT::Config config; + // Throws if total cache size is set to 0 + config.setCacheSize(defaultTotalSize) + .configureMemoryTiers( + {MemoryTierCacheConfig::fromShm().setRatio(defaultTotalSize + 1)}); + EXPECT_THROW(config.validate(), std::invalid_argument); +} + +} // namespace tests +} // namespace cachelib +} // namespace facebook diff --git a/cachelib/allocator/tests/MemoryTiersTest.cpp b/cachelib/allocator/tests/MemoryTiersTest.cpp index 94339d560b..47dae87aef 100644 --- a/cachelib/allocator/tests/MemoryTiersTest.cpp +++ b/cachelib/allocator/tests/MemoryTiersTest.cpp @@ -14,6 +14,8 @@ * limitations under the License. 
*/ +#include + #include #include "cachelib/allocator/CacheAllocator.h" @@ -26,18 +28,17 @@ namespace tests { using LruAllocatorConfig = CacheAllocatorConfig; using LruMemoryTierConfigs = LruAllocatorConfig::MemoryTierConfigs; using Strings = std::vector; -using SizePair = std::tuple; -using SizePairs = std::vector; -const size_t defaultTotalCacheSize{1 * 1024 * 1024 * 1024}; +constexpr size_t MB = 1024ULL * 1024ULL; +constexpr size_t GB = MB * 1024ULL; + +using Ratios = std::vector; + +const size_t defaultTotalCacheSize{1 * GB}; const std::string defaultCacheDir{"/var/metadataDir"}; const std::string defaultPmemPath{"/dev/shm/p1"}; const std::string defaultDaxPath{"/dev/dax0.0"}; -const size_t metaDataSize = 4194304; -constexpr size_t MB = 1024ULL * 1024ULL; -constexpr size_t GB = MB * 1024ULL; - template class MemoryTiersTest : public AllocatorTest { public: @@ -50,38 +51,31 @@ class MemoryTiersTest : public AllocatorTest { EXPECT_EQ(actualConfig.getCacheDir(), expectedCacheDir); auto configs = actualConfig.getMemoryTierConfigs(); - size_t sum_sizes = std::accumulate( - configs.begin(), configs.end(), 0, - [](const size_t i, const MemoryTierCacheConfig& config) { - return i + config.getSize(); - }); size_t sum_ratios = std::accumulate( - configs.begin(), configs.end(), 0, + configs.begin(), configs.end(), 0UL, [](const size_t i, const MemoryTierCacheConfig& config) { return i + config.getRatio(); }); + size_t sum_sizes = std::accumulate( + configs.begin(), configs.end(), 0UL, + [&](const size_t i, const MemoryTierCacheConfig& config) { + return i + config.calculateTierSize(actualConfig.getCacheSize(), + sum_ratios); + }); - size_t partition_size = 0; - if (sum_ratios) { - partition_size = actualConfig.getCacheSize() / sum_ratios; - /* Sum of sizes can be lower due to rounding down to partition_size. 
*/ - EXPECT_GE(sum_sizes, expectedTotalCacheSize - partition_size); - } + EXPECT_GE(expectedTotalCacheSize, sum_ratios * Slab::kSize); + EXPECT_LE(sum_sizes, expectedTotalCacheSize); + EXPECT_GE(sum_sizes, expectedTotalCacheSize - configs.size() * Slab::kSize); for (auto i = 0; i < configs.size(); ++i) { auto& opt = std::get(configs[i].getShmTypeOpts()); EXPECT_EQ(opt.path, expectedPaths[i]); - EXPECT_GT(configs[i].getSize(), 0); - if (configs[i].getRatio() && (i < configs.size() - 1)) { - EXPECT_EQ(configs[i].getSize(), partition_size * configs[i].getRatio()); - } } } LruAllocatorConfig createTestCacheConfig( const Strings& tierPaths = {defaultPmemPath}, - const SizePairs& sizePairs = {std::make_tuple(1 /* ratio */, - 0 /* size */)}, + const Ratios& ratios = {1}, bool setPosixForShm = true, size_t cacheSize = defaultTotalCacheSize, const std::string& cacheDir = defaultCacheDir) { @@ -94,9 +88,8 @@ class MemoryTiersTest : public AllocatorTest { LruMemoryTierConfigs tierConfigs; tierConfigs.reserve(tierPaths.size()); for (auto i = 0; i < tierPaths.size(); ++i) { - tierConfigs.push_back(MemoryTierCacheConfig::fromFile(tierPaths[i]) - .setRatio(std::get<0>(sizePairs[i])) - .setSize(std::get<1>(sizePairs[i]))); + tierConfigs.push_back( + MemoryTierCacheConfig::fromFile(tierPaths[i]).setRatio(ratios[i])); } cfg.configureMemoryTiers(tierConfigs); return cfg; @@ -124,6 +117,30 @@ class MemoryTiersTest : public AllocatorTest { dramConfig.setCacheSize(totalCacheSize); return dramConfig; } + + void validatePoolSize(PoolId poolId, + std::unique_ptr& allocator, + size_t expectedSize) { + size_t actualSize = allocator->getPoolSize(poolId); + EXPECT_EQ(actualSize, expectedSize); + } + + void testAddPool(std::unique_ptr& alloc, + size_t poolSize, + bool isSizeValid = true, + size_t numTiers = 2) { + if (isSizeValid) { + auto pool = alloc->addPool("validPoolSize", poolSize); + EXPECT_LE(alloc->getPoolSize(pool), poolSize); + if (poolSize >= numTiers * Slab::kSize) + 
EXPECT_GE(alloc->getPoolSize(pool), poolSize - numTiers * Slab::kSize); + } else { + EXPECT_THROW(alloc->addPool("invalidPoolSize", poolSize), + std::invalid_argument); + // TODO: test this for all tiers + EXPECT_EQ(alloc->getPoolIds().size(), 0); + } + } }; using LruMemoryTiersTest = MemoryTiersTest; @@ -138,117 +155,129 @@ TEST_F(LruMemoryTiersTest, TestValid1TierDaxRatioConfig) { basicCheck(cfg, {defaultDaxPath}); } -TEST_F(LruMemoryTiersTest, TestValid1TierDaxSizeConfig) { - LruAllocatorConfig cfg = - createTestCacheConfig({defaultDaxPath}, - {std::make_tuple(0, defaultTotalCacheSize)}, - /* setPosixShm */ true, - /* cacheSize */ 0); - basicCheck(cfg, {defaultDaxPath}); - - // Setting size after conifguringMemoryTiers with sizes is not allowed. - EXPECT_THROW(cfg.setCacheSize(defaultTotalCacheSize + 1), - std::invalid_argument); -} - TEST_F(LruMemoryTiersTest, TestValid2TierDaxPmemConfig) { LruAllocatorConfig cfg = - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 0), std::make_tuple(1, 0)}); + createTestCacheConfig({defaultDaxPath, defaultPmemPath}, {1, 1}); basicCheck(cfg, {defaultDaxPath, defaultPmemPath}); } TEST_F(LruMemoryTiersTest, TestValid2TierDaxPmemRatioConfig) { LruAllocatorConfig cfg = - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(5, 0), std::make_tuple(2, 0)}); + createTestCacheConfig({defaultDaxPath, defaultPmemPath}, {5, 2}); basicCheck(cfg, {defaultDaxPath, defaultPmemPath}); } -TEST_F(LruMemoryTiersTest, TestValid2TierDaxPmemSizeConfig) { - size_t size_1 = 4321, size_2 = 1234; - LruAllocatorConfig cfg = createTestCacheConfig( - {defaultDaxPath, defaultPmemPath}, - {std::make_tuple(0, size_1), std::make_tuple(0, size_2)}, true, 0); - basicCheck(cfg, {defaultDaxPath, defaultPmemPath}, size_1 + size_2); - - // Setting size after conifguringMemoryTiers with sizes is not allowed. 
- EXPECT_THROW(cfg.setCacheSize(size_1 + size_2 + 1), std::invalid_argument); -} - TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigPosixShmNotSet) { LruAllocatorConfig cfg = createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 0), std::make_tuple(1, 0)}, + {1, 1}, /* setPosixShm */ false); } TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigNumberOfPartitionsTooLarge) { EXPECT_THROW(createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(defaultTotalCacheSize, 0), - std::make_tuple(1, 0)}) + {defaultTotalCacheSize, 1}) .validate(), std::invalid_argument); } -TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigSizesAndRatiosMixed) { - EXPECT_THROW( - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 0), std::make_tuple(1, 1)}), - std::invalid_argument); - EXPECT_THROW( - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 1), std::make_tuple(0, 1)}), - std::invalid_argument); -} - TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigSizesAndRatioNotSet) { - EXPECT_THROW( - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 0), std::make_tuple(0, 0)}), - std::invalid_argument); + EXPECT_THROW(createTestCacheConfig({defaultDaxPath, defaultPmemPath}, {1, 0}), + std::invalid_argument); } TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigRatiosCacheSizeNotSet) { - EXPECT_THROW( - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(1, 0), std::make_tuple(1, 0)}, - /* setPosixShm */ true, /* cacheSize */ 0) - .validate(), - std::invalid_argument); + EXPECT_THROW(createTestCacheConfig({defaultDaxPath, defaultPmemPath}, {1, 1}, + /* setPosixShm */ true, /* cacheSize */ 0) + .validate(), + std::invalid_argument); } TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigSizesNeCacheSize) { - EXPECT_THROW( - createTestCacheConfig({defaultDaxPath, defaultPmemPath}, - {std::make_tuple(0, 1), std::make_tuple(0, 1)}), - std::invalid_argument); 
+ EXPECT_THROW(createTestCacheConfig({defaultDaxPath, defaultPmemPath}, {0, 0}), + std::invalid_argument); } -TEST_F(LruMemoryTiersTest, TestTieredCacheSize) { - size_t totalSizes[] = {50 * MB, 77 * MB, 100 * MB, 101 * MB + MB / 2, - 1 * GB, 4 * GB, 8 * GB, 9 * GB}; - size_t numTiers[] = {2, 3, 4}; +TEST_F(LruMemoryTiersTest, TestPoolAllocations) { + std::vector totalCacheSizes = {2 * GB}; - auto getCacheSize = [&](size_t cacheSize, size_t tiers) { - std::unique_ptr alloc; - if (tiers < 2) { - alloc = std::unique_ptr( - new LruAllocator(createDramCacheConfig(cacheSize))); - } else { - alloc = std::unique_ptr( - new LruAllocator(LruAllocator::SharedMemNew, - createTieredCacheConfig(cacheSize, tiers))); + static const size_t numExtraSizes = 4; + static const size_t numExtraSlabs = 20; + + for (size_t i = 0; i < numExtraSizes; i++) { + totalCacheSizes.push_back(totalCacheSizes.back() + + (folly::Random::rand64() % numExtraSlabs) * + Slab::kSize); + } + + const std::string path = "/tmp/tier"; + Strings paths = {path + "0", path + "1"}; + + size_t min_ratio = 1; + size_t max_ratio = 111; + + static const size_t numCombinations = 100; + + for (auto totalCacheSize : totalCacheSizes) { + for (size_t k = 0; k < numCombinations; k++) { + const size_t i = folly::Random::rand32() % max_ratio + min_ratio; + const size_t j = folly::Random::rand32() % max_ratio + min_ratio; + LruAllocatorConfig cfg = + createTestCacheConfig(paths, {i, j}, + /* usePosix */ true, totalCacheSize); + basicCheck(cfg, paths, totalCacheSize); + + std::unique_ptr alloc = std::unique_ptr( + new LruAllocator(LruAllocator::SharedMemNew, cfg)); + + size_t size = (folly::Random::rand64() % + (alloc->getCacheMemoryStats().cacheSize - Slab::kSize)) + + Slab::kSize; + testAddPool(alloc, size, true); } - return alloc->getCacheMemoryStats().cacheSize; - }; - - for (auto totalSize : totalSizes) { - auto dramCacheSize = getCacheSize(totalSize, 1); - for (auto n : numTiers) { - auto tieredCacheSize = 
getCacheSize(totalSize, n); - EXPECT_GT(dramCacheSize, tieredCacheSize); - EXPECT_GE(metaDataSize * n * 2, dramCacheSize - tieredCacheSize); + } +} + +TEST_F(LruMemoryTiersTest, TestPoolInvalidAllocations) { + std::vector totalCacheSizes = {48 * MB, 51 * MB, 256 * MB, + 1 * GB, 5 * GB, 8 * GB}; + const std::string path = "/tmp/tier"; + Strings paths = {path + "0", path + "1"}; + + size_t min_ratio = 1; + size_t max_ratio = 111; + + static const size_t numCombinations = 100; + + for (auto totalCacheSize : totalCacheSizes) { + for (size_t k = 0; k < numCombinations; k++) { + const size_t i = folly::Random::rand32() % max_ratio + min_ratio; + const size_t j = folly::Random::rand32() % max_ratio + min_ratio; + LruAllocatorConfig cfg = + createTestCacheConfig(paths, {i, j}, + /* usePosix */ true, totalCacheSize); + + std::unique_ptr alloc = nullptr; + try { + alloc = std::unique_ptr( + new LruAllocator(LruAllocator::SharedMemNew, cfg)); + } catch(...) { + // exception only if cache too small + size_t sum_ratios = std::accumulate( + cfg.getMemoryTierConfigs().begin(), cfg.getMemoryTierConfigs().end(), 0UL, + [](const size_t i, const MemoryTierCacheConfig& config) { + return i + config.getRatio(); + }); + auto tier1slabs = cfg.getMemoryTierConfigs()[0].calculateTierSize(cfg.getCacheSize(), sum_ratios) / Slab::kSize; + auto tier2slabs = cfg.getMemoryTierConfigs()[1].calculateTierSize(cfg.getCacheSize(), sum_ratios) / Slab::kSize; + EXPECT_TRUE(tier1slabs <= 2 || tier2slabs <= 2); + + continue; + } + + size_t size = (folly::Random::rand64() % (100 * GB)) + + alloc->getCacheMemoryStats().cacheSize; + testAddPool(alloc, size, false); } } } diff --git a/cachelib/cachebench/util/CacheConfig.cpp b/cachelib/cachebench/util/CacheConfig.cpp index 2604744bd9..fbf84f8ee5 100644 --- a/cachelib/cachebench/util/CacheConfig.cpp +++ b/cachelib/cachebench/util/CacheConfig.cpp @@ -138,9 +138,8 @@ std::shared_ptr CacheConfig::getRebalanceStrategy() const { 
MemoryTierConfig::MemoryTierConfig(const folly::dynamic& configJson) { JSONSetVal(configJson, file); JSONSetVal(configJson, ratio); - JSONSetVal(configJson, size); - checkCorrectSize(); + checkCorrectSize(); } } // namespace cachebench diff --git a/cachelib/cachebench/util/CacheConfig.h b/cachelib/cachebench/util/CacheConfig.h index f09d5966bd..3d790516cd 100644 --- a/cachelib/cachebench/util/CacheConfig.h +++ b/cachelib/cachebench/util/CacheConfig.h @@ -47,13 +47,12 @@ struct MemoryTierConfig : public JSONConfig { explicit MemoryTierConfig(const folly::dynamic& configJson); MemoryTierCacheConfig getMemoryTierCacheConfig() { MemoryTierCacheConfig config = memoryTierCacheConfigFromSource(); - config.setSize(size).setRatio(ratio); + config.setRatio(ratio); return config; } std::string file{""}; size_t ratio{0}; - size_t size{0}; private: MemoryTierCacheConfig memoryTierCacheConfigFromSource() {