diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index f0091ad367c0a..afb2d6d91cba4 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -380,6 +380,18 @@ elseif(LIBC_TARGET_OS_IS_GPU)
     aligned_alloc
   )
 else()
+  add_header_library(
+    block
+    HDRS
+      block.h
+    DEPENDS
+      libc.src.__support.CPP.algorithm
+      libc.src.__support.CPP.limits
+      libc.src.__support.CPP.new
+      libc.src.__support.CPP.optional
+      libc.src.__support.CPP.span
+      libc.src.__support.CPP.type_traits
+  )
   add_entrypoint_external(
     malloc
   )
diff --git a/libc/src/stdlib/block.h b/libc/src/stdlib/block.h
new file mode 100644
index 0000000000000..ce26add082d0a
--- /dev/null
+++ b/libc/src/stdlib/block.h
@@ -0,0 +1,482 @@
+//===-- Implementation header for a block of memory -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_BLOCK_H
+#define LLVM_LIBC_SRC_STDLIB_BLOCK_H
+
+#include "src/__support/CPP/algorithm.h"
+#include "src/__support/CPP/cstddef.h"
+#include "src/__support/CPP/limits.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/span.h"
+#include "src/__support/CPP/type_traits.h"
+
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE {
+
+namespace internal {
+// Types of corrupted blocks, and functions to crash with an error message
+// corresponding to each type.
+enum class BlockStatus {
+  VALID,
+  MISALIGNED,
+  PREV_MISMATCHED,
+  NEXT_MISMATCHED,
+};
+} // namespace internal
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) {
+  // Note this shouldn't overflow since the result will always be <= value.
+  return (value / alignment) * alignment;
+}
+
+/// Returns the value rounded down to the nearest multiple of alignment.
+template <typename T>
+LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) {
+  return reinterpret_cast<T *>(
+      align_down(reinterpret_cast<size_t>(value), alignment));
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) {
+  __builtin_add_overflow(value, alignment - 1, &value);
+  return align_down(value, alignment);
+}
+
+/// Returns the value rounded up to the nearest multiple of alignment.
+template <typename T>
+LIBC_INLINE constexpr T *align_up(T *value, size_t alignment) {
+  return reinterpret_cast<T *>(
+      align_up(reinterpret_cast<size_t>(value), alignment));
+}
+
+using ByteSpan = cpp::span<cpp::byte>;
+using cpp::optional;
+
+/// Memory region with links to adjacent blocks.
+///
+/// The blocks do not encode their size directly. Instead, they encode offsets
+/// to the next and previous blocks using the type given by the `OffsetType`
+/// template parameter. The encoded offsets are simply the offsets divided by
+/// the minimum block alignment, `ALIGNMENT`.
+///
+/// The `ALIGNMENT` constant provided by the derived block is typically the
+/// minimum value of `alignof(OffsetType)`. Since the addressable range of a
+/// block is given by `std::numeric_limits<OffsetType>::max() * ALIGNMENT`, it
+/// may be advantageous to set a higher alignment if it allows using a smaller
+/// offset type, even if this wastes some bytes in order to align block
+/// headers.
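+/// For example, with `OffsetType` set to `uint16_t` and an `ALIGNMENT` of 4,
+/// offsets can address up to 0xFFFF * 4 = 262,140 bytes, at the cost of
+/// rounding every block size up to a multiple of 4.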
+///
+/// Blocks will always be aligned to an `ALIGNMENT` boundary. Block sizes will
+/// always be rounded up to a multiple of `ALIGNMENT`.
+///
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, 4>`s. The indices indicate byte offsets:
+///
+/// @code{.unparsed}
+/// Block 1:
+/// +---------------------+------+--------------+
+/// | Header              | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | prev     | next     |      |              |
+/// | 0......3 | 4......7 | 8..9 | 10.......280 |
+/// | 00000000 | 00000046 | 8008 |  <app data>  |
+/// +----------+----------+------+--------------+
+/// Block 2:
+/// +---------------------+------+--------------+
+/// | Header              | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | prev     | next     |      |              |
+/// | 0......3 | 4......7 | 8..9 | 10......1048 |
+/// | 00000046 | 00000106 | 2008 | f7f7....f7f7 |
+/// +----------+----------+------+--------------+
+/// @endcode
+///
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// @tparam OffsetType Unsigned integral type used to encode offsets. Larger
+///                    types can address more memory, but consume greater
+///                    overhead.
+/// @tparam kAlign     Sets the overall alignment for blocks. Minimum is
+///                    `alignof(OffsetType)` (the default). Larger values can
+///                    address more memory, but consume greater overhead.
+template <typename OffsetType = uintptr_t, size_t kAlign = alignof(OffsetType)>
+class Block {
+public:
+  using offset_type = OffsetType;
+  static_assert(cpp::is_unsigned_v<offset_type>,
+                "offset type must be unsigned");
+
+  static constexpr size_t ALIGNMENT = cpp::max(kAlign, alignof(offset_type));
+  static constexpr size_t BLOCK_OVERHEAD = align_up(sizeof(Block), ALIGNMENT);
+
+  // No copy or move.
+  Block(const Block &other) = delete;
+  Block &operator=(const Block &other) = delete;
+
+  /// Creates the first block for a given memory region.
+  static optional<Block *> init(ByteSpan region);
+
+  /// @returns A pointer to a `Block`, given a pointer to the start of the
+  /// usable space inside the block.
+  ///
+  /// This is the inverse of `usable_space()`.
+  ///
+  /// @warning This method does not do any checking; passing a random
+  /// pointer will return a non-null pointer.
+  static Block *from_usable_space(void *usable_space) {
+    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
+    return reinterpret_cast<Block *>(bytes - BLOCK_OVERHEAD);
+  }
+  static const Block *from_usable_space(const void *usable_space) {
+    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
+    return reinterpret_cast<const Block *>(bytes - BLOCK_OVERHEAD);
+  }
+
+  /// @returns The total size of the block in bytes, including the header.
+  size_t outer_size() const { return next_ * ALIGNMENT; }
+
+  /// @returns The number of usable bytes inside the block.
+  size_t inner_size() const { return outer_size() - BLOCK_OVERHEAD; }
+
+  /// @returns The number of bytes requested using AllocFirst or AllocLast.
+  size_t requested_size() const { return inner_size() - padding_; }
+
+  /// @returns A pointer to the usable space inside this block.
+  cpp::byte *usable_space() {
+    return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
+  }
+  const cpp::byte *usable_space() const {
+    return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
+  }
+
+  /// Marks the block as free and merges it with any free neighbors.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer.
+  /// If neither neighbor is free, the returned pointer will point to the
+  /// original block. Otherwise, it will point to the new, larger block
+  /// created by merging adjacent free blocks together.
+  static void free(Block *&block);
+
+  /// Attempts to split this block.
+  ///
+  /// If successful, the block will have an inner size of `new_inner_size`,
+  /// rounded up to an `ALIGNMENT` boundary. The remaining space will be
+  /// returned as a new block.
+  ///
+  /// This method may fail if the remaining space is too small to hold a new
+  /// block. If this method fails for any reason, the original block is
+  /// unmodified.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer with a pointer to the new, smaller block.
+  static optional<Block *> split(Block *&block, size_t new_inner_size);
+
+  /// Merges this block with the one that comes after it.
+  ///
+  /// This method is static in order to consume and replace the given block
+  /// pointer with a pointer to the new, larger block.
+  static bool merge_next(Block *&block);
+
+  /// Fetches the block immediately after this one.
+  ///
+  /// For performance, this always returns a block pointer, even if the
+  /// returned pointer is invalid. The pointer is valid if and only if
+  /// `last()` is false.
+  ///
+  /// Typically, after calling `init` callers may save a pointer past the end
+  /// of the list using `next()`. This makes it easy to subsequently iterate
+  /// over the list:
+  /// @code{.cpp}
+  ///   auto result = Block<>::init(byte_span);
+  ///   Block<>* begin = *result;
+  ///   Block<>* end = begin->next();
+  ///   ...
+  ///   for (auto* block = begin; block != end; block = block->next()) {
+  ///     // Do something with each block.
+  ///   }
+  /// @endcode
+  Block *next() const;
+
+  /// @copydoc `next`.
+  static Block *next_block(const Block *block) {
+    return block == nullptr ? nullptr : block->next();
+  }
+
+  /// @returns The block immediately before this one, or a null pointer if
+  /// this is the first block.
+  Block *prev() const;
+
+  /// @copydoc `prev`.
+  static Block *prev_block(const Block *block) {
+    return block == nullptr ? nullptr : block->prev();
+  }
+
+  /// Returns the current alignment of a block.
+  size_t alignment() const { return used() ? info_.alignment : 1; }
+
+  /// Indicates whether the block is in use.
+  ///
+  /// @returns `true` if the block is in use or `false` if not.
+  bool used() const { return info_.used; }
+
+  /// Indicates whether this block is the last block or not (i.e. whether
+  /// `next()` points to a valid block or not). This is needed because
+  /// `next()` points to the end of this block, whether there is a valid
+  /// block there or not.
+  ///
+  /// @returns `true` if this is the last block or `false` if not.
+  bool last() const { return info_.last; }
+
+  /// Marks this block as in use.
+  void mark_used() { info_.used = 1; }
+
+  /// Marks this block as free.
+  void mark_free() { info_.used = 0; }
+
+  /// Marks this block as the last one in the chain.
+  void mark_last() { info_.last = 1; }
+
+  /// Clears the last bit from this block.
+  void clear_last() { info_.last = 0; }
+
+  /// @brief Checks if a block is valid.
+  ///
+  /// @returns `true` if and only if the following conditions are met:
+  /// * The block is aligned.
+  /// * The prev/next fields match with the previous and next blocks.
+  bool is_valid() const {
+    return check_status() == internal::BlockStatus::VALID;
+  }
+
+private:
+  /// Consumes the block and returns it as a span of bytes.
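+  ///
+  /// The block's lifetime ends with this call; the returned bytes must be
+  /// passed back through `as_block` before being treated as a block again.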
+  static ByteSpan as_bytes(Block *&&block);
+
+  /// Consumes the span of bytes and uses it to construct and return a block.
+  static Block *as_block(size_t prev_outer_size, ByteSpan bytes);
+
+  Block(size_t prev_outer_size, size_t outer_size);
+
+  /// Returns a `BlockStatus` that is either VALID or indicates the reason why
+  /// the block is invalid.
+  ///
+  /// If the block is invalid at multiple points, this function will only
+  /// return one of the reasons.
+  internal::BlockStatus check_status() const;
+
+  /// Like `split`, but assumes the caller has already checked the parameters
+  /// to ensure the split will succeed.
+  static Block *split_impl(Block *&block, size_t new_inner_size);
+
+  /// Offset (in increments of the minimum alignment) from this block to the
+  /// previous block. 0 if this is the first block.
+  offset_type prev_ = 0;
+
+  /// Offset (in increments of the minimum alignment) from this block to the
+  /// next block. Valid even if this is the last block, since it equals the
+  /// size of the block.
+  offset_type next_ = 0;
+
+  /// Information about the current state of the block:
+  /// * If the `used` flag is set, the block's usable memory has been
+  ///   allocated and is being used.
+  /// * If the `last` flag is set, the block does not have a next block.
+  /// * If the `used` flag is set, the alignment represents the requested
+  ///   value when the memory was allocated, which may be less strict than
+  ///   the actual alignment.
+  struct {
+    uint16_t used : 1;
+    uint16_t last : 1;
+    uint16_t alignment : 14;
+  } info_;
+
+  /// Number of bytes allocated beyond what was requested. This will be at
+  /// most the minimum alignment, i.e. `alignof(offset_type)`.
+  uint16_t padding_ = 0;
+} __attribute__((packed, aligned(kAlign)));
+
+// Public template method implementations.
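+
+/// Returns the largest subspan of `bytes` whose start and end both lie on an
+/// `alignment` boundary, or an empty span if there is no such subspan.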
+LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
+  if (bytes.data() == nullptr)
+    return ByteSpan();
+
+  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
+  auto aligned_start = align_up(unaligned_start, alignment);
+  auto unaligned_end = unaligned_start + bytes.size();
+  auto aligned_end = align_down(unaligned_end, alignment);
+
+  if (aligned_end <= aligned_start)
+    return ByteSpan();
+
+  return bytes.subspan(aligned_start - unaligned_start,
+                       aligned_end - aligned_start);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::init(ByteSpan region) {
+  optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
+  if (!result)
+    return {};
+
+  region = result.value();
+  if (region.size() < BLOCK_OVERHEAD)
+    return {};
+
+  if (cpp::numeric_limits<OffsetType>::max() < region.size() / ALIGNMENT)
+    return {};
+
+  Block *block = as_block(0, region);
+  block->mark_last();
+  return block;
+}
+
+template <typename OffsetType, size_t kAlign>
+void Block<OffsetType, kAlign>::free(Block *&block) {
+  if (block == nullptr)
+    return;
+
+  block->mark_free();
+  Block *prev = block->prev();
+
+  if (merge_next(prev))
+    block = prev;
+
+  merge_next(block);
+}
+
+template <typename OffsetType, size_t kAlign>
+optional<Block<OffsetType, kAlign> *>
+Block<OffsetType, kAlign>::split(Block *&block, size_t new_inner_size) {
+  if (block == nullptr)
+    return {};
+
+  if (block->used())
+    return {};
+
+  size_t old_inner_size = block->inner_size();
+  new_inner_size = align_up(new_inner_size, ALIGNMENT);
+  if (old_inner_size < new_inner_size)
+    return {};
+
+  if (old_inner_size - new_inner_size < BLOCK_OVERHEAD)
+    return {};
+
+  return split_impl(block, new_inner_size);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::split_impl(Block *&block, size_t new_inner_size) {
+  size_t prev_outer_size = block->prev_ * ALIGNMENT;
+  size_t outer_size1 = new_inner_size + BLOCK_OVERHEAD;
+  bool is_last = block->last();
+  ByteSpan bytes = as_bytes(cpp::move(block));
+  Block *block1 = as_block(prev_outer_size, bytes.subspan(0, outer_size1));
+  Block *block2 = as_block(outer_size1, bytes.subspan(outer_size1));
+
+  if (is_last)
+    block2->mark_last();
+  else
+    block2->next()->prev_ = block2->next_;
+
+  block = cpp::move(block1);
+  return block2;
+}
+
+template <typename OffsetType, size_t kAlign>
+bool Block<OffsetType, kAlign>::merge_next(Block *&block) {
+  if (block == nullptr)
+    return false;
+
+  if (block->last())
+    return false;
+
+  Block *next = block->next();
+  if (block->used() || next->used())
+    return false;
+
+  size_t prev_outer_size = block->prev_ * ALIGNMENT;
+  bool is_last = next->last();
+  ByteSpan prev_bytes = as_bytes(cpp::move(block));
+  ByteSpan next_bytes = as_bytes(cpp::move(next));
+  size_t outer_size = prev_bytes.size() + next_bytes.size();
+  cpp::byte *merged = ::new (prev_bytes.data()) cpp::byte[outer_size];
+  block = as_block(prev_outer_size, ByteSpan(merged, outer_size));
+
+  if (is_last)
+    block->mark_last();
+  else
+    block->next()->prev_ = block->next_;
+
+  return true;
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::next() const {
+  uintptr_t addr =
+      last() ? 0 : reinterpret_cast<uintptr_t>(this) + outer_size();
+  return reinterpret_cast<Block *>(addr);
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::prev() const {
+  uintptr_t addr =
+      (prev_ == 0) ? 0
+                   : reinterpret_cast<uintptr_t>(this) - (prev_ * ALIGNMENT);
+  return reinterpret_cast<Block *>(addr);
+}
+
+// Private template method implementations.
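+//
+// `as_bytes` and `as_block` below use placement new to retype a region of
+// memory in place: no bytes are copied, and only the header fields of a
+// newly constructed block are written.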
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign>::Block(size_t prev_outer_size, size_t outer_size) {
+  prev_ = prev_outer_size / ALIGNMENT;
+  next_ = outer_size / ALIGNMENT;
+  info_.used = 0;
+  info_.last = 0;
+  info_.alignment = ALIGNMENT;
+}
+
+template <typename OffsetType, size_t kAlign>
+ByteSpan Block<OffsetType, kAlign>::as_bytes(Block *&&block) {
+  size_t block_size = block->outer_size();
+  cpp::byte *bytes = new (cpp::move(block)) cpp::byte[block_size];
+  return {bytes, block_size};
+}
+
+template <typename OffsetType, size_t kAlign>
+Block<OffsetType, kAlign> *
+Block<OffsetType, kAlign>::as_block(size_t prev_outer_size, ByteSpan bytes) {
+  return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
+}
+
+template <typename OffsetType, size_t kAlign>
+internal::BlockStatus Block<OffsetType, kAlign>::check_status() const {
+  if (reinterpret_cast<uintptr_t>(this) % ALIGNMENT != 0)
+    return internal::BlockStatus::MISALIGNED;
+
+  if (!last() && (this >= next() || this != next()->prev()))
+    return internal::BlockStatus::NEXT_MISMATCHED;
+
+  if (prev() && (this <= prev() || this != prev()->next()))
+    return internal::BlockStatus::PREV_MISMATCHED;
+
+  return internal::BlockStatus::VALID;
+}
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_BLOCK_H
diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt
index 38488778c657c..f122cd56a6060 100644
--- a/libc/test/src/stdlib/CMakeLists.txt
+++ b/libc/test/src/stdlib/CMakeLists.txt
@@ -54,6 +54,19 @@ add_libc_test(
     libc.src.stdlib.atoll
 )
 
+add_libc_test(
+  block_test
+  SUITE
+    libc-stdlib-tests
+  SRCS
+    block_test.cpp
+  DEPENDS
+    libc.src.stdlib.block
+    libc.src.__support.CPP.array
+    libc.src.__support.CPP.span
+    libc.src.string.memcpy
+)
+
 add_fp_unittest(
   strtod_test
   SUITE
diff --git a/libc/test/src/stdlib/block_test.cpp b/libc/test/src/stdlib/block_test.cpp
new file mode 100644
index 0000000000000..0544e699cc8b2
--- /dev/null
+++ b/libc/test/src/stdlib/block_test.cpp
@@ -0,0 +1,570 @@
+//===-- Unittests for a block of memory -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include <stdint.h>
+
+#include "src/stdlib/block.h"
+
+#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/span.h"
+#include "src/string/memcpy.h"
+#include "test/UnitTest/Test.h"
+
+// Block types.
+using LargeOffsetBlock = LIBC_NAMESPACE::Block<uint64_t>;
+using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
+
+// For each of the block types above, we'd like to run the same tests since
+// they should work independently of the parameter sizes. Rather than
+// rewriting the same test for each case, let's instead create a custom test
+// framework for each test case that invokes the actual testing function for
+// each block type.
+//
+// It's organized this way because the ASSERT/EXPECT macros only work within a
+// `Test` class due to those macros expanding to `test` methods.
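+//
+// For example, `TEST_FOR_EACH_BLOCK_TYPE(Foo)` defines a fixture class
+// `LlvmLibcBlockTestFoo`, registers a single test that calls
+// `RunTest<LargeOffsetBlock>()` and `RunTest<SmallOffsetBlock>()`, and leaves
+// the body of `RunTest` to be supplied right after the macro invocation.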
+#define TEST_FOR_EACH_BLOCK_TYPE(TestCase)                                    \
+  class LlvmLibcBlockTest##TestCase : public LIBC_NAMESPACE::testing::Test {  \
+  public:                                                                     \
+    template <typename BlockType> void RunTest();                             \
+  };                                                                          \
+  TEST_F(LlvmLibcBlockTest##TestCase, TestCase) {                             \
+    RunTest<LargeOffsetBlock>();                                              \
+    RunTest<SmallOffsetBlock>();                                              \
+  }                                                                           \
+  template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
+
+using LIBC_NAMESPACE::cpp::array;
+using LIBC_NAMESPACE::cpp::byte;
+using LIBC_NAMESPACE::cpp::span;
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
+  constexpr size_t kN = 1024;
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  EXPECT_EQ(block->outer_size(), kN);
+  EXPECT_EQ(block->inner_size(), kN - BlockType::BLOCK_OVERHEAD);
+  EXPECT_EQ(block->prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(block->next(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block->used());
+  EXPECT_TRUE(block->last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateUnalignedSingleBlock) {
+  constexpr size_t kN = 1024;
+
+  // Force alignment, so we can un-force it below.
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  span<byte> aligned(bytes);
+
+  auto result = BlockType::init(aligned.subspan(1));
+  EXPECT_TRUE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotCreateTooSmallBlock) {
+  array<byte, 2> bytes;
+  auto result = BlockType::init(bytes);
+  EXPECT_FALSE(result.has_value());
+}
+
+// This test specifically checks that we cannot allocate a block with a size
+// larger than what can be held by the offset type. We don't need to test with
+// multiple block types for this particular check, so we use the normal TEST
+// macro and not the custom framework.
+TEST(LlvmLibcBlockTest, CannotCreateTooLargeBlock) {
+  using BlockType = LIBC_NAMESPACE::Block<uint8_t>;
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  EXPECT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  auto *block1 = *result;
+
+  result = BlockType::split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+
+  auto *block2 = *result;
+
+  EXPECT_EQ(block1->inner_size(), kSplitN);
+  EXPECT_EQ(block1->outer_size(), kSplitN + BlockType::BLOCK_OVERHEAD);
+  EXPECT_FALSE(block1->last());
+
+  EXPECT_EQ(block2->outer_size(), kN - kSplitN - BlockType::BLOCK_OVERHEAD);
+  EXPECT_FALSE(block2->used());
+  EXPECT_TRUE(block2->last());
+
+  EXPECT_EQ(block1->next(), block2);
+  EXPECT_EQ(block2->prev(), block1);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  // We should split at sizeof(BlockType) + kSplitN bytes. Then we need to
+  // round that up to an alignof(BlockType) boundary.
+  constexpr size_t kSplitN = 513;
+  uintptr_t split_addr = reinterpret_cast<uintptr_t>(block1) + kSplitN;
+  split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
+  uintptr_t split_len = split_addr - (uintptr_t)&bytes;
+
+  result = BlockType::split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_EQ(block1->inner_size(), split_len);
+  EXPECT_EQ(block1->outer_size(), split_len + BlockType::BLOCK_OVERHEAD);
+
+  EXPECT_EQ(block2->outer_size(), kN - block1->outer_size());
+  EXPECT_FALSE(block2->used());
+
+  EXPECT_EQ(block1->next(), block2);
+  EXPECT_EQ(block2->prev(), block1);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
+  // Split once, then split the original block again to ensure that the
+  // pointers get rewired properly.
+  // I.e.
+  // [[ BLOCK 1 ]]
+  // block1->split()
+  // [[ BLOCK1 ]][[ BLOCK2 ]]
+  // block1->split()
+  // [[ BLOCK1 ]][[ BLOCK3 ]][[ BLOCK2 ]]
+
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block1, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_EQ(block1->next(), block3);
+  EXPECT_EQ(block3->prev(), block1);
+  EXPECT_EQ(block3->next(), block2);
+  EXPECT_EQ(block2->prev(), block3);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
+  constexpr size_t kN = 64;
+  constexpr size_t kSplitN = kN + 1;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = kN - BlockType::BLOCK_OVERHEAD - 1;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitNull) {
+  BlockType *block = nullptr;
+  auto result = BlockType::split(block, 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeBlockLargerInSplit) {
+  // Ensure that we can't ask for more space than the block actually has...
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::split(block, block->inner_size() + 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
+  // Ensure that the second block in a split is at least the size of the
+  // header.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::split(block, block->inner_size() -
+                                       BlockType::BLOCK_OVERHEAD + 1);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeFirstBlock) {
+  // This block does support splitting with zero payload size.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  result = BlockType::split(block, 0);
+  ASSERT_TRUE(result.has_value());
+  EXPECT_EQ(block->inner_size(), static_cast<size_t>(0));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeSecondBlock) {
+  // Likewise, the split block can be zero-width.
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1,
+                            block1->inner_size() - BlockType::BLOCK_OVERHEAD);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_EQ(block2->inner_size(), static_cast<size_t>(0));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
+  constexpr size_t kN = 1024;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->mark_used();
+  EXPECT_TRUE(block->used());
+
+  // Size should be unaffected.
+  EXPECT_EQ(block->outer_size(), kN);
+
+  block->mark_free();
+  EXPECT_FALSE(block->used());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitUsedBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->mark_used();
+  result = BlockType::split(block, kSplitN);
+  ASSERT_FALSE(result.has_value());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
+  // Do the three-way split from "CanSplitMidBlock", then merge blocks 3
+  // and 2.
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+
+  result = BlockType::split(block1, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_TRUE(BlockType::merge_next(block3));
+
+  EXPECT_EQ(block1->next(), block3);
+  EXPECT_EQ(block3->prev(), block1);
+  EXPECT_EQ(block1->inner_size(), kSplit2);
+  EXPECT_EQ(block3->outer_size(), kN - block1->outer_size());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  // Do a split, just to check that the checks on next/prev are different...
+  result = BlockType::split(block1, kSplitN);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  EXPECT_FALSE(BlockType::merge_next(block2));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeNull) {
+  BlockType *block = nullptr;
+  EXPECT_FALSE(BlockType::merge_next(block));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplitN = 512;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  // Do a split, just to check that the checks on next/prev are different...
+  result = BlockType::split(block, kSplitN);
+  ASSERT_TRUE(result.has_value());
+
+  block->mark_used();
+  EXPECT_FALSE(BlockType::merge_next(block));
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeSingleBlock) {
+  constexpr size_t kN = 1024;
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block = *result;
+
+  block->mark_used();
+  BlockType::free(block);
+  EXPECT_FALSE(block->used());
+  EXPECT_EQ(block->outer_size(), kN);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockWithoutMerging) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  block1->mark_used();
+  block2->mark_used();
+  block3->mark_used();
+
+  BlockType::free(block2);
+  EXPECT_FALSE(block2->used());
+  EXPECT_NE(block2->prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block2->last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithPrev) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  block2->mark_used();
+  block3->mark_used();
+
+  BlockType::free(block2);
+  EXPECT_FALSE(block2->used());
+  EXPECT_EQ(block2->prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_FALSE(block2->last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithNext) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+
+  block1->mark_used();
+  block2->mark_used();
+
+  BlockType::free(block2);
+  EXPECT_FALSE(block2->used());
+  EXPECT_NE(block2->prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(block2->last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeUsedBlockAndMergeWithBoth) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+
+  block2->mark_used();
+
+  BlockType::free(block2);
+  EXPECT_FALSE(block2->used());
+  EXPECT_EQ(block2->prev(), static_cast<BlockType *>(nullptr));
+  EXPECT_TRUE(block2->last());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckValidBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 512;
+  constexpr size_t kSplit2 = 256;
+
+  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  EXPECT_TRUE(block1->is_valid());
+  EXPECT_TRUE(block2->is_valid());
+  EXPECT_TRUE(block3->is_valid());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckInvalidBlock) {
+  constexpr size_t kN = 1024;
+  constexpr size_t kSplit1 = 128;
+  constexpr size_t kSplit2 = 384;
+  constexpr size_t kSplit3 = 256;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  result = BlockType::split(block1, kSplit1);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block2 = *result;
+
+  result = BlockType::split(block2, kSplit2);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block3 = *result;
+
+  result = BlockType::split(block3, kSplit3);
+  ASSERT_TRUE(result.has_value());
+
+  // Corrupt a Block header.
+  // This must not touch memory outside the original region, or the test may
+  // (correctly) abort when run with address sanitizer.
+  // To remain as agnostic to the internals of `Block` as possible, the test
+  // copies a smaller block's header to a larger block.
+  EXPECT_TRUE(block1->is_valid());
+  EXPECT_TRUE(block2->is_valid());
+  EXPECT_TRUE(block3->is_valid());
+  auto *src = reinterpret_cast<byte *>(block1);
+  auto *dst = reinterpret_cast<byte *>(block2);
+  LIBC_NAMESPACE::memcpy(dst, src, sizeof(BlockType));
+  EXPECT_FALSE(block1->is_valid());
+  EXPECT_FALSE(block2->is_valid());
+  EXPECT_FALSE(block3->is_valid());
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
+  constexpr size_t kN = 1024;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  BlockType *block1 = *result;
+
+  void *ptr = block1->usable_space();
+  BlockType *block2 = BlockType::from_usable_space(ptr);
+  EXPECT_EQ(block1, block2);
+}
+
+TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
+  constexpr size_t kN = 1024;
+
+  array<byte, kN> bytes{};
+  auto result = BlockType::init(bytes);
+  ASSERT_TRUE(result.has_value());
+  const BlockType *block1 = *result;
+
+  const void *ptr = block1->usable_space();
+  const BlockType *block2 = BlockType::from_usable_space(ptr);
+  EXPECT_EQ(block1, block2);
+}
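
Note for reviewers (illustrative, not part of the patch): the allocate/free
round trip this API is meant to support looks roughly like the sketch below.
The `round_trip` function name, the 1024-byte region, and the 128-byte request
are arbitrary choices for the example.

@code{.cpp}
#include <stdint.h>

#include "src/__support/CPP/array.h"
#include "src/stdlib/block.h"

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::cpp::array;
using LIBC_NAMESPACE::cpp::byte;

// Carve one allocation out of a static region, then give it back.
bool round_trip() {
  alignas(Block<>::ALIGNMENT) static array<byte, 1024> region;

  // Claim the whole region as a single free block.
  auto init_result = Block<>::init(region);
  if (!init_result.has_value())
    return false;
  Block<> *block = *init_result;

  // Split off 128 usable bytes; the remainder becomes a new free block.
  auto split_result = Block<>::split(block, 128);
  if (!split_result.has_value())
    return false;
  block->mark_used();
  void *payload = block->usable_space(); // what malloc would return

  // On free, recover the block from the payload pointer; free() also merges
  // the block with its free neighbor(s).
  Block<> *to_free = Block<>::from_usable_space(payload);
  Block<>::free(to_free);
  return !to_free->used();
}
@endcode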