diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f2f9e58
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+target
+Cargo.lock
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..2073e6f
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,5 @@
+[package]
+authors = ["The Allocators WG"]
+name = "wg-allocators"
+version = "0.0.0"
+edition = "2018"
diff --git a/README.md b/README.md
index 1563325..6759f39 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,17 @@ To be notified of every issue and pull request in this repository, try GitHub’
 For lower-latency discussion, we have the [`t-libs/wg-allocators`] stream on Zulip.
 
+## Code
+In order to test different features, we mirrored some code from [rust-lang/rust].
+Currently, this includes `Alloc`, `AllocErr`, `CannotReallocInPlace`, `Excess`, `Layout`, `LayoutErr`, `Global`, and `Box`.
+Additionally, the following functions are adjusted to work with our `Layout`: `alloc`, `alloc_zeroed`, `dealloc`, `realloc`, and `handle_alloc_error`.
+
+However, a few things had to be adjusted; these changes should be reverted to match the `alloc` crate as soon as possible:
+- the `Drop` implementation of `Box` calls `box_free` directly.
+- for `Fn`, `FnOnce`, `FnMut`, and `FnBox`, `F` has to implement `Copy`.
+
+[rust-lang/rust]: https://github.com/rust-lang/rust
 [`std::alloc::Alloc`]: https://doc.rust-lang.org/1.34.0/std/alloc/trait.Alloc.html
 [RFC #1398]: https://github.com/rust-lang/rfcs/pull/1398 "Allocators, take III"
 [tracking issue #32838]: https://github.com/rust-lang/rust/issues/32838 "Allocator traits and std::heap"
diff --git a/src/alloc.rs b/src/alloc.rs
new file mode 100644
index 0000000..02c2946
--- /dev/null
+++ b/src/alloc.rs
@@ -0,0 +1,1225 @@
+//! Memory allocation APIs
+
+// #![stable(feature = "alloc_module", since = "1.28.0")]
+
+use core::cmp;
+use core::fmt;
+use core::mem;
+use core::usize;
+use core::ptr::{self, NonNull};
+use core::num::NonZeroUsize;
+use core::ptr::Unique;
+use core::intrinsics::{size_of_val, min_align_of_val};
+
+/// Represents the combination of a starting address and
+/// a total capacity of the returned block.
+// #[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Debug)]
+pub struct Excess(pub NonNull<u8>, pub usize);
+
+fn size_align<T>() -> (usize, usize) {
+    (mem::size_of::<T>(), mem::align_of::<T>())
+}
+
+/// Layout of a block of memory.
+///
+/// An instance of `Layout` describes a particular layout of memory.
+/// You build a `Layout` up as an input to give to an allocator.
+///
+/// All layouts have an associated non-negative size and a
+/// power-of-two alignment.
+///
+/// (Note however that layouts are *not* required to have positive
+/// size, even though many allocators require that all memory
+/// requests have positive size. A caller to the `Alloc::alloc`
+/// method must either ensure that conditions like this are met, or
+/// use specific allocators with looser requirements.)
+// #[stable(feature = "alloc_layout", since = "1.28.0")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+// #[lang = "alloc_layout"]
+pub struct Layout {
+    // size of the requested block of memory, measured in bytes.
+    size_: usize,
+
+    // alignment of the requested block of memory, measured in bytes.
+    // we ensure that this is always a power-of-two, because APIs
+    // like `posix_memalign` require it and it is a reasonable
+    // constraint to impose on Layout constructors.
+    //
+    // (However, we do not analogously require `align >= sizeof(void*)`,
+    // even though that is *also* a requirement of `posix_memalign`.)
+    align_: NonZeroUsize,
+}
+
+impl Into<rust_alloc::alloc::Layout> for Layout {
+    fn into(self) -> rust_alloc::alloc::Layout {
+        unsafe {
+            rust_alloc::alloc::Layout::from_size_align_unchecked(self.size(), self.align())
+        }
+    }
+}
+
+impl From<rust_alloc::alloc::Layout> for Layout {
+    fn from(layout: rust_alloc::alloc::Layout) -> Self {
+        unsafe {
+            Self::from_size_align_unchecked(layout.size(), layout.align())
+        }
+    }
+}
+
+impl Layout {
+    /// Constructs a `Layout` from a given `size` and `align`,
+    /// or returns `LayoutErr` if any of the following conditions
+    /// are not met:
+    ///
+    /// * `align` must not be zero,
+    ///
+    /// * `align` must be a power of two,
+    ///
+    /// * `size`, when rounded up to the nearest multiple of `align`,
+    ///   must not overflow (i.e., the rounded value must be less than
+    ///   `usize::MAX`).
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutErr> {
+        if !align.is_power_of_two() {
+            return Err(LayoutErr { private: () });
+        }
+
+        // (power-of-two implies align != 0.)
+
+        // Rounded up size is:
+        //   size_rounded_up = (size + align - 1) & !(align - 1);
+        //
+        // We know from above that align != 0. If adding (align - 1)
+        // does not overflow, then rounding up will be fine.
+        //
+        // Conversely, &-masking with !(align - 1) will subtract off
+        // only low-order-bits. Thus if overflow occurs with the sum,
+        // the &-mask cannot subtract enough to undo that overflow.
+        //
+        // Above implies that checking for summation overflow is both
+        // necessary and sufficient.
+        if size > usize::MAX - (align - 1) {
+            return Err(LayoutErr { private: () });
+        }
+
+        unsafe {
+            Ok(Layout::from_size_align_unchecked(size, align))
+        }
+    }
+
+    /// Creates a layout, bypassing all checks.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe as it does not verify the preconditions from
+    /// [`Layout::from_size_align`](#method.from_size_align).
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
+        Layout { size_: size, align_: NonZeroUsize::new_unchecked(align) }
+    }
+
+    /// The minimum size in bytes for a memory block of this layout.
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub fn size(&self) -> usize { self.size_ }
+
+    /// The minimum byte alignment for a memory block of this layout.
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub fn align(&self) -> usize { self.align_.get() }
+
+    /// Constructs a `Layout` suitable for holding a value of type `T`.
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub fn new<T>() -> Self {
+        let (size, align) = size_align::<T>();
+        // Note that the align is guaranteed by rustc to be a power of two and
+        // the size+align combo is guaranteed to fit in our address space. As a
+        // result use the unchecked constructor here to avoid inserting code
+        // that panics if it isn't optimized well enough.
+        debug_assert!(Layout::from_size_align(size, align).is_ok());
+        unsafe {
+            Layout::from_size_align_unchecked(size, align)
+        }
+    }
+
+    /// Produces layout describing a record that could be used to
+    /// allocate backing structure for `T` (which could be a trait
+    /// or other unsized type like a slice).
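+    ///
+    /// # Examples
+    ///
+    /// An illustrative doctest (not part of the mirrored upstream docs);
+    /// the exact values assume the usual size and alignment of `u32`:
+    ///
+    /// ```
+    /// # use wg_allocators::alloc::Layout;
+    /// let layout = Layout::for_value(&[1u32, 2, 3]);
+    /// assert_eq!(layout.size(), 12);
+    /// assert_eq!(layout.align(), std::mem::align_of::<u32>());
+    /// ```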
+    // #[stable(feature = "alloc_layout", since = "1.28.0")]
+    #[inline]
+    pub fn for_value<T: ?Sized>(t: &T) -> Self {
+        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+        // See rationale in `new` for why this is using an unsafe variant below
+        debug_assert!(Layout::from_size_align(size, align).is_ok());
+        unsafe {
+            Layout::from_size_align_unchecked(size, align)
+        }
+    }
+
+    /// Creates a layout describing the record that can hold a value
+    /// of the same layout as `self`, but that also is aligned to
+    /// alignment `align` (measured in bytes).
+    ///
+    /// If `self` already meets the prescribed alignment, then returns
+    /// `self`.
+    ///
+    /// Note that this method does not add any padding to the overall
+    /// size, regardless of whether the returned layout has a different
+    /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+    /// will *still* have size 16.
+    ///
+    /// Returns an error if the combination of `self.size()` and the given
+    /// `align` violates the conditions listed in
+    /// [`Layout::from_size_align`](#method.from_size_align).
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn align_to(&self, align: usize) -> Result<Self, LayoutErr> {
+        Layout::from_size_align(self.size(), cmp::max(self.align(), align))
+    }
+
+    /// Returns the amount of padding we must insert after `self`
+    /// to ensure that the following address will satisfy `align`
+    /// (measured in bytes).
+    ///
+    /// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
+    /// returns 3, because that is the minimum number of bytes of
+    /// padding required to get a 4-aligned address (assuming that the
+    /// corresponding memory block starts at a 4-aligned address).
+    ///
+    /// The return value of this function has no meaning if `align` is
+    /// not a power-of-two.
+    ///
+    /// Note that the utility of the returned value requires `align`
+    /// to be less than or equal to the alignment of the starting
+    /// address for the whole allocated block of memory. One way to
+    /// satisfy this constraint is to ensure `align <= self.align()`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn padding_needed_for(&self, align: usize) -> usize {
+        let len = self.size();
+
+        // Rounded up value is:
+        //   len_rounded_up = (len + align - 1) & !(align - 1);
+        // and then we return the padding difference: `len_rounded_up - len`.
+        //
+        // We use modular arithmetic throughout:
+        //
+        // 1. align is guaranteed to be > 0, so align - 1 is always
+        //    valid.
+        //
+        // 2. `len + align - 1` can overflow by at most `align - 1`,
+        //    so the &-mask with `!(align - 1)` will ensure that in the
+        //    case of overflow, `len_rounded_up` will itself be 0.
+        //    Thus the returned padding, when added to `len`, yields 0,
+        //    which trivially satisfies the alignment `align`.
+        //
+        // (Of course, attempts to allocate blocks of memory whose
+        // size and padding overflow in the above manner should cause
+        // the allocator to yield an error anyway.)
+
+        let len_rounded_up = len.wrapping_add(align).wrapping_sub(1)
+            & !align.wrapping_sub(1);
+        len_rounded_up.wrapping_sub(len)
+    }
+
+    /// Creates a layout by rounding the size of this layout up to a multiple
+    /// of the layout's alignment.
+    ///
+    /// Returns `Err` if the padded size would overflow.
+    ///
+    /// This is equivalent to adding the result of `padding_needed_for`
+    /// to the layout's current size.
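+    ///
+    /// # Examples
+    ///
+    /// An illustrative doctest (not part of the mirrored upstream docs),
+    /// matching the numbers used in the `padding_needed_for` docs above:
+    ///
+    /// ```
+    /// # use wg_allocators::alloc::Layout;
+    /// // A 9-byte, 4-aligned layout needs 3 bytes of trailing padding.
+    /// let layout = Layout::from_size_align(9, 4).unwrap();
+    /// assert_eq!(layout.padding_needed_for(4), 3);
+    /// assert_eq!(layout.pad_to_align().unwrap().size(), 12);
+    /// ```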
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn pad_to_align(&self) -> Result<Layout, LayoutErr> {
+        let pad = self.padding_needed_for(self.align());
+        let new_size = self.size().checked_add(pad)
+            .ok_or(LayoutErr { private: () })?;
+
+        Layout::from_size_align(new_size, self.align())
+    }
+
+    /// Creates a layout describing the record for `n` instances of
+    /// `self`, with a suitable amount of padding between each to
+    /// ensure that each instance is given its requested size and
+    /// alignment. On success, returns `(k, offs)` where `k` is the
+    /// layout of the array and `offs` is the distance between the start
+    /// of each element in the array.
+    ///
+    /// On arithmetic overflow, returns `LayoutErr`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> {
+        let padded_size = self.size().checked_add(self.padding_needed_for(self.align()))
+            .ok_or(LayoutErr { private: () })?;
+        let alloc_size = padded_size.checked_mul(n)
+            .ok_or(LayoutErr { private: () })?;
+
+        unsafe {
+            // self.align is already known to be valid and alloc_size has been
+            // padded already.
+            Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size))
+        }
+    }
+
+    /// Creates a layout describing the record for `self` followed by
+    /// `next`, including any necessary padding to ensure that `next`
+    /// will be properly aligned. Note that the result layout will
+    /// satisfy the alignment properties of both `self` and `next`.
+    ///
+    /// The resulting layout will be the same as that of a C struct containing
+    /// two fields with the layouts of `self` and `next`, in that order.
+    ///
+    /// Returns `Ok((k, offset))`, where `k` is the layout of the concatenated
+    /// record and `offset` is the relative location, in bytes, of the
+    /// start of the `next` embedded within the concatenated record
+    /// (assuming that the record itself starts at offset 0).
+    ///
+    /// On arithmetic overflow, returns `LayoutErr`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> {
+        let new_align = cmp::max(self.align(), next.align());
+        let pad = self.padding_needed_for(next.align());
+
+        let offset = self.size().checked_add(pad)
+            .ok_or(LayoutErr { private: () })?;
+        let new_size = offset.checked_add(next.size())
+            .ok_or(LayoutErr { private: () })?;
+
+        let layout = Layout::from_size_align(new_size, new_align)?;
+        Ok((layout, offset))
+    }
+
+    /// Creates a layout describing the record for `n` instances of
+    /// `self`, with no padding between each instance.
+    ///
+    /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+    /// that the repeated instances of `self` will be properly
+    /// aligned, even if a given instance of `self` is properly
+    /// aligned. In other words, if the layout returned by
+    /// `repeat_packed` is used to allocate an array, it is not
+    /// guaranteed that all elements in the array will be properly
+    /// aligned.
+    ///
+    /// On arithmetic overflow, returns `LayoutErr`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutErr> {
+        let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?;
+        Layout::from_size_align(size, self.align())
+    }
+
+    /// Creates a layout describing the record for `self` followed by
+    /// `next` with no additional padding between the two.
+    /// Since no padding is inserted, the alignment of `next` is irrelevant,
+    /// and is not incorporated *at all* into the resulting layout.
+    ///
+    /// On arithmetic overflow, returns `LayoutErr`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutErr> {
+        let new_size = self.size().checked_add(next.size())
+            .ok_or(LayoutErr { private: () })?;
+        let layout = Layout::from_size_align(new_size, self.align())?;
+        Ok(layout)
+    }
+
+    /// Creates a layout describing the record for a `[T; n]`.
+    ///
+    /// On arithmetic overflow, returns `LayoutErr`.
+    // #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+    #[inline]
+    pub fn array<T>(n: usize) -> Result<Self, LayoutErr> {
+        Layout::new::<T>()
+            .repeat(n)
+            .map(|(k, offs)| {
+                debug_assert!(offs == mem::size_of::<T>());
+                k
+            })
+    }
+}
+
+/// The parameters given to `Layout::from_size_align`
+/// or some other `Layout` constructor
+/// do not satisfy its documented constraints.
+// #[stable(feature = "alloc_layout", since = "1.28.0")]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct LayoutErr {
+    private: ()
+}
+
+// (we need this for downstream impl of trait Error)
+// #[stable(feature = "alloc_layout", since = "1.28.0")]
+impl fmt::Display for LayoutErr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid parameters to Layout::from_size_align")
+    }
+}
+
+/// The `AllocErr` error indicates an allocation failure
+/// that may be due to resource exhaustion or to
+/// something wrong when combining the given input arguments with this
+/// allocator.
+// #[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct AllocErr;
+
+// (we need this for downstream impl of trait Error)
+// #[unstable(feature = "allocator_api", issue = "32838")]
+impl fmt::Display for AllocErr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("memory allocation failed")
+    }
+}
+
+/// The `CannotReallocInPlace` error is used when `grow_in_place` or
+/// `shrink_in_place` were unable to reuse the given memory block for
+/// a requested layout.
+// #[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct CannotReallocInPlace;
+
+// #[unstable(feature = "allocator_api", issue = "32838")]
+impl CannotReallocInPlace {
+    pub fn description(&self) -> &str {
+        "cannot reallocate allocator's memory in place"
+    }
+}
+
+// (we need this for downstream impl of trait Error)
+// #[unstable(feature = "allocator_api", issue = "32838")]
+impl fmt::Display for CannotReallocInPlace {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.description())
+    }
+}
+
+/// An implementation of `Alloc` can allocate, reallocate, and
+/// deallocate arbitrary blocks of data described via `Layout`.
+///
+/// Some of the methods require that a memory block be *currently
+/// allocated* via an allocator.
+/// This means that:
+///
+/// * the starting address for that memory block was returned by a
+///   previous call to an allocation method (`alloc`,
+///   `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or
+///   reallocation method (`realloc`, `realloc_excess`, or
+///   `realloc_array`), and
+///
+/// * the memory block has not been subsequently deallocated, where
+///   blocks are deallocated either by being passed to a deallocation
+///   method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being
+///   passed to a reallocation method (see above) that returns `Ok`.
+///
+/// A note regarding zero-sized types and zero-sized layouts: many
+/// methods in the `Alloc` trait state that allocation requests
+/// must be non-zero size, or else undefined behavior can result.
+///
+/// * However, some higher-level allocation methods (`alloc_one`,
+///   `alloc_array`) are well-defined on zero-sized types and can
+///   optionally support them: it is left up to the implementor
+///   whether to return `Err`, or to return `Ok` with some pointer.
+///
+/// * If an `Alloc` implementation chooses to return `Ok` in this
+///   case (i.e., the pointer denotes a zero-sized inaccessible block)
+///   then that returned pointer must be considered "currently
+///   allocated". On such an allocator, *all* methods that take
+///   currently-allocated pointers as inputs must accept these
+///   zero-sized pointers, *without* causing undefined behavior.
+///
+/// * In other words, if a zero-sized pointer can flow out of an
+///   allocator, then that allocator must likewise accept that pointer
+///   flowing back into its deallocation and reallocation methods.
+///
+/// Some of the methods require that a layout *fit* a memory block.
+/// What it means for a layout to "fit" a memory block (or
+/// equivalently, for a memory block to "fit" a layout) is that the
+/// following two conditions must hold:
+///
+/// 1. The block's starting address must be aligned to `layout.align()`.
+///
+/// 2. The block's size must fall in the range `[use_min, use_max]`, where:
+///
+///    * `use_min` is `self.usable_size(layout).0`, and
+///
+///    * `use_max` is the capacity that was (or would have been)
+///      returned when (if) the block was allocated via a call to
+///      `alloc_excess` or `realloc_excess`.
+///
+/// Note that:
+///
+/// * the size of the layout most recently used to allocate the block
+///   is guaranteed to be in the range `[use_min, use_max]`, and
+///
+/// * a lower-bound on `use_max` can be safely approximated by a call to
+///   `usable_size`.
+///
+/// * if a layout `k` fits a memory block (denoted by `ptr`)
+///   currently allocated via an allocator `a`, then it is legal to
+///   use that layout to deallocate it, i.e., `a.dealloc(ptr, k);`.
+///
+/// # Safety
+///
+/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and
+/// implementors must ensure that they adhere to these contracts:
+///
+/// * Pointers returned from allocation functions must point to valid memory and
+///   retain their validity until the `Alloc` instance itself is dropped.
+///
+/// * `Layout` queries and calculations in general must be correct. Callers of
+///   this trait are allowed to rely on the contracts defined on each method,
+///   and implementors must ensure such contracts remain true.
+///
+/// Note that this list may get tweaked over time as clarifications are made in
+/// the future.
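+///
+/// # Examples
+///
+/// An illustrative sketch (not part of the mirrored upstream docs) of the
+/// basic allocate/write/deallocate contract, using the `Global` allocator
+/// defined later in this module:
+///
+/// ```
+/// # use wg_allocators::alloc::{Alloc, Global, Layout};
+/// unsafe {
+///     let mut a = Global;
+///     // `layout` has non-zero size, as `alloc` requires.
+///     let layout = Layout::new::<u32>();
+///     let ptr = a.alloc(layout).expect("allocation failed");
+///     *ptr.cast::<u32>().as_ptr() = 42;
+///     a.dealloc(ptr, layout);
+/// }
+/// ```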
+// #[unstable(feature = "allocator_api", issue = "32838")]
+pub unsafe trait Alloc {
+
+    // (Note: some existing allocators have unspecified but well-defined
+    // behavior in response to a zero size allocation request;
+    // e.g., in C, `malloc` of 0 will either return a null pointer or a
+    // unique pointer, but will not have arbitrary undefined
+    // behavior.
+    // However, in jemalloc for example,
+    // `mallocx(0)` is documented as undefined behavior.)
+
+    /// Returns a pointer meeting the size and alignment guarantees of
+    /// `layout`.
+    ///
+    /// If this method returns an `Ok(addr)`, then the `addr` returned
+    /// will be a non-null address pointing to a block of storage
+    /// suitable for holding an instance of `layout`.
+    ///
+    /// The returned block of storage may or may not have its contents
+    /// initialized. (Extension subtraits might restrict this
+    /// behavior, e.g., to ensure initialization to particular sets of
+    /// bit patterns.)
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure that `layout` has non-zero size.
+    ///
+    /// (Extension subtraits might provide more specific bounds on
+    /// behavior, e.g., guarantee a sentinel address or a null pointer
+    /// in response to a zero-size allocation request.)
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet allocator's size or alignment
+    /// constraints.
+    ///
+    /// Implementations are encouraged to return `Err` on memory
+    /// exhaustion rather than panicking or aborting, but this is not
+    /// a strict requirement. (Specifically: it is *legal* to
+    /// implement this trait atop an underlying native allocation
+    /// library that aborts on memory exhaustion.)
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr>;
+
+    /// Deallocate the memory referenced by `ptr`.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via
+    ///   this allocator,
+    ///
+    /// * `layout` must *fit* that block of memory,
+    ///
+    /// * In addition to fitting the block of memory `layout`, the
+    ///   alignment of the `layout` must match the alignment used
+    ///   to allocate that block of memory.
+    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout);
+
+    // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS ==
+    // usable_size
+
+    /// Returns bounds on the guaranteed usable size of a successful
+    /// allocation created with the specified `layout`.
+    ///
+    /// In particular, if one has a memory block allocated via a given
+    /// allocator `a` and layout `k` where `a.usable_size(k)` returns
+    /// `(l, u)`, then one can pass that block to `a.dealloc()` with a
+    /// layout in the size range [l, u].
+    ///
+    /// (All implementors of `usable_size` must ensure that
+    /// `l <= k.size() <= u`)
+    ///
+    /// Both the lower- and upper-bounds (`l` and `u` respectively)
+    /// are provided, because an allocator based on size classes could
+    /// misbehave if one attempts to deallocate a block without
+    /// providing a correct value for its size (i.e., one within the
+    /// range `[l, u]`).
+    ///
+    /// Clients who wish to make use of excess capacity are encouraged
+    /// to use the `alloc_excess` and `realloc_excess` methods instead,
+    /// as this method is constrained to report conservative values that
+    /// serve as valid bounds for *all possible* allocation method
+    /// calls.
+    ///
+    /// However, for clients that do not wish to track the capacity
+    /// returned by `alloc_excess` locally, this method is likely to
+    /// produce useful results.
+    #[inline]
+    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+        (layout.size(), layout.size())
+    }
+
+    // == METHODS FOR MEMORY REUSE ==
+    // realloc, alloc_excess, realloc_excess
+
+    /// Returns a pointer suitable for holding data described by
+    /// a new layout with `layout`’s alignment and a size given
+    /// by `new_size`. To accomplish this, the allocator may extend
+    /// or shrink the allocation referenced by `ptr` to fit the new
+    /// layout.
+    ///
+    /// If this returns `Ok`, then ownership of the memory block
+    /// referenced by `ptr` has been transferred to this
+    /// allocator. The memory may or may not have been freed, and
+    /// should be considered unusable (unless of course it was
+    /// transferred back to the caller again via the return value of
+    /// this method).
+    ///
+    /// If this method returns `Err`, then ownership of the memory
+    /// block has not been transferred to this allocator, and the
+    /// contents of the memory block are unaltered.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above). (The `new_size`
+    ///   argument need not fit it.)
+    ///
+    /// * `new_size` must be greater than zero.
+    ///
+    /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
+    ///   must not overflow (i.e., the rounded value must be less than `usize::MAX`).
+    ///
+    /// (Extension subtraits might provide more specific bounds on
+    /// behavior, e.g., guarantee a sentinel address or a null pointer
+    /// in response to a zero-size allocation request.)
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` only if the new layout
+    /// does not meet the allocator's size
+    /// and alignment constraints, or if reallocation
+    /// otherwise fails.
+    ///
+    /// Implementations are encouraged to return `Err` on memory
+    /// exhaustion rather than panicking or aborting, but this is not
+    /// a strict requirement. (Specifically: it is *legal* to
+    /// implement this trait atop an underlying native allocation
+    /// library that aborts on memory exhaustion.)
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
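+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the mirrored upstream docs) of
+    /// growing a block with the `Global` allocator; the first four bytes
+    /// are preserved across the reallocation:
+    ///
+    /// ```
+    /// # use wg_allocators::alloc::{Alloc, Global, Layout};
+    /// unsafe {
+    ///     let mut a = Global;
+    ///     let layout = Layout::from_size_align(4, 4).unwrap();
+    ///     let ptr = a.alloc(layout).expect("alloc failed");
+    ///     let ptr = a.realloc(ptr, layout, 8).expect("realloc failed");
+    ///     a.dealloc(ptr, Layout::from_size_align(8, 4).unwrap());
+    /// }
+    /// ```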
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn realloc(&mut self,
+                      ptr: NonNull<u8>,
+                      layout: Layout,
+                      new_size: usize) -> Result<NonNull<u8>, AllocErr> {
+        let old_size = layout.size();
+
+        if new_size >= old_size {
+            if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_size) {
+                return Ok(ptr);
+            }
+        } else if new_size < old_size {
+            if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_size) {
+                return Ok(ptr);
+            }
+        }
+
+        // otherwise, fall back on alloc + copy + dealloc.
+        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+        let result = self.alloc(new_layout);
+        if let Ok(new_ptr) = result {
+            ptr::copy_nonoverlapping(ptr.as_ptr(),
+                                     new_ptr.as_ptr(),
+                                     cmp::min(old_size, new_size));
+            self.dealloc(ptr, layout);
+        }
+        result
+    }
+
+    /// Behaves like `alloc`, but also ensures that the contents
+    /// are set to zero before being returned.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe for the same reasons that `alloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet allocator's size or alignment
+    /// constraints, just as in `alloc`.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+        let size = layout.size();
+        let p = self.alloc(layout);
+        if let Ok(p) = p {
+            ptr::write_bytes(p.as_ptr(), 0, size);
+        }
+        p
+    }
+
+    /// Behaves like `alloc`, but also returns the whole size of
+    /// the returned block. For some `layout` inputs, like arrays, this
+    /// may include extra storage usable for additional data.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe for the same reasons that `alloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet allocator's size or alignment
+    /// constraints, just as in `alloc`.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
+        let usable_size = self.usable_size(&layout);
+        self.alloc(layout).map(|p| Excess(p, usable_size.1))
+    }
+
+    /// Behaves like `realloc`, but also returns the whole size of
+    /// the returned block. For some `layout` inputs, like arrays, this
+    /// may include extra storage usable for additional data.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe for the same reasons that `realloc` is.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `layout` does not meet allocator's size or alignment
+    /// constraints, just as in `realloc`.
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
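+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the mirrored upstream docs);
+    /// the reported usable size is at least the requested `new_size`:
+    ///
+    /// ```
+    /// # use wg_allocators::alloc::{Alloc, Excess, Global, Layout};
+    /// unsafe {
+    ///     let mut a = Global;
+    ///     let layout = Layout::array::<u8>(8).unwrap();
+    ///     let ptr = a.alloc(layout).expect("alloc failed");
+    ///     let Excess(ptr, usable) = a.realloc_excess(ptr, layout, 16).expect("realloc failed");
+    ///     assert!(usable >= 16);
+    ///     a.dealloc(ptr, Layout::array::<u8>(16).unwrap());
+    /// }
+    /// ```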
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn realloc_excess(&mut self,
+                             ptr: NonNull<u8>,
+                             layout: Layout,
+                             new_size: usize) -> Result<Excess, AllocErr> {
+        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+        let usable_size = self.usable_size(&new_layout);
+        self.realloc(ptr, layout, new_size)
+            .map(|p| Excess(p, usable_size.1))
+    }
+
+    /// Attempts to extend the allocation referenced by `ptr` to fit `new_size`.
+    ///
+    /// If this returns `Ok`, then the allocator has asserted that the
+    /// memory block referenced by `ptr` now fits `new_size`, and thus can
+    /// be used to carry data of a layout of that size and same alignment as
+    /// `layout`. (The allocator is allowed to
+    /// expend effort to accomplish this, such as extending the memory block to
+    /// include successor blocks, or virtual memory tricks.)
+    ///
+    /// Regardless of what this method returns, ownership of the
+    /// memory block referenced by `ptr` has not been transferred, and
+    /// the contents of the memory block are unaltered.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above); note the
+    ///   `new_size` argument need not fit it,
+    ///
+    /// * `new_size` must not be less than `layout.size()`,
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err(CannotReallocInPlace)` when the allocator is
+    /// unable to assert that the memory block referenced by `ptr`
+    /// could fit the requested `new_size`.
+    ///
+    /// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error`
+    /// function; clients are expected either to be able to recover from
+    /// `grow_in_place` failures without aborting, or to fall back on
+    /// another reallocation method before resorting to an abort.
+    unsafe fn grow_in_place(&mut self,
+                            ptr: NonNull<u8>,
+                            layout: Layout,
+                            new_size: usize) -> Result<(), CannotReallocInPlace> {
+        let _ = ptr; // this default implementation doesn't care about the actual address.
+        debug_assert!(new_size >= layout.size());
+        let (_l, u) = self.usable_size(&layout);
+        // _l <= layout.size()                       [guaranteed by usable_size()]
+        //       layout.size() <= new_layout.size()  [required by this method]
+        if new_size <= u {
+            Ok(())
+        } else {
+            Err(CannotReallocInPlace)
+        }
+    }
+
+    /// Attempts to shrink the allocation referenced by `ptr` to fit `new_size`.
+    ///
+    /// If this returns `Ok`, then the allocator has asserted that the
+    /// memory block referenced by `ptr` now fits `new_size`, and
+    /// thus can only be used to carry data of that smaller
+    /// layout. (The allocator is allowed to take advantage of this,
+    /// carving off portions of the block for reuse elsewhere.) The
+    /// truncated contents of the block within the smaller layout are
+    /// unaltered, and ownership of the block has not been transferred.
+    ///
+    /// If this returns `Err`, then the memory block is considered to
+    /// still represent the original (larger) `layout`. None of the
+    /// block has been carved off for reuse elsewhere, ownership of
+    /// the memory block has not been transferred, and the contents of
+    /// the memory block are unaltered.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * `layout` must *fit* the `ptr` (see above); note the
+    ///   `new_size` argument need not fit it,
+    ///
+    /// * `new_size` must not be greater than `layout.size()`
+    ///   (and must be greater than zero),
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err(CannotReallocInPlace)` when the allocator is
+    /// unable to assert that the memory block referenced by `ptr`
+    /// could fit the requested `new_size`.
+    ///
+    /// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error`
+    /// function; clients are expected either to be able to recover from
+    /// `shrink_in_place` failures without aborting, or to fall back
+    /// on another reallocation method before resorting to an abort.
+    unsafe fn shrink_in_place(&mut self,
+                              ptr: NonNull<u8>,
+                              layout: Layout,
+                              new_size: usize) -> Result<(), CannotReallocInPlace> {
+        let _ = ptr; // this default implementation doesn't care about the actual address.
+        debug_assert!(new_size <= layout.size());
+        let (l, _u) = self.usable_size(&layout);
+        //                      layout.size() <= _u  [guaranteed by usable_size()]
+        // new_layout.size() <= layout.size()        [required by this method]
+        if l <= new_size {
+            Ok(())
+        } else {
+            Err(CannotReallocInPlace)
+        }
+    }
+
+
+    // == COMMON USAGE PATTERNS ==
+    // alloc_one, dealloc_one, alloc_array, realloc_array, dealloc_array
+
+    /// Allocates a block suitable for holding an instance of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+    /// must be considered "currently allocated" and must be
+    /// acceptable input to methods such as `realloc` or `dealloc`,
+    /// *even if* `T` is a zero-sized type. In other words, if your
+    /// `Alloc` implementation overrides this method in a manner
+    /// that can return a zero-sized `ptr`, then all reallocation and
+    /// deallocation methods need to be similarly overridden to accept
+    /// such values as input.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `T` does not meet allocator's size or alignment constraints.
+    ///
+    /// For zero-sized `T`, may return either of `Ok` or `Err`, but
+    /// will *not* yield undefined behavior.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    fn alloc_one<T>(&mut self) -> Result<NonNull<T>, AllocErr>
+        where Self: Sized
+    {
+        let k = Layout::new::<T>();
+        if k.size() > 0 {
+            unsafe { self.alloc(k).map(|p| p.cast()) }
+        } else {
+            Err(AllocErr)
+        }
+    }
+
+    /// Deallocates a block suitable for holding an instance of `T`.
+    ///
+    /// The given block must have been produced by this allocator,
+    /// and must be suitable for storing a `T` (in terms of alignment
+    /// as well as minimum and maximum size); otherwise yields
+    /// undefined behavior.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure both:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via this allocator
+    ///
+    /// * the layout of `T` must *fit* that block of memory.
+    unsafe fn dealloc_one<T>(&mut self, ptr: NonNull<T>)
+        where Self: Sized
+    {
+        let k = Layout::new::<T>();
+        if k.size() > 0 {
+            self.dealloc(ptr.cast(), k);
+        }
+    }
+
+    /// Allocates a block suitable for holding `n` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+    /// must be considered "currently allocated" and must be
+    /// acceptable input to methods such as `realloc` or `dealloc`,
+    /// *even if* `T` is a zero-sized type. In other words, if your
+    /// `Alloc` implementation overrides this method in a manner
+    /// that can return a zero-sized `ptr`, then all reallocation and
+    /// deallocation methods need to be similarly overridden to accept
+    /// such values as input.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `[T; n]` does not meet allocator's size or alignment
+    /// constraints.
+    ///
+    /// For zero-sized `T` or `n == 0`, may return either of `Ok` or
+    /// `Err`, but will *not* yield undefined behavior.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    ///
+    /// Clients wishing to abort computation in response to an
+    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    fn alloc_array<T>(&mut self, n: usize) -> Result<NonNull<T>, AllocErr>
+        where Self: Sized
+    {
+        match Layout::array::<T>(n) {
+            Ok(ref layout) if layout.size() > 0 => {
+                unsafe {
+                    self.alloc(layout.clone()).map(|p| p.cast())
+                }
+            }
+            _ => Err(AllocErr),
+        }
+    }
+
+    /// Reallocates a block previously suitable for holding `n_old`
+    /// instances of `T`, returning a block suitable for holding
+    /// `n_new` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// The returned block is suitable for passing to the
+    /// `alloc`/`realloc` methods of this allocator.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure all of the following:
+    ///
+    /// * `ptr` must be currently allocated via this allocator,
+    ///
+    /// * the layout of `[T; n_old]` must *fit* that block of memory.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or
+    /// `[T; n_new]` does not meet allocator's size or alignment
+    /// constraints.
+    ///
+    /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or
+    /// `Err`, but will *not* yield undefined behavior.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    ///
+    /// Clients wishing to abort computation in response to a
+    /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
+    /// rather than directly invoking `panic!` or similar.
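+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the mirrored upstream docs):
+    /// growing an array allocation from two to four `u32` elements:
+    ///
+    /// ```
+    /// # use wg_allocators::alloc::{Alloc, Global};
+    /// unsafe {
+    ///     let mut a = Global;
+    ///     let ptr = a.alloc_array::<u32>(2).expect("alloc failed");
+    ///     let ptr = a.realloc_array(ptr, 2, 4).expect("realloc failed");
+    ///     a.dealloc_array(ptr, 4).expect("dealloc failed");
+    /// }
+    /// ```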
+    ///
+    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+    unsafe fn realloc_array<T>(&mut self,
+                               ptr: NonNull<T>,
+                               n_old: usize,
+                               n_new: usize) -> Result<NonNull<T>, AllocErr>
+        where Self: Sized
+    {
+        match (Layout::array::<T>(n_old), Layout::array::<T>(n_new)) {
+            (Ok(ref k_old), Ok(ref k_new)) if k_old.size() > 0 && k_new.size() > 0 => {
+                debug_assert!(k_old.align() == k_new.align());
+                self.realloc(ptr.cast(), k_old.clone(), k_new.size()).map(NonNull::cast)
+            }
+            _ => {
+                Err(AllocErr)
+            }
+        }
+    }
+
+    /// Deallocates a block suitable for holding `n` instances of `T`.
+    ///
+    /// Captures a common usage pattern for allocators.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because undefined behavior can result
+    /// if the caller does not ensure both:
+    ///
+    /// * `ptr` must denote a block of memory currently allocated via this allocator
+    ///
+    /// * the layout of `[T; n]` must *fit* that block of memory.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either `[T; n]` or the given
+    /// memory block does not meet allocator's size or alignment
+    /// constraints.
+    ///
+    /// Always returns `Err` on arithmetic overflow.
+    unsafe fn dealloc_array<T>(&mut self, ptr: NonNull<T>, n: usize) -> Result<(), AllocErr>
+        where Self: Sized
+    {
+        match Layout::array::<T>(n) {
+            Ok(ref k) if k.size() > 0 => {
+                Ok(self.dealloc(ptr.cast(), k.clone()))
+            }
+            _ => {
+                Err(AllocErr)
+            }
+        }
+    }
+}
+
+/// The global memory allocator.
+///
+/// This type implements the [`Alloc`] trait by forwarding calls
+/// to the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// Note: while this type is unstable, the functionality it provides can be
+/// accessed through the [free functions in `alloc`](index.html#functions).
+// #[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, Default, Debug)]
+pub struct Global;
+
+/// Allocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc` method
+/// of the [`Global`] type when it and the [`Alloc`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc`].
+///
+/// # Examples
+///
+/// ```
+/// use wg_allocators::alloc::{alloc, dealloc, Layout};
+///
+/// unsafe {
+///     let layout = Layout::new::<u16>();
+///     let ptr = alloc(layout);
+///
+///     *(ptr as *mut u16) = 42;
+///     assert_eq!(*(ptr as *mut u16), 42);
+///
+///     dealloc(ptr, layout);
+/// }
+/// ```
+// #[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+    rust_alloc::alloc::alloc(layout.into())
+}
+
+/// Deallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `dealloc` method
+/// of the [`Global`] type when it and the [`Alloc`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::dealloc`].
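+///
+/// # Examples
+///
+/// An illustrative sketch (not part of the mirrored upstream docs); the
+/// layout passed to `dealloc` must fit the one used for the original
+/// allocation:
+///
+/// ```
+/// use wg_allocators::alloc::{alloc, dealloc, Layout};
+///
+/// unsafe {
+///     let layout = Layout::array::<u8>(32).unwrap();
+///     let ptr = alloc(layout);
+///     if !ptr.is_null() {
+///         dealloc(ptr, layout);
+///     }
+/// }
+/// ```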
+// #[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
+    rust_alloc::alloc::dealloc(ptr, layout.into())
+}
+
+/// Reallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::realloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `realloc` method
+/// of the [`Global`] type when it and the [`Alloc`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::realloc`].
+// #[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+    rust_alloc::alloc::realloc(ptr, layout.into(), new_size)
+}
+
+/// Allocate zero-initialized memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
+/// of the [`Global`] type when it and the [`Alloc`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc_zeroed`].
+///
+/// # Examples
+///
+/// ```
+/// use wg_allocators::alloc::{alloc_zeroed, dealloc, Layout};
+///
+/// unsafe {
+///     let layout = Layout::new::<u16>();
+///     let ptr = alloc_zeroed(layout);
+///
+///     assert_eq!(*(ptr as *mut u16), 0);
+///
+///     dealloc(ptr, layout);
+/// }
+/// ```
+// #[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+    rust_alloc::alloc::alloc_zeroed(layout.into())
+}
+
+// #[unstable(feature = "allocator_api", issue = "32838")]
+unsafe impl Alloc for Global {
+    #[inline]
+    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+        NonNull::new(alloc(layout)).ok_or(AllocErr)
+    }
+
+    #[inline]
+    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+        dealloc(ptr.as_ptr(), layout)
+    }
+
+    #[inline]
+    unsafe fn realloc(&mut self,
+                      ptr: NonNull<u8>,
+                      layout: Layout,
+                      new_size: usize)
+        -> Result<NonNull<u8>, AllocErr>
+    {
+        NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr)
+    }
+
+    #[inline]
+    unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+        NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr)
+    }
+}
+
+#[inline]
+pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
+    let ptr = ptr.as_ptr();
+    let size = size_of_val(&*ptr);
+    let align = min_align_of_val(&*ptr);
+    // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
+    if size != 0 {
+        let layout = Layout::from_size_align_unchecked(size, align);
+        dealloc(ptr as *mut u8, layout);
+    }
+}
+
+/// Abort on memory allocation error or failure.
+///
+/// Callers of memory allocation APIs wishing to abort computation
+/// in response to an allocation error are encouraged to call this function,
+/// rather than directly invoking `panic!` or similar.
+///
+/// The default behavior of this function is to print a message to standard error
+/// and abort the process.
+/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
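+///
+/// # Examples
+///
+/// An illustrative sketch (not part of the mirrored upstream docs) of the
+/// usual "allocate or abort" pattern:
+///
+/// ```no_run
+/// use wg_allocators::alloc::{alloc, dealloc, handle_alloc_error, Layout};
+///
+/// unsafe {
+///     let layout = Layout::new::<u64>();
+///     let ptr = alloc(layout);
+///     if ptr.is_null() {
+///         handle_alloc_error(layout);
+///     }
+///     dealloc(ptr, layout);
+/// }
+/// ```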
+///
+/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
+/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
+// #[stable(feature = "global_alloc", since = "1.28.0")]
+// #[rustc_allocator_nounwind]
+pub fn handle_alloc_error(layout: Layout) -> ! {
+    rust_alloc::alloc::handle_alloc_error(layout.into())
+}
\ No newline at end of file
diff --git a/src/boxed.rs b/src/boxed.rs
new file mode 100644
index 0000000..4f5a3ad
--- /dev/null
+++ b/src/boxed.rs
@@ -0,0 +1,951 @@
+//! A pointer type for heap allocation.
+//!
+//! `Box<T>`, casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope.
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! `Box::<T>::from_raw(value)`. Conversely, the memory backing a `value: *mut
+//! T` obtained from `Box::<T>::into_raw` may be deallocated using the
+//! [`Global`] allocator with `Layout::for_value(&*value)`.
+//!
+//! # Examples
+//!
+//! Move a value from the stack to the heap by creating a [`Box`]:
+//!
+//! ```
+//! # use wg_allocators::boxed::Box;
+//! let val: u8 = 5;
+//! let boxed: Box<u8> = Box::new(val);
+//! ```
+//!
+//! Move a value from a [`Box`] back to the stack by [dereferencing]:
+//!
+//! ```
+//! # use wg_allocators::boxed::Box;
+//! let boxed: Box<u8> = Box::new(5);
+//! let val: u8 = *boxed;
+//! ```
+//!
+//! Creating a recursive data structure:
+//!
+//! ```
+//! # use wg_allocators::boxed::Box;
+//! #[derive(Debug)]
+//! enum List<T> {
+//!     Cons(T, Box<List<T>>),
+//!     Nil,
+//! }
+//!
+//! fn main() {
+//!     let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
+//!     println!("{:?}", list);
+//! }
+//! ```
+//!
+//! This will print `Cons(1, Cons(2, Nil))`.
+//!
+//! Recursive structures must be boxed, because if the definition of `Cons`
+//! looked like this:
+//!
+//! ```compile_fail,E0072
+//! # enum List<T> {
+//! Cons(T, List<T>),
+//! # }
+//! ```
+//!
+//! It wouldn't work. This is because the size of a `List` depends on how many
+//! elements are in the list, and so we don't know how much memory to allocate
+//! for a `Cons`. By introducing a `Box<T>`, which has a defined size, we know how
+//! big `Cons` needs to be.
+//!
+//! [dereferencing]: ../../std/ops/trait.Deref.html
+//! [`Box`]: struct.Box.html
+//! [`Global`]: ../alloc/struct.Global.html
+//! [`Layout`]: ../alloc/struct.Layout.html
+
+// #![stable(feature = "rust1", since = "1.0.0")]
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::From;
+use core::fmt;
+use core::future::Future;
+use core::hash::{Hash, Hasher};
+use core::iter::{Iterator, FromIterator, FusedIterator};
+use core::marker::{Unpin, Unsize};
+use core::mem;
+use core::pin::Pin;
+use core::ops::{
+    CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState
+};
+use core::ptr::{self, NonNull, Unique};
+use core::task::{Context, Poll};
+
+use rust_alloc::vec::Vec;
+use rust_alloc::raw_vec::RawVec;
+use rust_alloc::str::from_boxed_utf8_unchecked;
+
+/// A pointer type for heap allocation.
+///
+/// See the [module-level documentation](../../std/boxed/index.html) for more.
+// #[lang = "owned_box"]
+// #[fundamental]
+// #[stable(feature = "rust1", since = "1.0.0")]
+pub struct Box<T: ?Sized>(Unique<T>);
+
+impl<T> Box<T> {
+    /// Allocates memory on the heap and then places `x` into it.
+    ///
+    /// This doesn't actually allocate if `T` is zero-sized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let five = Box::new(5);
+    /// ```
+    // #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline(always)]
+    pub fn new(x: T) -> Box<T> {
+        Box(rust_alloc::boxed::Box::into_unique(box x))
+    }
+
+    /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
+    /// `x` will be pinned in memory and unable to be moved.
+    // #[stable(feature = "pin", since = "1.33.0")]
+    #[inline(always)]
+    pub fn pin(x: T) -> Pin<Box<T>> {
+        Box::new(x).into()
+    }
+}
+
+impl<T: ?Sized> Box<T> {
+    /// Constructs a box from a raw pointer.
+    ///
+    /// After calling this function, the raw pointer is owned by the
+    /// resulting `Box`. Specifically, the `Box` destructor will call
+    /// the destructor of `T` and free the allocated memory. Since the
+    /// way `Box` allocates and releases memory is unspecified, the
+    /// only valid pointer to pass to this function is the one taken
+    /// from another `Box` via the [`Box::into_raw`] function.
+    ///
+    /// This function is unsafe because improper use may lead to
+    /// memory problems. For example, a double-free may occur if the
+    /// function is called twice on the same raw pointer.
+    ///
+    /// [`Box::into_raw`]: struct.Box.html#method.into_raw
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let x = Box::new(5);
+    /// let ptr = Box::into_raw(x);
+    /// let x = unsafe { Box::from_raw(ptr) };
+    /// ```
+    // #[stable(feature = "box_raw", since = "1.4.0")]
+    #[inline]
+    pub unsafe fn from_raw(raw: *mut T) -> Self {
+        Box(Unique::new_unchecked(raw))
+    }
+
+    /// Consumes the `Box`, returning a wrapped raw pointer.
+    ///
+    /// The pointer will be properly aligned and non-null.
+    ///
+    /// After calling this function, the caller is responsible for the
+    /// memory previously managed by the `Box`. In particular, the
+    /// caller should properly destroy `T` and release the memory. The
+    /// proper way to do so is to convert the raw pointer back into a
+    /// `Box` with the [`Box::from_raw`] function.
+    ///
+    /// Note: this is an associated function, which means that you have
+    /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+    /// is so that there is no conflict with a method on the inner type.
+    ///
+    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let x = Box::new(5);
+    /// let ptr = Box::into_raw(x);
+    /// ```
+    // #[stable(feature = "box_raw", since = "1.4.0")]
+    #[inline]
+    pub fn into_raw(b: Box<T>) -> *mut T {
+        Box::into_raw_non_null(b).as_ptr()
+    }
+
+    /// Consumes the `Box`, returning the wrapped pointer as `NonNull<T>`.
+    ///
+    /// After calling this function, the caller is responsible for the
+    /// memory previously managed by the `Box`. In particular, the
+    /// caller should properly destroy `T` and release the memory. The
+    /// proper way to do so is to convert the `NonNull<T>` pointer
+    /// into a raw pointer and back into a `Box` with the [`Box::from_raw`]
+    /// function.
+    ///
+    /// Note: this is an associated function, which means that you have
+    /// to call it as `Box::into_raw_non_null(b)`
+    /// instead of `b.into_raw_non_null()`. This
+    /// is so that there is no conflict with a method on the inner type.
+    ///
+    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(box_into_raw_non_null)]
+    /// # use wg_allocators::boxed::Box;
+    ///
+    /// fn main() {
+    ///     let x = Box::new(5);
+    ///     let ptr = Box::into_raw_non_null(x);
+    /// }
+    /// ```
+    // #[unstable(feature = "box_into_raw_non_null", issue = "47336")]
+    #[inline]
+    pub fn into_raw_non_null(b: Box<T>) -> NonNull<T> {
+        Box::into_unique(b).into()
+    }
+
+    // #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")]
+    #[inline]
+    #[doc(hidden)]
+    pub fn into_unique(mut b: Box<T>) -> Unique<T> {
+        // Box is kind-of a library type, but recognized as a "unique pointer" by
+        // Stacked Borrows. This function here corresponds to "reborrowing to
+        // a raw pointer", but there is no actual reborrow here -- so
+        // without some care, the pointer we are returning here still carries
+        // the `Uniq` tag. We round-trip through a mutable reference to avoid that.
+        let unique = unsafe { b.0.as_mut() as *mut T };
+        mem::forget(b);
+        unsafe { Unique::new_unchecked(unique) }
+    }
+
+    /// Consumes and leaks the `Box`, returning a mutable reference,
+    /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
+    /// `'a`. If the type has only static references, or none at all, then this
+    /// may be chosen to be `'static`.
+    ///
+    /// This function is mainly useful for data that lives for the remainder of
+    /// the program's life. Dropping the returned reference will cause a memory
+    /// leak. If this is not acceptable, the reference should first be wrapped
+    /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
+    /// then be dropped which will properly destroy `T` and release the
+    /// allocated memory.
+    ///
+    /// Note: this is an associated function, which means that you have
+    /// to call it as `Box::leak(b)` instead of `b.leak()`. This
+    /// is so that there is no conflict with a method on the inner type.
+    ///
+    /// [`Box::from_raw`]: struct.Box.html#method.from_raw
+    ///
+    /// # Examples
+    ///
+    /// Simple usage:
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// fn main() {
+    ///     let x = Box::new(41);
+    ///     let static_ref: &'static mut usize = Box::leak(x);
+    ///     *static_ref += 1;
+    ///     assert_eq!(*static_ref, 42);
+    /// }
+    /// ```
+    ///
+    /// Unsized data:
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// fn main() {
+    ///     let x = vec![1, 2, 3].into_boxed_slice();
+    ///     # let x = unsafe { Box::from_raw(std::boxed::Box::into_raw(x)) };
+    ///     let static_ref = Box::leak(x);
+    ///     static_ref[0] = 4;
+    ///     assert_eq!(*static_ref, [4, 2, 3]);
+    /// }
+    /// ```
+    // #[stable(feature = "box_leak", since = "1.26.0")]
+    #[inline]
+    pub fn leak<'a>(b: Box<T>) -> &'a mut T
+    where
+        T: 'a // Technically not needed, but kept to be explicit.
+    {
+        unsafe { &mut *Box::into_raw(b) }
+    }
+
+    /// Converts a `Box<T>` into a `Pin<Box<T>>`
+    ///
+    /// This conversion does not allocate on the heap and happens in place.
+    ///
+    /// This is also available via [`From`].
+    // #[unstable(feature = "box_into_pin", issue = "0")]
+    pub fn into_pin(boxed: Box<T>) -> Pin<Box<T>> {
+        // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+        // when `T: !Unpin`, so it's safe to pin it directly without any
+        // additional requirements.
+        unsafe { Pin::new_unchecked(boxed) }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
+    fn drop(&mut self) {
+        unsafe {
+            crate::alloc::box_free(self.0)
+        }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Box<T> {
+    /// Creates a `Box<T>`, with the `Default` value for T.
+    fn default() -> Box<T> {
+        Box::new(Default::default())
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for Box<[T]> {
+    fn default() -> Box<[T]> {
+        Box::<[T; 0]>::new([])
+    }
+}
+
+// #[stable(feature = "default_box_extra", since = "1.17.0")]
+impl Default for Box<str> {
+    fn default() -> Box<str> {
+        let boxed = unsafe { from_boxed_utf8_unchecked(Default::default()) };
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for Box<T> {
+    /// Returns a new box with a `clone()` of this box's contents.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let x = Box::new(5);
+    /// let y = x.clone();
+    /// ```
+    #[rustfmt::skip]
+    #[inline]
+    fn clone(&self) -> Box<T> {
+        let boxed = box { (**self).clone() };
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+    /// Copies `source`'s contents into `self` without creating a new allocation.
+    /// Converts a `Box<T>` into a `Pin<Box<T>>`
+    ///
+    /// This conversion does not allocate on the heap and happens in place.
+    ///
+    /// This is also available via [`From`].
+    // #[unstable(feature = "box_into_pin", issue = "0")]
+    pub fn into_pin(boxed: Box<T>) -> Pin<Box<T>> {
+        // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+        // when `T: !Unpin`, so it's safe to pin it directly without any
+        // additional requirements.
+        unsafe { Pin::new_unchecked(boxed) }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> {
+    fn drop(&mut self) {
+        unsafe {
+            crate::alloc::box_free(self.0)
+        }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Box<T> {
+    /// Creates a `Box<T>`, with the `Default` value for T.
+    fn default() -> Box<T> {
+        Box::new(Default::default())
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for Box<[T]> {
+    fn default() -> Box<[T]> {
+        Box::<[T; 0]>::new([])
+    }
+}
+
+// #[stable(feature = "default_box_extra", since = "1.17.0")]
+impl Default for Box<str> {
+    fn default() -> Box<str> {
+        let boxed = unsafe { from_boxed_utf8_unchecked(Default::default()) };
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for Box<T> {
+    /// Returns a new box with a `clone()` of this box's contents.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let x = Box::new(5);
+    /// let y = x.clone();
+    /// ```
+    #[rustfmt::skip]
+    #[inline]
+    fn clone(&self) -> Box<T> {
+        let boxed = box { (**self).clone() };
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+    /// Copies `source`'s contents into `self` without creating a new allocation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// let x = Box::new(5);
+    /// let mut y = Box::new(10);
+    ///
+    /// y.clone_from(&x);
+    ///
+    /// assert_eq!(*y, 5);
+    /// ```
+    #[inline]
+    fn clone_from(&mut self, source: &Box<T>) {
+        (**self).clone_from(&(**source));
+    }
+}
+
+
+// #[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl Clone for Box<str> {
+    fn clone(&self) -> Self {
+        let len = self.len();
+        let buf = RawVec::with_capacity(len);
+        unsafe {
+            ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
+            let boxed = from_boxed_utf8_unchecked(buf.into_box());
+            Box(rust_alloc::boxed::Box::into_unique(boxed))
+        }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
+    #[inline]
+    fn eq(&self, other: &Box<T>) -> bool {
+        PartialEq::eq(&**self, &**other)
+    }
+    #[inline]
+    fn ne(&self, other: &Box<T>) -> bool {
+        PartialEq::ne(&**self, &**other)
+    }
+}
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
+    #[inline]
+    fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
+        PartialOrd::partial_cmp(&**self, &**other)
+    }
+    #[inline]
+    fn lt(&self, other: &Box<T>) -> bool {
+        PartialOrd::lt(&**self, &**other)
+    }
+    #[inline]
+    fn le(&self, other: &Box<T>) -> bool {
+        PartialOrd::le(&**self, &**other)
+    }
+    #[inline]
+    fn ge(&self, other: &Box<T>) -> bool {
+        PartialOrd::ge(&**self, &**other)
+    }
+    #[inline]
+    fn gt(&self, other: &Box<T>) -> bool {
+        PartialOrd::gt(&**self, &**other)
+    }
+}
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Box<T> {
+    #[inline]
+    fn cmp(&self, other: &Box<T>) -> Ordering {
+        Ord::cmp(&**self, &**other)
+    }
+}
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Box<T> {}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Box<T> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (**self).hash(state);
+    }
+}
+
+// #[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<T: ?Sized + Hasher> Hasher for Box<T> {
+    fn finish(&self) -> u64 {
+        (**self).finish()
+    }
+    fn write(&mut self, bytes: &[u8]) {
+        (**self).write(bytes)
+    }
+    fn write_u8(&mut self, i: u8) {
+        (**self).write_u8(i)
+    }
+    fn write_u16(&mut self, i: u16) {
+        (**self).write_u16(i)
+    }
+    fn write_u32(&mut self, i: u32) {
+        (**self).write_u32(i)
+    }
+    fn write_u64(&mut self, i: u64) {
+        (**self).write_u64(i)
+    }
+    fn write_u128(&mut self, i: u128) {
+        (**self).write_u128(i)
+    }
+    fn write_usize(&mut self, i: usize) {
+        (**self).write_usize(i)
+    }
+    fn write_i8(&mut self, i: i8) {
+        (**self).write_i8(i)
+    }
+    fn write_i16(&mut self, i: i16) {
+        (**self).write_i16(i)
+    }
+    fn write_i32(&mut self, i: i32) {
+        (**self).write_i32(i)
+    }
+    fn write_i64(&mut self, i: i64) {
+        (**self).write_i64(i)
+    }
+    fn write_i128(&mut self, i: i128) {
+        (**self).write_i128(i)
+    }
+    fn write_isize(&mut self, i: isize) {
+        (**self).write_isize(i)
+    }
+}
+
+// #[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Box<T> {
+    /// Converts a generic type `T` into a `Box<T>`
+    ///
+    /// The conversion allocates on the heap and moves `t`
+    /// from the stack into it.
+    ///
+    /// # Examples
+    /// ```rust
+    /// # use wg_allocators::boxed::Box;
+    /// let x = 5;
+    /// let boxed = Box::new(5);
+    ///
+    /// assert_eq!(Box::from(x), boxed);
+    /// ```
+    fn from(t: T) -> Self {
+        Box::new(t)
+    }
+}
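+// [WG example -- not mirrored from rust-lang/rust] A small sketch of why the
+// forwarding impls above matter: a `Box<dyn Hasher>` trait object can be fed
+// data like any concrete hasher, with every `write_*` call delegated to the
+// boxed hasher. The test module name is ours, not upstream's.
+#[cfg(test)]
+mod forwarding_impl_sketch {
+    use super::Box;
+
+    #[test]
+    fn hasher_through_box() {
+        use std::collections::hash_map::DefaultHasher;
+        use std::hash::Hasher;
+
+        let mut direct = DefaultHasher::new();
+        // Coercion to the trait object goes through `CoerceUnsized` below.
+        let mut boxed: Box<dyn Hasher> = Box::new(DefaultHasher::new());
+
+        direct.write_u32(1701);
+        boxed.write_u32(1701); // forwarded by `impl Hasher for Box<T>`
+
+        assert_eq!(direct.finish(), boxed.finish());
+    }
+}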
+    fn from(boxed: Box<T>) -> Self {
+        Box::into_pin(boxed)
+    }
+}
+
+// #[stable(feature = "box_from_slice", since = "1.17.0")]
+impl<T: Copy> From<&[T]> for Box<[T]> {
+    /// Converts a `&[T]` into a `Box<[T]>`
+    ///
+    /// This conversion allocates on the heap
+    /// and performs a copy of `slice`.
+    ///
+    /// # Examples
+    /// ```rust
+    /// # use wg_allocators::boxed::Box;
+    /// // create a &[u8] which will be used to create a Box<[u8]>
+    /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+    /// let boxed_slice: Box<[u8]> = Box::from(slice);
+    ///
+    /// println!("{:?}", boxed_slice);
+    /// ```
+    fn from(slice: &[T]) -> Box<[T]> {
+        let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() };
+        boxed.copy_from_slice(slice);
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+}
+
+// #[stable(feature = "box_from_slice", since = "1.17.0")]
+impl From<&str> for Box<str> {
+    /// Converts a `&str` into a `Box<str>`
+    ///
+    /// This conversion allocates on the heap
+    /// and performs a copy of `s`.
+    ///
+    /// # Examples
+    /// ```rust
+    /// # use wg_allocators::boxed::Box;
+    /// let boxed: Box<str> = Box::from("hello");
+    /// println!("{}", boxed);
+    /// ```
+    #[inline]
+    fn from(s: &str) -> Box<str> {
+        let boxed = unsafe { from_boxed_utf8_unchecked(rust_alloc::boxed::Box::from(s.as_bytes())) };
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+}
+
+// #[stable(feature = "boxed_str_conv", since = "1.19.0")]
+impl From<Box<str>> for Box<[u8]> {
+    /// Converts a `Box<str>` into a `Box<[u8]>`
+    ///
+    /// This conversion does not allocate on the heap and happens in place.
+    ///
+    /// # Examples
+    /// ```rust
+    /// # use wg_allocators::boxed::Box;
+    /// // create a Box<str> which will be used to create a Box<[u8]>
+    /// let boxed: Box<str> = Box::from("hello");
+    /// let boxed_str: Box<[u8]> = Box::from(boxed);
+    ///
+    /// // create a &[u8] which will be used to create a Box<[u8]>
+    /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+    /// let boxed_slice = Box::from(slice);
+    ///
+    /// assert_eq!(boxed_slice, boxed_str);
+    /// ```
+    #[inline]
+    fn from(s: Box<str>) -> Self {
+        unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) }
+    }
+}
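+// [WG example -- not mirrored from rust-lang/rust] A brief sketch contrasting
+// the conversions above: `&str -> Box<str>` copies onto the heap, while
+// `Box<str> -> Box<[u8]>` reuses the same allocation in place. The test
+// module name is ours, not upstream's.
+#[cfg(test)]
+mod str_conversion_sketch {
+    use super::Box;
+
+    #[test]
+    fn boxed_str_to_bytes_is_in_place() {
+        let s: Box<str> = Box::from("hello"); // allocates and copies
+        let addr_before = s.as_ptr() as usize;
+
+        let bytes: Box<[u8]> = Box::from(s); // no new allocation
+        assert_eq!(bytes.as_ptr() as usize, addr_before);
+        assert_eq!(&*bytes, &b"hello"[..]);
+    }
+}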
+impl Box<dyn Any> {
+    #[inline]
+    // #[stable(feature = "rust1", since = "1.0.0")]
+    /// Attempt to downcast the box to a concrete type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// use std::any::Any;
+    ///
+    /// fn print_if_string(value: Box<dyn Any>) {
+    ///     if let Ok(string) = value.downcast::<String>() {
+    ///         println!("String ({}): {}", string.len(), string);
+    ///     }
+    /// }
+    ///
+    /// fn main() {
+    ///     let my_string = "Hello World".to_string();
+    ///     print_if_string(Box::new(my_string));
+    ///     print_if_string(Box::new(0i8));
+    /// }
+    /// ```
+    pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any>> {
+        if self.is::<T>() {
+            unsafe {
+                let raw: *mut dyn Any = Box::into_raw(self);
+                Ok(Box::from_raw(raw as *mut T))
+            }
+        } else {
+            Err(self)
+        }
+    }
+}
+
+impl Box<dyn Any + Send> {
+    #[inline]
+    // #[stable(feature = "rust1", since = "1.0.0")]
+    /// Attempt to downcast the box to a concrete type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wg_allocators::boxed::Box;
+    /// use std::any::Any;
+    ///
+    /// fn print_if_string(value: Box<dyn Any + Send>) {
+    ///     if let Ok(string) = value.downcast::<String>() {
+    ///         println!("String ({}): {}", string.len(), string);
+    ///     }
+    /// }
+    ///
+    /// fn main() {
+    ///     let my_string = "Hello World".to_string();
+    ///     print_if_string(Box::new(my_string));
+    ///     print_if_string(Box::new(0i8));
+    /// }
+    /// ```
+    pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<dyn Any + Send>> {
+        <Box<dyn Any>>::downcast(self).map_err(|s| unsafe {
+            // reapply the Send marker
+            Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send))
+        })
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Box<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // It's not possible to extract the inner Uniq directly from the Box,
+        // instead we cast it to a *const which aliases the Unique
+        let ptr: *const T = &**self;
+        fmt::Pointer::fmt(&ptr, f)
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Box<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { self.0.as_ref() }
+    }
+}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for Box<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { self.0.as_mut() }
+    }
+}
+
+// #[unstable(feature = "receiver_trait", issue = "0")]
+impl<T: ?Sized> Receiver for Box<T> {}
+
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator + ?Sized> Iterator for Box<I> {
+    type Item = I::Item;
+    fn next(&mut self) -> Option<I::Item> {
+        (**self).next()
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (**self).size_hint()
+    }
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        (**self).nth(n)
+    }
+}
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> {
+    fn next_back(&mut self) -> Option<I::Item> {
+        (**self).next_back()
+    }
+    fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+        (**self).nth_back(n)
+    }
+}
+// #[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
+    fn len(&self) -> usize {
+        (**self).len()
+    }
+    fn is_empty(&self) -> bool {
+        (**self).is_empty()
+    }
+}
+
+// #[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
+
+// #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+// [WG]: Added copy trait
+impl<A, F: FnOnce<A> + ?Sized + Copy> FnOnce<A> for Box<F> {
+    type Output = <F as FnOnce<A>>::Output;
+
+    extern "rust-call" fn call_once(self, args: A) -> Self::Output {
+        <F as FnOnce<A>>::call_once(*self, args)
+    }
+}
+
+// #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+// [WG]: Added copy trait
+impl<A, F: FnMut<A> + ?Sized + Copy> FnMut<A> for Box<F> {
+    extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
+        <F as FnMut<A>>::call_mut(self, args)
+    }
+}
+
+// #[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+// [WG]: Added copy trait
+impl<A, F: Fn<A> + ?Sized + Copy> Fn<A> for Box<F> {
+    extern "rust-call" fn call(&self, args: A) -> Self::Output {
+        <F as Fn<A>>::call(self, args)
+    }
+}
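+// [WG example -- not mirrored from rust-lang/rust] A sketch of the `Copy`
+// restriction added above: because this `Box` is not the `owned_box` lang
+// item, `*self` in `call_once` copies rather than moves, so only `Copy`
+// callables (such as function pointers) are callable through `Box`. The test
+// module name is ours, not upstream's.
+#[cfg(test)]
+mod boxed_closure_sketch {
+    use super::Box;
+
+    #[test]
+    fn copy_callable_through_box() {
+        // Non-capturing closures coerce to `fn() -> i32`, which is `Copy`.
+        let f: Box<fn() -> i32> = Box::new(|| 41 + 1);
+        assert_eq!(f(), 42);
+
+        // A closure capturing a non-`Copy` value (e.g. a `String`) would not
+        // satisfy the `F: Copy` bound, so a box of it is not callable here.
+    }
+}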
+/// `FnBox` is a version of the `FnOnce` intended for use with boxed
+/// closure objects. The idea is that where one would normally store a
+/// `Box<dyn FnOnce()>` in a data structure, you should use
+/// `Box<dyn FnBox()>`. The two traits behave essentially the same, except
+/// that a `FnBox` closure can only be called if it is boxed. (Note
+/// that `FnBox` may be deprecated in the future if `Box<dyn FnOnce()>`
+/// closures become directly usable.)
+///
+/// # Examples
+///
+/// Here is a snippet of code which creates a hashmap full of boxed
+/// once closures and then removes them one by one, calling each
+/// closure as it is removed. Note that the type of the closures
+/// stored in the map is `Box<dyn FnBox() -> i32>` and not `Box<dyn
+/// FnOnce() -> i32>`.
+///
+/// ```
+/// #![feature(fnbox)]
+///
+/// use std::boxed::FnBox;
+/// use std::collections::HashMap;
+///
+/// fn make_map() -> HashMap<i32, Box<dyn FnBox() -> i32>> {
+///     let mut map: HashMap<i32, Box<dyn FnBox() -> i32>> = HashMap::new();
+///     map.insert(1, Box::new(|| 22));
+///     map.insert(2, Box::new(|| 44));
+///     map
+/// }
+///
+/// fn main() {
+///     let mut map = make_map();
+///     for i in &[1, 2] {
+///         let f = map.remove(i).unwrap();
+///         assert_eq!(f(), i * 22);
+///     }
+/// }
+/// ```
+#[rustc_paren_sugar]
+// #[unstable(feature = "fnbox",
+//            reason = "will be deprecated if and when `Box<FnBox>` becomes usable", issue = "28796")]
+pub trait FnBox<A>: FnOnce<A> {
+    /// Performs the call operation.
+    fn call_box(self: Box<Self>, args: A) -> Self::Output;
+}
+
+// #[unstable(feature = "fnbox",
+//            reason = "will be deprecated if and when `Box<FnBox>` becomes usable", issue = "28796")]
+// [WG]: Added copy trait
+impl<A, F: Copy> FnBox<A> for F
+    where F: FnOnce<A>
+{
+    fn call_box(self: Box<F>, args: A) -> F::Output {
+        self.call_once(args)
+    }
+}
+
+// #[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+// #[unstable(feature = "dispatch_from_dyn", issue = "0")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+// #[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
+impl<A> FromIterator<A> for Box<[A]> {
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
+        let boxed = iter.into_iter().collect::<Vec<A>>().into_boxed_slice();
+        Box(rust_alloc::boxed::Box::into_unique(boxed))
+    }
+}
+
+// #[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl<T: Clone> Clone for Box<[T]> {
+    fn clone(&self) -> Self {
+        let mut new = BoxBuilder {
+            data: RawVec::with_capacity(self.len()),
+            len: 0,
+        };
+
+        let mut target = new.data.ptr();
+
+        for item in self.iter() {
+            unsafe {
+                ptr::write(target, item.clone());
+                target = target.offset(1);
+            };
+
+            new.len += 1;
+        }
+
+        return unsafe { new.into_box() };
+
+        // Helper type for responding to panics correctly.
+        struct BoxBuilder<T> {
+            data: RawVec<T>,
+            len: usize,
+        }
+
+        impl<T> BoxBuilder<T> {
+            unsafe fn into_box(self) -> Box<[T]> {
+                let raw = ptr::read(&self.data);
+                mem::forget(self);
+                let boxed = raw.into_box();
+                Box(rust_alloc::boxed::Box::into_unique(boxed))
+            }
+        }
+
+        impl<T> Drop for BoxBuilder<T> {
+            fn drop(&mut self) {
+                let mut data = self.data.ptr();
+                let max = unsafe { data.add(self.len) };
+
+                while data != max {
+                    unsafe {
+                        ptr::read(data);
+                        data = data.offset(1);
+                    }
+                }
+            }
+        }
+    }
+}
+
+// #[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Box<T> {
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+// #[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> {
+    fn borrow_mut(&mut self) -> &mut T {
+        &mut **self
+    }
+}
+
+// #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Box<T> {
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
+
+// #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsMut<T> for Box<T> {
+    fn as_mut(&mut self) -> &mut T {
+        &mut **self
+    }
+}
+
+/* Nota bene
+ *
+ *  We could have chosen not to add this impl, and instead have written a
+ *  function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
+ *  because Box<T> implements Unpin even when T does not, as a result of
+ *  this impl.
+ *
+ *  We chose this API instead of the alternative for a few reasons:
+ *      - Logically, it is helpful to understand pinning in regard to the
+ *        memory region being pointed to. For this reason none of the
+ *        standard library pointer types support projecting through a pin
+ *        (Box<T> is the only pointer type in std for which this would be
+ *        safe.)
+ *      - It is in practice very useful to have Box<T> be unconditionally
+ *        Unpin because of trait objects, for which the structural auto
+ *        trait functionality does not apply (e.g., Box<dyn Foo> would
+ *        otherwise not be Unpin).
+ *
+ *  Another type with the same semantics as Box but only a conditional
+ *  implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
+ *  could have a method to project a Pin<T> from it.
+ */
+// #[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Box<T> { }
+
+// #[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator + Unpin> Generator for Box<G> {
+    type Yield = G::Yield;
+    type Return = G::Return;
+
+    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
+        G::resume(Pin::new(&mut *self))
+    }
+}
+
+// #[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator> Generator for Pin<Box<G>> {
+    type Yield = G::Yield;
+    type Return = G::Return;
+
+    fn resume(mut self: Pin<&mut Self>) -> GeneratorState<Self::Yield, Self::Return> {
+        G::resume((*self).as_mut())
+    }
+}
+
+// #[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: ?Sized + Future + Unpin> Future for Box<F> {
+    type Output = F::Output;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        F::poll(Pin::new(&mut *self), cx)
+    }
+}
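+// [WG example -- not mirrored from rust-lang/rust] A sketch of why the
+// unconditional `Unpin` impl above is useful: because `Box<T>` is always
+// `Unpin`, a boxed value can be pinned with the safe `Pin::new` instead of
+// `unsafe { Pin::new_unchecked(..) }`, which is exactly what the `Generator`
+// and `Future` impls above rely on. The helper and module names are ours.
+#[cfg(test)]
+mod unpin_box_sketch {
+    use super::Box;
+    use core::pin::Pin;
+
+    // Note: no `T: Unpin` bound is needed here -- `Box<T>: Unpin` holds
+    // unconditionally.
+    fn pin_mut<T>(boxed: &mut Box<T>) -> Pin<&mut Box<T>> {
+        Pin::new(boxed)
+    }
+
+    #[test]
+    fn box_is_unpin() {
+        let mut b = Box::new(5);
+        let mut pinned = pin_mut(&mut b);
+        **pinned.as_mut().get_mut() += 1; // `get_mut` is safe for `Unpin` targets
+        assert_eq!(**pinned.as_ref().get_ref(), 6);
+    }
+}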
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..104a0bf
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,21 @@
+#![feature(allocator_api)]
+#![feature(box_syntax)]
+#![feature(coerce_unsized)]
+#![feature(core_intrinsics)]
+#![feature(dispatch_from_dyn)]
+#![feature(dropck_eyepatch)]
+#![feature(exact_size_is_empty)]
+#![feature(fn_traits)]
+#![feature(generator_trait)]
+#![feature(iter_nth_back)]
+#![feature(ptr_internals)]
+#![feature(raw_vec_internals)]
+#![feature(receiver_trait)]
+#![feature(unboxed_closures)]
+#![feature(unsize)]
+#![feature(unsized_locals)]
+
+extern crate alloc as rust_alloc;
+
+pub mod alloc;
+pub mod boxed;
\ No newline at end of file