const std = @import("../std.zig");
const builtin = @import("builtin");
const math = std.math;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const assert = std.debug.assert;

pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
    return struct {
        pub const vtable = Allocator.VTable{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        };

        pub const Error = Allocator.Error;

        const max_usize = math.maxInt(usize);
        const ushift = math.Log2Int(usize);
        const bigpage_size = 64 * 1024;
        const pages_per_bigpage = bigpage_size / mem.page_size;
        const bigpage_count = max_usize / bigpage_size;

        /// Because every slot stores a free-list next pointer, the smallest
        /// slot must hold a usize: the minimum size class is 3 on 32-bit
        /// targets (8-byte slots) and 4 on 64-bit targets (16-byte slots).
        const min_class = math.log2(math.ceilPowerOfTwoAssert(usize, 1 + @sizeOf(usize)));
        const size_class_count = math.log2(bigpage_size) - min_class;
        /// 0 - 1 bigpage
        /// 1 - 2 bigpages
        /// 2 - 4 bigpages
        /// etc.
        const big_size_class_count = math.log2(bigpage_count);

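        /// For each size class, the next free address inside the current
        /// bigpage, or 0 if no bigpage has been claimed for that class yet.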
        var next_addrs = [1]usize{0} ** size_class_count;
        /// For each size class, the head of an intrusive list of freed slots
        /// (0 when empty).
        var frees = [1]usize{0} ** size_class_count;
        /// For each big size class, the head of an intrusive list of freed
        /// big allocations (0 when empty).
        var big_frees = [1]usize{0} ** big_size_class_count;

        // TODO don't do the naive locking strategy
        var lock: std.Thread.Mutex = .{};

        fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // Make room for the freelist next pointer.
            const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
            const actual_len = @max(len +| @sizeOf(usize), alignment);
            const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
            const class = math.log2(slot_size) - min_class;
            if (class < size_class_count) {
                const addr = a: {
                    const top_free_ptr = frees[class];
                    if (top_free_ptr != 0) {
                        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                        frees[class] = node.*;
                        break :a top_free_ptr;
                    }

                    const next_addr = next_addrs[class];
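                    // Claim a fresh bigpage when next_addr is 0 (nothing
                    // claimed yet for this class) or when it lands on a page
                    // boundary.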
                    if (next_addr % mem.page_size == 0) {
                        const addr = allocBigPages(1);
                        if (addr == 0) return null;
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
                        //    slot_size, class, addr,
                        //});
                        next_addrs[class] = addr + slot_size;
                        break :a addr;
                    } else {
                        next_addrs[class] = next_addr + slot_size;
                        break :a next_addr;
                    }
                };
                return @as([*]u8, @ptrFromInt(addr));
            }
            const bigpages_needed = bigPagesNeeded(actual_len);
            const addr = allocBigPages(bigpages_needed);
            if (addr == 0) return null;
            return @as([*]u8, @ptrFromInt(addr));
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            log2_buf_align: u8,
            new_len: usize,
            return_address: usize,
        ) bool {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // We don't want to move anything from one size class to another, but we
            // can recover bytes in between powers of two.
            const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
            const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
            const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
            const old_small_class = math.log2(old_small_slot_size) - min_class;
            if (old_small_class < size_class_count) {
                const new_small_slot_size = math.ceilPowerOfTwo(usize, new_actual_len) catch return false;
                return old_small_slot_size == new_small_slot_size;
            } else {
                const old_bigpages_needed = bigPagesNeeded(old_actual_len);
                const old_big_slot_pages = math.ceilPowerOfTwoAssert(usize, old_bigpages_needed);
                const new_bigpages_needed = bigPagesNeeded(new_actual_len);
                const new_big_slot_pages = math.ceilPowerOfTwo(usize, new_bigpages_needed) catch return false;
                return old_big_slot_pages == new_big_slot_pages;
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            log2_buf_align: u8,
            return_address: usize,
        ) void {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
            const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
            const class = math.log2(slot_size) - min_class;
            const addr = @intFromPtr(buf.ptr);
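            // Push the slot onto the matching free list; the next pointer is
            // stored in the slot's last usize-sized word.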
            if (class < size_class_count) {
                const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
                node.* = frees[class];
                frees[class] = addr;
            } else {
                const bigpages_needed = bigPagesNeeded(actual_len);
                const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
                const big_slot_size_bytes = pow2_pages * bigpage_size;
                const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
                const big_class = math.log2(pow2_pages);
                node.* = big_frees[big_class];
                big_frees[big_class] = addr;
            }
        }

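        /// Number of bigpages needed to hold `byte_count` bytes plus the
        /// trailing free-list pointer, rounded up.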
        inline fn bigPagesNeeded(byte_count: usize) usize {
            return (byte_count + (bigpage_size + (@sizeOf(usize) - 1))) / bigpage_size;
        }

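        /// The request is rounded up to a power-of-two number of bigpages so
        /// that freed big allocations can be binned by their log2 class.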
        fn allocBigPages(n: usize) usize {
            const pow2_pages = math.ceilPowerOfTwoAssert(usize, n);
            const slot_size_bytes = pow2_pages * bigpage_size;
            const class = math.log2(pow2_pages);

            const top_free_ptr = big_frees[class];
            if (top_free_ptr != 0) {
                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
                big_frees[class] = node.*;
                return top_free_ptr;
            }
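            // Free list is empty; ask the backing sbrk for fresh memory.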
            return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
        }
    };
}
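// Usage sketch (illustrative, not part of this commit). `mySbrk` is a
// hypothetical host-provided grow function: it must return the base address
// of `n` fresh bytes, or 0 on failure. Allocator state is global, so the
// `ptr` field of the resulting std.mem.Allocator goes unused:
//
//     const MySbrkAllocator = SbrkAllocator(mySbrk);
//     pub const allocator = std.mem.Allocator{
//         .ptr = undefined,
//         .vtable = &MySbrkAllocator.vtable,
//     };
//
//     // Then use it like any other std allocator:
//     const bytes = try allocator.alloc(u8, 100);
//     defer allocator.free(bytes);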