Skip to content

Commit 0c28a18

Browse files
committed
WasmAllocator: cleanup
1 parent 4235982 commit 0c28a18

File tree

1 file changed

+10
-10
lines changed

std/heap.zig

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -318,9 +318,10 @@ pub const FixedBufferAllocator = struct {
318318
}
319319
};
320320

321+
// FIXME: Exposed LLVM intrinsics is a bug
322+
// See: https://github.com/ziglang/zig/issues/2291
321323
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
322324
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
323-
const WASM_PAGE_SIZE = 64 * 1024; // 64 kilobytes
324325

325326
pub const wasm_allocator = &wasm_allocator_state.allocator;
326327
var wasm_allocator_state = WasmAllocator{
@@ -333,7 +334,7 @@ var wasm_allocator_state = WasmAllocator{
333334
.end_index = 0,
334335
};
335336

336-
pub const WasmAllocator = struct {
337+
const WasmAllocator = struct {
337338
allocator: Allocator,
338339
start_ptr: [*]u8,
339340
num_pages: usize,
@@ -347,11 +348,11 @@ pub const WasmAllocator = struct {
347348
const adjusted_index = self.end_index + (adjusted_addr - addr);
348349
const new_end_index = adjusted_index + size;
349350

350-
const required_memory = new_end_index - (self.num_pages * WASM_PAGE_SIZE);
351+
if (new_end_index > self.num_pages * os.page_size) {
352+
const required_memory = new_end_index - (self.num_pages * os.page_size);
351353

352-
if (required_memory > 0) {
353-
var num_pages: u32 = @divTrunc(required_memory, WASM_PAGE_SIZE);
354-
if (@rem(required_memory, WASM_PAGE_SIZE) != 0) {
354+
var num_pages: u32 = required_memory / os.page_size;
355+
if (required_memory % os.page_size != 0) {
355356
num_pages += 1;
356357
}
357358

@@ -369,7 +370,7 @@ pub const WasmAllocator = struct {
369370
return result;
370371
}
371372

372-
// Check if memory is the last "item" and it aligns. That lets us expand or reclaim memory
373+
// Check if memory is the last "item" and is aligned correctly
373374
fn is_last_item(allocator: *Allocator, memory: []u8, alignment: u29) bool {
374375
const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
375376
return memory.ptr == self.start_ptr + self.end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
@@ -380,14 +381,14 @@ pub const WasmAllocator = struct {
380381

381382
// Initialize start_ptr at the first realloc
382383
if (self.num_pages == 0) {
383-
self.start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * WASM_PAGE_SIZE);
384+
self.start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * os.page_size);
384385
}
385386

386387
if (is_last_item(allocator, old_mem, new_align)) {
387388
const start_index = self.end_index - old_mem.len;
388389
const new_end_index = start_index + new_size;
389390

390-
if (new_end_index > self.num_pages * WASM_PAGE_SIZE) {
391+
if (new_end_index > self.num_pages * os.page_size) {
391392
_ = try alloc(allocator, new_end_index - self.end_index, new_align);
392393
}
393394
const result = self.start_ptr[start_index..new_end_index];
@@ -404,7 +405,6 @@ pub const WasmAllocator = struct {
404405
}
405406

406407
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
407-
// TODO: Use is_last_item or other heuristic here
408408
return old_mem[0..new_size];
409409
}
410410
};

0 commit comments

Comments (0)