From 1c485be4fb798679fde7db148392de558a6fa97c Mon Sep 17 00:00:00 2001 From: Achille Roussel Date: Wed, 15 Mar 2023 14:56:52 -0700 Subject: [PATCH] runtime: reuse freed memory blocks on wasm When compiling Go programs to WebAssembly, the memory allocation strategy was neither releasing memory to the OS nor reusing blocks freed by calls to runtime.sysFreeOS. This CL unifies the plan9 and wasm memory management strategy since both platforms use a linear memory space and do not have a mechanism for returning memory blocks to the OS. Fixes #59061 --- src/runtime/mem_js.go | 71 ++------------- src/runtime/mem_plan9.go | 174 ------------------------------------- src/runtime/mem_sbrk.go | 183 +++++++++++++++++++++++++++++++++++++++ src/runtime/os_js.go | 4 +- src/runtime/os_plan9.go | 2 +- 5 files changed, 194 insertions(+), 240 deletions(-) create mode 100644 src/runtime/mem_sbrk.go diff --git a/src/runtime/mem_js.go b/src/runtime/mem_js.go index 78eda47b1fbb0f..3aaf6ca5e70046 100644 --- a/src/runtime/mem_js.go +++ b/src/runtime/mem_js.go @@ -6,72 +6,18 @@ package runtime -import ( - "unsafe" -) +import "unsafe" -// Don't split the stack as this function may be invoked without a valid G, -// which prevents us from allocating more stack. -// -//go:nosplit -func sysAllocOS(n uintptr) unsafe.Pointer { - p := sysReserveOS(nil, n) - sysMapOS(p, n) - return p -} - -func sysUnusedOS(v unsafe.Pointer, n uintptr) { -} - -func sysUsedOS(v unsafe.Pointer, n uintptr) { -} - -func sysHugePageOS(v unsafe.Pointer, n uintptr) { -} - -// Don't split the stack as this function may be invoked without a valid G, -// which prevents us from allocating more stack. -// -//go:nosplit -func sysFreeOS(v unsafe.Pointer, n uintptr) { -} +func sbrk(n uintptr) unsafe.Pointer { + grow := divRoundUp(n, physPageSize) + size := currentMemory() -func sysFaultOS(v unsafe.Pointer, n uintptr) { -} - -var reserveEnd uintptr - -func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { - // TODO(neelance): maybe unify with mem_plan9.go, depending on how https://github.com/WebAssembly/design/blob/master/FutureFeatures.md#finer-grained-control-over-memory turns out - - if v != nil { - // The address space of WebAssembly's linear memory is contiguous, - // so requesting specific addresses is not supported. We could use - // a different address, but then mheap.sysAlloc discards the result - // right away and we don't reuse chunks passed to sysFree. + if growMemory(int32(grow)) < 0 { return nil } - // Round up the initial reserveEnd to 64 KiB so that - // reservations are always aligned to the page size. - initReserveEnd := alignUp(lastmoduledatap.end, physPageSize) - if reserveEnd < initReserveEnd { - reserveEnd = initReserveEnd - } - v = unsafe.Pointer(reserveEnd) - reserveEnd += alignUp(n, physPageSize) - - current := currentMemory() - // reserveEnd is always at a page boundary. 
- needed := int32(reserveEnd / physPageSize) - if current < needed { - if growMemory(needed-current) == -1 { - return nil - } - resetMemoryDataView() - } - - return v + resetMemoryDataView() + return unsafe.Pointer(uintptr(size) * physPageSize) } func currentMemory() int32 @@ -82,6 +28,3 @@ func growMemory(pages int32) int32 // //go:wasmimport gojs runtime.resetMemoryDataView func resetMemoryDataView() - -func sysMapOS(v unsafe.Pointer, n uintptr) { -} diff --git a/src/runtime/mem_plan9.go b/src/runtime/mem_plan9.go index 88e7d92a7c5d9f..9b18a2919d84c8 100644 --- a/src/runtime/mem_plan9.go +++ b/src/runtime/mem_plan9.go @@ -6,126 +6,6 @@ package runtime import "unsafe" -const memDebug = false - -var bloc uintptr -var blocMax uintptr -var memlock mutex - -type memHdr struct { - next memHdrPtr - size uintptr -} - -var memFreelist memHdrPtr // sorted in ascending order - -type memHdrPtr uintptr - -func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } -func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } - -func memAlloc(n uintptr) unsafe.Pointer { - n = memRound(n) - var prevp *memHdr - for p := memFreelist.ptr(); p != nil; p = p.next.ptr() { - if p.size >= n { - if p.size == n { - if prevp != nil { - prevp.next = p.next - } else { - memFreelist = p.next - } - } else { - p.size -= n - p = (*memHdr)(add(unsafe.Pointer(p), p.size)) - } - *p = memHdr{} - return unsafe.Pointer(p) - } - prevp = p - } - return sbrk(n) -} - -func memFree(ap unsafe.Pointer, n uintptr) { - n = memRound(n) - memclrNoHeapPointers(ap, n) - bp := (*memHdr)(ap) - bp.size = n - bpn := uintptr(ap) - if memFreelist == 0 { - bp.next = 0 - memFreelist.set(bp) - return - } - p := memFreelist.ptr() - if bpn < uintptr(unsafe.Pointer(p)) { - memFreelist.set(bp) - if bpn+bp.size == uintptr(unsafe.Pointer(p)) { - bp.size += p.size - bp.next = p.next - *p = memHdr{} - } else { - bp.next.set(p) - } - return - } - for ; p.next != 0; p = p.next.ptr() { - if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) { - break - } - } - if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) { - bp.size += p.next.ptr().size - bp.next = p.next.ptr().next - *p.next.ptr() = memHdr{} - } else { - bp.next = p.next - } - if uintptr(unsafe.Pointer(p))+p.size == bpn { - p.size += bp.size - p.next = bp.next - *bp = memHdr{} - } else { - p.next.set(bp) - } -} - -func memCheck() { - if !memDebug { - return - } - for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() { - if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) { - print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n") - throw("mem: infinite loop") - } - if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) { - print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n") - throw("mem: unordered list") - } - if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) { - print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n") - throw("mem: overlapping blocks") - } - for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) { - if *(*byte)(b) != 0 { - print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n") - throw("mem: uninitialised memory") - } - } - } -} - -func memRound(p uintptr) uintptr { - return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1) -} - -func initBloc() { - bloc = 
memRound(firstmoduledata.end) - blocMax = bloc -} - func sbrk(n uintptr) unsafe.Pointer { // Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c bl := bloc @@ -139,57 +19,3 @@ func sbrk(n uintptr) unsafe.Pointer { bloc += n return unsafe.Pointer(bl) } - -func sysAllocOS(n uintptr) unsafe.Pointer { - lock(&memlock) - p := memAlloc(n) - memCheck() - unlock(&memlock) - return p -} - -func sysFreeOS(v unsafe.Pointer, n uintptr) { - lock(&memlock) - if uintptr(v)+n == bloc { - // Address range being freed is at the end of memory, - // so record a new lower value for end of memory. - // Can't actually shrink address space because segment is shared. - memclrNoHeapPointers(v, n) - bloc -= n - } else { - memFree(v, n) - memCheck() - } - unlock(&memlock) -} - -func sysUnusedOS(v unsafe.Pointer, n uintptr) { -} - -func sysUsedOS(v unsafe.Pointer, n uintptr) { -} - -func sysHugePageOS(v unsafe.Pointer, n uintptr) { -} - -func sysMapOS(v unsafe.Pointer, n uintptr) { -} - -func sysFaultOS(v unsafe.Pointer, n uintptr) { -} - -func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { - lock(&memlock) - var p unsafe.Pointer - if uintptr(v) == bloc { - // Address hint is the current end of memory, - // so try to extend the address space. - p = sbrk(n) - } - if p == nil && v == nil { - p = memAlloc(n) - memCheck() - } - unlock(&memlock) - return p -} diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go new file mode 100644 index 00000000000000..4d5d3d7ce31436 --- /dev/null +++ b/src/runtime/mem_sbrk.go @@ -0,0 +1,183 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 || wasm + +package runtime + +import "unsafe" + +const memDebug = false + +var bloc uintptr +var blocMax uintptr +var memlock mutex + +type memHdr struct { + next memHdrPtr + size uintptr +} + +var memFreelist memHdrPtr // sorted in ascending order + +type memHdrPtr uintptr + +func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } +func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } + +func memAlloc(n uintptr) unsafe.Pointer { + n = memRound(n) + var prevp *memHdr + for p := memFreelist.ptr(); p != nil; p = p.next.ptr() { + if p.size >= n { + if p.size == n { + if prevp != nil { + prevp.next = p.next + } else { + memFreelist = p.next + } + } else { + p.size -= n + p = (*memHdr)(add(unsafe.Pointer(p), p.size)) + } + *p = memHdr{} + return unsafe.Pointer(p) + } + prevp = p + } + return sbrk(n) +} + +func memFree(ap unsafe.Pointer, n uintptr) { + n = memRound(n) + memclrNoHeapPointers(ap, n) + bp := (*memHdr)(ap) + bp.size = n + bpn := uintptr(ap) + if memFreelist == 0 { + bp.next = 0 + memFreelist.set(bp) + return + } + p := memFreelist.ptr() + if bpn < uintptr(unsafe.Pointer(p)) { + memFreelist.set(bp) + if bpn+bp.size == uintptr(unsafe.Pointer(p)) { + bp.size += p.size + bp.next = p.next + *p = memHdr{} + } else { + bp.next.set(p) + } + return + } + for ; p.next != 0; p = p.next.ptr() { + if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) { + break + } + } + if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) { + bp.size += p.next.ptr().size + bp.next = p.next.ptr().next + *p.next.ptr() = memHdr{} + } else { + bp.next = p.next + } + if uintptr(unsafe.Pointer(p))+p.size == bpn { + p.size += bp.size + p.next = bp.next + *bp = memHdr{} + } else { + p.next.set(bp) + } +} + +func memCheck() { + if !memDebug { + return + } + for p := 
memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() { + if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n") + throw("mem: infinite loop") + } + if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n") + throw("mem: unordered list") + } + if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) { + print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n") + throw("mem: overlapping blocks") + } + for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) { + if *(*byte)(b) != 0 { + print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n") + throw("mem: uninitialised memory") + } + } + } +} + +func memRound(p uintptr) uintptr { + return alignUp(p, physPageSize) +} + +func initBloc() { + bloc = memRound(firstmoduledata.end) + blocMax = bloc +} + +func sysAllocOS(n uintptr) unsafe.Pointer { + lock(&memlock) + p := memAlloc(n) + memCheck() + unlock(&memlock) + return p +} + +func sysFreeOS(v unsafe.Pointer, n uintptr) { + lock(&memlock) + if uintptr(v)+n == bloc { + // Address range being freed is at the end of memory, + // so record a new lower value for end of memory. + // Can't actually shrink address space because segment is shared. + memclrNoHeapPointers(v, n) + bloc -= n + } else { + memFree(v, n) + memCheck() + } + unlock(&memlock) +} + +func sysUnusedOS(v unsafe.Pointer, n uintptr) { +} + +func sysUsedOS(v unsafe.Pointer, n uintptr) { +} + +func sysHugePageOS(v unsafe.Pointer, n uintptr) { +} + +func sysMapOS(v unsafe.Pointer, n uintptr) { +} + +func sysFaultOS(v unsafe.Pointer, n uintptr) { +} + +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { + lock(&memlock) + var p unsafe.Pointer + if uintptr(v) == bloc { + // Address hint is the current end of memory, + // so try to extend the address space. + p = sbrk(n) + } + if p == nil && v == nil { + p = memAlloc(n) + memCheck() + } + unlock(&memlock) + return p +} diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go index 63a3d95afa5124..79fdbcfc07bf0a 100644 --- a/src/runtime/os_js.go +++ b/src/runtime/os_js.go @@ -102,9 +102,11 @@ func mdestroy(mp *m) { } func osinit() { + // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances + physPageSize = 64 * 1024 + initBloc() ncpu = 1 getg().m.procid = 2 - physPageSize = 64 * 1024 } // wasm has no signals diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go index 5e5a63dcbf97e0..367a5c1cd1957c 100644 --- a/src/runtime/os_plan9.go +++ b/src/runtime/os_plan9.go @@ -319,9 +319,9 @@ func getpid() uint64 { } func osinit() { + physPageSize = getPageSize() initBloc() ncpu = getproccount() - physPageSize = getPageSize() getg().m.procid = getpid() }
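
For readers unfamiliar with the sbrk-style allocator that mem_sbrk.go now shares between plan9 and wasm, the standalone Go sketch below (not part of the patch; identifiers such as simAlloc, simFree, and pageSize are illustrative, not runtime names) shows the core idea: the break only ever grows, a block freed at the very end of memory lowers the break, and any other freed block goes on a free list so later allocations reuse it instead of growing linear memory again.

// Simplified model of the sbrk + free-list strategy; not runtime code.
package main

import "fmt"

const pageSize = 64 * 1024 // wasm linear-memory page size

type block struct{ addr, size uintptr }

var (
	bloc     uintptr // current end of the simulated linear memory
	freeList []block // blocks freed in the middle of memory, reusable
)

func round(n uintptr) uintptr { return (n + pageSize - 1) &^ (pageSize - 1) }

// simAlloc reuses a free block when one is large enough (first fit),
// and otherwise extends the break, like memAlloc falling back to sbrk.
func simAlloc(n uintptr) uintptr {
	n = round(n)
	for i, b := range freeList {
		if b.size >= n {
			freeList[i].addr += n
			freeList[i].size -= n
			if freeList[i].size == 0 {
				freeList = append(freeList[:i], freeList[i+1:]...)
			}
			return b.addr
		}
	}
	p := bloc
	bloc += n // sbrk: grow linear memory; it is never returned to the OS
	return p
}

// simFree lowers the break when the block sits at the end of memory,
// and otherwise records it for reuse, like sysFreeOS.
func simFree(addr, n uintptr) {
	n = round(n)
	if addr+n == bloc {
		bloc -= n
		return
	}
	freeList = append(freeList, block{addr, n})
}

func main() {
	a := simAlloc(1 << 20)
	b := simAlloc(1 << 20)
	simFree(a, 1<<20) // freed in the middle: goes on the free list
	c := simAlloc(1 << 20)
	fmt.Printf("a=%#x b=%#x c=%#x bloc=%#x\n", a, b, c, bloc)
	// c == a: the freed block was reused rather than growing memory further.
}

The runtime version in mem_sbrk.go is more involved: memAlloc carves space from the tail of a free block, memFree keeps the list sorted and coalesces adjacent blocks, and sysReserveOS still tries sbrk first when the address hint equals the current break. The sketch above only shows why, after this patch, blocks freed by sysFreeOS stop leaking on wasm.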