
runtime: reuse freed memory blocks on wasm #59065

Closed
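What the diff does, in short: instead of only ever growing WebAssembly linear memory and discarding freed chunks, the wasm port now routes allocation through the same sbrk-plus-free-list scheme the Plan 9 port uses, so memory returned by sysFree can be handed out again. The free-list code removed from mem_plan9.go below moves to a file shared by both ports (mem_sbrk.go in the merged tree; that file is not shown in this excerpt). A minimal, self-contained sketch of that allocation shape — the types and names here are illustrative only, with none of the runtime's locking or intrusive list headers:

```go
package main

import "fmt"

type block struct {
	off, size int
}

var (
	freelist []block // sorted by offset; the runtime uses an intrusive linked list
	brk      int     // simulated end of linear memory
)

// sbrk extends memory and returns the old end, like the runtime's sbrk.
func sbrk(n int) int {
	p := brk
	brk += n
	return p
}

// alloc is first-fit: reuse the first free block that is large enough,
// splitting it if needed; fall back to sbrk only on a miss.
func alloc(n int) int {
	for i, b := range freelist {
		if b.size >= n {
			freelist[i].off += n
			freelist[i].size -= n
			if freelist[i].size == 0 {
				freelist = append(freelist[:i], freelist[i+1:]...)
			}
			return b.off
		}
	}
	return sbrk(n)
}

// free returns a block to the list; the runtime's memFree additionally
// coalesces adjacent blocks (see the mem_plan9.go code below).
func free(off, n int) {
	freelist = append(freelist, block{off, n})
}

func main() {
	a := alloc(64)
	free(a, 64)
	b := alloc(32)
	fmt.Println(a == b, "brk:", brk) // true brk: 64 — the freed block was reused
}
```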
71 changes: 7 additions & 64 deletions src/runtime/mem_js.go
@@ -6,72 +6,18 @@

 package runtime

-import (
-    "unsafe"
-)
+import "unsafe"

-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
-    p := sysReserveOS(nil, n)
-    sysMapOS(p, n)
-    return p
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
-
-var reserveEnd uintptr
-
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-    // TODO(neelance): maybe unify with mem_plan9.go, depending on how https://github.com/WebAssembly/design/blob/master/FutureFeatures.md#finer-grained-control-over-memory turns out
-
-    if v != nil {
-        // The address space of WebAssembly's linear memory is contiguous,
-        // so requesting specific addresses is not supported. We could use
-        // a different address, but then mheap.sysAlloc discards the result
-        // right away and we don't reuse chunks passed to sysFree.
-        return nil
-    }
-
-    // Round up the initial reserveEnd to 64 KiB so that
-    // reservations are always aligned to the page size.
-    initReserveEnd := alignUp(lastmoduledatap.end, physPageSize)
-    if reserveEnd < initReserveEnd {
-        reserveEnd = initReserveEnd
-    }
-    v = unsafe.Pointer(reserveEnd)
-    reserveEnd += alignUp(n, physPageSize)
-
-    current := currentMemory()
-    // reserveEnd is always at a page boundary.
-    needed := int32(reserveEnd / physPageSize)
-    if current < needed {
-        if growMemory(needed-current) == -1 {
-            return nil
-        }
-        resetMemoryDataView()
-    }
-
-    return v
+func sbrk(n uintptr) unsafe.Pointer {
+    grow := divRoundUp(n, physPageSize)
+    size := currentMemory()
+
+    if growMemory(int32(grow)) < 0 {
+        return nil
+    }
+
+    resetMemoryDataView()
+    return unsafe.Pointer(uintptr(size) * physPageSize)
 }

 func currentMemory() int32
@@ -82,6 +28,3 @@ func growMemory(pages int32) int32
 //
 //go:wasmimport gojs runtime.resetMemoryDataView
 func resetMemoryDataView()
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
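The new sbrk above leans on WebAssembly's grow-only linear memory: currentMemory and growMemory correspond to the wasm memory.size and memory.grow instructions, both measured in 64 KiB pages, and growMemory reports failure with a negative result. A runnable sketch of the same arithmetic — pageSize, memPages, and grow here are simulated stand-ins for the real intrinsics, not runtime code:

```go
package main

import "fmt"

const pageSize = 64 * 1024 // wasm linear-memory pages are 64 KiB

var memPages = 16 // simulated memory size, in pages

// grow models wasm memory.grow: return the old size in pages, or -1 on failure.
func grow(pages int) int {
	old := memPages
	memPages += pages
	return old
}

// sbrk mirrors the shape of the new runtime sbrk: round the request up to
// whole pages, grow, and hand back a pointer to the old end of memory.
func sbrk(n uintptr) uintptr {
	growBy := (n + pageSize - 1) / pageSize // divRoundUp(n, physPageSize)
	size := grow(int(growBy))
	if size < 0 {
		return 0 // the real code returns nil
	}
	return uintptr(size) * pageSize // the new block starts at the old end
}

func main() {
	p := sbrk(100_000) // 100000 bytes round up to 2 pages
	fmt.Printf("block at %#x, memory now %d pages\n", p, memPages)
}
```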
174 changes: 0 additions & 174 deletions src/runtime/mem_plan9.go
@@ -6,126 +6,6 @@ package runtime

 import "unsafe"

-const memDebug = false
-
-var bloc uintptr
-var blocMax uintptr
-var memlock mutex
-
-type memHdr struct {
-    next memHdrPtr
-    size uintptr
-}
-
-var memFreelist memHdrPtr // sorted in ascending order
-
-type memHdrPtr uintptr
-
-func (p memHdrPtr) ptr() *memHdr   { return (*memHdr)(unsafe.Pointer(p)) }
-func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
-
-func memAlloc(n uintptr) unsafe.Pointer {
-    n = memRound(n)
-    var prevp *memHdr
-    for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
-        if p.size >= n {
-            if p.size == n {
-                if prevp != nil {
-                    prevp.next = p.next
-                } else {
-                    memFreelist = p.next
-                }
-            } else {
-                p.size -= n
-                p = (*memHdr)(add(unsafe.Pointer(p), p.size))
-            }
-            *p = memHdr{}
-            return unsafe.Pointer(p)
-        }
-        prevp = p
-    }
-    return sbrk(n)
-}
-
-func memFree(ap unsafe.Pointer, n uintptr) {
-    n = memRound(n)
-    memclrNoHeapPointers(ap, n)
-    bp := (*memHdr)(ap)
-    bp.size = n
-    bpn := uintptr(ap)
-    if memFreelist == 0 {
-        bp.next = 0
-        memFreelist.set(bp)
-        return
-    }
-    p := memFreelist.ptr()
-    if bpn < uintptr(unsafe.Pointer(p)) {
-        memFreelist.set(bp)
-        if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
-            bp.size += p.size
-            bp.next = p.next
-            *p = memHdr{}
-        } else {
-            bp.next.set(p)
-        }
-        return
-    }
-    for ; p.next != 0; p = p.next.ptr() {
-        if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
-            break
-        }
-    }
-    if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
-        bp.size += p.next.ptr().size
-        bp.next = p.next.ptr().next
-        *p.next.ptr() = memHdr{}
-    } else {
-        bp.next = p.next
-    }
-    if uintptr(unsafe.Pointer(p))+p.size == bpn {
-        p.size += bp.size
-        p.next = bp.next
-        *bp = memHdr{}
-    } else {
-        p.next.set(bp)
-    }
-}
-
-func memCheck() {
-    if !memDebug {
-        return
-    }
-    for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
-        if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
-            print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
-            throw("mem: infinite loop")
-        }
-        if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
-            print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
-            throw("mem: unordered list")
-        }
-        if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
-            print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
-            throw("mem: overlapping blocks")
-        }
-        for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
-            if *(*byte)(b) != 0 {
-                print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
-                throw("mem: uninitialised memory")
-            }
-        }
-    }
-}
-
-func memRound(p uintptr) uintptr {
-    return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
-}
-
-func initBloc() {
-    bloc = memRound(firstmoduledata.end)
-    blocMax = bloc
-}
-
 func sbrk(n uintptr) unsafe.Pointer {
     // Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
     bl := bloc
@@ -139,57 +19,3 @@ func sbrk(n uintptr) unsafe.Pointer {
     bloc += n
     return unsafe.Pointer(bl)
 }
-
-func sysAllocOS(n uintptr) unsafe.Pointer {
-    lock(&memlock)
-    p := memAlloc(n)
-    memCheck()
-    unlock(&memlock)
-    return p
-}
-
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
-    lock(&memlock)
-    if uintptr(v)+n == bloc {
-        // Address range being freed is at the end of memory,
-        // so record a new lower value for end of memory.
-        // Can't actually shrink address space because segment is shared.
-        memclrNoHeapPointers(v, n)
-        bloc -= n
-    } else {
-        memFree(v, n)
-        memCheck()
-    }
-    unlock(&memlock)
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-    lock(&memlock)
-    var p unsafe.Pointer
-    if uintptr(v) == bloc {
-        // Address hint is the current end of memory,
-        // so try to extend the address space.
-        p = sbrk(n)
-    }
-    if p == nil && v == nil {
-        p = memAlloc(n)
-        memCheck()
-    }
-    unlock(&memlock)
-    return p
-}
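The memFree removed above keeps the free list sorted by address and merges a freed block with contiguous neighbors on either side, which is what keeps long-running alloc/free traffic from fragmenting the list. A simplified, slice-based sketch of just that coalescing step — the real runtime threads memHdr headers through the freed memory itself rather than using a separate slice:

```go
package main

import "fmt"

type block struct{ off, size int }

// insert adds b to an address-sorted free list, merging it with the
// previous and/or next block when they are contiguous.
func insert(list []block, b block) []block {
	i := 0
	for i < len(list) && list[i].off < b.off {
		i++
	}
	list = append(list[:i], append([]block{b}, list[i:]...)...)
	// Merge with the next neighbor if the freed block touches it.
	if i+1 < len(list) && list[i].off+list[i].size == list[i+1].off {
		list[i].size += list[i+1].size
		list = append(list[:i+1], list[i+2:]...)
	}
	// Merge with the previous neighbor if it touches the freed block.
	if i > 0 && list[i-1].off+list[i-1].size == list[i].off {
		list[i-1].size += list[i].size
		list = append(list[:i], list[i+1:]...)
	}
	return list
}

func main() {
	var list []block
	list = insert(list, block{0, 64})
	list = insert(list, block{128, 64})
	list = insert(list, block{64, 64}) // bridges the gap
	fmt.Println(list)                  // [{0 192}] — one coalesced block
}
```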