Commit ea5f9b6

runtime: use offAddr in more parts of the runtime
This change uses the new offAddr type in more parts of the runtime where
we've been implicitly switching from the default address space to a
contiguous view. The purpose of offAddr is to represent addresses in the
contiguous view of the address space, and to make direct computations
between real addresses and offset addresses impossible. This change thus
improves readability in the runtime.

Updates #35788.

Change-Id: I4e1c5fed3ed68aa12f49a42b82eb3f46aba82fc1
Reviewed-on: https://go-review.googlesource.com/c/go/+/230718
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
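For context, the sketch below reconstructs offAddr's shape from how this diff uses it; the canonical definition lives elsewhere in the runtime (mranges.go), and the real arenaBaseOffset is platform-dependent, so the constant here is purely illustrative.

package main

import "fmt"

// Toy stand-in for the runtime's platform-dependent arenaBaseOffset.
const arenaBaseOffset uintptr = 0x8000

// offAddr wraps a raw virtual address. All comparisons go through
// methods that shift into the contiguous "offset" view first, so raw
// and offset addresses can't be mixed up in arithmetic by accident.
type offAddr struct{ a uintptr }

func (l offAddr) add(bytes uintptr) offAddr { return offAddr{l.a + bytes} }

func (l1 offAddr) lessThan(l2 offAddr) bool {
	return l1.a+arenaBaseOffset < l2.a+arenaBaseOffset
}

func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return l1.a+arenaBaseOffset <= l2.a+arenaBaseOffset
}

// addr extracts the raw virtual address back out.
func (l offAddr) addr() uintptr { return l.a }

func main() {
	lo, hi := offAddr{0x1000}, offAddr{0x2000}
	fmt.Printf("%v %#x\n", lo.lessThan(hi), hi.add(0x10).addr()) // true 0x2010
}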
1 parent d69509f commit ea5f9b6

4 files changed, +99 −86

src/runtime/mgcscavenge.go: +7 −7
@@ -452,8 +452,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.inUse.cloneInto(&s.scav.inUse)
 
 	// Pick the new starting address for the scavenger cycle.
-	var startAddr uintptr
-	if s.scav.scavLWM < s.scav.freeHWM {
+	var startAddr offAddr
+	if s.scav.scavLWM.lessThan(s.scav.freeHWM) {
 		// The "free" high watermark exceeds the "scavenged" low watermark,
 		// so there are free scavengable pages in parts of the address space
 		// that the scavenger already searched, the high watermark being the
@@ -467,7 +467,7 @@ func (s *pageAlloc) scavengeStartGen() {
 		// scavenging from where we were.
 		startAddr = s.scav.scavLWM
 	}
-	s.scav.inUse.removeGreaterEqual(startAddr)
+	s.scav.inUse.removeGreaterEqual(startAddr.addr())
 
 	// reservationBytes may be zero if s.inUse.totalBytes is small, or if
 	// scavengeReservationShards is large. This case is fine as the scavenger
@@ -478,8 +478,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
 	s.scav.gen++
 	s.scav.released = 0
-	s.scav.freeHWM = 0
-	s.scav.scavLWM = maxSearchAddr
+	s.scav.freeHWM = minOffAddr
+	s.scav.scavLWM = maxOffAddr
 }
 
 // scavengeReserve reserves a contiguous range of the address space
@@ -698,8 +698,8 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
 	addr := chunkBase(ci) + uintptr(base)*pageSize
 
 	// Update the scavenge low watermark.
-	if addr < s.scav.scavLWM {
-		s.scav.scavLWM = addr
+	if oAddr := (offAddr{addr}); oAddr.lessThan(s.scav.scavLWM) {
+		s.scav.scavLWM = oAddr
 	}
 
 	// Only perform the actual scavenging if we're not in a test.
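One detail worth calling out above: resetting freeHWM to minOffAddr and scavLWM to maxOffAddr makes them sentinels, so the first free or scavenge of the new generation always wins its watermark comparison. A self-contained toy, with the offset arithmetic elided and all values illustrative:

package main

import "fmt"

type offAddr struct{ a uintptr }

func (l1 offAddr) lessThan(l2 offAddr) bool { return l1.a < l2.a } // offset math elided

// Toy sentinels: the extremes of the toy address space.
var (
	minOffAddr = offAddr{0}
	maxOffAddr = offAddr{^uintptr(0)}
)

func main() {
	freeHWM, scavLWM := minOffAddr, maxOffAddr // generation reset
	if p := (offAddr{0x4000}); freeHWM.lessThan(p) {
		freeHWM = p // any freed page raises the high watermark
	}
	if p := (offAddr{0x4000}); p.lessThan(scavLWM) {
		scavLWM = p // any scavenged page lowers the low watermark
	}
	fmt.Printf("%#x %#x\n", freeHWM.a, scavLWM.a) // 0x4000 0x4000
}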

src/runtime/mpagealloc.go: +60 −72
@@ -81,15 +81,14 @@ const (
 	// there should this change.
 	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
 	pallocChunksL1Shift = pallocChunksL2Bits
-
-	// Maximum searchAddr value, which indicates that the heap has no free space.
-	//
-	// We subtract arenaBaseOffset because we want this to represent the maximum
-	// value in the shifted address space, but searchAddr is stored as a regular
-	// memory address. See arenaBaseOffset for details.
-	maxSearchAddr = ^uintptr(0) - arenaBaseOffset
 )
 
+// Maximum searchAddr value, which indicates that the heap has no free space.
+//
+// We alias maxOffAddr just to make it clear that this is the maximum address
+// for the page allocator's search space. See maxOffAddr for details.
+var maxSearchAddr = maxOffAddr
+
 // Global chunk index.
 //
 // Represents an index into the leaf level of the radix tree.
@@ -134,6 +133,18 @@ func (i chunkIdx) l2() uint {
 	}
 }
 
+// offAddrToLevelIndex converts an address in the offset address space
+// to the index into summary[level] containing addr.
+func offAddrToLevelIndex(level int, addr offAddr) int {
+	return int((addr.a + arenaBaseOffset) >> levelShift[level])
+}
+
+// levelIndexToOffAddr converts an index into summary[level] into
+// the corresponding address in the offset address space.
+func levelIndexToOffAddr(level, idx int) offAddr {
+	return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
+}
+
 // addrsToSummaryRange converts base and limit pointers into a range
 // of entries for the given summary level.
 //
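The two helpers above are inverses up to alignment: converting an offset address to a level index and back rounds the address down to that level's block boundary. A toy round trip (the levelShift table and arenaBaseOffset value here are made up for illustration; the runtime's differ):

package main

import "fmt"

const arenaBaseOffset uintptr = 0x8000 // illustrative only

type offAddr struct{ a uintptr }

var levelShift = [...]uint{34, 23, 12} // made-up shifts for the sketch

func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a + arenaBaseOffset) >> levelShift[level])
}

func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
}

func main() {
	a := offAddr{0x123456}
	i := offAddrToLevelIndex(2, a)
	// Prints 0x123000: the address rounded down to a 4 KiB boundary.
	fmt.Printf("%#x\n", levelIndexToOffAddr(2, i).a)
}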
@@ -232,7 +243,7 @@ type pageAlloc struct {
 	// Note that adding in arenaBaseOffset transforms addresses
 	// to a new address space with a linear view of the full address
 	// space on architectures with segmented address spaces.
-	searchAddr uintptr
+	searchAddr offAddr
 
 	// start and end represent the chunk indices
 	// which pageAlloc knows about. It assumes
@@ -271,13 +282,13 @@ type pageAlloc struct {
 		// released is the amount of memory released this generation.
 		released uintptr
 
-		// scavLWM is the lowest address that the scavenger reached this
+		// scavLWM is the lowest (offset) address that the scavenger reached this
 		// scavenge generation.
-		scavLWM uintptr
+		scavLWM offAddr
 
-		// freeHWM is the highest address of a page that was freed to
+		// freeHWM is the highest (offset) address of a page that was freed to
 		// the page allocator this scavenge generation.
-		freeHWM uintptr
+		freeHWM offAddr
 	}
 
 	// mheap_.lock. This level of indirection makes it possible
@@ -319,29 +330,6 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }
 
-// compareSearchAddrTo compares an address against s.searchAddr in a linearized
-// view of the address space on systems with discontinuous process address spaces.
-// This linearized view is the same one generated by chunkIndex and arenaIndex,
-// done by adding arenaBaseOffset.
-//
-// On systems without a discontinuous address space, it's just a normal comparison.
-//
-// Returns < 0 if addr is less than s.searchAddr in the linearized address space.
-// Returns > 0 if addr is greater than s.searchAddr in the linearized address space.
-// Returns 0 if addr and s.searchAddr are equal.
-func (s *pageAlloc) compareSearchAddrTo(addr uintptr) int {
-	// Compare with arenaBaseOffset added because it gives us a linear, contiguous view
-	// of the heap on architectures with signed address spaces.
-	lAddr := addr + arenaBaseOffset
-	lSearchAddr := s.searchAddr + arenaBaseOffset
-	if lAddr < lSearchAddr {
-		return -1
-	} else if lAddr > lSearchAddr {
-		return 1
-	}
-	return 0
-}
-
 // chunkOf returns the chunk at the given chunk index.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
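The deletion above is the heart of this cleanup: every caller of compareSearchAddrTo only ever tested `< 0` or `> 0`, so the three-way comparator collapses into offAddr.lessThan. A sketch of the equivalence (toy arenaBaseOffset, not runtime code):

package main

import "fmt"

const arenaBaseOffset uintptr = 0x8000 // illustrative only

type offAddr struct{ a uintptr }

func (l1 offAddr) lessThan(l2 offAddr) bool {
	return l1.a+arenaBaseOffset < l2.a+arenaBaseOffset
}

// Old style: three-way comparison in the linearized space.
func compareTo(addr, searchAddr uintptr) int {
	lAddr, lSearch := addr+arenaBaseOffset, searchAddr+arenaBaseOffset
	switch {
	case lAddr < lSearch:
		return -1
	case lAddr > lSearch:
		return 1
	}
	return 0
}

func main() {
	base, search := uintptr(0x1000), uintptr(0x2000)
	// "compareTo(base, search) < 0" and the new form agree:
	fmt.Println(compareTo(base, search) < 0, (offAddr{base}).lessThan(offAddr{search})) // true true
}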
@@ -378,10 +366,10 @@ func (s *pageAlloc) grow(base, size uintptr) {
 	s.inUse.add(makeAddrRange(base, limit))
 
 	// A grow operation is a lot like a free operation, so if our
-	// chunk ends up below the (linearized) s.searchAddr, update
-	// s.searchAddr to the new address, just like in free.
-	if s.compareSearchAddrTo(base) < 0 {
-		s.searchAddr = base
+	// chunk ends up below s.searchAddr, update s.searchAddr to the
+	// new address, just like in free.
+	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 
 	// Add entries into chunks, which is sparse, if needed. Then,
@@ -545,7 +533,7 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 // searchAddr returned is invalid and must be ignored.
 //
 // s.mheapLock must be held.
-func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
+func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
 	// Search algorithm.
 	//
 	// This algorithm walks each level l of the radix tree from the root level
@@ -585,13 +573,13 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
 	// firstFree is updated by calling foundFree each time free space in the
 	// heap is discovered.
 	//
-	// At the end of the search, base-arenaBaseOffset is the best new
+	// At the end of the search, base.addr() is the best new
 	// searchAddr we could deduce in this search.
 	firstFree := struct {
-		base, bound uintptr
+		base, bound offAddr
 	}{
-		base:  0,
-		bound: (1<<heapAddrBits - 1),
+		base:  minOffAddr,
+		bound: maxOffAddr,
 	}
 	// foundFree takes the given address range [addr, addr+size) and
 	// updates firstFree if it is a narrower range. The input range must
@@ -602,17 +590,17 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, uintptr) {
 	// pages on the root level and narrow that down if we descend into
 	// that summary. But as soon as we need to iterate beyond that summary
 	// in a level to find a large enough range, we'll stop narrowing.
-	foundFree := func(addr, size uintptr) {
-		if firstFree.base <= addr && addr+size-1 <= firstFree.bound {
+	foundFree := func(addr offAddr, size uintptr) {
+		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
 			// This range fits within the current firstFree window, so narrow
 			// down the firstFree window to the base and bound of this range.
 			firstFree.base = addr
-			firstFree.bound = addr + size - 1
-		} else if !(addr+size-1 < firstFree.base || addr > firstFree.bound) {
+			firstFree.bound = addr.add(size - 1)
+		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
 			// This range only partially overlaps with the firstFree range,
 			// so throw.
-			print("runtime: addr = ", hex(addr), ", size = ", size, "\n")
-			print("runtime: base = ", hex(firstFree.base), ", bound = ", hex(firstFree.bound), "\n")
+			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
+			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
 			throw("range partially overlaps")
 		}
 	}
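To see how foundFree narrows the firstFree window, here is a stripped-down toy run (offset arithmetic and the partial-overlap throw are elided; values are illustrative):

package main

import "fmt"

type offAddr struct{ a uintptr }

func (l offAddr) add(b uintptr) offAddr      { return offAddr{l.a + b} }
func (l1 offAddr) lessEqual(l2 offAddr) bool { return l1.a <= l2.a } // offset math elided

func main() {
	firstFree := struct{ base, bound offAddr }{offAddr{0}, offAddr{^uintptr(0)}}
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			firstFree.base, firstFree.bound = addr, addr.add(size-1) // narrow
		}
		// A partially overlapping range would throw in the runtime; elided here.
	}
	foundFree(offAddr{0x10000}, 0x4000) // window narrows to [0x10000, 0x13fff]
	foundFree(offAddr{0x11000}, 0x1000) // narrows further to [0x11000, 0x11fff]
	fmt.Printf("[%#x, %#x]\n", firstFree.base.a, firstFree.bound.a)
}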
@@ -642,7 +630,7 @@ nextLevel:
 		// searchAddr on the previous level or we're on the root level, in which
 		// case the searchAddr should be the same as i after levelShift.
 		j0 := 0
-		if searchIdx := int((s.searchAddr + arenaBaseOffset) >> levelShift[l]); searchIdx&^(entriesPerBlock-1) == i {
+		if searchIdx := offAddrToLevelIndex(l, s.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
 			j0 = searchIdx & (entriesPerBlock - 1)
 		}
 
@@ -668,7 +656,7 @@
 
 			// We've encountered a non-zero summary which means
 			// free memory, so update firstFree.
-			foundFree(uintptr((i+j)<<levelShift[l]), (uintptr(1)<<logMaxPages)*pageSize)
+			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
 
 			s := sum.start()
 			if size+s >= uint(npages) {
@@ -706,8 +694,8 @@ nextLevel:
 		if size >= uint(npages) {
 			// We found a sufficiently large run of free pages straddling
 			// some boundary, so compute the address and return it.
-			addr := uintptr(i<<levelShift[l]) - arenaBaseOffset + uintptr(base)*pageSize
-			return addr, firstFree.base - arenaBaseOffset
+			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
+			return addr, firstFree.base
 		}
 		if l == 0 {
 			// We're at level zero, so that means we've exhausted our search.
@@ -719,7 +707,7 @@ nextLevel:
 		// lied to us. In either case, dump some useful state and throw.
 		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
 		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
-		print("runtime: s.searchAddr = ", hex(s.searchAddr), ", i = ", i, "\n")
+		print("runtime: s.searchAddr = ", hex(s.searchAddr.addr()), ", i = ", i, "\n")
 		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
 		for j := 0; j < len(entries); j++ {
 			sum := entries[j]
@@ -752,8 +740,8 @@ nextLevel:
 	// Since we actually searched the chunk, we may have
 	// found an even narrower free window.
 	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
-	foundFree(searchAddr+arenaBaseOffset, chunkBase(ci+1)-searchAddr)
-	return addr, firstFree.base - arenaBaseOffset
+	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
+	return addr, firstFree.base
 }
 
 // alloc allocates npages worth of memory from the page heap, returning the base
@@ -767,25 +755,25 @@ nextLevel:
 func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
-	if chunkIndex(s.searchAddr) >= s.end {
+	if chunkIndex(s.searchAddr.addr()) >= s.end {
 		return 0, 0
 	}
 
 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
-	searchAddr := uintptr(0)
-	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
+	searchAddr := minOffAddr
+	if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
-		i := chunkIndex(s.searchAddr)
+		i := chunkIndex(s.searchAddr.addr())
 		if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
-			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr))
+			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
 			if j == ^uint(0) {
 				print("runtime: max = ", max, ", npages = ", npages, "\n")
-				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr), ", s.searchAddr = ", hex(s.searchAddr), "\n")
+				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
 				throw("bad summary data")
 			}
 			addr = chunkBase(i) + uintptr(j)*pageSize
-			searchAddr = chunkBase(i) + uintptr(searchIdx)*pageSize
+			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
 			goto Found
 		}
 	}
@@ -807,10 +795,10 @@ Found:
 	// Go ahead and actually mark the bits now that we have an address.
 	scav = s.allocRange(addr, npages)
 
-	// If we found a higher (linearized) searchAddr, we know that all the
-	// heap memory before that searchAddr in a linear address space is
+	// If we found a higher searchAddr, we know that all the
+	// heap memory before that searchAddr in an offset address space is
 	// allocated, so bump s.searchAddr up to the new one.
-	if s.compareSearchAddrTo(searchAddr) > 0 {
+	if s.searchAddr.lessThan(searchAddr) {
 		s.searchAddr = searchAddr
 	}
 	return addr, scav
@@ -820,14 +808,14 @@ Found:
 //
 // s.mheapLock must be held.
 func (s *pageAlloc) free(base, npages uintptr) {
-	// If we're freeing pages below the (linearized) s.searchAddr, update searchAddr.
-	if s.compareSearchAddrTo(base) < 0 {
-		s.searchAddr = base
+	// If we're freeing pages below s.searchAddr, update searchAddr.
+	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 	// Update the free high watermark for the scavenger.
 	limit := base + npages*pageSize - 1
-	if s.scav.freeHWM < limit {
-		s.scav.freeHWM = limit
+	if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
+		s.scav.freeHWM = offLimit
 	}
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly

src/runtime/mpagecache.go: +6 −6
@@ -91,8 +91,8 @@ func (c *pageCache) flush(s *pageAlloc) {
 	}
 	// Since this is a lot like a free, we need to make sure
 	// we update the searchAddr just like free does.
-	if s.compareSearchAddrTo(c.base) < 0 {
-		s.searchAddr = c.base
+	if b := (offAddr{c.base}); b.lessThan(s.searchAddr) {
+		s.searchAddr = b
 	}
 	s.update(c.base, pageCachePages, false, false)
 	*c = pageCache{}
@@ -106,15 +106,15 @@ func (c *pageCache) flush(s *pageAlloc) {
 func (s *pageAlloc) allocToCache() pageCache {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
-	if chunkIndex(s.searchAddr) >= s.end {
+	if chunkIndex(s.searchAddr.addr()) >= s.end {
 		return pageCache{}
 	}
 	c := pageCache{}
-	ci := chunkIndex(s.searchAddr) // chunk index
+	ci := chunkIndex(s.searchAddr.addr()) // chunk index
 	if s.summary[len(s.summary)-1][ci] != 0 {
 		// Fast path: there's free pages at or near the searchAddr address.
 		chunk := s.chunkOf(ci)
-		j, _ := chunk.find(1, chunkPageIndex(s.searchAddr))
+		j, _ := chunk.find(1, chunkPageIndex(s.searchAddr.addr()))
 		if j == ^uint(0) {
 			throw("bad summary data")
 		}
@@ -156,6 +156,6 @@ func (s *pageAlloc) allocToCache() pageCache {
 	// However, s.searchAddr is not allowed to point into unmapped heap memory
 	// unless it is maxSearchAddr, so make it the last page as opposed to
 	// the page after.
-	s.searchAddr = c.base + pageSize*(pageCachePages-1)
+	s.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
 }
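On the final hunk's "last page as opposed to the page after" point: the cache spans pageCachePages pages, and pointing searchAddr one past the end could land in unmapped memory. A quick arithmetic check (pageSize and pageCachePages use the runtime's usual values, stated here as assumptions):

package main

import "fmt"

const (
	pageSize       = 8192 // the runtime's usual page size (assumption for the sketch)
	pageCachePages = 64   // pages per page cache (one bit each in a uint64 bitmap)
)

func main() {
	base := uintptr(0x100000)
	// The cache spans [base, base+pageSize*pageCachePages). Setting
	// searchAddr to the *last* page keeps it inside mapped memory:
	searchAddr := base + pageSize*(pageCachePages-1)
	fmt.Printf("%#x (last page), not %#x (one past the end)\n",
		searchAddr, base+pageSize*pageCachePages)
}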
