Commit d69509f

runtime: make addrRange[s] operate on offset addresses
Currently addrRange and addrRanges operate on real addresses. That is,
the addresses they manipulate don't include arenaBaseOffset. When added
to an address, arenaBaseOffset makes the address space appear contiguous
on platforms where the address space is segmented.

While this is generally OK because even those platforms which have a
segmented address space usually don't give addresses in a different
segment, today it causes a mismatch between the scavenger and the rest
of the page allocator. The scavenger scavenges from the highest
addresses first, but only via real address, whereas the page allocator
allocates memory in offset address order.

So this change makes addrRange and addrRanges, i.e. what the scavenger
operates on, use offset addresses. However, lots of the page allocator
relies on an addrRange containing real addresses.

To make this transition less error-prone, this change introduces a new
type, offAddr, whose purpose is to make offset addresses a distinct
type, so any attempt to trivially mix real and offset addresses will
trigger a compilation error.

This change doesn't attempt to use offAddr in all of the runtime; a
follow-up change will look for and catch remaining uses of an offset
address which doesn't use the type.

Updates #35788.

Change-Id: I991d891ac8ace8339ca180daafdf6b261a4d43d1
Reviewed-on: https://go-review.googlesource.com/c/go/+/230717
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent dba1205 commit d69509f
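
The core idea is easiest to see in isolation. The following is a minimal standalone sketch (not runtime code; all names here are illustrative) of how wrapping an offset address in a struct makes accidentally mixing real and offset addresses a compile-time error rather than a silent logic bug:

package main

// offsetAddr mimics the idea behind offAddr: a struct wrapper gives
// offset addresses a distinct type identity, so they cannot be passed
// where a plain uintptr (a real address) is expected.
type offsetAddr struct {
	a uintptr
}

// addr unwraps back to a raw uintptr, making every crossing between
// the real and offset views explicit and greppable.
func (o offsetAddr) addr() uintptr {
	return o.a
}

// takesReal stands in for any function that wants a real address.
func takesReal(addr uintptr) {}

func main() {
	o := offsetAddr{0x1000}
	// takesReal(o)     // compile error: cannot use o (offsetAddr) as uintptr
	takesReal(o.addr()) // OK: the conversion is explicit
}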

File tree

5 files changed: +103 -37

src/runtime/export_test.go (+2 -2)
src/runtime/mgcscavenge.go (+11 -11)
src/runtime/mpagealloc.go (+1 -1)
src/runtime/mpagealloc_64bit.go (+6 -6)
src/runtime/mranges.go (+83 -17)

src/runtime/export_test.go

+2 -2

@@ -748,8 +748,8 @@ func (p *PageAlloc) InUse() []AddrRange {
 	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
 	for _, r := range p.inUse.ranges {
 		ranges = append(ranges, AddrRange{
-			Base:  r.base,
-			Limit: r.limit,
+			Base:  r.base.addr(),
+			Limit: r.limit.addr(),
 		})
 	}
 	return ranges

src/runtime/mgcscavenge.go

+11 -11

@@ -508,11 +508,11 @@ func (s *pageAlloc) scavengeReserve() (addrRange, uint32) {
 	// palloc chunk because that's the unit of operation for
 	// the scavenger, so align down, potentially extending
 	// the range.
-	newBase := alignDown(r.base, pallocChunkBytes)
+	newBase := alignDown(r.base.addr(), pallocChunkBytes)
 
 	// Remove from inUse however much extra we just pulled out.
 	s.scav.inUse.removeGreaterEqual(newBase)
-	r.base = newBase
+	r.base = offAddr{newBase}
 	return r, s.scav.gen
 }
 
@@ -528,7 +528,7 @@ func (s *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
 	if r.size() == 0 || gen != s.scav.gen {
 		return
 	}
-	if r.base%pallocChunkBytes != 0 {
+	if r.base.addr()%pallocChunkBytes != 0 {
 		throw("unreserving unaligned region")
 	}
 	s.scav.inUse.add(r)
@@ -559,7 +559,7 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
 		return 0, work
 	}
 	// Check the prerequisites of work.
-	if work.base%pallocChunkBytes != 0 {
+	if work.base.addr()%pallocChunkBytes != 0 {
 		throw("scavengeOne called with unaligned work region")
 	}
 	// Calculate the maximum number of pages to scavenge.
@@ -598,9 +598,9 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
 	// Fast path: check the chunk containing the top-most address in work,
 	// starting at that address's page index in the chunk.
 	//
-	// Note that work.limit is exclusive, so get the chunk we care about
+	// Note that work.end() is exclusive, so get the chunk we care about
 	// by subtracting 1.
-	maxAddr := work.limit - 1
+	maxAddr := work.limit.addr() - 1
 	maxChunk := chunkIndex(maxAddr)
 	if s.summary[len(s.summary)-1][maxChunk].max() >= uint(minPages) {
 		// We only bother looking for a candidate if there at least
@@ -609,12 +609,12 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
 
 		// If we found something, scavenge it and return!
 		if npages != 0 {
-			work.limit = s.scavengeRangeLocked(maxChunk, base, npages)
+			work.limit = offAddr{s.scavengeRangeLocked(maxChunk, base, npages)}
 			return uintptr(npages) * pageSize, work
 		}
 	}
 	// Update the limit to reflect the fact that we checked maxChunk already.
-	work.limit = chunkBase(maxChunk)
+	work.limit = offAddr{chunkBase(maxChunk)}
 
 	// findCandidate finds the next scavenge candidate in work optimistically.
 	//
@@ -623,7 +623,7 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
 	// The heap need not be locked.
 	findCandidate := func(work addrRange) (chunkIdx, bool) {
 		// Iterate over this work's chunks.
-		for i := chunkIndex(work.limit - 1); i >= chunkIndex(work.base); i-- {
+		for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
 			// If this chunk is totally in-use or has no unscavenged pages, don't bother
 			// doing a more sophisticated check.
 			//
@@ -673,12 +673,12 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
 		chunk := s.chunkOf(candidateChunkIdx)
 		base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
 		if npages > 0 {
-			work.limit = s.scavengeRangeLocked(candidateChunkIdx, base, npages)
+			work.limit = offAddr{s.scavengeRangeLocked(candidateChunkIdx, base, npages)}
 			return uintptr(npages) * pageSize, work
 		}
 
 		// We were fooled, so let's continue from where we left off.
-		work.limit = chunkBase(candidateChunkIdx)
+		work.limit = offAddr{chunkBase(candidateChunkIdx)}
 	}
 	return 0, work
 }
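
For reference, the alignment invariant that scavengeReserve establishes above, and that scavengeUnreserve and scavengeOne enforce with a throw, is plain power-of-two rounding. A minimal standalone sketch (the runtime's alignDown helper has this shape; the chunk size shown is the 512-page, 8 KiB-page value and is illustrative only):

package main

import "fmt"

// alignDown rounds n down to a multiple of a, where a is a power of two.
func alignDown(n, a uintptr) uintptr {
	return n &^ (a - 1)
}

func main() {
	const pallocChunkBytes = 512 * 8192 // 4 MiB; illustrative value

	base := uintptr(0x1042_1377)
	newBase := alignDown(base, pallocChunkBytes)

	// After aligning down, the base satisfies the chunk-alignment
	// check that scavengeUnreserve and scavengeOne perform above.
	fmt.Println(newBase%pallocChunkBytes == 0) // true
}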

src/runtime/mpagealloc.go

+1 -1

@@ -375,7 +375,7 @@ func (s *pageAlloc) grow(base, size uintptr) {
 	// Note that [base, limit) will never overlap with any existing
 	// range inUse because grow only ever adds never-used memory
 	// regions to the page allocator.
-	s.inUse.add(addrRange{base, limit})
+	s.inUse.add(makeAddrRange(base, limit))
 
 	// A grow operation is a lot like a free operation, so if our
 	// chunk ends up below the (linearized) s.searchAddr, update

src/runtime/mpagealloc_64bit.go

+6 -6

@@ -106,7 +106,7 @@ func (s *pageAlloc) sysGrow(base, limit uintptr) {
 	// of summary indices which must be mapped to support those addresses
 	// in the summary range.
 	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
-		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base, r.limit)
+		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
 		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
 	}
 
@@ -118,8 +118,8 @@ func (s *pageAlloc) sysGrow(base, limit uintptr) {
 		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
 		base := unsafe.Pointer(&s.summary[level][0])
 		return addrRange{
-			uintptr(add(base, baseOffset)),
-			uintptr(add(base, limitOffset)),
+			offAddr{uintptr(add(base, baseOffset))},
+			offAddr{uintptr(add(base, limitOffset))},
 		}
 	}
 
@@ -145,7 +145,7 @@ func (s *pageAlloc) sysGrow(base, limit uintptr) {
 	// Walk up the radix tree and map summaries in as needed.
 	for l := range s.summary {
 		// Figure out what part of the summary array this new address space needs.
-		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, addrRange{base, limit})
+		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))
 
 		// Update the summary slices with a new upper-bound. This ensures
 		// we get tight bounds checks on at least the top bound.
@@ -174,7 +174,7 @@ func (s *pageAlloc) sysGrow(base, limit uintptr) {
 		}
 
 		// Map and commit need.
-		sysMap(unsafe.Pointer(need.base), need.size(), s.sysStat)
-		sysUsed(unsafe.Pointer(need.base), need.size())
+		sysMap(unsafe.Pointer(need.base.addr()), need.size(), s.sysStat)
+		sysUsed(unsafe.Pointer(need.base.addr()), need.size())
 	}
 }
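
The summaryRangeToSumAddrRange closure above turns summary indices into a page-aligned byte range inside the level's summary array, which is what sysMap and sysUsed ultimately consume. A standalone sketch of that index-to-byte arithmetic (the pallocSumBytes and physPageSize values below are illustrative, not the runtime's on any particular platform):

package main

import "fmt"

func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }
func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }

func main() {
	const (
		pallocSumBytes = 8    // one packed summary value; illustrative
		physPageSize   = 4096 // illustrative
	)
	sumIdxBase, sumIdxLimit := uintptr(100), uintptr(1000)

	// Round the needed summaries' byte offsets out to whole physical
	// pages, since mapping granularity is the physical page.
	baseOffset := alignDown(sumIdxBase*pallocSumBytes, physPageSize)
	limitOffset := alignUp(sumIdxLimit*pallocSumBytes, physPageSize)

	fmt.Println(baseOffset, limitOffset) // 0 8192
}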

src/runtime/mranges.go

+83 -17

@@ -15,42 +15,107 @@ import (
 )
 
 // addrRange represents a region of address space.
+//
+// An addrRange must never span a gap in the address space.
 type addrRange struct {
 	// base and limit together represent the region of address space
 	// [base, limit). That is, base is inclusive, limit is exclusive.
-	base, limit uintptr
+	// These are address over an offset view of the address space on
+	// platforms with a segmented address space, that is, on platforms
+	// where arenaBaseOffset != 0.
+	base, limit offAddr
+}
+
+// makeAddrRange creates a new address range from two virtual addresses.
+//
+// Throws if the base and limit are not in the same memory segment.
+func makeAddrRange(base, limit uintptr) addrRange {
+	r := addrRange{offAddr{base}, offAddr{limit}}
+	if (base+arenaBaseOffset >= arenaBaseOffset) != (limit+arenaBaseOffset >= arenaBaseOffset) {
+		throw("addr range base and limit are not in the same memory segment")
+	}
+	return r
 }
 
 // size returns the size of the range represented in bytes.
 func (a addrRange) size() uintptr {
-	if a.limit <= a.base {
+	if !a.base.lessThan(a.limit) {
 		return 0
 	}
-	return a.limit - a.base
+	// Subtraction is safe because limit and base must be in the same
+	// segment of the address space.
+	return a.limit.diff(a.base)
 }
 
 // contains returns whether or not the range contains a given address.
 func (a addrRange) contains(addr uintptr) bool {
-	return addr >= a.base && addr < a.limit
+	return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
 }
 
 // subtract takes the addrRange toPrune and cuts out any overlap with
 // from, then returns the new range. subtract assumes that a and b
 // either don't overlap at all, only overlap on one side, or are equal.
 // If b is strictly contained in a, thus forcing a split, it will throw.
 func (a addrRange) subtract(b addrRange) addrRange {
-	if a.base >= b.base && a.limit <= b.limit {
+	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
 		return addrRange{}
-	} else if a.base < b.base && a.limit > b.limit {
+	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
 		throw("bad prune")
-	} else if a.limit > b.limit && a.base < b.limit {
+	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
 		a.base = b.limit
-	} else if a.base < b.base && a.limit > b.base {
+	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
 		a.limit = b.base
 	}
 	return a
 }
 
+// offAddr represents an address in a contiguous view
+// of the address space on systems where the address space is
+// segmented. On other systems, it's just a normal address.
+type offAddr struct {
+	a uintptr
+}
+
+// add adds a uintptr offset to the offAddr.
+func (l offAddr) add(bytes uintptr) offAddr {
+	return offAddr{a: l.a + bytes}
+}
+
+// sub subtracts a uintptr offset from the offAddr.
+func (l offAddr) sub(bytes uintptr) offAddr {
+	return offAddr{a: l.a - bytes}
+}
+
+// diff returns the amount of bytes in between the
+// two offAddrs.
+func (l1 offAddr) diff(l2 offAddr) uintptr {
+	return l1.a - l2.a
+}
+
+// lessThan returns true if l1 is less than l2 in the offset
+// address space.
+func (l1 offAddr) lessThan(l2 offAddr) bool {
+	return (l1.a + arenaBaseOffset) < (l2.a + arenaBaseOffset)
+}
+
+// lessEqual returns true if l1 is less than or equal to l2 in
+// the offset address space.
+func (l1 offAddr) lessEqual(l2 offAddr) bool {
+	return (l1.a + arenaBaseOffset) <= (l2.a + arenaBaseOffset)
+}
+
+// equal returns true if the two offAddr values are equal.
+func (l1 offAddr) equal(l2 offAddr) bool {
+	// No need to compare in the offset space, it
+	// means the same thing.
+	return l1 == l2
+}
+
+// addr returns the virtual address for this offset address.
+func (l offAddr) addr() uintptr {
+	return l.a
+}
+
 // addrRanges is a data structure holding a collection of ranges of
 // address space.
 //
@@ -84,13 +149,14 @@ func (a *addrRanges) init(sysStat *uint64) {
 
 // findSucc returns the first index in a such that base is
 // less than the base of the addrRange at that index.
-func (a *addrRanges) findSucc(base uintptr) int {
+func (a *addrRanges) findSucc(addr uintptr) int {
 	// TODO(mknyszek): Consider a binary search for large arrays.
 	// While iterating over these ranges is potentially expensive,
 	// the expected number of ranges is small, ideally just 1,
 	// since Go heaps are usually mostly contiguous.
+	base := offAddr{addr}
 	for i := range a.ranges {
-		if base < a.ranges[i].base {
+		if base.lessThan(a.ranges[i].base) {
 			return i
 		}
 	}
@@ -121,9 +187,9 @@ func (a *addrRanges) add(r addrRange) {
 
 	// Because we assume r is not currently represented in a,
 	// findSucc gives us our insertion index.
-	i := a.findSucc(r.base)
-	coalescesDown := i > 0 && a.ranges[i-1].limit == r.base
-	coalescesUp := i < len(a.ranges) && r.limit == a.ranges[i].base
+	i := a.findSucc(r.base.addr())
+	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
+	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
 	if coalescesUp && coalescesDown {
 		// We have neighbors and they both border us.
 		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
@@ -176,10 +242,10 @@ func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
 	r := a.ranges[len(a.ranges)-1]
 	size := r.size()
 	if size > nBytes {
-		newLimit := r.limit - nBytes
-		a.ranges[len(a.ranges)-1].limit = newLimit
+		newEnd := r.limit.sub(nBytes)
+		a.ranges[len(a.ranges)-1].limit = newEnd
 		a.totalBytes -= nBytes
-		return addrRange{newLimit, r.limit}
+		return addrRange{newEnd, r.limit}
 	}
 	a.ranges = a.ranges[:len(a.ranges)-1]
 	a.totalBytes -= size
@@ -202,7 +268,7 @@ func (a *addrRanges) removeGreaterEqual(addr uintptr) {
 	}
 	if r := a.ranges[pivot-1]; r.contains(addr) {
 		removed += r.size()
-		r = r.subtract(addrRange{addr, maxSearchAddr})
+		r = r.subtract(makeAddrRange(addr, maxSearchAddr))
 		if r.size() == 0 {
 			pivot--
 		} else {
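
The comparisons above are the heart of the change: ordering must be computed in the offset space, because on a segmented address space a raw uintptr comparison and an offset comparison can disagree. A standalone, 64-bit-only sketch (the arenaBaseOffset value of 1<<47 matches linux/amd64 at the time of this change; the addresses are illustrative):

package main

import "fmt"

// arenaBaseOffset linearizes sign-extended 48-bit addresses: adding
// 1<<47 maps [-2^47, 2^47) onto the contiguous range [0, 2^48).
const arenaBaseOffset uintptr = 1 << 47

type offAddr struct{ a uintptr }

// lessThan compares in the offset space, as in mranges.go above.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return (l1.a + arenaBaseOffset) < (l2.a + arenaBaseOffset)
}

func main() {
	hi := offAddr{0xffff_8000_0000_0000} // lowest address of the upper segment
	lo := offAddr{0x0000_0000_0000_1000} // a low address

	// The raw and offset orderings disagree: linearized, the upper
	// segment sorts below the lower one. This is exactly the
	// scavenger/allocator mismatch described in the commit message.
	fmt.Println(hi.a < lo.a)     // false
	fmt.Println(hi.lessThan(lo)) // true
}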
