Commit dba1205

runtime: avoid re-scanning scavenged and untouched memory
Currently the scavenger will reset to the top of the heap every GC. This means if it scavenges a bunch of memory which doesn't get used again, it's going to keep re-scanning that memory on subsequent cycles. This problem is especially bad when it comes to heap spikes: suppose an application's heap spikes to 2x its steady-state size. The scavenger will run over the top half of that heap even if the heap shrinks, for the rest of the application's lifetime.

To fix this, we maintain two numbers: a "free" high watermark, which represents the highest address freed to the page allocator in that cycle, and a "scavenged" low watermark, which represents how low of an address the scavenger got to when scavenging.

If the "free" watermark exceeds the "scavenged" watermark, then we pick the "free" watermark as the new "top of the heap" for the scavenger when starting the next scavenger cycle. Otherwise, we have the scavenger pick up where it left off.

With this mechanism, we only ever re-scan scavenged memory if a random page gets freed very high up in the heap address space while most of the action is happening in the lower parts. This case should be exceedingly unlikely because the page reclaimer walks over the heap from low address to high addresses, and we use a first-fit address-ordered allocation policy.

Updates #35788.

Change-Id: Id335603b526ce3a0eb79ef286d1a4e876abc9cab
Reviewed-on: https://go-review.googlesource.com/c/go/+/218997
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
Reviewed-by: David Chase <[email protected]>
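The interplay of the two watermarks is easier to follow outside the runtime. The sketch below is a minimal, self-contained model of the bookkeeping this change adds: scavLWM, freeHWM, and maxSearchAddr mirror names from the diff, but the scavState type, the noteFree/noteScavenge helpers, and the addresses in main are invented here for illustration and are not runtime code.

// watermarks.go: a minimal sketch of the watermark bookkeeping the
// commit message describes. Only scavLWM, freeHWM, and maxSearchAddr
// mirror runtime names; everything else is illustrative.
package main

import "fmt"

// maxSearchAddr stands in for the runtime constant of the same name:
// an address higher than any the scavenger will record.
const maxSearchAddr = ^uintptr(0)

type scavState struct {
	scavLWM uintptr // lowest address scavenged this generation
	freeHWM uintptr // highest address freed this generation
}

// noteFree records a free of npages pages at base, as pageAlloc.free does.
func (s *scavState) noteFree(base, npages, pageSize uintptr) {
	limit := base + npages*pageSize - 1
	if s.freeHWM < limit {
		s.freeHWM = limit
	}
}

// noteScavenge records that the scavenger reached addr, as
// scavengeRangeLocked does.
func (s *scavState) noteScavenge(addr uintptr) {
	if addr < s.scavLWM {
		s.scavLWM = addr
	}
}

// startGen picks where the next cycle starts and resets the watermarks,
// mirroring scavengeStartGen.
func (s *scavState) startGen() (startAddr uintptr) {
	if s.scavLWM < s.freeHWM {
		// Memory was freed above where we already scavenged; restart
		// high enough to see it.
		startAddr = s.freeHWM
	} else {
		// Nothing was freed in the range we covered; resume where we
		// left off instead of re-scanning from the top of the heap.
		startAddr = s.scavLWM
	}
	s.freeHWM = 0
	s.scavLWM = maxSearchAddr
	return startAddr
}

func main() {
	const pageSize = 8192
	s := scavState{scavLWM: maxSearchAddr}

	s.noteScavenge(0x300000)          // scavenger worked down to 0x300000
	s.noteFree(0x100000, 4, pageSize) // free below the scavenged range
	fmt.Printf("next start: %#x\n", s.startGen()) // resumes at 0x300000

	s.noteScavenge(0x200000)
	s.noteFree(0x400000, 1, pageSize) // free above the scavenged range
	fmt.Printf("next start: %#x\n", s.startGen()) // restarts at the free HWM
}

Note the ordering in startGen: the comparison happens before both watermarks are reset, matching scavengeStartGen below, where startAddr is chosen before s.scav.freeHWM and s.scav.scavLWM are reinitialized.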
1 parent 55ec518 commit dba1205

File tree

2 files changed (+42, -1)


src/runtime/mgcscavenge.go (+26)
@@ -450,6 +450,25 @@ func (s *pageAlloc) scavengeStartGen() {
 		printScavTrace(s.scav.gen, s.scav.released, false)
 	}
 	s.inUse.cloneInto(&s.scav.inUse)
+
+	// Pick the new starting address for the scavenger cycle.
+	var startAddr uintptr
+	if s.scav.scavLWM < s.scav.freeHWM {
+		// The "free" high watermark exceeds the "scavenged" low watermark,
+		// so there are free scavengable pages in parts of the address space
+		// that the scavenger already searched, the high watermark being the
+		// highest one. Pick that as our new starting point to ensure we
+		// see those pages.
+		startAddr = s.scav.freeHWM
+	} else {
+		// The "free" high watermark does not exceed the "scavenged" low
+		// watermark. This means the allocator didn't free any memory in
+		// the range we scavenged last cycle, so we might as well continue
+		// scavenging from where we were.
+		startAddr = s.scav.scavLWM
+	}
+	s.scav.inUse.removeGreaterEqual(startAddr)
+
 	// reservationBytes may be zero if s.inUse.totalBytes is small, or if
 	// scavengeReservationShards is large. This case is fine as the scavenger
 	// will simply be turned off, but it does mean that scavengeReservationShards,

@@ -459,6 +478,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
 	s.scav.gen++
 	s.scav.released = 0
+	s.scav.freeHWM = 0
+	s.scav.scavLWM = maxSearchAddr
 }

 // scavengeReserve reserves a contiguous range of the address space

@@ -676,6 +697,11 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
 	// Compute the full address for the start of the range.
 	addr := chunkBase(ci) + uintptr(base)*pageSize

+	// Update the scavenge low watermark.
+	if addr < s.scav.scavLWM {
+		s.scav.scavLWM = addr
+	}
+
 	// Only perform the actual scavenging if we're not in a test.
 	// It's dangerous to do so otherwise.
 	if s.test {
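The removeGreaterEqual call in the first hunk trims the scavenger's snapshot of in-use address ranges so the next pass begins at startAddr and works downward. Below is a rough model of that operation, assuming sorted, non-overlapping [base, limit) ranges; the runtime's addrRanges type is more involved, and this sketch only illustrates the intended semantics.

// ranges.go: an illustrative model of removeGreaterEqual over a sorted,
// non-overlapping set of [base, limit) address ranges. Not runtime code.
package main

import "fmt"

type addrRange struct{ base, limit uintptr } // [base, limit)

// removeGreaterEqual drops everything at or above addr: ranges entirely
// above it are removed, and a range straddling it is truncated.
func removeGreaterEqual(ranges []addrRange, addr uintptr) []addrRange {
	out := ranges[:0]
	for _, r := range ranges {
		switch {
		case r.limit <= addr:
			out = append(out, r) // entirely below addr: keep
		case r.base < addr:
			out = append(out, addrRange{r.base, addr}) // straddles addr: truncate
		}
		// entirely at or above addr: drop
	}
	return out
}

func main() {
	ranges := []addrRange{{0x1000, 0x3000}, {0x4000, 0x8000}, {0x9000, 0xa000}}
	fmt.Println(removeGreaterEqual(ranges, 0x5000))
	// [{4096 12288} {16384 20480}]: the middle range is truncated at
	// 0x5000 and the last one is dropped.
}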

src/runtime/mpagealloc.go (+16, -1)
@@ -270,6 +270,14 @@ type pageAlloc struct {
 
 		// released is the amount of memory released this generation.
 		released uintptr
+
+		// scavLWM is the lowest address that the scavenger reached this
+		// scavenge generation.
+		scavLWM uintptr
+
+		// freeHWM is the highest address of a page that was freed to
+		// the page allocator this scavenge generation.
+		freeHWM uintptr
 	}
 
 	// mheap_.lock. This level of indirection makes it possible

@@ -306,6 +314,9 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 
 	// Set the mheapLock.
 	s.mheapLock = mheapLock
+
+	// Initialize scavenge tracking state.
+	s.scav.scavLWM = maxSearchAddr
 }

 // compareSearchAddrTo compares an address against s.searchAddr in a linearized

@@ -813,14 +824,18 @@ func (s *pageAlloc) free(base, npages uintptr) {
 	if s.compareSearchAddrTo(base) < 0 {
 		s.searchAddr = base
 	}
+	// Update the free high watermark for the scavenger.
+	limit := base + npages*pageSize - 1
+	if s.scav.freeHWM < limit {
+		s.scav.freeHWM = limit
+	}
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly
 		// where it is, so mark it directly.
 		i := chunkIndex(base)
 		s.chunkOf(i).free1(chunkPageIndex(base))
 	} else {
 		// Slow path: we're clearing more bits so we may need to iterate.
-		limit := base + npages*pageSize - 1
 		sc, ec := chunkIndex(base), chunkIndex(limit)
 		si, ei := chunkPageIndex(base), chunkPageIndex(limit)
 