@@ -405,15 +405,14 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
 	}

 	lockHeap()
-	top := chunkIndex(s.scavAddr)
-	if top < s.start {
+	ci := chunkIndex(s.scavAddr)
+	if ci < s.start {
 		unlockHeap()
 		return 0
 	}

 	// Check the chunk containing the scav addr, starting at the addr
 	// and see if there are any free and unscavenged pages.
-	ci := chunkIndex(s.scavAddr)
 	if s.summary[len(s.summary)-1][ci].max() >= uint(minPages) {
 		// We only bother looking for a candidate if there are at least
 		// minPages free pages at all. It's important that we only
@@ -429,59 +428,97 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
 			return uintptr(npages) * pageSize
 		}
 	}
-	unlockHeap()

-	// Slow path: iterate optimistically looking for any free and unscavenged page.
-	// If we think we see something, stop and verify it!
-	for i := top - 1; i >= s.start; i-- {
-		// If this chunk is totally in-use or has no unscavenged pages, don't bother
-		// doing a more sophisticated check.
-		//
-		// Note we're accessing the summary and the chunks without a lock, but
-		// that's fine. We're being optimistic anyway.
-
-		// Check if there are enough free pages at all. It's imperative that we
-		// check this before the chunk itself so that we quickly skip over
-		// unused parts of the address space, which may have a cleared bitmap
-		// but a zero'd summary which indicates not to allocate from there.
-		if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
-			continue
+	// getInUseRange returns the highest range in the
+	// intersection of [0, addr] and s.inUse.
+	//
+	// s.mheapLock must be held.
+	getInUseRange := func(addr uintptr) addrRange {
+		top := s.inUse.findSucc(addr)
+		if top == 0 {
+			return addrRange{}
+		}
+		r := s.inUse.ranges[top-1]
+		// addr is inclusive, so treat it as such when
+		// updating the limit, which is exclusive.
+		if r.limit > addr+1 {
+			r.limit = addr + 1
 		}
+		return r
+	}

-		// Run over the chunk looking harder for a candidate. Again, we could
-		// race with a lot of different pieces of code, but we're just being
-		// optimistic. Make sure we load the l2 pointer atomically though, to
-		// avoid races with heap growth. It may or may not be possible to also
-		// see a nil pointer in this case if we do race with heap growth, but
-		// just defensively ignore the nils. This operation is optimistic anyway.
-		l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
-		if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
-			continue
+	// Slow path: iterate optimistically over the in-use address space
+	// looking for any free and unscavenged page. If we think we see something,
+	// lock and verify it!
+	//
+	// We iterate over the address space by taking ranges from inUse.
+newRange:
+	for {
+		r := getInUseRange(s.scavAddr)
+		if r.size() == 0 {
+			break
 		}
+		unlockHeap()

-		// We found a candidate, so let's lock and verify it.
-		lockHeap()
+		// Iterate over all of the chunks described by r.
+		// Note that r.limit is the exclusive upper bound, but what
+		// we want is the top chunk instead, inclusive, so subtract 1.
+		bot, top := chunkIndex(r.base), chunkIndex(r.limit-1)
+		for i := top; i >= bot; i-- {
+			// If this chunk is totally in-use or has no unscavenged pages, don't bother
+			// doing a more sophisticated check.
+			//
+			// Note we're accessing the summary and the chunks without a lock, but
+			// that's fine. We're being optimistic anyway.
+
+			// Check quickly if there are enough free pages at all.
+			if s.summary[len(s.summary)-1][i].max() < uint(minPages) {
+				continue
+			}

-		// Find, verify, and scavenge if we can.
-		chunk := s.chunkOf(i)
-		base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
-		if npages > 0 {
-			// We found memory to scavenge! Mark the bits and report that up.
-			s.scavengeRangeLocked(i, base, npages)
-			unlockHeap()
-			return uintptr(npages) * pageSize
+			// Run over the chunk looking harder for a candidate. Again, we could
+			// race with a lot of different pieces of code, but we're just being
+			// optimistic. Make sure we load the l2 pointer atomically though, to
+			// avoid races with heap growth. It may or may not be possible to also
+			// see a nil pointer in this case if we do race with heap growth, but
+			// just defensively ignore the nils. This operation is optimistic anyway.
+			l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
+			if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
+				continue
+			}
+
+			// We found a candidate, so let's lock and verify it.
+			lockHeap()
+
+			// Find, verify, and scavenge if we can.
+			chunk := s.chunkOf(i)
+			base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
+			if npages > 0 {
+				// We found memory to scavenge! Mark the bits and report that up.
+				// scavengeRangeLocked will update scavAddr for us, also.
+				s.scavengeRangeLocked(i, base, npages)
+				unlockHeap()
+				return uintptr(npages) * pageSize
+			}
+
+			// We were fooled, let's take this opportunity to move the scavAddr
+			// all the way down to where we searched as scavenged for future calls
+			// and keep iterating. Then, go get a new range.
+			s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
+			continue newRange
 		}
+		lockHeap()

-		// We were fooled, let's take this opportunity to move the scavAddr
-		// all the way down to where we searched as scavenged for future calls
-		// and keep iterating.
-		s.scavAddr = chunkBase(i-1) + pallocChunkPages*pageSize - 1
-		unlockHeap()
+		// Move the scavenger down the heap, past everything we just searched.
+		// Since we don't check if scavAddr moved while we let go of the heap lock,
+		// it's possible that it moved down and we're moving it up here. This
+		// raciness could result in us searching parts of the heap unnecessarily.
+		// TODO(mknyszek): Remove this racy behavior through explicit address
+		// space reservations, which are difficult to do with just scavAddr.
+		s.scavAddr = r.base - 1
 	}
-
-	lockHeap()
-	// We couldn't find anything, so signal that there's nothing left
-	// to scavenge.
+	// We reached the end of the in-use address space and couldn't find anything,
+	// so signal that there's nothing left to scavenge.
 	s.scavAddr = minScavAddr
 	unlockHeap()
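For readers following the new slow path, here is a minimal, standalone sketch of the range lookup it is built on. The `addrRange`, `addrRanges`, `findSucc`, and `getInUseRange` names mirror identifiers used in the diff, but the types and the linear `findSucc` below are simplified assumptions for illustration, not the runtime's implementation. The sketch only shows how repeatedly taking the highest in-use range at or below the current scavenge address walks the in-use address space from top to bottom while skipping unused gaps.

```go
package main

import "fmt"

// addrRange is a simplified stand-in for the runtime's addrRange:
// an address range [base, limit) with an exclusive limit.
type addrRange struct {
	base, limit uintptr
}

func (r addrRange) size() uintptr { return r.limit - r.base }

// addrRanges is a simplified stand-in for the runtime's s.inUse:
// a sorted, non-overlapping set of in-use address ranges.
type addrRanges struct {
	ranges []addrRange
}

// findSucc returns the index of the first range whose base is strictly
// greater than addr, or len(ranges) if there is none. This mirrors the
// property the diff relies on; the real runtime version is not a linear scan.
func (a *addrRanges) findSucc(addr uintptr) int {
	for i, r := range a.ranges {
		if r.base > addr {
			return i
		}
	}
	return len(a.ranges)
}

// getInUseRange mirrors the closure added in the diff: it returns the
// highest range in the intersection of [0, addr] and the in-use set,
// clamping the exclusive limit so it never extends past addr.
func getInUseRange(a *addrRanges, addr uintptr) addrRange {
	top := a.findSucc(addr)
	if top == 0 {
		return addrRange{}
	}
	r := a.ranges[top-1]
	// addr is inclusive, so treat it as such when
	// updating the limit, which is exclusive.
	if r.limit > addr+1 {
		r.limit = addr + 1
	}
	return r
}

func main() {
	inUse := &addrRanges{ranges: []addrRange{
		{base: 0x1000, limit: 0x3000},
		{base: 0x8000, limit: 0x9000},
	}}
	// Walk the in-use space from high to low addresses, the way the
	// rewritten slow path takes a range, scans its chunks, and then
	// moves just below that range. (The runtime also uses a sentinel
	// scavenge address to stop; here the ranges simply run out.)
	addr := uintptr(0x8800)
	for {
		r := getInUseRange(inUse, addr)
		if r.size() == 0 {
			break
		}
		fmt.Printf("search [%#x, %#x)\n", r.base, r.limit)
		addr = r.base - 1 // move past everything just searched
	}
}
```

Compared to the old loop, which walked chunk indices one by one from `top - 1` down to `s.start` and depended on zeroed summaries to skip unused address space, pulling ranges from `inUse` means the iteration never visits chunks that were never mapped at all.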