@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
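
For orientation: this hunk replaces the single LRU list with two doubly linked lists threaded through the same fixed-size `Entries` array, using `u16` indices as links and the enum value doubling as the index into `EntryLists`. Below is a minimal, self-contained model of that bookkeeping. The names (`TwoListCache`, `Node`, `kInvalid`) are illustrative stand-ins, not the patch's types, and `pushFront`/`unlink` mirror the helpers added further down in this diff.

```cpp
// Illustrative sketch only: models the COMMITTED/DECOMMITTED index-linked
// lists from this patch with simplified, hypothetical names.
#include <cassert>
#include <cstdint>

using u16 = std::uint16_t;
constexpr u16 kInvalid = 0xFFFF; // stand-in for CachedBlock::InvalidEntry

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1, NONE };

struct ListInfo {
  u16 Head = kInvalid;
  u16 Tail = kInvalid;
};

struct Node {
  u16 Prev = kInvalid;
  u16 Next = kInvalid;
};

struct TwoListCache {
  static constexpr u16 N = 8;
  Node Entries[N];
  ListInfo EntryLists[2]; // indexed directly by COMMITTED / DECOMMITTED

  void pushFront(u16 I, EntryListT L) {
    if (EntryLists[L].Tail == kInvalid)
      EntryLists[L].Tail = I; // first element is also the tail
    else
      Entries[EntryLists[L].Head].Prev = I;
    Entries[I].Next = EntryLists[L].Head;
    Entries[I].Prev = kInvalid;
    EntryLists[L].Head = I;
  }

  void unlink(u16 I, EntryListT L) {
    if (I == EntryLists[L].Head)
      EntryLists[L].Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == EntryLists[L].Tail)
      EntryLists[L].Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }
};

int main() {
  TwoListCache C;
  C.pushFront(0, COMMITTED);
  C.pushFront(1, COMMITTED); // list is now 1 -> 0
  C.unlink(0, COMMITTED);    // drop the tail, as eviction does
  C.pushFront(0, DECOMMITTED);
  assert(C.EntryLists[COMMITTED].Head == 1 && C.EntryLists[COMMITTED].Tail == 1);
  assert(C.EntryLists[DECOMMITTED].Head == 0);
  return 0;
}
```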
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -310,15 +325,19 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
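
The eviction choice above prefers the tail of the DECOMMITTED list: those entries have already had their pages released to the OS, so dropping them forfeits the least reuse value. The COMMITTED tail is evicted only when no decommitted entry exists. A minimal sketch of that preference, with hypothetical helper names rather than the patch's code:

```cpp
// Hypothetical helper illustrating the eviction order in store().
enum EntryListT { COMMITTED = 0, DECOMMITTED = 1, NONE };

struct ListInfo { unsigned short Head, Tail; };
constexpr unsigned short kInvalid = 0xFFFF;

EntryListT pickEvictionList(const ListInfo EntryLists[2]) {
  // Prefer the oldest DECOMMITTED entry (its pages are already released);
  // fall back to the COMMITTED tail only when no decommitted entries remain.
  return EntryLists[DECOMMITTED].Tail == kInvalid ? COMMITTED : DECOMMITTED;
}
```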
@@ -335,56 +354,69 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
-      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
-          OptimalFitIndex = I;
-          EntryHeaderPos = HeaderPos;
-          break;
-        }
-        // keep track of the smallest cached block
-        // that is greater than (AllocSize + HeaderSize)
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
+      EntryListT OptimalFitListType = NONE;
+      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+          const uptr CommitBase = Entries[I].CommitBase;
+          const uptr CommitSize = Entries[I].CommitSize;
+          const uptr AllocPos =
+              roundDown(CommitBase + CommitSize - Size, Alignment);
+          const uptr HeaderPos = AllocPos - HeadersSize;
+          if (HeaderPos > CommitBase + CommitSize)
+            continue;
+          if (HeaderPos < CommitBase ||
+              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            continue;
+
+          const uptr Diff = HeaderPos - CommitBase;
+          // immediately use a cached block if its size is close enough to
+          // the requested size.
+          const uptr MaxAllowedFragmentedBytes =
+              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+          if (Diff <= MaxAllowedFragmentedBytes) {
+            OptimalFitIndex = I;
+            EntryHeaderPos = HeaderPos;
+            OptimalFitListType = ListType;
+            return Entries[OptimalFitIndex];
+          }
+
+          // keep track of the smallest cached block
+          // that is greater than (AllocSize + HeaderSize)
+          if (Diff > MinDiff)
+            continue;
+          OptimalFitIndex = I;
+          MinDiff = Diff;
+          OptimalFitListType = ListType;
+          EntryHeaderPos = HeaderPos;
+        }
+        CachedBlock FoundEntry;
+        if (OptimalFitIndex != CachedBlock::InvalidEntry)
+          FoundEntry = Entries[OptimalFitIndex];
+        return FoundEntry;
+      };
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      Entry = FindAvailableEntry(COMMITTED);
+      if (!Entry.isValid())
+        Entry = FindAvailableEntry(DECOMMITTED);
+
+      if (!Entry.isValid())
+        return false;
+
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
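
retrieve() now searches the COMMITTED list first and falls back to DECOMMITTED only when no committed block fits, since reusing a still-committed mapping avoids re-faulting released pages. The fit test itself is unchanged: a candidate is usable when the header lands inside the block and no more than `MaxUnusedCachePages` pages would sit unused, and it is accepted immediately when the bytes wasted at its front are within 10% of the span actually used. A standalone sketch of that arithmetic, with assumed helper names (`headerPosIfFits`, `closeEnough`) and `MaxUnusedBytes` standing in for `PageSize * MaxUnusedCachePages`:

```cpp
#include <cstdint>
using uptr = std::uintptr_t;

// Assumes a power-of-two Boundary, as the allocator's alignment always is.
constexpr uptr roundDown(uptr X, uptr Boundary) { return X & ~(Boundary - 1); }

// Returns the header position for serving (Size + HeadersSize) bytes from a
// cached block [CommitBase, CommitBase + CommitSize), or 0 (an address no
// real block uses) when the block cannot serve the request.
uptr headerPosIfFits(uptr CommitBase, uptr CommitSize, uptr Size,
                     uptr Alignment, uptr HeadersSize, uptr MaxUnusedBytes) {
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeadersSize;
  if (HeaderPos > CommitBase + CommitSize) // the subtraction wrapped around
    return 0;
  if (HeaderPos < CommitBase || AllocPos > CommitBase + MaxUnusedBytes)
    return 0;
  return HeaderPos;
}

// Immediate-accept rule: take the block as soon as the bytes wasted at the
// front ("Diff" in the patch) are within 10% of the span the allocation
// will actually use; otherwise the scan keeps the smallest Diff seen.
bool closeEnough(uptr CommitBase, uptr CommitSize, uptr HeaderPos) {
  constexpr uptr FragmentedBytesDivisor = 10;
  const uptr Diff = HeaderPos - CommitBase;
  return Diff <= (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
}
```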
@@ -448,10 +480,15 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
 
@@ -466,7 +503,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -475,71 +512,92 @@ template <typename Config> class MapAllocatorCache {
475
512
u32 FreeIndex = AvailableHead;
476
513
AvailableHead = Entries[AvailableHead].Next ;
477
514
478
- if (EntriesCount == 0 ) {
479
- LRUTail = static_cast <u16>(FreeIndex);
480
- } else {
481
- // Check list order
482
- if (EntriesCount > 1 )
483
- DCHECK_GE (Entries[LRUHead].Time , Entries[Entries[LRUHead].Next ].Time );
484
- Entries[LRUHead].Prev = static_cast <u16>(FreeIndex);
485
- }
486
-
487
515
Entries[FreeIndex] = Entry;
488
- Entries[FreeIndex].Next = LRUHead;
489
- Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
490
- LRUHead = static_cast <u16>(FreeIndex);
516
+ pushFront (FreeIndex, ListType);
491
517
EntriesCount++;
492
518
519
+ if (Entries[EntryLists[ListType].Head ].Next != CachedBlock::InvalidEntry) {
520
+ DCHECK_GE (Entries[EntryLists[ListType].Head ].Time ,
521
+ Entries[Entries[EntryLists[ListType].Head ].Next ].Time );
522
+ }
493
523
// Availability stack should not have available entries when all entries
494
524
// are in use
495
525
if (EntriesCount == Config::getEntriesArraySize ())
496
526
DCHECK_EQ (AvailableHead, CachedBlock::InvalidEntry);
497
527
}
498
528
499
- void remove (uptr I) REQUIRES(Mutex) {
500
- DCHECK (Entries[I].isValid ());
501
-
502
- Entries[I].invalidate ();
503
-
504
- if (I == LRUHead)
505
- LRUHead = Entries[I].Next ;
529
+ // Joins the entries adjacent to Entries[I], effectively
530
+ // unlinking Entries[I] from the list
531
+ void unlink (uptr I, EntryListT ListType) REQUIRES(Mutex) {
532
+ if (I == EntryLists[ListType].Head )
533
+ EntryLists[ListType].Head = Entries[I].Next ;
506
534
else
507
535
Entries[Entries[I].Prev ].Next = Entries[I].Next ;
508
536
509
- if (I == LRUTail )
510
- LRUTail = Entries[I].Prev ;
537
+ if (I == EntryLists[ListType]. Tail )
538
+ EntryLists[ListType]. Tail = Entries[I].Prev ;
511
539
else
512
540
Entries[Entries[I].Next ].Prev = Entries[I].Prev ;
541
+ }
513
542
543
+ // Invalidates Entries[I], removes Entries[I] from list, and pushes
544
+ // Entries[I] onto the stack of available entries
545
+ void remove (uptr I, EntryListT ListType) REQUIRES(Mutex) {
546
+ DCHECK (Entries[I].isValid ());
547
+
548
+ Entries[I].invalidate ();
549
+
550
+ unlink (I, ListType);
514
551
Entries[I].Next = AvailableHead;
515
552
AvailableHead = static_cast <u16>(I);
516
553
EntriesCount--;
517
554
518
555
// Cache should not have valid entries when not empty
519
556
if (EntriesCount == 0 ) {
520
- DCHECK_EQ (LRUHead, CachedBlock::InvalidEntry);
521
- DCHECK_EQ (LRUTail, CachedBlock::InvalidEntry);
557
+ DCHECK_EQ (EntryLists[COMMITTED].Head , CachedBlock::InvalidEntry);
558
+ DCHECK_EQ (EntryLists[COMMITTED].Tail , CachedBlock::InvalidEntry);
559
+ DCHECK_EQ (EntryLists[DECOMMITTED].Head , CachedBlock::InvalidEntry);
560
+ DCHECK_EQ (EntryLists[DECOMMITTED].Tail , CachedBlock::InvalidEntry);
522
561
}
523
562
}
524
563
564
+ inline void pushFront (uptr I, EntryListT ListType) REQUIRES(Mutex) {
565
+ if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
566
+ EntryLists[ListType].Tail = static_cast <u16>(I);
567
+ else
568
+ Entries[EntryLists[ListType].Head ].Prev = static_cast <u16>(I);
569
+
570
+ Entries[I].Next = EntryLists[ListType].Head ;
571
+ Entries[I].Prev = CachedBlock::InvalidEntry;
572
+ EntryLists[ListType].Head = static_cast <u16>(I);
573
+ }
574
+
525
575
void empty () {
526
576
MemMapT MapInfo[Config::getEntriesArraySize ()];
527
577
uptr N = 0 ;
528
578
{
529
579
ScopedLock L (Mutex);
530
- for (uptr I = 0 ; I < Config::getEntriesArraySize (); I++) {
531
- if (!Entries[I].isValid ())
532
- continue ;
533
- MapInfo[N] = Entries[I].MemMap ;
534
- remove (I);
535
- N++;
536
- }
580
+ auto emptyList = [&](EntryListT ListType) REQUIRES (Mutex) {
581
+ for (uptr I = EntryLists[ListType].Head ;
582
+ I != CachedBlock::InvalidEntry;) {
583
+ uptr ToRemove = I;
584
+ I = Entries[I].Next ;
585
+ MapInfo[N] = Entries[ToRemove].MemMap ;
586
+ remove (ToRemove, ListType);
587
+ N++;
588
+ }
589
+ };
590
+ emptyList (COMMITTED);
591
+ emptyList (DECOMMITTED);
537
592
EntriesCount = 0 ;
538
593
}
539
594
for (uptr I = 0 ; I < N; I++) {
540
595
MemMapT &MemMap = MapInfo[I];
541
596
MemMap.unmap (MemMap.getBase (), MemMap.getCapacity ());
542
597
}
598
+
599
+ for (uptr I = 0 ; I < Config::getEntriesArraySize (); I++)
600
+ DCHECK (!Entries[I].isValid ());
543
601
}
544
602
545
603
void releaseIfOlderThan (CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
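
Slot storage is orthogonal to the two lists: remove() pushes an invalidated entry onto a singly linked availability stack that reuses the `Next` field, so insert() can pop a free slot in O(1) regardless of which list it will join. A compact model of that recycling, with hypothetical names (`SlotPool`, `take`, `give`):

```cpp
// Sketch of the slot recycling in insert()/remove(): illustrates how Next
// does double duty as the link of the available-entries stack.
#include <cstdint>
using u16 = std::uint16_t;
constexpr u16 kInvalid = 0xFFFF;

struct Slot {
  bool Valid = false;
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
};

struct SlotPool {
  static constexpr u16 N = 4;
  Slot Slots[N];
  u16 AvailableHead = 0;

  SlotPool() {
    // Chain all slots into the initial availability stack, as init() does.
    for (u16 I = 0; I + 1 < N; I++)
      Slots[I].Next = I + 1;
    Slots[N - 1].Next = kInvalid;
  }

  u16 take() { // pop a free slot, as insert() does before linking it in
    u16 I = AvailableHead;
    AvailableHead = Slots[I].Next;
    Slots[I].Valid = true;
    return I;
  }

  void give(u16 I) { // push a slot back, as remove() does after unlink()
    Slots[I].Valid = false;
    Slots[I].Next = AvailableHead;
    AvailableHead = I;
  }
};
```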
@@ -561,8 +619,13 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }
 
   HybridMutex Mutex;
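
The loop above is where entries migrate between lists over time: an entry whose pages are about to be released by releaseIfOlderThan() is unlinked from COMMITTED and pushed onto DECOMMITTED, so retrieve() and eviction keep seeing an accurate split. A one-function sketch of the migration predicate, with the hypothetical name `shouldDecommit`:

```cpp
#include <cstdint>
using u64 = std::uint64_t;

// Time == 0 marks an entry that is already released ("[R]" in getStats),
// so only valid entries with a nonzero timestamp at or before the cutoff
// move from the COMMITTED to the DECOMMITTED list.
bool shouldDecommit(bool Valid, u64 EntryTime, u64 CutoffTime) {
  return Valid && EntryTime != 0 && EntryTime <= CutoffTime;
}
```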
@@ -579,10 +642,12 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries are those that are not madvise()'d
+  // DECOMMITTED entries are those that are madvise()'d
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };