@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -310,11 +325,15 @@ template <typename Config> class MapAllocatorCache {
     // All excess entries are evicted from the cache
     while (needToEvict()) {
       // Save MemMaps of evicted entries to perform unmap outside of lock
-      EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-      remove(LRUTail);
+      EntryListT EvictionListType;
+      if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+        EvictionListType = COMMITTED;
+      else
+        EvictionListType = DECOMMITTED;
+      remove(EntryLists[EvictionListType].Tail, EvictionListType);
     }
 
-    insert(Entry);
+    insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
     if (OldestTime == 0)
       OldestTime = Entry.Time;
@@ -335,56 +354,70 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
-      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
+      EntryListT OptimalFitListType = NONE;
+      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+             I = Entries[I].Next) {
+          const uptr CommitBase = Entries[I].CommitBase;
+          const uptr CommitSize = Entries[I].CommitSize;
+          const uptr AllocPos =
+              roundDown(CommitBase + CommitSize - Size, Alignment);
+          const uptr HeaderPos = AllocPos - HeadersSize;
+          if (HeaderPos > CommitBase + CommitSize)
+            continue;
+          if (HeaderPos < CommitBase ||
+              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            continue;
+
+          const uptr Diff = HeaderPos - CommitBase;
+          // immediately use a cached block if it's size is close enough to
+          // the requested size.
+          const uptr MaxAllowedFragmentedBytes =
+              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+          if (Diff <= MaxAllowedFragmentedBytes) {
+            OptimalFitIndex = I;
+            EntryHeaderPos = HeaderPos;
+            OptimalFitListType = ListType;
+            return Entries[OptimalFitIndex];
+          }
+
+          // keep track of the smallest cached block
+          // that is greater than (AllocSize + HeaderSize)
+          if (Diff > MinDiff)
+            continue;
           OptimalFitIndex = I;
+          MinDiff = Diff;
+          OptimalFitListType = ListType;
           EntryHeaderPos = HeaderPos;
-          break;
         }
-        // keep track of the smallest cached block
-        // that is greater than (AllocSize + HeaderSize)
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
+        CachedBlock FoundEntry;
+        if (OptimalFitIndex != CachedBlock::InvalidEntry)
+          FoundEntry = Entries[OptimalFitIndex];
+        return FoundEntry;
+      };
+
+      // Prioritize valid fit from committed entries over
+      // optimal fit from DECOMMITTED entries
+      Entry = FindAvailableEntry(COMMITTED);
+      if (!Entry.isValid())
+        Entry = FindAvailableEntry(DECOMMITTED);
+
+      if (!Entry.isValid()) {
+        return false;
+      } else {
+        remove(OptimalFitIndex, OptimalFitListType);
         SuccessfulRetrieves++;
       }
     }
-    if (!Found)
-      return false;
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
@@ -448,10 +481,15 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
 
@@ -466,7 +504,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -475,71 +513,88 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;
 
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
+
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
 
+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }
 
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
       MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
     }
+
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+      DCHECK(!Entries[I].isValid());
   }
 
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
@@ -561,8 +616,13 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }
 
   HybridMutex Mutex;
@@ -579,10 +639,12 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries are those that are not madvise()'d
+  // DECOMMITTED entries are those that are madvise()'d
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
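
The core data-structure change in this diff is that cache entries stay in the fixed Entries array but are now threaded onto one of two intrusive, index-linked lists, EntryLists[COMMITTED] and EntryLists[DECOMMITTED], instead of the single LRU list, and eviction drains the decommitted list's tail before touching the committed one. The following stand-alone sketch is illustrative only, not the scudo code itself: the Cache, Entry, kArraySize, and evictOne names are invented here to show how the pushFront/unlink index bookkeeping and the eviction preference fit together.

// Minimal sketch (assumed names, not scudo) of a fixed array of entries
// linked into two lists via u16 indices instead of pointers.
#include <cassert>
#include <cstdint>
#include <cstdio>

using u16 = std::uint16_t;
using u64 = std::uint64_t;

constexpr u16 kInvalid = 0xFFFF;
constexpr u16 kArraySize = 8;

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1 };

struct Entry {
  u64 Time = 0; // 0 means the pages were released (decommitted).
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
  bool Valid = false;
};

struct ListInfo {
  u16 Head = kInvalid;
  u16 Tail = kInvalid;
};

struct Cache {
  Entry Entries[kArraySize];
  ListInfo Lists[2];

  // Link entry I at the front (most recently used end) of the given list.
  void pushFront(u16 I, EntryListT L) {
    if (Lists[L].Tail == kInvalid)
      Lists[L].Tail = I;
    else
      Entries[Lists[L].Head].Prev = I;
    Entries[I].Next = Lists[L].Head;
    Entries[I].Prev = kInvalid;
    Lists[L].Head = I;
  }

  // Unlink entry I from the given list without invalidating it.
  void unlink(u16 I, EntryListT L) {
    if (I == Lists[L].Head)
      Lists[L].Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Lists[L].Tail)
      Lists[L].Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }

  // Eviction prefers the tail of the DECOMMITTED list; only when that list
  // is empty does it fall back to COMMITTED, mirroring the patch's policy.
  u16 evictOne() {
    EntryListT L =
        (Lists[DECOMMITTED].Tail == kInvalid) ? COMMITTED : DECOMMITTED;
    u16 Victim = Lists[L].Tail;
    if (Victim == kInvalid)
      return kInvalid; // Cache is empty.
    unlink(Victim, L);
    Entries[Victim].Valid = false;
    return Victim;
  }
};

int main() {
  Cache C;
  // Insert entry 0 as committed (Time != 0) and entry 1 as decommitted.
  C.Entries[0] = {/*Time=*/42, kInvalid, kInvalid, true};
  C.pushFront(0, COMMITTED);
  C.Entries[1] = {/*Time=*/0, kInvalid, kInvalid, true};
  C.pushFront(1, DECOMMITTED);

  // The decommitted entry is evicted first, then the committed one.
  assert(C.evictOne() == 1);
  assert(C.evictOne() == 0);
  assert(C.evictOne() == kInvalid);
  std::printf("eviction order verified\n");
  return 0;
}

Splitting unlink() out of remove() in the sketch serves the same purpose as in the patch: releaseIfOlderThan() can move a released entry from the COMMITTED list to the DECOMMITTED list without invalidating it.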