@@ -180,14 +180,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
-  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
-
-  // TODO: Refactor the intrusive list to support non-pointer link type
-  typedef struct {
-    u16 Head;
-    u16 Tail;
-  } ListInfo;
-
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
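
Background on the removed types: EntryListT tags the two entry lists, and ListInfo stores each list's head and tail as u16 indices into the shared Entries array rather than as pointers (the TODO about the intrusive list refers to exactly this). A minimal, self-contained sketch of such an index-linked list, using hypothetical names rather than the Scudo types:

#include <cstdint>
#include <cstdio>

constexpr uint16_t InvalidEntry = UINT16_MAX; // end-of-list sentinel

// Hypothetical node: links are u16 slot indices, not pointers.
struct Node {
  uint16_t Prev = InvalidEntry;
  uint16_t Next = InvalidEntry;
  uint64_t Payload = 0;
};

// Hypothetical ListInfo: a list is just two indices into a shared array.
struct ListInfo {
  uint16_t Head = InvalidEntry;
  uint16_t Tail = InvalidEntry;
};

int main() {
  Node Nodes[4];
  // Manually thread slots 2 -> 0 into a list.
  Nodes[2] = {InvalidEntry, 0, 10};
  Nodes[0] = {2, InvalidEntry, 32};
  ListInfo List = {2, 0};

  uint64_t Sum = 0;
  for (uint16_t I = List.Head; I != InvalidEntry; I = Nodes[I].Next)
    Sum += Nodes[I].Payload;
  std::printf("sum = %llu\n", (unsigned long long)Sum); // sum = 42
  return 0;
}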
@@ -205,18 +197,13 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        CachedBlock &Entry = Entries[I];
-        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                    "BlockSize: %zu %s\n",
-                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-      }
-    };
-    printList(COMMITTED);
-    printList(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                  "BlockSize: %zu %s\n",
+                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+    }
   }
 
   // Ensure the default maximum specified fits the array.
@@ -240,10 +227,8 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -325,19 +310,15 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EntryListT EvictionListType;
-        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
-          EvictionListType = COMMITTED;
-        else
-          EvictionListType = DECOMMITTED;
-        remove(EntryLists[EvictionListType].Tail, EvictionListType);
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
 
-      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
+      insert(Entry);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0); // ScopedLock L(Mutex);
+    } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
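
Both versions follow the same locking discipline here: victims are unlinked and their MemMaps collected while the mutex is held, and the expensive unmap syscalls run only after the lock is dropped. A minimal sketch of that pattern, with hypothetical names (std::mutex standing in for HybridMutex, not the Scudo code):

#include <cstddef>
#include <mutex>
#include <vector>

struct Resource {
  void unmap() { /* expensive syscall, e.g. munmap() */ }
};

class CacheSketch {
public:
  // Evict from the least-recently-used end until the cache fits, but run the
  // slow unmap calls only after the lock has been released.
  void trim(std::size_t MaxEntries) {
    std::vector<Resource> Victims;
    {
      std::lock_guard<std::mutex> L(Mutex);
      while (Entries.size() > MaxEntries) {
        Victims.push_back(Entries.back()); // back() plays the role of LRUTail
        Entries.pop_back();
      }
    } // lock dropped here
    for (Resource &R : Victims)
      R.unmap(); // other threads can use the cache while this runs
  }

private:
  std::mutex Mutex;
  std::vector<Resource> Entries; // front = most recent, back = least recent
};

int main() {
  CacheSketch C;
  C.trim(0);
  return 0;
}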
@@ -354,69 +335,56 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
+    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
-    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
+      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      EntryListT OptimalFitListType = NONE;
-      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-             I = Entries[I].Next) {
-          const uptr CommitBase = Entries[I].CommitBase;
-          const uptr CommitSize = Entries[I].CommitSize;
-          const uptr AllocPos =
-              roundDown(CommitBase + CommitSize - Size, Alignment);
-          const uptr HeaderPos = AllocPos - HeadersSize;
-          if (HeaderPos > CommitBase + CommitSize)
-            continue;
-          if (HeaderPos < CommitBase ||
-              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
-            continue;
-
-          const uptr Diff = HeaderPos - CommitBase;
-          // immediately use a cached block if its size is close enough to
-          // the requested size.
-          const uptr MaxAllowedFragmentedBytes =
-              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-          if (Diff <= MaxAllowedFragmentedBytes) {
-            OptimalFitIndex = I;
-            EntryHeaderPos = HeaderPos;
-            OptimalFitListType = ListType;
-            return Entries[OptimalFitIndex];
-          }
-
-          // keep track of the smallest cached block
-          // that is greater than (AllocSize + HeaderSize)
-          if (Diff > MinDiff)
-            continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        const uptr CommitBase = Entries[I].CommitBase;
+        const uptr CommitSize = Entries[I].CommitSize;
+        const uptr AllocPos =
+            roundDown(CommitBase + CommitSize - Size, Alignment);
+        const uptr HeaderPos = AllocPos - HeadersSize;
+        if (HeaderPos > CommitBase + CommitSize)
+          continue;
+        if (HeaderPos < CommitBase ||
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+          continue;
+        }
+        Found = true;
+        const uptr Diff = HeaderPos - CommitBase;
+        // immediately use a cached block if its size is close enough to the
+        // requested size.
+        const uptr MaxAllowedFragmentedBytes =
+            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+        if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
-          MinDiff = Diff;
-          OptimalFitListType = ListType;
           EntryHeaderPos = HeaderPos;
+          break;
         }
-        CachedBlock FoundEntry;
-        if (OptimalFitIndex != CachedBlock::InvalidEntry)
-          FoundEntry = Entries[OptimalFitIndex];
-        return FoundEntry;
-      };
-
-      // Prioritize valid fit from COMMITTED entries over
-      // optimal fit from DECOMMITTED entries
-      Entry = FindAvailableEntry(COMMITTED);
-      if (!Entry.isValid())
-        Entry = FindAvailableEntry(DECOMMITTED);
-
-      if (!Entry.isValid())
-        return false;
-
-      remove(OptimalFitIndex, OptimalFitListType);
-      SuccessfulRetrieves++;
-    } // ScopedLock L(Mutex);
+        // keep track of the smallest cached block
+        // that is greater than (AllocSize + HeaderSize)
+        if (Diff > MinDiff)
+          continue;
+        OptimalFitIndex = I;
+        MinDiff = Diff;
+        EntryHeaderPos = HeaderPos;
+      }
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        remove(OptimalFitIndex);
+        SuccessfulRetrieves++;
+      }
+    }
+    if (!Found)
+      return false;
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
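
For scale: with FragmentedBytesDivisor = 10, a block is accepted on first sight when its wasted prefix (Diff = HeaderPos - CommitBase) is at most one tenth of the bytes from HeaderPos to the end of the committed region; otherwise the scan keeps the candidate with the smallest Diff. A standalone illustration of that threshold arithmetic, under assumed sizes:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed sizes, for illustration only.
  const uint64_t CommitSize = 100 * 1024; // 100 KiB committed region
  const uint64_t Diff = 6 * 1024;         // wasted prefix: HeaderPos - CommitBase
  const uint64_t FragmentedBytesDivisor = 10;

  // Same rule as above, since CommitBase + CommitSize - HeaderPos == CommitSize - Diff.
  const uint64_t MaxAllowedFragmentedBytes =
      (CommitSize - Diff) / FragmentedBytesDivisor; // 9625 bytes (~9.4 KiB)

  if (Diff <= MaxAllowedFragmentedBytes) // 6144 <= 9625: immediate hit
    std::printf("accept immediately, wasting %llu of %llu bytes\n",
                (unsigned long long)Diff, (unsigned long long)CommitSize);
  return 0;
}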
@@ -480,15 +448,10 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
-    };
-    disableLists(COMMITTED);
-    disableLists(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
+    }
     QuarantinePos = -1U;
   }
 
@@ -503,7 +466,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -512,92 +475,71 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
     Entries[FreeIndex] = Entry;
-    pushFront(FreeIndex, ListType);
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
     EntriesCount++;
 
-    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
-      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
-                Entries[Entries[EntryLists[ListType].Head].Next].Time);
-    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  // Joins the entries adjacent to Entries[I], effectively
-  // unlinking Entries[I] from the list
-  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (I == EntryLists[ListType].Head)
-      EntryLists[ListType].Head = Entries[I].Next;
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == EntryLists[ListType].Tail)
-      EntryLists[ListType].Tail = Entries[I].Prev;
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
-  }
 
-  // Invalidates Entries[I], removes Entries[I] from list, and pushes
-  // Entries[I] onto the stack of available entries
-  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
     }
   }
 
-  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
-      EntryLists[ListType].Tail = static_cast<u16>(I);
-    else
-      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
-
-    Entries[I].Next = EntryLists[ListType].Head;
-    Entries[I].Prev = CachedBlock::InvalidEntry;
-    EntryLists[ListType].Head = static_cast<u16>(I);
-  }
-
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head;
-             I != CachedBlock::InvalidEntry;) {
-          uptr ToRemove = I;
-          I = Entries[I].Next;
-          MapInfo[N] = Entries[ToRemove].MemMap;
-          remove(ToRemove, ListType);
-          N++;
-        }
-      };
-      emptyList(COMMITTED);
-      emptyList(DECOMMITTED);
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+        if (!Entries[I].isValid())
+          continue;
+        MapInfo[N] = Entries[I].MemMap;
+        remove(I);
+        N++;
+      }
       EntriesCount = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
       MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
     }
-
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-      DCHECK(!Entries[I].isValid());
   }
 
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
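
The restored insert()/remove() pair maintains a single doubly linked LRU list threaded through the fixed Entries array via u16 links: new entries are pushed at the head, and removal unlinks from any position. A self-contained sketch of that bookkeeping, with hypothetical names and a fixed toy capacity (not the Scudo implementation):

#include <cassert>
#include <cstdint>

constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Entry {
  uint16_t Prev = InvalidEntry;
  uint16_t Next = InvalidEntry;
};

struct LRUList {
  Entry Entries[64]; // toy capacity; slots are linked by index, not pointer
  uint16_t Head = InvalidEntry;
  uint16_t Tail = InvalidEntry;

  void pushFront(uint16_t I) {
    Entries[I].Prev = InvalidEntry;
    Entries[I].Next = Head;
    if (Head != InvalidEntry)
      Entries[Head].Prev = I;
    else
      Tail = I; // first element is both head and tail
    Head = I;
  }

  void unlink(uint16_t I) {
    if (I == Head)
      Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Tail)
      Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }
};

int main() {
  LRUList L;
  L.pushFront(3);
  L.pushFront(7); // list is now 7 -> 3
  L.unlink(3);    // list is now just 7
  assert(L.Head == 7 && L.Tail == 7);
  return 0;
}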
@@ -619,13 +561,8 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
-        unlink(I, COMMITTED);
-        pushFront(I, DECOMMITTED);
-      }
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
-    }
   }
 
   HybridMutex Mutex;
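
With the list migration gone, the release loop above simply applies the time-based rule to every slot. A rough, simplified sketch of that rule, assuming a hypothetical entry type (the real code also checks entry validity and tracks OldestTime):

#include <cstdint>

// Hypothetical, simplified stand-in for a cache entry.
struct EntrySketch {
  uint64_t Time = 0; // 0 means "already released back to the OS"
  bool Released = false;
  void releaseToOS() { Released = true; } // e.g. madvise(MADV_DONTNEED)
};

// Skip already-released entries, keep entries newer than the cutoff, and
// release (then mark) everything else.
inline void releaseIfOlderThan(EntrySketch &E, uint64_t CutoffTime) {
  if (E.Time == 0 || E.Time > CutoffTime)
    return;
  E.releaseToOS();
  E.Time = 0; // shows up as "[R]" in getStats() in the real cache
}

int main() {
  EntrySketch Old{/*Time=*/100}, Fresh{/*Time=*/900};
  releaseIfOlderThan(Old, 500);   // released: 100 <= 500
  releaseIfOlderThan(Fresh, 500); // kept: 900 > 500
  return Old.Released && !Fresh.Released ? 0 : 1;
}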
@@ -642,12 +579,10 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // EntryLists stores the head and tail indices of all
-  // lists being used to store valid cache entries.
-  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
-  // COMMITTED entries are those that are not madvise()'d
-  // DECOMMITTED entries are those that are madvise()'d
-  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };