@@ -67,14 +67,13 @@ class Allocator {
       if (UNLIKELY(Header.State != Chunk::State::Quarantined))
         reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
 
-      Chunk::UnpackedHeader NewHeader = Header;
-      NewHeader.State = Chunk::State::Available;
-      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+      Header.State = Chunk::State::Available;
+      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
 
       if (allocatorSupportsMemoryTagging<Config>())
         Ptr = untagPointer(Ptr);
-      void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
-      Cache.deallocate(NewHeader.ClassId, BlockBegin);
+      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
+      Cache.deallocate(Header.ClassId, BlockBegin);
     }
 
     // We take a shortcut when allocating a quarantine batch by working with the
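Note on the pattern in this hunk (repeated in the hunks below): the old code copied the loaded header into a local NewHeader, flipped the state on the copy, and called Chunk::compareExchangeHeader so the write would only land if the in-memory header still matched the previously loaded value. The replacement mutates the already-loaded Header in place and writes it back with Chunk::storeHeader, an unconditional store of the repacked header. Below is a minimal, self-contained sketch of the two update styles; the 64-bit packing, the missing checksum, and names like ChunkState are invented for illustration and are not the real scudo chunk.h API.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace sketch {

enum class ChunkState : uint64_t { Available = 0, Allocated = 1, Quarantined = 2 };

struct UnpackedHeader {
  ChunkState State;
  uint64_t ClassId;
};

using PackedHeader = uint64_t;

// Invented packing: low 2 bits = state, remaining bits = class id. The real
// header also carries an offset, a size/unused-bytes field and a checksum.
inline PackedHeader pack(const UnpackedHeader &U) {
  return static_cast<uint64_t>(U.State) | (U.ClassId << 2);
}
inline UnpackedHeader unpack(PackedHeader P) {
  return {static_cast<ChunkState>(P & 3), P >> 2};
}

// Old pattern: only write New if the header in memory still equals the
// previously loaded value, i.e. detect a concurrent header modification.
inline void compareExchangeHeader(std::atomic<PackedHeader> *Loc,
                                  const UnpackedHeader &New,
                                  const UnpackedHeader &Old) {
  PackedHeader Expected = pack(Old);
  bool Ok = Loc->compare_exchange_strong(Expected, pack(New));
  assert(Ok && "race on chunk header");
  (void)Ok;
}

// New pattern: the calling thread already owns the chunk at this point, so a
// plain store of the updated header is sufficient.
inline void storeHeader(std::atomic<PackedHeader> *Loc, const UnpackedHeader &H) {
  Loc->store(pack(H), std::memory_order_relaxed);
}

} // namespace sketch

int main() {
  using namespace sketch;
  std::atomic<PackedHeader> Loc(pack({ChunkState::Quarantined, /*ClassId=*/7}));

  // Old flow: copy, modify the copy, compare-exchange against the original.
  UnpackedHeader Header = unpack(Loc.load());
  UnpackedHeader NewHeader = Header;
  NewHeader.State = ChunkState::Available;
  compareExchangeHeader(&Loc, NewHeader, Header);

  // New flow: modify the loaded header in place and store it back.
  Header = unpack(Loc.load());
  Header.State = ChunkState::Available;
  storeHeader(&Loc, Header);

  std::printf("state=%llu classid=%llu\n",
              static_cast<unsigned long long>(Loc.load() & 3),
              static_cast<unsigned long long>(Loc.load() >> 2));
  return 0;
}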
@@ -117,9 +116,8 @@ class Allocator {
       DCHECK_EQ(Header.Offset, 0);
       DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
 
-      Chunk::UnpackedHeader NewHeader = Header;
-      NewHeader.State = Chunk::State::Available;
-      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+      Header.State = Chunk::State::Available;
+      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
       Cache.deallocate(QuarantineClassId,
                        reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                 Chunk::getHeaderSize()));
@@ -610,47 +608,46 @@ class Allocator {
     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
       reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
 
-    Chunk::UnpackedHeader OldHeader;
-    Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, OldPtr, &Header);
 
-    if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
       reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
 
     // Pointer has to be allocated with a malloc-type function. Some
     // applications think that it is OK to realloc a memalign'ed pointer, which
     // will trigger this check. It really isn't.
     if (Options.get(OptionBit::DeallocTypeMismatch)) {
-      if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
+      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
-                                  OldHeader.OriginOrWasZeroed,
+                                  Header.OriginOrWasZeroed,
                                   Chunk::Origin::Malloc);
     }
 
-    void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
+    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
     uptr BlockEnd;
     uptr OldSize;
-    const uptr ClassId = OldHeader.ClassId;
+    const uptr ClassId = Header.ClassId;
     if (LIKELY(ClassId)) {
       BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                  SizeClassMap::getSizeByClassId(ClassId);
-      OldSize = OldHeader.SizeOrUnusedBytes;
+      OldSize = Header.SizeOrUnusedBytes;
     } else {
       BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
       OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
-                            OldHeader.SizeOrUnusedBytes);
+                            Header.SizeOrUnusedBytes);
     }
     // If the new chunk still fits in the previously allocated block (with a
     // reasonable delta), we just keep the old block, and update the chunk
     // header to reflect the size change.
     if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
       if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
-        Chunk::UnpackedHeader NewHeader = OldHeader;
-        NewHeader.SizeOrUnusedBytes =
+        Header.SizeOrUnusedBytes =
             (ClassId ? NewSize
                      : BlockEnd -
                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
-        Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+        Chunk::storeHeader(Cookie, OldPtr, &Header);
         if (UNLIKELY(useMemoryTagging<Config>(Options))) {
           if (ClassId) {
             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
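The in-place resize in this hunk relies on the dual meaning of SizeOrUnusedBytes that the code itself spells out: for a primary chunk (ClassId != 0) the field holds the requested size, while for a secondary chunk it holds the bytes left unused between the end of the user region and BlockEnd. A small standalone sketch of that bookkeeping follows; the mask value stands in for Chunk::SizeOrUnusedBytesMask and is invented, as are the example addresses.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Placeholder: the real Chunk::SizeOrUnusedBytesMask depends on header layout.
constexpr uptr SizeOrUnusedBytesMask = (1ULL << 20) - 1;

// Value stored in Header.SizeOrUnusedBytes when resizing in place.
uptr sizeOrUnusedBytes(uptr ClassId, uptr TaggedPtr, uptr NewSize,
                       uptr BlockEnd) {
  return (ClassId ? NewSize : BlockEnd - (TaggedPtr + NewSize)) &
         SizeOrUnusedBytesMask;
}

// Recovering the old usable size, mirroring the branch near the top of the hunk.
uptr oldSize(uptr ClassId, uptr TaggedPtr, uptr SizeOrUnusedBytes,
             uptr BlockEnd) {
  return ClassId ? SizeOrUnusedBytes
                 : BlockEnd - (TaggedPtr + SizeOrUnusedBytes);
}

int main() {
  const uptr TaggedPtr = 0x1000, BlockEnd = 0x1800;
  // Secondary chunk example: 0x700 usable bytes leaves 0x100 unused at the end.
  const uptr Field =
      sizeOrUnusedBytes(/*ClassId=*/0, TaggedPtr, /*NewSize=*/0x700, BlockEnd);
  std::printf("stored=0x%zx recovered size=0x%zx\n", (size_t)Field,
              (size_t)oldSize(0, TaggedPtr, Field, BlockEnd));
  return 0;
}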
@@ -672,7 +669,7 @@ class Allocator {
     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
     if (LIKELY(NewPtr)) {
       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
-      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
+      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
     }
     return NewPtr;
   }
@@ -1111,31 +1108,30 @@ class Allocator {
                                    Chunk::UnpackedHeader *Header,
                                    uptr Size) NO_THREAD_SAFETY_ANALYSIS {
     void *Ptr = getHeaderTaggedPointer(TaggedPtr);
-    Chunk::UnpackedHeader NewHeader = *Header;
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
     // than the maximum allowed, we return a chunk directly to the backend.
     // This purposefully underflows for Size == 0.
     const bool BypassQuarantine = !Quarantine.getCacheSize() ||
                                   ((Size - 1) >= QuarantineMaxChunkSize) ||
-                                  !NewHeader.ClassId;
+                                  !Header->ClassId;
     if (BypassQuarantine)
-      NewHeader.State = Chunk::State::Available;
+      Header->State = Chunk::State::Available;
     else
-      NewHeader.State = Chunk::State::Quarantined;
-    NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
-                                  NewHeader.ClassId &&
-                                  !TSDRegistry.getDisableMemInit();
-    Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+      Header->State = Chunk::State::Quarantined;
+    Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
+                                Header->ClassId &&
+                                !TSDRegistry.getDisableMemInit();
+    Chunk::storeHeader(Cookie, Ptr, Header);
 
     if (UNLIKELY(useMemoryTagging<Config>(Options))) {
       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
-      if (NewHeader.ClassId) {
+      if (Header->ClassId) {
         if (!TSDRegistry.getDisableMemInit()) {
           uptr TaggedBegin, TaggedEnd;
           const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
-              Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
-              NewHeader.ClassId);
+              Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
+              Header->ClassId);
           // Exclude the previous tag so that immediate use after free is
           // detected 100% of the time.
           setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
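The bypass decision in this hunk keeps the comment "This purposefully underflows for Size == 0": with unsigned arithmetic, Size - 1 wraps to the maximum value for a zero-sized chunk, which always exceeds QuarantineMaxChunkSize, so such chunks skip the quarantine and go straight back to the backend. A standalone sketch of just that predicate follows; the cache size and chunk-size limit are made-up example values.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Mirrors the BypassQuarantine expression: bypass when the quarantine cache is
// disabled, when Size - 1 (unsigned) reaches the max chunk size, or when the
// chunk is a secondary allocation (ClassId == 0).
bool bypassQuarantine(uptr QuarantineCacheSize, uptr QuarantineMaxChunkSize,
                      uptr Size, uptr ClassId) {
  return !QuarantineCacheSize || ((Size - 1) >= QuarantineMaxChunkSize) ||
         !ClassId;
}

int main() {
  const uptr CacheSize = 64 << 10, MaxChunkSize = 2048;
  std::printf("Size=0    -> bypass=%d\n",
              bypassQuarantine(CacheSize, MaxChunkSize, 0, 1));    // underflow
  std::printf("Size=128  -> bypass=%d\n",
              bypassQuarantine(CacheSize, MaxChunkSize, 128, 1));  // quarantined
  std::printf("Size=4096 -> bypass=%d\n",
              bypassQuarantine(CacheSize, MaxChunkSize, 4096, 1)); // too large
  std::printf("secondary -> bypass=%d\n",
              bypassQuarantine(CacheSize, MaxChunkSize, 128, 0));  // ClassId == 0
  return 0;
}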
@@ -1146,8 +1142,8 @@ class Allocator {
     if (BypassQuarantine) {
       if (allocatorSupportsMemoryTagging<Config>())
         Ptr = untagPointer(Ptr);
-      void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
-      const uptr ClassId = NewHeader.ClassId;
+      void *BlockBegin = getBlockBegin(Ptr, Header);
+      const uptr ClassId = Header->ClassId;
       if (LIKELY(ClassId)) {
         bool UnlockRequired;
         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);