@@ -139,11 +139,7 @@ pub const DirectAllocator = struct {
                     return shrink(allocator, old_mem, old_align, new_size, new_align);
                 }
                 const result = try alloc(allocator, new_size, new_align);
-                if (result.len >= old_mem.len) {
-                    mem.copy(u8, result, old_mem);
-                } else {
-                    @memcpy(result.ptr, old_mem.ptr, new_size);
-                }
+                @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                 _ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
                 return result;
             },
@@ -170,16 +166,20 @@ pub const DirectAllocator = struct {
                 ) orelse return error.OutOfMemory;
                 const offset = old_adjusted_addr - root_addr;
                 const new_root_addr = @ptrToInt(new_ptr);
-                const adjusted_addr = new_root_addr + offset;
-                const new_adjusted_addr = mem.alignForward(new_root_addr, new_align);
-                // If HeapReAlloc didn't happen to move the memory to the new alignment
-                // then we need to copy it
-                if (new_adjusted_addr != adjusted_addr) {
+                var new_adjusted_addr = new_root_addr + offset;
+                const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
+                const offset_is_aligned = new_adjusted_addr % new_align == 0;
+                if (!offset_is_valid or !offset_is_aligned) {
+                    // If HeapReAlloc didn't happen to move the memory to the new alignment,
+                    // or the memory starting at the old offset would be outside of the new allocation,
+                    // then we need to copy the memory to a valid aligned address and use that
+                    const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
                     @memcpy(
+                        @intToPtr([*]u8, new_aligned_addr),
                         @intToPtr([*]u8, new_adjusted_addr),
-                        @intToPtr([*]u8, adjusted_addr),
                         std.math.min(old_mem.len, new_size),
                     );
+                    new_adjusted_addr = new_aligned_addr;
                 }
                 const new_record_addr = new_adjusted_addr + new_size;
                 @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
@@ -270,11 +270,7 @@ pub const ArenaAllocator = struct {
             return error.OutOfMemory;
         } else {
             const result = try alloc(allocator, new_size, new_align);
-            if (result.len >= old_mem.len) {
-                mem.copy(u8, result, old_mem);
-            } else {
-                @memcpy(result.ptr, old_mem.ptr, new_size);
-            }
+            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
             return result;
         }
     }
@@ -332,11 +328,7 @@ pub const FixedBufferAllocator = struct {
             return error.OutOfMemory;
         } else {
             const result = try alloc(allocator, new_size, new_align);
-            if (result.len >= old_mem.len) {
-                mem.copy(u8, result, old_mem);
-            } else {
-                @memcpy(result.ptr, old_mem.ptr, new_size);
-            }
+            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
             return result;
         }
     }
@@ -479,11 +471,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
             return error.OutOfMemory;
         } else {
             const result = try alloc(allocator, new_size, new_align);
-            if (result.len >= old_mem.len) {
-                mem.copy(u8, result, old_mem);
-            } else {
-                @memcpy(result.ptr, old_mem.ptr, new_size);
-            }
+            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
             return result;
         }
     }