Commit c012c8a

gh-115103: Delay reuse of mimalloc pages that store PyObjects (#115435)
This implements the delayed reuse of mimalloc pages that contain Python objects in the free-threaded build.

Allocations of the same size class are grouped in data structures called pages. These are different from operating system pages. For thread-safety, we want to ensure that memory used to store PyObjects remains valid as long as there may be concurrent lock-free readers; we want to delay using it for other size classes, in other heaps, or returning it to the operating system.

When a mimalloc page becomes empty, instead of immediately freeing it, we tag it with a QSBR goal and insert it into a per-thread-state linked list of pages to be freed. When mimalloc needs a fresh page, we process the queue and free any still-empty pages that are now deemed safe to free. Pages waiting to be freed are still available for allocations of the same size class, and allocating from a page prevents it from being freed. There is additional logic to handle abandoned pages when threads exit.
1 parent 02ee475 commit c012c8a

File tree: 9 files changed, +199 −17 lines
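Before the per-file diffs, here is a minimal, self-contained C sketch of the tag-and-defer pattern the commit message describes. Every name in it (deferred_page_t, advance_goal, goal_reached, defer_free, collect_pending, and the bare rd_seq counter) is an illustrative stand-in, not a CPython API; the real implementation, with per-thread states, atomics, and the llist helpers, is in the Objects/obmalloc.c diff below.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct deferred_page {
        struct deferred_page *next;  // per-thread list of pages awaiting free
        uint64_t qsbr_goal;          // sequence all readers must reach first
    } deferred_page_t;

    static deferred_page_t *pending;  // head of the per-thread queue
    static uint64_t wr_seq = 1;       // write sequence (cf. QSBR_INITIAL)
    static uint64_t rd_seq = 1;       // minimum sequence seen by all readers
                                      // (real QSBR computes this from thread states)

    // Advance the write sequence and return the new goal (cf. QSBR_INCR == 2).
    static uint64_t advance_goal(void) { return wr_seq += 2; }

    // Wrap-around-safe check that all readers have passed the goal.
    static bool goal_reached(uint64_t goal) {
        return (int64_t)(goal - rd_seq) <= 0;
    }

    // Instead of freeing an empty page immediately, tag it and enqueue it.
    static void defer_free(deferred_page_t *page) {
        page->qsbr_goal = advance_goal();
        page->next = pending;
        pending = page;
    }

    // Called when a fresh page is needed: release pages whose goal was
    // reached. (The sketch scans the whole list; CPython keeps a FIFO
    // ordered by goal and stops at the first unreached goal.)
    static void collect_pending(void (*free_page)(deferred_page_t *)) {
        deferred_page_t **pp = &pending;
        while (*pp != NULL) {
            deferred_page_t *page = *pp;
            if (goal_reached(page->qsbr_goal)) {
                *pp = page->next;
                free_page(page);
            }
            else {
                pp = &page->next;
            }
        }
    }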

Include/internal/mimalloc/mimalloc/types.h (+8 −1)

@@ -311,6 +311,7 @@ typedef struct mi_page_s {
   uint32_t slice_offset;     // distance from the actual page data slice (0 if a page)
   uint8_t is_committed : 1;  // `true` if the page virtual memory is committed
   uint8_t is_zero_init : 1;  // `true` if the page was initially zero initialized
+  uint8_t use_qsbr : 1;      // delay page freeing using qsbr
   uint8_t tag : 4;           // tag from the owning heap
   uint8_t debug_offset;      // number of bytes to preserve when filling freed or uninitialized memory

@@ -336,8 +337,13 @@
   struct mi_page_s* next;    // next page owned by this thread with the same `block_size`
   struct mi_page_s* prev;    // previous page owned by this thread with the same `block_size`

+#ifdef Py_GIL_DISABLED
+  struct llist_node qsbr_node;
+  uint64_t qsbr_goal;
+#endif
+
   // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
-#if MI_INTPTR_SIZE==8
+#if MI_INTPTR_SIZE==8 && !defined(Py_GIL_DISABLED)
   uintptr_t padding[1];
 #endif
 } mi_page_t;

@@ -555,6 +561,7 @@ struct mi_heap_s {
   bool no_reclaim;           // `true` if this heap should not reclaim abandoned pages
   uint8_t tag;               // custom identifier for this heap
   uint8_t debug_offset;      // number of bytes to preserve when filling freed or uninitialized memory
+  bool page_use_qsbr;        // should freeing pages be delayed using QSBR
 };

Include/internal/pycore_mimalloc.h (+1)

@@ -48,6 +48,7 @@ struct _mimalloc_thread_state {
     mi_heap_t *current_object_heap;
     mi_heap_t heaps[_Py_MIMALLOC_HEAP_COUNT];
     mi_tld_t tld;
+    struct llist_node page_list;
 };
 #endif

Include/internal/pycore_qsbr.h (+15)

@@ -29,6 +29,12 @@ extern "C" {
 #define QSBR_INITIAL 1
 #define QSBR_INCR 2

+// Wrap-around safe comparison. This is a holdover from the FreeBSD
+// implementation, which uses 32-bit sequence numbers. We currently use 64-bit
+// sequence numbers, so wrap-around is unlikely.
+#define QSBR_LT(a, b) ((int64_t)((a)-(b)) < 0)
+#define QSBR_LEQ(a, b) ((int64_t)((a)-(b)) <= 0)
+
 struct _qsbr_shared;
 struct _PyThreadStateImpl;  // forward declare to avoid circular dependency

@@ -89,6 +95,15 @@ _Py_qsbr_quiescent_state(struct _qsbr_thread_state *qsbr)
     _Py_atomic_store_uint64_release(&qsbr->seq, seq);
 }

+// Have the read sequences advanced to the given goal? Like `_Py_qsbr_poll()`,
+// but does not perform a scan of threads.
+static inline bool
+_Py_qbsr_goal_reached(struct _qsbr_thread_state *qsbr, uint64_t goal)
+{
+    uint64_t rd_seq = _Py_atomic_load_uint64(&qsbr->shared->rd_seq);
+    return QSBR_LEQ(goal, rd_seq);
+}
+
 // Advance the write sequence and return the new goal. This should be called
 // after data is removed. The returned goal is used with `_Py_qsbr_poll()` to
 // determine when it is safe to reclaim (free) the memory.
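As an aside (not part of the commit): the signed-difference idiom in QSBR_LT/QSBR_LEQ stays correct across unsigned wrap-around as long as the two counters are within 2^63 of each other. A tiny standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define QSBR_LT(a, b) ((int64_t)((a)-(b)) < 0)

    int main(void)
    {
        uint64_t before = UINT64_MAX - 1;  // just below the wrap-around point
        uint64_t after  = before + 4;      // wraps around to 2
        // A plain `<` would claim `after < before`; the signed difference
        // of the two counters still orders them correctly.
        assert(QSBR_LT(before, after));
        assert(!QSBR_LT(after, before));
        return 0;
    }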

Objects/mimalloc/heap.c (+7 −1)

@@ -98,7 +98,10 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   if (mi_page_all_free(page)) {
     // no more used blocks, free the page.
     // note: this will free retired pages as well.
-    _mi_page_free(page, pq, collect >= MI_FORCE);
+    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
+    if (!freed && collect == MI_ABANDON) {
+      _mi_page_abandon(page, pq);
+    }
   }
   else if (collect == MI_ABANDON) {
     // still used blocks but the thread is done; abandon the page

@@ -153,6 +156,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // collect retired pages
   _mi_heap_collect_retired(heap, force);

+  // free pages that were delayed with QSBR
+  _PyMem_mi_heap_collect_qsbr(heap);
+
   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

Objects/mimalloc/page.c (+33 −2)

@@ -225,6 +225,9 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {

   // and the local free list
   if (page->local_free != NULL) {
+    // any previous QSBR goals are no longer valid because we reused the page
+    _PyMem_mi_page_clear_qsbr(page);
+
     if mi_likely(page->free == NULL) {
       // usual case
       page->free = page->local_free;

@@ -267,6 +270,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
   mi_page_queue_push(heap, pq, page);
+  _PyMem_mi_page_reclaimed(page);
   mi_assert_expensive(_mi_page_is_valid(page));
 }

@@ -383,6 +387,13 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {

   mi_heap_t* pheap = mi_page_heap(page);

+#ifdef Py_GIL_DISABLED
+  if (page->qsbr_node.next != NULL) {
+    // remove from QSBR queue, but keep the goal
+    llist_remove(&page->qsbr_node);
+  }
+#endif
+
   // remove from our page list
   mi_segments_tld_t* segments_tld = &pheap->tld->segments;
   mi_page_queue_remove(pq, page);

@@ -417,6 +428,11 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {

   mi_heap_t* heap = mi_page_heap(page);

+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif
+
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
   mi_segments_tld_t* segments_tld = &heap->tld->segments;

@@ -444,6 +460,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {

   mi_page_set_has_aligned(page, false);

+  // any previous QSBR goals are no longer valid because we reused the page
+  _PyMem_mi_page_clear_qsbr(page);
+
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
   // NOTE: refine this more: we should not retire if this

@@ -465,7 +484,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       return; // dont't free after all
     }
   }
-  _mi_page_free(page, pq, false);
+  _PyMem_mi_page_maybe_free(page, pq, false);
 }

 // free retired pages: we don't need to look at the entire queues

@@ -480,7 +499,10 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
     if (mi_page_all_free(page)) {
       page->retire_expire--;
      if (force || page->retire_expire == 0) {
-        _mi_page_free(pq->first, pq, force);
+#ifdef Py_GIL_DISABLED
+        mi_assert_internal(page->qsbr_goal == 0);
+#endif
+        _PyMem_mi_page_maybe_free(page, pq, force);
      }
      else {
        // keep retired, update min/max

@@ -661,6 +683,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   // set fields
   mi_page_set_heap(page, heap);
   page->tag = heap->tag;
+  page->use_qsbr = heap->page_use_qsbr;
   page->debug_offset = heap->debug_offset;
   page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
   size_t page_size;

@@ -691,6 +714,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->xthread_free == 0);
   mi_assert_internal(page->next == NULL);
   mi_assert_internal(page->prev == NULL);
+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif
   mi_assert_internal(page->retire_expire == 0);
   mi_assert_internal(!mi_page_has_aligned(page));
   #if (MI_PADDING || MI_ENCODE_FREELIST)

@@ -750,6 +777,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   mi_heap_stat_counter_increase(heap, searches, count);

   if (page == NULL) {
+    _PyMem_mi_heap_collect_qsbr(heap); // some pages might be safe to free now
     _mi_heap_collect_retired(heap, false); // perhaps make a page available?
     page = mi_page_fresh(heap, pq);
     if (page == NULL && first_try) {

@@ -760,6 +788,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   else {
     mi_assert(pq->first == page);
     page->retire_expire = 0;
+    _PyMem_mi_page_clear_qsbr(page);
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
   return page;

@@ -785,6 +814,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {

     if (mi_page_immediate_available(page)) {
       page->retire_expire = 0;
+      _PyMem_mi_page_clear_qsbr(page);
       return page; // fast path
     }
   }

@@ -878,6 +908,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
     return NULL;
   }
   else {
+    _PyMem_mi_heap_collect_qsbr(heap);
    return mi_large_huge_page_alloc(heap,size,huge_alignment);
   }
 }

Objects/mimalloc/segment.c (+13 −3)

@@ -982,6 +982,10 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   mi_assert_internal(mi_page_all_free(page));
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment->used > 0);
+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif

   size_t inuse = page->capacity * mi_page_block_size(page);
   _mi_stat_decrease(&tld->stats->page_committed, inuse);

@@ -1270,10 +1274,13 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
     // ensure used count is up to date and collect potential concurrent frees
     mi_page_t* const page = mi_slice_to_page(slice);
     _mi_page_free_collect(page, false);
-    if (mi_page_all_free(page)) {
+    if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
       // if this page is all free now, free it without adding to any queues (yet)
       mi_assert_internal(page->next == NULL && page->prev==NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+#ifdef Py_GIL_DISABLED
+      page->qsbr_goal = 0;
+#endif
       segment->abandoned--;
       slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
       mi_assert_internal(!mi_slice_is_used(slice));

@@ -1344,15 +1351,18 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     mi_page_set_heap(page, target_heap);
     _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
     _mi_page_free_collect(page, false); // ensure used count is up to date
-    if (mi_page_all_free(page)) {
+    if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
       // if everything free by now, free the page
+#ifdef Py_GIL_DISABLED
+      page->qsbr_goal = 0;
+#endif
       slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
     }
     else {
       // otherwise reclaim it into the heap
       _mi_page_reclaim(target_heap, page);
       if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
-          heap == target_heap) {
+          requested_block_size <= MI_MEDIUM_OBJ_SIZE_MAX && heap == target_heap) {
         if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
       }
     }

Objects/obmalloc.c (+113)

@@ -12,6 +12,12 @@
 #include <stdlib.h>               // malloc()
 #include <stdbool.h>
 #ifdef WITH_MIMALLOC
+// Forward declarations of functions used in our mimalloc modifications
+static void _PyMem_mi_page_clear_qsbr(mi_page_t *page);
+static bool _PyMem_mi_page_is_safe_to_free(mi_page_t *page);
+static bool _PyMem_mi_page_maybe_free(mi_page_t *page, mi_page_queue_t *pq, bool force);
+static void _PyMem_mi_page_reclaimed(mi_page_t *page);
+static void _PyMem_mi_heap_collect_qsbr(mi_heap_t *heap);
 #  include "pycore_mimalloc.h"
 #  include "mimalloc/static.c"
 #  include "mimalloc/internal.h"  // for stats

@@ -86,6 +92,113 @@ _PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr)

 #ifdef WITH_MIMALLOC

+static void
+_PyMem_mi_page_clear_qsbr(mi_page_t *page)
+{
+#ifdef Py_GIL_DISABLED
+    // Clear the QSBR goal and remove the page from the QSBR linked list.
+    page->qsbr_goal = 0;
+    if (page->qsbr_node.next != NULL) {
+        llist_remove(&page->qsbr_node);
+    }
+#endif
+}
+
+// Check if an empty, newly reclaimed page is safe to free now.
+static bool
+_PyMem_mi_page_is_safe_to_free(mi_page_t *page)
+{
+    assert(mi_page_all_free(page));
+#ifdef Py_GIL_DISABLED
+    assert(page->qsbr_node.next == NULL);
+    if (page->use_qsbr && page->qsbr_goal != 0) {
+        _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+        if (tstate == NULL) {
+            return false;
+        }
+        return _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal);
+    }
+#endif
+    return true;
+}
+
+static bool
+_PyMem_mi_page_maybe_free(mi_page_t *page, mi_page_queue_t *pq, bool force)
+{
+#ifdef Py_GIL_DISABLED
+    assert(mi_page_all_free(page));
+    if (page->use_qsbr) {
+        _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
+        if (page->qsbr_goal != 0 && _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal)) {
+            _PyMem_mi_page_clear_qsbr(page);
+            _mi_page_free(page, pq, force);
+            return true;
+        }
+
+        _PyMem_mi_page_clear_qsbr(page);
+        page->retire_expire = 0;
+        page->qsbr_goal = _Py_qsbr_deferred_advance(tstate->qsbr);
+        llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
+        return false;
+    }
+#endif
+    _mi_page_free(page, pq, force);
+    return true;
+}
+
+static void
+_PyMem_mi_page_reclaimed(mi_page_t *page)
+{
+#ifdef Py_GIL_DISABLED
+    assert(page->qsbr_node.next == NULL);
+    if (page->qsbr_goal != 0) {
+        if (mi_page_all_free(page)) {
+            assert(page->qsbr_node.next == NULL);
+            _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
+            page->retire_expire = 0;
+            llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
+        }
+        else {
+            page->qsbr_goal = 0;
+        }
+    }
+#endif
+}
+
+static void
+_PyMem_mi_heap_collect_qsbr(mi_heap_t *heap)
+{
+#ifdef Py_GIL_DISABLED
+    if (!heap->page_use_qsbr) {
+        return;
+    }
+
+    _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+    struct llist_node *head = &tstate->mimalloc.page_list;
+    if (llist_empty(head)) {
+        return;
+    }
+
+    struct llist_node *node;
+    llist_for_each_safe(node, head) {
+        mi_page_t *page = llist_data(node, mi_page_t, qsbr_node);
+        if (!mi_page_all_free(page)) {
+            // We allocated from this page some point after the delayed free
+            _PyMem_mi_page_clear_qsbr(page);
+            continue;
+        }
+
+        if (!_Py_qsbr_poll(tstate->qsbr, page->qsbr_goal)) {
+            return;
+        }
+
+        _PyMem_mi_page_clear_qsbr(page);
+        _mi_page_free(page, mi_page_queue_of(page), false);
+    }
+#endif
+}
+
 void *
 _PyMem_MiMalloc(void *ctx, size_t size)
 {
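A note on the early return in _PyMem_mi_heap_collect_qsbr: pages are appended with llist_insert_tail and receive increasing goals from _Py_qsbr_deferred_advance, so the per-thread list is ordered by goal. Once _Py_qsbr_poll() fails for one page, no later page in the list can be safe to free either, so the loop can stop rather than continue scanning. Note also that _Py_qsbr_poll() may advance the shared read sequence, unlike the cheaper _Py_qbsr_goal_reached(), which per the pycore_qsbr.h comment above skips the thread scan.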
