
Commit f0890ae

gbaraldi authored and d-netto committed
Move assertion to correct place.
1 parent 7843330 commit f0890ae

File tree (5 files changed, +92 -20 lines):

src/gc-stacks.c
src/gc-tls.h
src/gc.c
src/gc.h
src/partr.c


src/gc-stacks.c

Lines changed: 34 additions & 17 deletions

@@ -190,7 +190,7 @@ JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, jl_task_t *owner) JL_NOTSAFEPO
     return stk;
 }
 
-void sweep_stack_pools(void)
+void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT
 {
     // Stack sweeping algorithm:
     //    // deallocate stacks if we have too many sitting around unused
@@ -203,27 +203,43 @@ void sweep_stack_pools(void)
     //            bufsz = t->bufsz
     //            if (stkbuf)
     //                push(free_stacks[sz], stkbuf)
-    assert(gc_n_threads);
-    for (int i = 0; i < gc_n_threads; i++) {
+    jl_atomic_fetch_add(&gc_n_threads_sweeping, 1);
+    while (1) {
+        int i = jl_atomic_fetch_add_relaxed(&gc_ptls_sweep_idx, -1);
+        if (i < 0)
+            break;
         jl_ptls_t ptls2 = gc_all_tls_states[i];
+        if (ptls2 == NULL)
+            continue;
 
         // free half of stacks that remain unused since last sweep
-        for (int p = 0; p < JL_N_STACK_POOLS; p++) {
-            small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p];
-            size_t n_to_free;
-            if (al->len > MIN_STACK_MAPPINGS_PER_POOL) {
-                n_to_free = al->len / 2;
-                if (n_to_free > (al->len - MIN_STACK_MAPPINGS_PER_POOL))
-                    n_to_free = al->len - MIN_STACK_MAPPINGS_PER_POOL;
-            }
-            else {
-                n_to_free = 0;
-            }
-            for (int n = 0; n < n_to_free; n++) {
-                void *stk = small_arraylist_pop(al);
-                free_stack(stk, pool_sizes[p]);
+        if (i == jl_atomic_load_relaxed(&gc_stack_free_idx)) {
+            for (int p = 0; p < JL_N_STACK_POOLS; p++) {
+                small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p];
+                size_t n_to_free;
+                if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
+                    n_to_free = al->len; // not alive yet or dead, so it does not need these anymore
+                }
+                else if (al->len > MIN_STACK_MAPPINGS_PER_POOL) {
+                    n_to_free = al->len / 2;
+                    if (n_to_free > (al->len - MIN_STACK_MAPPINGS_PER_POOL))
+                        n_to_free = al->len - MIN_STACK_MAPPINGS_PER_POOL;
+                }
+                else {
+                    n_to_free = 0;
+                }
+                for (int n = 0; n < n_to_free; n++) {
+                    void *stk = small_arraylist_pop(al);
+                    free_stack(stk, pool_sizes[p]);
+                }
+                if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
+                    small_arraylist_free(al);
+                }
             }
         }
+        if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
+            small_arraylist_free(ptls2->gc_tls.heap.free_stacks);
+        }
 
         small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks;
         size_t n = 0;
@@ -264,6 +280,7 @@ void sweep_stack_pools(void)
         }
         live_tasks->len -= ndel;
     }
+    jl_atomic_fetch_add(&gc_n_threads_sweeping, -1);
 }
 
 JL_DLLEXPORT jl_array_t *jl_live_tasks(void)
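
The per-pool trimming policy in the hunk above is easy to miss amid the parallelism changes: a ptls whose current_task is NULL frees its entire stack cache, otherwise half of the cached stacks are freed, but the pool is never trimmed below MIN_STACK_MAPPINGS_PER_POOL. Below is a minimal standalone sketch of just that computation; the helper name stacks_to_free, its parameters, and the constant's value here are illustrative, not taken from the Julia sources.

#include <stddef.h>
#include <stdio.h>

#define MIN_STACK_MAPPINGS_PER_POOL 5   /* placeholder value for illustration */

/* How many cached stacks a pool of length `pool_len` would give up. */
static size_t stacks_to_free(size_t pool_len, int thread_is_dead)
{
    if (thread_is_dead)
        return pool_len;                     /* owner is gone: drop the whole cache */
    if (pool_len > MIN_STACK_MAPPINGS_PER_POOL) {
        size_t n_to_free = pool_len / 2;     /* free half of the unused stacks... */
        if (n_to_free > pool_len - MIN_STACK_MAPPINGS_PER_POOL)
            n_to_free = pool_len - MIN_STACK_MAPPINGS_PER_POOL; /* ...but keep a floor */
        return n_to_free;
    }
    return 0;                                /* small cache: keep everything */
}

int main(void)
{
    printf("%zu\n", stacks_to_free(20, 0)); /* 10: plenty cached, halve it */
    printf("%zu\n", stacks_to_free(8, 0));  /* 3: halving would dip below the floor */
    printf("%zu\n", stacks_to_free(4, 0));  /* 0: already at or below the floor */
    printf("%zu\n", stacks_to_free(4, 1));  /* 4: a dead thread frees its whole cache */
    return 0;
}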

src/gc-tls.h

Lines changed: 1 addition & 0 deletions

@@ -82,6 +82,7 @@ typedef struct {
     jl_gc_markqueue_t mark_queue;
     jl_gc_mark_cache_t gc_cache;
     _Atomic(size_t) gc_sweeps_requested;
+    _Atomic(uint8_t) gc_stack_sweep_requested;
     arraylist_t sweep_objs;
 } jl_gc_tls_states_t;

src/gc.c

Lines changed: 43 additions & 1 deletion

@@ -26,6 +26,10 @@ _Atomic(int) gc_n_threads_sweeping;
 _Atomic(jl_gc_padded_page_stack_t *) gc_allocd_scratch;
 // `tid` of mutator thread that triggered GC
 _Atomic(int) gc_master_tid;
+// counter for sharing work when sweeping stacks
+_Atomic(int) gc_ptls_sweep_idx;
+// counter for round robin of giving back stack pages to the OS
+_Atomic(int) gc_stack_free_idx;
 // `tid` of first GC thread
 int gc_first_tid;
 // Mutex/cond used to synchronize wakeup of GC threads on parallel marking
@@ -1525,6 +1529,44 @@ static void gc_sweep_other(jl_ptls_t ptls, int sweep_full) JL_NOTSAFEPOINT
     gc_num.total_sweep_free_mallocd_memory_time += t_free_mallocd_memory_end - t_free_mallocd_memory_start;
 }
 
+// wake up all threads to sweep the stacks
+void gc_sweep_wake_all_stacks(jl_ptls_t ptls) JL_NOTSAFEPOINT
+{
+    uv_mutex_lock(&gc_threads_lock);
+    int first = gc_first_parallel_collector_thread_id();
+    int last = gc_last_parallel_collector_thread_id();
+    for (int i = first; i <= last; i++) {
+        jl_ptls_t ptls2 = gc_all_tls_states[i];
+        gc_check_ptls_of_parallel_collector_thread(ptls2);
+        jl_atomic_fetch_add(&ptls2->gc_tls.gc_stack_sweep_requested, 1);
+    }
+    uv_cond_broadcast(&gc_threads_cond);
+    uv_mutex_unlock(&gc_threads_lock);
+    return;
+}
+
+void gc_sweep_wait_for_all_stacks(void) JL_NOTSAFEPOINT
+{
+    while ((jl_atomic_load_acquire(&gc_ptls_sweep_idx) >= 0) || jl_atomic_load_acquire(&gc_n_threads_sweeping) != 0) {
+        jl_cpu_pause();
+    }
+}
+
+void sweep_stack_pools(jl_ptls_t ptls) JL_NOTSAFEPOINT
+{
+    // initialize ptls index for parallel sweeping of stack pools
+    assert(gc_n_threads);
+    int stack_free_idx = jl_atomic_load_relaxed(&gc_stack_free_idx);
+    if (stack_free_idx + 1 == gc_n_threads)
+        jl_atomic_store_relaxed(&gc_stack_free_idx, 0);
+    else
+        jl_atomic_store_relaxed(&gc_stack_free_idx, stack_free_idx + 1);
+    jl_atomic_store_release(&gc_ptls_sweep_idx, gc_n_threads - 1); // idx == gc_n_threads = release stacks to the OS so it's serial
+    gc_sweep_wake_all_stacks(ptls);
+    sweep_stack_pool_loop();
+    gc_sweep_wait_for_all_stacks();
+}
+
 static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_NOTSAFEPOINT
 {
     assert(pg->fl_begin_offset != UINT16_MAX);
@@ -3604,7 +3646,7 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
 #endif
     current_sweep_full = sweep_full;
     sweep_weak_refs();
-    sweep_stack_pools();
+    sweep_stack_pools(ptls);
    gc_sweep_foreign_objs();
    gc_sweep_other(ptls, sweep_full);
    gc_scrub();
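
sweep_stack_pools() is the orchestration point: it advances the round-robin gc_stack_free_idx, publishes the per-ptls work index, wakes the parallel GC threads, joins the sweeping itself, and then spins until the index is exhausted and gc_n_threads_sweeping has dropped back to zero. The toy program below reproduces that leader/helper protocol with C11 atomics and pthreads; every identifier in it (sweep_idx, n_sweeping, slot_swept, helper_main, ...) is hypothetical, default sequentially consistent atomics stand in for the relaxed/acquire/release orderings of the real code, and spawning threads stands in for the condition-variable wakeup.

#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define N_SLOTS   8
#define N_HELPERS 3

static atomic_int sweep_idx = -1;      /* next slot to claim, counts down; -1 == no work */
static atomic_int free_to_os_idx;      /* which slot gets to release memory to the OS */
static atomic_int n_sweeping;          /* threads currently inside the sweep loop */
static atomic_int slot_swept[N_SLOTS]; /* per-slot counter, to check "exactly once" */

static void sweep_loop(void)           /* cf. sweep_stack_pool_loop() */
{
    atomic_fetch_add(&n_sweeping, 1);
    while (1) {
        int i = atomic_fetch_add(&sweep_idx, -1); /* claim one slot */
        if (i < 0)
            break;                                /* no work left */
        if (i == atomic_load(&free_to_os_idx)) {
            /* the real code additionally trims this slot's stack cache here */
        }
        atomic_fetch_add(&slot_swept[i], 1);      /* "sweep" slot i */
    }
    atomic_fetch_add(&n_sweeping, -1);
}

static void *helper_main(void *arg)
{
    (void)arg;
    sweep_loop();
    return NULL;
}

static void wait_for_all_sweepers(void)           /* cf. gc_sweep_wait_for_all_stacks() */
{
    while (atomic_load(&sweep_idx) >= 0 || atomic_load(&n_sweeping) != 0)
        sched_yield();                            /* stand-in for jl_cpu_pause() */
}

int main(void)
{
    /* rotate which slot releases its cached memory back to the OS this cycle */
    int idx = atomic_load(&free_to_os_idx);
    atomic_store(&free_to_os_idx, (idx + 1) % N_SLOTS);

    /* publish the work, then "wake" the helpers */
    atomic_store(&sweep_idx, N_SLOTS - 1);
    pthread_t helpers[N_HELPERS];
    for (int t = 0; t < N_HELPERS; t++)
        pthread_create(&helpers[t], NULL, helper_main, NULL);

    sweep_loop();            /* the leader participates in the sweep as well */
    wait_for_all_sweepers(); /* returns only when every claimed slot is finished */

    for (int i = 0; i < N_SLOTS; i++)
        assert(atomic_load(&slot_swept[i]) == 1); /* each slot swept exactly once */

    for (int t = 0; t < N_HELPERS; t++)
        pthread_join(helpers[t], NULL);
    return 0;
}

Checking both halves of the wait condition matters: a sweeper may already have driven the index negative while still working on the slot it claimed, and only the active-sweeper count reveals that.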

src/gc.h

Lines changed: 3 additions & 1 deletion

@@ -565,6 +565,8 @@ extern uv_cond_t gc_threads_cond;
 extern uv_sem_t gc_sweep_assists_needed;
 extern _Atomic(int) gc_n_threads_marking;
 extern _Atomic(int) gc_n_threads_sweeping;
+extern _Atomic(int) gc_ptls_sweep_idx;
+extern _Atomic(int) gc_stack_free_idx;
 extern uv_barrier_t thread_init_done;
 void gc_mark_queue_all_roots(jl_ptls_t ptls, jl_gc_markqueue_t *mq);
 void gc_mark_finlist_(jl_gc_markqueue_t *mq, jl_value_t *fl_parent, jl_value_t **fl_begin, jl_value_t **fl_end) JL_NOTSAFEPOINT;
@@ -574,7 +576,7 @@ void gc_mark_loop_serial(jl_ptls_t ptls);
 void gc_mark_loop_parallel(jl_ptls_t ptls, int master);
 void gc_sweep_pool_parallel(jl_ptls_t ptls);
 void gc_free_pages(void);
-void sweep_stack_pools(void);
+void sweep_stack_pool_loop(void);
 void jl_gc_debug_init(void);
 
 // GC pages

src/partr.c

Lines changed: 11 additions & 1 deletion

@@ -118,6 +118,11 @@ static inline int may_sweep(jl_ptls_t ptls) JL_NOTSAFEPOINT
     return (jl_atomic_load(&ptls->gc_tls.gc_sweeps_requested) > 0);
 }
 
+static inline int may_sweep_stack(jl_ptls_t ptls) JL_NOTSAFEPOINT
+{
+    return (jl_atomic_load(&ptls->gc_tls.gc_stack_sweep_requested) > 0);
+}
+
 // parallel gc thread function
 void jl_parallel_gc_threadfun(void *arg)
 {
@@ -139,12 +144,17 @@ void jl_parallel_gc_threadfun(void *arg)
 
     while (1) {
         uv_mutex_lock(&gc_threads_lock);
-        while (!may_mark() && !may_sweep(ptls)) {
+        while (!may_mark() && !may_sweep(ptls) && !may_sweep_stack(ptls)) {
             uv_cond_wait(&gc_threads_cond, &gc_threads_lock);
         }
         uv_mutex_unlock(&gc_threads_lock);
         assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD);
         gc_mark_loop_parallel(ptls, 0);
+        if (may_sweep_stack(ptls)) {
+            assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD);
+            sweep_stack_pool_loop();
+            jl_atomic_fetch_add(&ptls->gc_tls.gc_stack_sweep_requested, -1);
+        }
         if (may_sweep(ptls)) {
             assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD);
             gc_sweep_pool_parallel(ptls);
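
Each parallel GC thread now also wakes up for stack-sweep requests: the collector bumps the per-thread gc_stack_sweep_requested counter under gc_threads_lock and broadcasts gc_threads_cond, and the thread decrements the counter once it has run sweep_stack_pool_loop(). Here is a reduced sketch of that request/acknowledge wakeup, with libuv's mutex and condition variable swapped for pthreads and a single worker; do_stack_sweep, gc_threadfun, and the shutdown flag are invented for the example.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t threads_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  threads_cond = PTHREAD_COND_INITIALIZER;
static atomic_int stack_sweep_requested;    /* cf. gc_stack_sweep_requested */
static atomic_int shutdown_requested;       /* lets the toy worker exit cleanly */

static int may_sweep_stack(void)
{
    return atomic_load(&stack_sweep_requested) > 0;
}

static void do_stack_sweep(void)
{
    puts("worker: sweeping stack pools");   /* stand-in for sweep_stack_pool_loop() */
}

static void *gc_threadfun(void *arg)
{
    (void)arg;
    while (1) {
        pthread_mutex_lock(&threads_lock);
        while (!may_sweep_stack() && !atomic_load(&shutdown_requested))
            pthread_cond_wait(&threads_cond, &threads_lock);
        pthread_mutex_unlock(&threads_lock);
        if (atomic_load(&shutdown_requested))
            return NULL;
        if (may_sweep_stack()) {
            do_stack_sweep();
            atomic_fetch_add(&stack_sweep_requested, -1); /* acknowledge the request */
        }
    }
}

int main(void)
{
    pthread_t worker;
    pthread_create(&worker, NULL, gc_threadfun, NULL);

    /* the collector: post a request under the lock, then wake everyone */
    pthread_mutex_lock(&threads_lock);
    atomic_fetch_add(&stack_sweep_requested, 1);
    pthread_cond_broadcast(&threads_cond);
    pthread_mutex_unlock(&threads_lock);

    /* wait until the worker has acknowledged the request */
    while (atomic_load(&stack_sweep_requested) > 0)
        sched_yield();

    /* shut the worker down */
    pthread_mutex_lock(&threads_lock);
    atomic_store(&shutdown_requested, 1);
    pthread_cond_broadcast(&threads_cond);
    pthread_mutex_unlock(&threads_lock);
    pthread_join(worker, NULL);
    return 0;
}

Posting the request while holding the lock before broadcasting is what prevents a lost wakeup: the worker re-checks the counter under the same lock before it goes back to sleep.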
