@@ -323,6 +323,24 @@ JL_DLLEXPORT void jl_gc_prepare_to_collect(void)
323
323
errno = last_errno ;
324
324
}
325
325
326
+ JL_DLLEXPORT unsigned char jl_gc_pin_object (void * obj ) {
327
+ return mmtk_pin_object (obj );
328
+ }
329
+
330
+ JL_DLLEXPORT void jl_gc_notify_thread_yield (jl_ptls_t ptls , void * ctx ) {
331
+ if (ctx == NULL ) {
332
+ // Save the context for the thread as it was running at the time of the call
333
+ int r = getcontext (& ptls -> gc_tls .ctx_at_the_time_gc_started );
334
+ if (r == -1 ) {
335
+ jl_safe_printf ("Failed to save context for conservative scanning\n" );
336
+ abort ();
337
+ }
338
+ return ;
339
+ }
340
+ memcpy (& ptls -> gc_tls .ctx_at_the_time_gc_started , ctx , sizeof (ucontext_t ));
341
+ }
342
+
343
+
326
344
// ========================================================================= //
327
345
// GC Statistics
328
346
// ========================================================================= //
@@ -842,29 +860,43 @@ STATIC_INLINE void* bump_alloc_fast(MMTkMutatorContext* mutator, uintptr_t* curs
842
860
}
843
861
}
844
862
863
// Atomically set the side-metadata bit that corresponds to `obj` in the
// table based at `side_metadata_base`. The addressing follows the original
// code: byte index is (addr >> 6), bit index is (addr >> 3) & 7, i.e. one
// metadata bit per 8-byte granule.
//
// Improvements over the previous version: the metadata byte is read once and
// the CAS's expected-value update is reused on contention (jl_atomic_cmpswap,
// like C11 atomic_compare_exchange, stores the observed value back into
// `old_val` on failure), and the atomic RMW is skipped entirely when the bit
// is already set — the store would be a no-op in that case.
inline void mmtk_set_side_metadata(const void *side_metadata_base, void *obj) {
    intptr_t addr = (intptr_t)obj;
    uint8_t *meta_addr = (uint8_t*)side_metadata_base + (addr >> 6);
    intptr_t shift = (addr >> 3) & 0b111;
    uint8_t mask = (uint8_t)(1 << shift);
    uint8_t old_val = *meta_addr;
    uint8_t new_val;
    do {
        if (old_val & mask)
            return; // bit already set — nothing to do
        new_val = old_val | mask;
        // On CAS failure old_val is refreshed with the current byte value,
        // so no explicit reload is needed before retrying.
    } while (!jl_atomic_cmpswap((_Atomic(uint8_t)*)meta_addr, &old_val, new_val));
}
875
+
845
876
// Fast-path allocation from the default immix allocator: delegates to the
// shared bump-pointer fast path with the immix cursor/limit pair.
STATIC_INLINE void *mmtk_immix_alloc_fast(MMTkMutatorContext *mutator, size_t size, size_t align, size_t offset) {
    ImmixAllocator *im = &mutator->allocators.immix[MMTK_DEFAULT_IMMIX_ALLOCATOR];
    uintptr_t *cursor = (uintptr_t*)&im->cursor;
    intptr_t limit = (intptr_t)im->limit;
    return bump_alloc_fast(mutator, cursor, limit, size, align, offset, 0);
}
849
880
850
- inline void mmtk_immix_post_alloc_slow (MMTkMutatorContext * mutator , void * obj , size_t size ) {
851
- mmtk_post_alloc (mutator , obj , size , 0 );
852
- }
853
-
854
881
// Post-allocation bookkeeping for immix fast-path objects: when the build
// requires valid-object (VO) bits, record `obj` in the VO-bit side-metadata.
STATIC_INLINE void mmtk_immix_post_alloc_fast(MMTkMutatorContext *mutator, void *obj, size_t size) {
    if (!MMTK_NEEDS_VO_BIT)
        return;
    mmtk_set_side_metadata(MMTK_SIDE_VO_BIT_BASE_ADDRESS, obj);
}
858
886
859
887
// Fast-path allocation from the immortal bump allocator; forwards to the
// shared bump-pointer fast path (final argument 1, unlike the immix path's 0).
STATIC_INLINE void *mmtk_immortal_alloc_fast(MMTkMutatorContext *mutator, size_t size, size_t align, size_t offset) {
    BumpAllocator *bump = &mutator->allocators.bump_pointer[MMTK_IMMORTAL_BUMP_ALLOCATOR];
    uintptr_t *cursor = (uintptr_t*)&bump->cursor;
    uintptr_t limit = (uintptr_t)bump->limit;
    return bump_alloc_fast(mutator, cursor, limit, size, align, offset, 1);
}
863
891
864
- STATIC_INLINE void mmtk_immortal_post_alloc_fast (MMTkMutatorContext * mutator , void * obj , size_t size ) {
865
- // FIXME: Similarly, for now, we do nothing
866
- // but when supporting moving, this is where we set the valid object (VO) bit
867
- // and log (old gen) bit
892
+ STATIC_INLINE void mmtk_immortal_post_alloc_fast (MMTkMutatorContext * mutator , void * obj , size_t size ) {
893
+ if (MMTK_NEEDS_WRITE_BARRIER == MMTK_OBJECT_BARRIER ) {
894
+ mmtk_set_side_metadata (MMTK_SIDE_LOG_BIT_BASE_ADDRESS , obj );
895
+ }
896
+
897
+ if (MMTK_NEEDS_VO_BIT ) {
898
+ mmtk_set_side_metadata (MMTK_SIDE_VO_BIT_BASE_ADDRESS , obj );
899
+ }
868
900
}
869
901
870
902
JL_DLLEXPORT jl_value_t * jl_mmtk_gc_alloc_default (jl_ptls_t ptls , int osize , size_t align , void * ty )
@@ -1042,6 +1074,16 @@ jl_value_t *jl_gc_permobj(size_t sz, void *ty) JL_NOTSAFEPOINT
1042
1074
return jl_valueof (o );
1043
1075
}
1044
1076
1077
+ jl_value_t * jl_gc_permsymbol (size_t sz ) JL_NOTSAFEPOINT
1078
+ {
1079
+ jl_taggedvalue_t * tag = (jl_taggedvalue_t * )jl_gc_perm_alloc (sz , 0 , sizeof (void * ), 0 );
1080
+ jl_value_t * sym = jl_valueof (tag );
1081
+ jl_ptls_t ptls = jl_current_task -> ptls ;
1082
+ jl_set_typetagof (sym , jl_symbol_tag , 0 ); // We need to set symbol tag. The GC tag doesnt matter.
1083
+ mmtk_immortal_post_alloc_fast (& ptls -> gc_tls .mmtk_mutator , sym , sz );
1084
+ return sym ;
1085
+ }
1086
+
1045
1087
JL_DLLEXPORT void * jl_gc_managed_malloc (size_t sz )
1046
1088
{
1047
1089
jl_ptls_t ptls = jl_current_task -> ptls ;
@@ -1079,6 +1121,11 @@ void jl_gc_notify_image_load(const char* img_data, size_t len)
1079
1121
mmtk_set_vm_space ((void * )img_data , len );
1080
1122
}
1081
1123
1124
// Notify MMTk that the image bytes at [img_data, img_data + len) were
// allocated, so immortal-region post-allocation bookkeeping runs over them.
void jl_gc_notify_image_alloc(const char *img_data, size_t len)
{
    void *region = (void*)img_data;
    mmtk_immortal_region_post_alloc(region, len);
}
1128
+
1082
1129
// ========================================================================= //
1083
1130
// Code specific to stock that is not supported by MMTk
1084
1131
// ========================================================================= //
@@ -1208,6 +1255,53 @@ JL_DLLEXPORT jl_value_t *jl_gc_internal_obj_base_ptr(void *p)
1208
1255
return NULL ;
1209
1256
}
1210
1257
1258
+ #define jl_p_gcpreserve_stack (jl_current_task->gcpreserve_stack)
1259
+
1260
+ // This macro currently uses malloc instead of alloca because this function will exit
1261
+ // after pushing the roots into the gc_preserve_stack, which means that the preserve_begin function's
1262
+ // stack frame will be destroyed (together with its alloca variables). When we support lowering this code
1263
+ // inside the same function that is doing the preserve_begin/preserve_end calls we should be able to simple use allocas.
1264
+ // Note also that we use a separate stack for gc preserve roots to avoid the possibility of calling free
1265
+ // on a stack that has been allocated with alloca instead of malloc, which could happen depending on the order in which
1266
+ // JL_GC_POP() and jl_gc_preserve_end_hook() occurs.
1267
+
1268
+ #define JL_GC_PUSHARGS_PRESERVE_ROOT_OBJS (rts_var ,n ) \
1269
+ rts_var = ((jl_value_t**)malloc(((n)+2)*sizeof(jl_value_t*)))+2; \
1270
+ ((void**)rts_var)[-2] = (void*)JL_GC_ENCODE_PUSHARGS(n); \
1271
+ ((void**)rts_var)[-1] = jl_p_gcpreserve_stack; \
1272
+ memset((void*)rts_var, 0, (n)*sizeof(jl_value_t*)); \
1273
+ jl_p_gcpreserve_stack = (jl_gcframe_t*)&(((void**)rts_var)[-2]); \
1274
+
1275
+ #define JL_GC_POP_PRESERVE_ROOT_OBJS () \
1276
+ jl_gcframe_t *curr = jl_p_gcpreserve_stack; \
1277
+ if(curr) { \
1278
+ (jl_p_gcpreserve_stack = jl_p_gcpreserve_stack->prev); \
1279
+ free(curr); \
1280
+ }
1281
+
1282
+ // Add each argument as a tpin root object.
1283
+ // However, we cannot use JL_GC_PUSH and JL_GC_POP since the slots should live
1284
+ // beyond this function. Instead, we maintain a tpin stack by mallocing/freeing
1285
+ // the frames for each of the preserve regions we encounter
1286
+ JL_DLLEXPORT void jl_gc_preserve_begin_hook (int n , ...) JL_NOTSAFEPOINT
1287
+ {
1288
+ jl_value_t * * frame ;
1289
+ JL_GC_PUSHARGS_PRESERVE_ROOT_OBJS (frame , n );
1290
+ if (n == 0 ) return ;
1291
+
1292
+ va_list args ;
1293
+ va_start (args , n );
1294
+ for (int i = 0 ; i < n ; i ++ ) {
1295
+ frame [i ] = va_arg (args , jl_value_t * );
1296
+ }
1297
+ va_end (args );
1298
+ }
1299
+
1300
+ JL_DLLEXPORT void jl_gc_preserve_end_hook (void ) JL_NOTSAFEPOINT
1301
+ {
1302
+ JL_GC_POP_PRESERVE_ROOT_OBJS ();
1303
+ }
1304
+
1211
1305
#ifdef __cplusplus
1212
1306
}
1213
1307
#endif
0 commit comments