@@ -295,7 +295,8 @@ const char *jl_generate_ccallable(LLVMOrcThreadSafeModuleRef llvmmod, void *sysi
 extern "C" JL_DLLEXPORT
 int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *sysimg, jl_value_t *declrt, jl_value_t *sigt)
 {
-    JL_LOCK(&jl_codegen_lock);
+    auto ct = jl_current_task;
+    ct->reentrant_codegen++;
     uint64_t compiler_start_time = 0;
     uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
     if (measure_compile_time_enabled)
@@ -311,6 +312,7 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
         backing = jl_create_llvm_module("cextern", pparams ? pparams->tsctx : ctx, pparams ? pparams->imaging : imaging_default());
         into = &backing;
     }
+    JL_LOCK(&jl_codegen_lock);
     jl_codegen_params_t params(into->getContext());
     if (pparams == NULL)
         pparams = &params;
@@ -330,12 +332,12 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
         if (success && llvmmod == NULL)
             jl_ExecutionEngine->addModule(std::move(*into));
     }
-    if (jl_codegen_lock.count == 1 && measure_compile_time_enabled)
+    JL_UNLOCK(&jl_codegen_lock);
+    if (!--ct->reentrant_codegen && measure_compile_time_enabled)
         jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     if (ctx.getContext()) {
         jl_ExecutionEngine->releaseContext(std::move(ctx));
     }
-    JL_UNLOCK(&jl_codegen_lock);
     return success;
 }
 
@@ -386,7 +388,8 @@ void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt)
 extern "C" JL_DLLEXPORT
 jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world)
 {
-    JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
+    auto ct = jl_current_task;
+    ct->reentrant_codegen++;
     uint64_t compiler_start_time = 0;
     uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
     bool is_recompile = false;
@@ -395,6 +398,7 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
     // if we don't have any decls already, try to generate it now
     jl_code_info_t *src = NULL;
     JL_GC_PUSH1(&src);
+    JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
     jl_value_t *ci = jl_rettype_inferred(mi, world, world);
     jl_code_instance_t *codeinst = (ci == jl_nothing ? NULL : (jl_code_instance_t *)ci);
     if (codeinst) {
@@ -437,13 +441,13 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
     else {
         codeinst = NULL;
     }
-    if (jl_codegen_lock.count == 1 && measure_compile_time_enabled) {
+    JL_UNLOCK(&jl_codegen_lock);
+    if (!--ct->reentrant_codegen && measure_compile_time_enabled) {
         uint64_t t_comp = jl_hrtime() - compiler_start_time;
         if (is_recompile)
             jl_atomic_fetch_add_relaxed(&jl_cumulative_recompile_time, t_comp);
         jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, t_comp);
     }
-    JL_UNLOCK(&jl_codegen_lock);
     JL_GC_POP();
     return codeinst;
 }
@@ -454,11 +458,13 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
     if (jl_atomic_load_relaxed(&unspec->invoke) != NULL) {
         return;
     }
-    JL_LOCK(&jl_codegen_lock);
+    auto ct = jl_current_task;
+    ct->reentrant_codegen++;
     uint64_t compiler_start_time = 0;
     uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
     if (measure_compile_time_enabled)
         compiler_start_time = jl_hrtime();
+    JL_LOCK(&jl_codegen_lock);
     if (jl_atomic_load_relaxed(&unspec->invoke) == NULL) {
         jl_code_info_t *src = NULL;
         JL_GC_PUSH1(&src);
@@ -486,9 +492,9 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
         }
         JL_GC_POP();
     }
-    if (jl_codegen_lock.count == 1 && measure_compile_time_enabled)
-        jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     JL_UNLOCK(&jl_codegen_lock); // Might GC
+    if (!--ct->reentrant_codegen && measure_compile_time_enabled)
+        jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
 }
 
 
@@ -508,11 +514,13 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
         // normally we prevent native code from being generated for these functions,
         // (using sentinel value `1` instead)
         // so create an exception here so we can print pretty our lies
-        JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
+        auto ct = jl_current_task;
+        ct->reentrant_codegen++;
         uint64_t compiler_start_time = 0;
         uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled);
         if (measure_compile_time_enabled)
             compiler_start_time = jl_hrtime();
+        JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
         specfptr = (uintptr_t)jl_atomic_load_relaxed(&codeinst->specptr.fptr);
         if (specfptr == 0) {
             jl_code_info_t *src = jl_type_infer(mi, world, 0);
@@ -536,7 +544,7 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
             }
             JL_GC_POP();
         }
-        if (measure_compile_time_enabled)
+        if (!--ct->reentrant_codegen && measure_compile_time_enabled)
             jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
         JL_UNLOCK(&jl_codegen_lock);
     }
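
Every hunk above applies the same two-part pattern: compile-time accounting switches from inspecting the lock's recursion count (jl_codegen_lock.count == 1) to a per-task reentrancy counter (ct->reentrant_codegen), and JL_LOCK is pushed down past the timer setup so less code runs inside the critical section. The sketch below is a minimal standalone C++ illustration of that accounting idea, not Julia's actual code: the names reentrant_depth, cumulative_time_ns, and compile_one are hypothetical stand-ins. Only the outermost entry records elapsed time, so reentrant compilation is not double-counted, and the unlock happens before the depth decrement, mirroring the ordering in the diff.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <mutex>

static std::atomic<uint64_t> cumulative_time_ns{0};  // stands in for jl_cumulative_compile_time
static thread_local int reentrant_depth = 0;         // stands in for ct->reentrant_codegen
static std::recursive_mutex codegen_lock;            // stands in for jl_codegen_lock

static void compile_one(int remaining_recursion)
{
    reentrant_depth++;                               // bump the depth before any work
    auto t0 = std::chrono::steady_clock::now();

    {
        // Acquire the lock only around the work that needs it, as the diff
        // does by moving JL_LOCK below the timer setup.
        std::lock_guard<std::recursive_mutex> lk(codegen_lock);
        if (remaining_recursion > 0)
            compile_one(remaining_recursion - 1);    // simulated reentrant codegen
    }

    // Only the outermost frame (depth returning to 0) records elapsed time,
    // so nested entries are not double-counted.
    if (!--reentrant_depth) {
        auto dt = std::chrono::steady_clock::now() - t0;
        cumulative_time_ns.fetch_add(
            std::chrono::duration_cast<std::chrono::nanoseconds>(dt).count(),
            std::memory_order_relaxed);
    }
}

int main()
{
    compile_one(2); // simulate a doubly-reentrant compilation
    std::printf("accumulated %llu ns across one outermost entry\n",
                (unsigned long long)cumulative_time_ns.load());
}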