@@ -499,42 +499,66 @@ PyEval_ThreadsInitialized(void)
     return _PyEval_ThreadsInitialized();
 }
 
+static inline int
+current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
+{
+    if (((PyThreadState *)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
+        return 0;
+    }
+    return _Py_atomic_load_relaxed(&gil->locked);
+}
+
+static void
+init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
+{
+    assert(gil_created(gil));
+    interp->ceval.gil = gil;
+    interp->ceval.own_gil = 0;
+}
+
+static void
+init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
+{
+    assert(!gil_created(gil));
+    create_gil(gil);
+    assert(gil_created(gil));
+    interp->ceval.gil = gil;
+    interp->ceval.own_gil = 1;
+}
+
 PyStatus
 _PyEval_InitGIL(PyThreadState *tstate, int own_gil)
 {
     assert(tstate->interp->ceval.gil == NULL);
+    int locked;
     if (!own_gil) {
         PyInterpreterState *main_interp = _PyInterpreterState_Main();
         assert(tstate->interp != main_interp);
         struct _gil_runtime_state *gil = main_interp->ceval.gil;
-        assert(gil_created(gil));
-        tstate->interp->ceval.gil = gil;
-        tstate->interp->ceval.own_gil = 0;
-        return _PyStatus_OK();
+        init_shared_gil(tstate->interp, gil);
+        locked = current_thread_holds_gil(gil, tstate);
     }
-
     /* XXX per-interpreter GIL */
-    struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil;
-    if (!_Py_IsMainInterpreter(tstate->interp)) {
+    else if (!_Py_IsMainInterpreter(tstate->interp)) {
         /* Currently, the GIL is shared by all interpreters,
            and only the main interpreter is responsible to create
            and destroy it. */
-        assert(gil_created(gil));
-        tstate->interp->ceval.gil = gil;
+        struct _gil_runtime_state *main_gil = _PyInterpreterState_Main()->ceval.gil;
+        init_shared_gil(tstate->interp, main_gil);
         // XXX For now we lie.
         tstate->interp->ceval.own_gil = 1;
-        return _PyStatus_OK();
+        locked = current_thread_holds_gil(main_gil, tstate);
+    }
+    else {
+        PyThread_init_thread();
+        // XXX per-interpreter GIL: switch to interp->_gil.
+        init_own_gil(tstate->interp, &tstate->interp->runtime->ceval.gil);
+        locked = 0;
+    }
+    if (!locked) {
+        take_gil(tstate);
     }
-    assert(own_gil);
-
-    assert(!gil_created(gil));
 
-    PyThread_init_thread();
-    create_gil(gil);
-    assert(gil_created(gil));
-    tstate->interp->ceval.gil = gil;
-    tstate->interp->ceval.own_gil = 1;
-    take_gil(tstate);
     return _PyStatus_OK();
 }
 
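The new `current_thread_holds_gil()` helper decides whether `_PyEval_InitGIL()` still needs to call `take_gil()`: a thread holds the GIL only if it is the recorded `last_holder` *and* the `locked` flag is still set. A minimal standalone model of that check, using C11 atomics and hypothetical `toy_*` names rather than CPython's internal types:

```c
/* Toy model of the last_holder/locked check above; names are illustrative,
 * not CPython's. A thread "holds" the lock only if it was the most recent
 * holder AND the lock has not been released since. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_gil {
    atomic_int locked;              /* nonzero while some thread holds it */
    atomic_uintptr_t last_holder;   /* thread state of the most recent owner */
};

static bool
toy_thread_holds_gil(struct toy_gil *gil, uintptr_t tstate)
{
    if (atomic_load_explicit(&gil->last_holder, memory_order_relaxed) != tstate) {
        return false;   /* another thread took the lock more recently */
    }
    /* We are the last holder, so the flag says whether it is still held. */
    return atomic_load_explicit(&gil->locked, memory_order_relaxed) != 0;
}
```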
@@ -611,9 +635,17 @@ PyEval_ReleaseLock(void)
     drop_gil(ceval, tstate);
 }
 
+void
+_PyEval_AcquireLock(PyThreadState *tstate)
+{
+    _Py_EnsureTstateNotNULL(tstate);
+    take_gil(tstate);
+}
+
 void
 _PyEval_ReleaseLock(PyThreadState *tstate)
 {
+    _Py_EnsureTstateNotNULL(tstate);
     struct _ceval_state *ceval = &tstate->interp->ceval;
     drop_gil(ceval, tstate);
 }
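This hunk adds `_PyEval_AcquireLock()` as the `take_gil()` counterpart to the existing `_PyEval_ReleaseLock()`, and both now check for a NULL thread state up front. A hypothetical call site, only to illustrate the intended pairing; it is not code from this patch and assumes the internal declarations are in scope:

```c
/* Hypothetical internal call site, not part of this patch: bracket a
 * region that needs the GIL with the two helpers added above. */
static void
with_gil_held(PyThreadState *tstate)
{
    _PyEval_AcquireLock(tstate);   /* asserts tstate != NULL, then take_gil() */
    /* ... work that requires the GIL on behalf of tstate ... */
    _PyEval_ReleaseLock(tstate);   /* asserts tstate != NULL, then drop_gil() */
}
```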
@@ -625,7 +657,7 @@ PyEval_AcquireThread(PyThreadState *tstate)
 
     take_gil(tstate);
 
-    if (_PyThreadState_Swap(tstate->interp->runtime, tstate) != NULL) {
+    if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
         Py_FatalError("non-NULL old thread state");
     }
 }
@@ -635,8 +667,7 @@ PyEval_ReleaseThread(PyThreadState *tstate)
 {
     assert(is_tstate_valid(tstate));
 
-    _PyRuntimeState *runtime = tstate->interp->runtime;
-    PyThreadState *new_tstate = _PyThreadState_Swap(runtime, NULL);
+    PyThreadState *new_tstate = _PyThreadState_SwapNoGIL(NULL);
     if (new_tstate != tstate) {
         Py_FatalError("wrong thread state");
     }
@@ -684,8 +715,7 @@ _PyEval_SignalAsyncExc(PyInterpreterState *interp)
 PyThreadState *
 PyEval_SaveThread(void)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
-    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
+    PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL);
     _Py_EnsureTstateNotNULL(tstate);
 
     struct _ceval_state *ceval = &tstate->interp->ceval;
@@ -701,7 +731,7 @@ PyEval_RestoreThread(PyThreadState *tstate)
 
     take_gil(tstate);
 
-    _PyThreadState_Swap(tstate->interp->runtime, tstate);
+    _PyThreadState_SwapNoGIL(tstate);
 }
 
 
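`PyEval_SaveThread()` and `PyEval_RestoreThread()` are the public pair behind `Py_BEGIN_ALLOW_THREADS` / `Py_END_ALLOW_THREADS`; the two hunks above only swap their internal thread-state bookkeeping to `_PyThreadState_SwapNoGIL()`, so extension code keeps using them as before. A small sketch of that pattern (`do_blocking_work()` and `example_release_gil()` are placeholder names, not part of this patch):

```c
#include <Python.h>

/* Placeholder for any C call that blocks and does not touch Python objects. */
static void do_blocking_work(void) { /* e.g. read(2), sleep, heavy math */ }

static PyObject *
example_release_gil(PyObject *self, PyObject *args)
{
    PyThreadState *save = PyEval_SaveThread();  /* drop the GIL, detach tstate */
    do_blocking_work();                         /* no C API calls allowed here */
    PyEval_RestoreThread(save);                 /* retake the GIL, reattach tstate */
    Py_RETURN_NONE;
}
```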
@@ -1005,7 +1035,7 @@ _Py_HandlePending(PyThreadState *tstate)
     /* GIL drop request */
     if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
         /* Give another thread a chance */
-        if (_PyThreadState_Swap(runtime, NULL) != tstate) {
+        if (_PyThreadState_SwapNoGIL(NULL) != tstate) {
             Py_FatalError("tstate mix-up");
         }
         drop_gil(interp_ceval_state, tstate);
@@ -1014,7 +1044,7 @@ _Py_HandlePending(PyThreadState *tstate)
 
         take_gil(tstate);
 
-        if (_PyThreadState_Swap(runtime, tstate) != NULL) {
+        if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
             Py_FatalError("orphan tstate");
         }
     }