@@ -205,59 +205,63 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 #endif

-static void
-drop_gil_impl(struct _gil_runtime_state *gil)
+static inline void
+drop_gil_impl(PyThreadState *tstate, struct _gil_runtime_state *gil)
 {
     MUTEX_LOCK(gil->mutex);
     _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
     _Py_atomic_store_int_relaxed(&gil->locked, 0);
+    if (tstate != NULL) {
+        tstate->_status.holds_gil = 0;
+    }
     COND_SIGNAL(gil->cond);
     MUTEX_UNLOCK(gil->mutex);
 }

 static void
-drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
+drop_gil(PyInterpreterState *interp, PyThreadState *tstate, int final_release)
 {
     struct _ceval_state *ceval = &interp->ceval;
-    /* If tstate is NULL, the caller is indicating that we're releasing
+    /* If final_release is true, the caller is indicating that we're releasing
        the GIL for the last time in this thread.  This is particularly
        relevant when the current thread state is finalizing or its
        interpreter is finalizing (either may be in an inconsistent
        state).  In that case the current thread will definitely
        never try to acquire the GIL again. */
     // XXX It may be more correct to check tstate->_status.finalizing.
-    // XXX assert(tstate == NULL || !tstate->_status.cleared);
+    // XXX assert(final_release || !tstate->_status.cleared);

+    assert(final_release || tstate != NULL);
     struct _gil_runtime_state *gil = ceval->gil;
 #ifdef Py_GIL_DISABLED
-    if (!_Py_atomic_load_int_relaxed(&gil->enabled)) {
+    // Check if we have the GIL before dropping it. tstate will be NULL if
+    // take_gil() detected that this thread has been destroyed, in which case
+    // we know we have the GIL.
+    if (tstate != NULL && !tstate->_status.holds_gil) {
         return;
     }
 #endif
     if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }

-    /* tstate is allowed to be NULL (early interpreter init) */
-    if (tstate != NULL) {
+    if (!final_release) {
         /* Sub-interpreter support: threads might have been switched
            under our feet using PyThreadState_Swap(). Fix the GIL last
            holder variable so that our heuristics work. */
         _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
     }

-    drop_gil_impl(gil);
+    drop_gil_impl(tstate, gil);

 #ifdef FORCE_SWITCHING
-    /* We check tstate first in case we might be releasing the GIL for
-       the last time in this thread.  In that case there's a possible
-       race with tstate->interp getting deleted after gil->mutex is
-       unlocked and before the following code runs, leading to a crash.
-       We can use (tstate == NULL) to indicate the thread is done with
-       the GIL, and that's the only time we might delete the
-       interpreter, so checking tstate first prevents the crash.
-       See https://github.com/python/cpython/issues/104341. */
-    if (tstate != NULL &&
+    /* We might be releasing the GIL for the last time in this thread.  In that
+       case there's a possible race with tstate->interp getting deleted after
+       gil->mutex is unlocked and before the following code runs, leading to a
+       crash.  We can use final_release to indicate the thread is done with the
+       GIL, and that's the only time we might delete the interpreter.  See
+       https://github.com/python/cpython/issues/104341. */
+    if (!final_release &&
         _Py_eval_breaker_bit_is_set(tstate, _PY_GIL_DROP_REQUEST_BIT)) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
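For reference, the release handoff that drop_gil_impl() performs maps onto a standard mutex/condition-variable pattern. Below is a minimal sketch of that pattern using plain pthreads and C11 atomics instead of CPython's MUTEX_*/COND_* macros; gil_t, release_sketch, and holds_flag are illustrative names, not CPython API.

#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    atomic_int locked;
} gil_t;  /* hypothetical stand-in for _gil_runtime_state */

static void
release_sketch(gil_t *gil, atomic_int *holds_flag)
{
    pthread_mutex_lock(&gil->mutex);
    /* Clear the lock flag while holding the mutex so a waiter woken by the
       signal below observes a consistent "unlocked" state. */
    atomic_store_explicit(&gil->locked, 0, memory_order_relaxed);
    if (holds_flag != NULL) {
        /* Per-thread bookkeeping, in the spirit of tstate->_status.holds_gil. */
        atomic_store_explicit(holds_flag, 0, memory_order_relaxed);
    }
    /* Wake one thread blocked in the acquire path. */
    pthread_cond_signal(&gil->cond);
    pthread_mutex_unlock(&gil->mutex);
}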
@@ -284,7 +288,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
    tstate must be non-NULL.

    Returns 1 if the GIL was acquired, or 0 if not. */
-static int
+static void
 take_gil(PyThreadState *tstate)
 {
     int err = errno;
@@ -309,7 +313,7 @@ take_gil(PyThreadState *tstate)
     struct _gil_runtime_state *gil = interp->ceval.gil;
 #ifdef Py_GIL_DISABLED
     if (!_Py_atomic_load_int_relaxed(&gil->enabled)) {
-        return 0;
+        return;
     }
 #endif

@@ -358,10 +362,10 @@ take_gil(PyThreadState *tstate)
     if (!_Py_atomic_load_int_relaxed(&gil->enabled)) {
         // Another thread disabled the GIL between our check above and
         // now. Don't take the GIL, signal any other waiting threads, and
-        // return 0.
+        // return.
         COND_SIGNAL(gil->cond);
         MUTEX_UNLOCK(gil->mutex);
-        return 0;
+        return;
     }
 #endif

@@ -393,20 +397,21 @@ take_gil(PyThreadState *tstate)
            in take_gil() while the main thread called
            wait_for_thread_shutdown() from Py_Finalize(). */
         MUTEX_UNLOCK(gil->mutex);
-        /* Passing NULL to drop_gil() indicates that this thread is about to
-           terminate and will never hold the GIL again. */
-        drop_gil(interp, NULL);
+        /* tstate could be a dangling pointer, so don't pass it to
+           drop_gil(). */
+        drop_gil(interp, NULL, 1);
         PyThread_exit_thread();
     }
     assert(_PyThreadState_CheckConsistency(tstate));

+    tstate->_status.holds_gil = 1;
     _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
     update_eval_breaker_for_thread(interp, tstate);

     MUTEX_UNLOCK(gil->mutex);

     errno = err;
-    return 1;
+    return;
 }

 void _PyEval_SetSwitchInterval(unsigned long microseconds)
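The exit path in this hunk (unlock, drop the GIL with final_release=1, then PyThread_exit_thread()) exists because tstate may already be dangling when a daemon thread wakes up during finalization. A rough pthreads sketch of that shape follows; finalizing, acquire_sketch, and the release_final callback are assumed names for illustration, not CPython's actual helpers.

#include <pthread.h>
#include <stdatomic.h>

/* Set by the finalizing thread; stands in for "interpreter is finalizing". */
static atomic_int finalizing;

static void
acquire_sketch(pthread_mutex_t *mu, pthread_cond_t *cv, int *locked,
               void (*release_final)(void))
{
    pthread_mutex_lock(mu);
    while (*locked) {
        pthread_cond_wait(cv, mu);
    }
    *locked = 1;
    pthread_mutex_unlock(mu);

    if (atomic_load(&finalizing)) {
        /* This thread's state may have been freed while it slept, so release
           the lock via a callback that never dereferences that state, then
           terminate the thread instead of returning to the caller. */
        release_final();
        pthread_exit(NULL);
    }
}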
@@ -451,10 +456,17 @@ PyEval_ThreadsInitialized(void)
 static inline int
 current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
 {
-    if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
-        return 0;
-    }
-    return _Py_atomic_load_int_relaxed(&gil->locked);
+    int holds_gil = tstate->_status.holds_gil;
+
+    // holds_gil is the source of truth; check that last_holder and gil->locked
+    // are consistent with it.
+    int locked = _Py_atomic_load_int_relaxed(&gil->locked);
+    int is_last_holder =
+        ((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate;
+    assert(!holds_gil || locked);
+    assert(!holds_gil || is_last_holder);
+
+    return holds_gil;
 }
 #endif

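The rewritten current_thread_holds_gil() promotes the per-thread flag to the single source of truth and demotes last_holder and gil->locked to debug-build consistency checks. A minimal standalone sketch of that idiom, with hypothetical names:

#include <assert.h>
#include <stdatomic.h>

/* thread_flag plays the role of tstate->_status.holds_gil; lock_word and
   last_holder play the roles of gil->locked and gil->last_holder. */
static int
holds_lock_sketch(const int *thread_flag, atomic_int *lock_word,
                  _Atomic(void *) *last_holder, void *self)
{
    int holds = *thread_flag;  /* only the owning thread writes this flag */
    /* Debug-only cross-checks: if this thread claims the lock, the global
       state must agree. These compile away under NDEBUG. */
    assert(!holds || atomic_load_explicit(lock_word, memory_order_relaxed));
    assert(!holds ||
           atomic_load_explicit(last_holder, memory_order_relaxed) == self);
    return holds;
}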
@@ -563,23 +575,24 @@ PyEval_ReleaseLock(void)
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode. */
-    drop_gil(tstate->interp, tstate);
+    drop_gil(tstate->interp, tstate, 0);
 }

-int
+void
 _PyEval_AcquireLock(PyThreadState *tstate)
 {
     _Py_EnsureTstateNotNULL(tstate);
-    return take_gil(tstate);
+    take_gil(tstate);
 }

 void
-_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
+_PyEval_ReleaseLock(PyInterpreterState *interp,
+                    PyThreadState *tstate,
+                    int final_release)
 {
-    /* If tstate is NULL then we do not expect the current thread
-       to acquire the GIL ever again. */
-    assert(tstate == NULL || tstate->interp == interp);
-    drop_gil(interp, tstate);
+    assert(tstate != NULL);
+    assert(tstate->interp == interp);
+    drop_gil(interp, tstate, final_release);
 }

 void
@@ -1136,7 +1149,12 @@ _PyEval_DisableGIL(PyThreadState *tstate)
         //
         // Drop the GIL, which will wake up any threads waiting in take_gil()
         // and let them resume execution without the GIL.
-        drop_gil_impl(gil);
+        drop_gil_impl(tstate, gil);
+
+        // If another thread asked us to drop the GIL, they should be
+        // free-threading by now. Remove any such request so we have a clean
+        // slate if/when the GIL is enabled again.
+        _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
         return 1;
     }
     return 0;
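Finally, _PyEval_DisableGIL() now clears _PY_GIL_DROP_REQUEST_BIT after the permanent drop, so a stale request cannot linger if the GIL is later re-enabled. Conceptually this is an atomic clear of one bit in a request word; a hedged sketch with illustrative names follows (the real _Py_unset_eval_breaker_bit() operates on the thread state's eval breaker, and the bit position here is made up).

#include <stdatomic.h>
#include <stdint.h>

#define DROP_REQUEST_BIT ((uintptr_t)1 << 0)  /* illustrative bit position */

static void
unset_bit_sketch(_Atomic uintptr_t *eval_breaker)
{
    /* fetch_and with the complement clears just this bit, leaving any other
       pending request bits untouched. */
    atomic_fetch_and_explicit(eval_breaker, ~DROP_REQUEST_BIT,
                              memory_order_relaxed);
}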