Skip to content

gh-132641: fix race in lru_cache under free-threading #133787

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 13, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Include/internal/pycore_dict.h
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,8 @@ extern int _PyDict_Pop_KnownHash(
Py_hash_t hash,
PyObject **result);

extern void _PyDict_Clear_LockHeld(PyObject *op);

#ifdef Py_GIL_DISABLED
PyAPI_FUNC(void) _PyDict_EnsureSharedOnRead(PyDictObject *mp);
#endif
Expand Down
75 changes: 75 additions & 0 deletions Lib/test/test_free_threading/test_functools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import random
import unittest

from functools import lru_cache
from threading import Barrier, Thread

from test.support import threading_helper

@threading_helper.requires_working_threading()
class TestLRUCache(unittest.TestCase):

def _test_concurrent_operations(self, maxsize):
num_threads = 10
b = Barrier(num_threads)
@lru_cache(maxsize=maxsize)
def func(arg=0):
return object()


def thread_func():
b.wait()
for i in range(1000):
r = random.randint(0, 1000)
if i < 800:
func(i)
elif i < 900:
func.cache_info()
else:
func.cache_clear()

threads = []
for i in range(num_threads):
t = Thread(target=thread_func)
threads.append(t)

with threading_helper.start_threads(threads):
pass

def test_concurrent_operations_unbounded(self):
self._test_concurrent_operations(maxsize=None)

def test_concurrent_operations_bounded(self):
self._test_concurrent_operations(maxsize=128)

def _test_reentrant_cache_clear(self, maxsize):
num_threads = 10
b = Barrier(num_threads)
@lru_cache(maxsize=maxsize)
def func(arg=0):
func.cache_clear()
return object()


def thread_func():
b.wait()
for i in range(1000):
func(random.randint(0, 10000))

threads = []
for i in range(num_threads):
t = Thread(target=thread_func)
threads.append(t)

with threading_helper.start_threads(threads):
pass

def test_reentrant_cache_clear_unbounded(self):
self._test_reentrant_cache_clear(maxsize=None)

def test_reentrant_cache_clear_bounded(self):
self._test_reentrant_cache_clear(maxsize=128)


# Allow running this test file directly: python test_functools.py
if __name__ == "__main__":
    unittest.main()
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fixed a race in :func:`functools.lru_cache` under free-threading.
16 changes: 11 additions & 5 deletions Modules/_functoolsmodule.c
Original file line number Diff line number Diff line change
Expand Up @@ -1383,8 +1383,8 @@ bounded_lru_cache_update_lock_held(lru_cache_object *self,
this same key, then this setitem call will update the cache dict
with this new link, leaving the old link as an orphan (i.e. not
having a cache dict entry that refers to it). */
if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
hash) < 0) {
if (_PyDict_SetItem_KnownHash_LockHeld((PyDictObject *)self->cache, key,
(PyObject *)link, hash) < 0) {
Py_DECREF(link);
return NULL;
}
Expand Down Expand Up @@ -1453,8 +1453,8 @@ bounded_lru_cache_update_lock_held(lru_cache_object *self,
for successful insertion in the cache dict before adding the
link to the linked list. Otherwise, the potentially reentrant
__eq__ call could cause the then orphan link to be visited. */
if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
hash) < 0) {
if (_PyDict_SetItem_KnownHash_LockHeld((PyDictObject *)self->cache, key,
(PyObject *)link, hash) < 0) {
/* Somehow the cache dict update failed. We no longer can
restore the old link. Let the error propagate upward and
leave the cache short one link. */
Expand Down Expand Up @@ -1689,7 +1689,13 @@ _functools__lru_cache_wrapper_cache_clear_impl(PyObject *self)
lru_list_elem *list = lru_cache_unlink_list(_self);
FT_ATOMIC_STORE_SSIZE_RELAXED(_self->hits, 0);
FT_ATOMIC_STORE_SSIZE_RELAXED(_self->misses, 0);
PyDict_Clear(_self->cache);
if (_self->wrapper == bounded_lru_cache_wrapper) {
/* The critical section on the lru cache itself protects the dictionary
for bounded_lru_cache instances. */
_PyDict_Clear_LockHeld(_self->cache);
} else {
PyDict_Clear(_self->cache);
}
lru_cache_clear_list(list);
Py_RETURN_NONE;
}
Expand Down
5 changes: 5 additions & 0 deletions Objects/dictobject.c
Original file line number Diff line number Diff line change
Expand Up @@ -2915,6 +2915,11 @@ clear_lock_held(PyObject *op)
ASSERT_CONSISTENT(mp);
}

/* Clear the dict when the caller already holds the dict's lock.
   Thin internal wrapper around clear_lock_held(); unlike PyDict_Clear(),
   it does not acquire the per-object critical section itself, so it is
   safe to call from code that is already inside one (e.g. the lru_cache
   critical section on the free-threaded build). */
void
_PyDict_Clear_LockHeld(PyObject *op)
{
    clear_lock_held(op);
}

void
PyDict_Clear(PyObject *op)
{
Expand Down
Loading