Skip to content

Commit ce4814f

Browse files
committed
refactor: Add clang thread safety annotations to EventLoop
Add basic thread safety annotations to EventLoop. Their use could be expanded to other functions and deepened over time, but this puts the basic functionality in place. Use of annotations was discussed in #129 (comment)
1 parent 02f58e0 commit ce4814f

File tree

4 files changed

+71
-32
lines changed

4 files changed

+71
-32
lines changed

include/mp/proxy-io.h

+6-6
Original file line numberDiff line numberDiff line change
@@ -186,10 +186,10 @@ class EventLoop
186186
//! is important that ProxyServer::m_impl destructors do not run on the
187187
//! eventloop thread because they may need it to do I/O if they perform
188188
//! other IPC calls.
189-
void startAsyncThread(std::unique_lock<std::mutex>& lock);
189+
void startAsyncThread() MP_REQUIRES(m_mutex);
190190

191191
//! Check if loop should exit.
192-
bool done(std::unique_lock<std::mutex>& lock);
192+
bool done() MP_REQUIRES(m_mutex);
193193

194194
Logger log()
195195
{
@@ -212,10 +212,10 @@ class EventLoop
212212
std::thread m_async_thread;
213213

214214
//! Callback function to run on event loop thread during post() or sync() call.
215-
kj::Function<void()>* m_post_fn = nullptr;
215+
kj::Function<void()>* m_post_fn MP_GUARDED_BY(m_mutex) = nullptr;
216216

217217
//! Callback functions to run on async thread.
218-
CleanupList m_async_fns;
218+
CleanupList m_async_fns MP_GUARDED_BY(m_mutex);
219219

220220
//! Pipe read handle used to wake up the event loop thread.
221221
int m_wait_fd = -1;
@@ -225,11 +225,11 @@ class EventLoop
225225

226226
//! Number of clients holding references to ProxyServerBase objects that
227227
//! reference this event loop.
228-
int m_num_clients = 0;
228+
int m_num_clients MP_GUARDED_BY(m_mutex) = 0;
229229

230230
//! Mutex and condition variable used to post tasks to event loop and async
231231
//! thread.
232-
std::mutex m_mutex;
232+
Mutex m_mutex;
233233
std::condition_variable m_cv;
234234

235235
//! Capnp IO context.

include/mp/proxy.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -54,18 +54,18 @@ inline void CleanupRun(CleanupList& fns) {
5454
class EventLoopRef
5555
{
5656
public:
57-
explicit EventLoopRef(EventLoop& loop, std::unique_lock<std::mutex>* lock = nullptr);
57+
explicit EventLoopRef(EventLoop& loop, Lock* lock = nullptr);
5858
EventLoopRef(EventLoopRef&& other) noexcept : m_loop(other.m_loop) { other.m_loop = nullptr; }
5959
EventLoopRef(const EventLoopRef&) = delete;
6060
EventLoopRef& operator=(const EventLoopRef&) = delete;
6161
EventLoopRef& operator=(EventLoopRef&&) = delete;
6262
~EventLoopRef() { reset(); }
6363
EventLoop& operator*() const { assert(m_loop); return *m_loop; }
6464
EventLoop* operator->() const { assert(m_loop); return m_loop; }
65-
bool reset(std::unique_lock<std::mutex>* lock = nullptr);
65+
bool reset(Lock* lock = nullptr);
6666

6767
EventLoop* m_loop{nullptr};
68-
std::unique_lock<std::mutex>* m_lock{nullptr};
68+
Lock* m_lock{nullptr};
6969
};
7070

7171
//! Context data associated with proxy client and server classes.

include/mp/util.h

+40
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,15 @@
66
#define MP_UTIL_H
77

88
#include <capnp/schema.h>
9+
#include <cassert>
910
#include <cstddef>
1011
#include <functional>
1112
#include <future>
1213
#include <kj/common.h>
1314
#include <kj/exception.h>
1415
#include <kj/string-tree.h>
1516
#include <memory>
17+
#include <mutex>
1618
#include <string.h>
1719
#include <string>
1820
#include <tuple>
@@ -145,6 +147,44 @@ struct PtrOrValue {
145147
T* operator->() const { return &**this; }
146148
};
147149

150+
// Annotated mutex and lock class (https://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
151+
#if defined(__clang__) && (!defined(SWIG))
152+
#define MP_TSA(x) __attribute__((x))
153+
#else
154+
#define MP_TSA(x) // no-op
155+
#endif
156+
157+
#define MP_CAPABILITY(x) MP_TSA(capability(x))
158+
#define MP_SCOPED_CAPABILITY MP_TSA(scoped_lockable)
159+
#define MP_REQUIRES(x) MP_TSA(requires_capability(x))
160+
#define MP_ACQUIRE(...) MP_TSA(acquire_capability(__VA_ARGS__))
161+
#define MP_RELEASE(...) MP_TSA(release_capability(__VA_ARGS__))
162+
#define MP_ASSERT_CAPABILITY(x) MP_TSA(assert_capability(x))
163+
#define MP_GUARDED_BY(x) MP_TSA(guarded_by(x))
164+
165+
class MP_CAPABILITY("mutex") Mutex {
166+
public:
167+
void lock() MP_ACQUIRE() { m_mutex.lock(); }
168+
void unlock() MP_RELEASE() { m_mutex.unlock(); }
169+
170+
std::mutex m_mutex;
171+
};
172+
173+
class MP_SCOPED_CAPABILITY Lock {
174+
public:
175+
explicit Lock(Mutex& m) MP_ACQUIRE(m) : m_lock(m.m_mutex) {}
176+
~Lock() MP_RELEASE() {}
177+
void unlock() MP_RELEASE() { m_lock.unlock(); }
178+
void lock() MP_ACQUIRE() { m_lock.lock(); }
179+
void assert_locked(Mutex& mutex) MP_ASSERT_CAPABILITY() MP_ASSERT_CAPABILITY(mutex)
180+
{
181+
assert(m_lock.mutex() == &mutex.m_mutex);
182+
assert(m_lock);
183+
}
184+
185+
std::unique_lock<std::mutex> m_lock;
186+
};
187+
148188
//! Analog to std::lock_guard that unlocks instead of locks.
149189
template <typename Lock>
150190
struct UnlockGuard

src/mp/proxy.cpp

+22-23
Original file line numberDiff line numberDiff line change
@@ -49,20 +49,21 @@ void LoggingErrorHandler::taskFailed(kj::Exception&& exception)
4949
m_loop.log() << "Uncaught exception in daemonized task.";
5050
}
5151

52-
EventLoopRef::EventLoopRef(EventLoop& loop, std::unique_lock<std::mutex>* lock) : m_loop(&loop), m_lock(lock)
52+
EventLoopRef::EventLoopRef(EventLoop& loop, Lock* lock) : m_loop(&loop), m_lock(lock)
5353
{
5454
auto loop_lock{PtrOrValue{m_lock, m_loop->m_mutex}};
5555
m_loop->m_num_clients += 1;
5656
}
5757

58-
bool EventLoopRef::reset(std::unique_lock<std::mutex>* lock)
58+
bool EventLoopRef::reset(Lock* lock)
5959
{
6060
bool done = false;
6161
if (m_loop) {
6262
auto loop_lock{PtrOrValue{lock ? lock : m_lock, m_loop->m_mutex}};
63+
loop_lock->assert_locked(m_loop->m_mutex);
6364
assert(m_loop->m_num_clients > 0);
6465
m_loop->m_num_clients -= 1;
65-
if (m_loop->done(*loop_lock)) {
66+
if (m_loop->done()) {
6667
done = true;
6768
m_loop->m_cv.notify_all();
6869
int post_fd{m_loop->m_post_fd};
@@ -133,18 +134,18 @@ Connection::~Connection()
133134
m_sync_cleanup_fns.pop_front();
134135
}
135136
while (!m_async_cleanup_fns.empty()) {
136-
const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
137+
const Lock lock(m_loop->m_mutex);
137138
m_loop->m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
138139
m_async_cleanup_fns.pop_front();
139140
}
140-
std::unique_lock<std::mutex> lock(m_loop->m_mutex);
141-
m_loop->startAsyncThread(lock);
141+
Lock lock(m_loop->m_mutex);
142+
m_loop->startAsyncThread();
142143
m_loop.reset(&lock);
143144
}
144145

145146
CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
146147
{
147-
const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
148+
const Lock lock(m_loop->m_mutex);
148149
// Add cleanup callbacks to the front of list, so sync cleanup functions run
149150
// in LIFO order. This is a good approach because sync cleanup functions are
150151
// added as client objects are created, and it is natural to clean up
@@ -158,13 +159,13 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
158159

159160
void Connection::removeSyncCleanup(CleanupIt it)
160161
{
161-
const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
162+
const Lock lock(m_loop->m_mutex);
162163
m_sync_cleanup_fns.erase(it);
163164
}
164165

165166
void Connection::addAsyncCleanup(std::function<void()> fn)
166167
{
167-
const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
168+
const Lock lock(m_loop->m_mutex);
168169
// Add async cleanup callbacks to the back of the list. Unlike the sync
169170
// cleanup list, this list order is more significant because it determines
170171
// the order server objects are destroyed when there is a sudden disconnect,
@@ -200,7 +201,7 @@ EventLoop::EventLoop(const char* exe_name, LogFn log_fn, void* context)
200201
EventLoop::~EventLoop()
201202
{
202203
if (m_async_thread.joinable()) m_async_thread.join();
203-
const std::lock_guard<std::mutex> lock(m_mutex);
204+
const Lock lock(m_mutex);
204205
KJ_ASSERT(m_post_fn == nullptr);
205206
KJ_ASSERT(m_async_fns.empty());
206207
KJ_ASSERT(m_wait_fd == -1);
@@ -225,12 +226,12 @@ void EventLoop::loop()
225226
for (;;) {
226227
const size_t read_bytes = wait_stream->read(&buffer, 0, 1).wait(m_io_context.waitScope);
227228
if (read_bytes != 1) throw std::logic_error("EventLoop wait_stream closed unexpectedly");
228-
std::unique_lock<std::mutex> lock(m_mutex);
229+
Lock lock(m_mutex);
229230
if (m_post_fn) {
230231
Unlock(lock, *m_post_fn);
231232
m_post_fn = nullptr;
232233
m_cv.notify_all();
233-
} else if (done(lock)) {
234+
} else if (done()) {
234235
// Intentionally do not break if m_post_fn was set, even if done()
235236
// would return true, to ensure that the EventLoopRef write(post_fd)
236237
// call always succeeds and the loop does not exit between the time
@@ -243,7 +244,7 @@ void EventLoop::loop()
243244
log() << "EventLoop::loop bye.";
244245
wait_stream = nullptr;
245246
KJ_SYSCALL(::close(post_fd));
246-
const std::unique_lock<std::mutex> lock(m_mutex);
247+
const Lock lock(m_mutex);
247248
m_wait_fd = -1;
248249
m_post_fd = -1;
249250
}
@@ -254,26 +255,26 @@ void EventLoop::post(kj::Function<void()> fn)
254255
fn();
255256
return;
256257
}
257-
std::unique_lock<std::mutex> lock(m_mutex);
258+
Lock lock(m_mutex);
258259
EventLoopRef ref(*this, &lock);
259-
m_cv.wait(lock, [this] { return m_post_fn == nullptr; });
260+
m_cv.wait(lock.m_lock, [this]() MP_REQUIRES(m_mutex) { return m_post_fn == nullptr; });
260261
m_post_fn = &fn;
261262
int post_fd{m_post_fd};
262263
Unlock(lock, [&] {
263264
char buffer = 0;
264265
KJ_SYSCALL(write(post_fd, &buffer, 1));
265266
});
266-
m_cv.wait(lock, [this, &fn] { return m_post_fn != &fn; });
267+
m_cv.wait(lock.m_lock, [this, &fn]() MP_REQUIRES(m_mutex) { return m_post_fn != &fn; });
267268
}
268269

269-
void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
270+
void EventLoop::startAsyncThread()
270271
{
271272
if (m_async_thread.joinable()) {
272273
m_cv.notify_all();
273274
} else if (!m_async_fns.empty()) {
274275
m_async_thread = std::thread([this] {
275-
std::unique_lock<std::mutex> lock(m_mutex);
276-
while (!done(lock)) {
276+
Lock lock(m_mutex);
277+
while (!done()) {
277278
if (!m_async_fns.empty()) {
278279
EventLoopRef ref{*this, &lock};
279280
const std::function<void()> fn = std::move(m_async_fns.front());
@@ -290,17 +291,15 @@ void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
290291
// Continue without waiting in case there are more async_fns
291292
continue;
292293
}
293-
m_cv.wait(lock);
294+
m_cv.wait(lock.m_lock);
294295
}
295296
});
296297
}
297298
}
298299

299-
bool EventLoop::done(std::unique_lock<std::mutex>& lock)
300+
bool EventLoop::done()
300301
{
301302
assert(m_num_clients >= 0);
302-
assert(lock.owns_lock());
303-
assert(lock.mutex() == &m_mutex);
304303
return m_num_clients == 0 && m_async_fns.empty();
305304
}
306305

0 commit comments

Comments
 (0)