Commit 0819ea6

seqlock: Enforce C11 Atomics (#19)
1 parent cdd36ee commit 0819ea6

3 files changed: +30 -24 lines changed

seqlock/Makefile (+1 -1)

@@ -1,5 +1,5 @@
 all:
-	gcc -o tests -std=gnu11 -Wall -O2 seqlock.c tests.c
+	gcc -o tests -std=gnu11 -Wall -Wextra -O2 seqlock.c tests.c
 
 clean:
 	rm -f tests

seqlock/seqlock.c (+27 -22)

@@ -1,15 +1,17 @@
 #include <assert.h>
+#include <stdatomic.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
 
 #include "seqlock.h"
 
 #define SEQLOCK_WRITER 1U
 
 #if defined(__i386__) || defined(__x86_64__)
-#define spin_wait() __builtin_ia32_pause()
+#define spin_wait() atomic_thread_fence(memory_order_seq_cst)
 #elif defined(__aarch64__)
 #define spin_wait() __asm__ __volatile__("isb\n")
 #else
@@ -32,9 +34,9 @@ static inline int wfe(void)
 static inline uint32_t ldx(const uint8_t *var, int mm)
 {
     uint32_t old;
-    if (mm == __ATOMIC_ACQUIRE)
+    if (mm == memory_order_acquire)
         __asm volatile("ldaxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
-    else if (mm == __ATOMIC_RELAXED)
+    else if (mm == memory_order_relaxed)
         __asm volatile("ldxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
     else
         abort();
@@ -43,7 +45,7 @@ static inline uint32_t ldx(const uint8_t *var, int mm)
 #else /* generic */
 #define SEVL() (void) 0
 #define WFE() 1
-#define LDX(a, b) __atomic_load_n((a), (b))
+#define LDX(a, b) atomic_load_explicit((a), (b))
 #endif
 
 #define UNLIKELY(x) __builtin_expect(!!(x), 0)
@@ -57,7 +59,8 @@ static inline seqlock_t wait_for_no_writer(const seqlock_t *sync, int mo)
 {
     seqlock_t l;
     SEVL(); /* Do SEVL early to avoid excessive loop alignment (NOPs) */
-    if (UNLIKELY(((l = __atomic_load_n(sync, mo)) & SEQLOCK_WRITER) != 0)) {
+    if (UNLIKELY(((l = atomic_load_explicit(sync, mo)) & SEQLOCK_WRITER) !=
+                 0)) {
         while (WFE() && ((l = LDX(sync, mo)) & SEQLOCK_WRITER) != 0)
             spin_wait();
     }
@@ -69,35 +72,35 @@ seqlock_t seqlock_acquire_rd(const seqlock_t *sync)
 {
     /* Wait for any present writer to go away */
     /* B: Synchronize with A */
-    return wait_for_no_writer(sync, __ATOMIC_ACQUIRE);
+    return wait_for_no_writer(sync, memory_order_acquire);
 }
 
 bool seqlock_release_rd(const seqlock_t *sync, seqlock_t prv)
 {
     /* Enforce Load/Load order as if synchronizing with a store-release or
      * fence-release in another thread.
      */
-    __atomic_thread_fence(__ATOMIC_ACQUIRE);
+    atomic_thread_fence(memory_order_acquire);
     /* Test if sync remains unchanged => success */
-    return __atomic_load_n(sync, __ATOMIC_RELAXED) == prv;
+    return atomic_load_explicit(sync, memory_order_relaxed) == prv;
 }
 
 void seqlock_acquire_wr(seqlock_t *sync)
 {
     seqlock_t l;
     do {
         /* Wait for any present writer to go away */
-        l = wait_for_no_writer(sync, __ATOMIC_RELAXED);
+        l = wait_for_no_writer(sync, memory_order_relaxed);
         /* Attempt to increment, setting writer flag */
     } while (
         /* C: Synchronize with A */
-        !__atomic_compare_exchange_n(sync, &l, l + SEQLOCK_WRITER,
-                                     /*weak=*/true, __ATOMIC_ACQUIRE,
-                                     __ATOMIC_RELAXED));
+        !atomic_compare_exchange_strong_explicit(
+            sync, (uint32_t *) &l, l + SEQLOCK_WRITER, memory_order_acquire,
+            memory_order_relaxed));
     /* Enforce Store/Store order as if synchronizing with a load-acquire or
      * fence-acquire in another thread.
      */
-    __atomic_thread_fence(__ATOMIC_RELEASE);
+    atomic_thread_fence(memory_order_release);
 }
 
 void seqlock_release_wr(seqlock_t *sync)
@@ -110,17 +113,19 @@ void seqlock_release_wr(seqlock_t *sync)
 
     /* Increment, clearing writer flag */
     /* A: Synchronize with B and C */
-    __atomic_store_n(sync, cur + 1, __ATOMIC_RELEASE);
+    atomic_store_explicit(sync, cur + SEQLOCK_WRITER, memory_order_release);
 }
 
-#define ATOMIC_COPY(_d, _s, _sz, _type)                                      \
-    ({                                                                       \
-        _type val = __atomic_load_n((const _type *) (_s), __ATOMIC_RELAXED); \
-        _s += sizeof(_type);                                                 \
-        __atomic_store_n((_type *) (_d), val, __ATOMIC_RELAXED);             \
-        _d += sizeof(_type);                                                 \
-        _sz -= sizeof(_type);                                                \
-    })
+#define ATOMIC_COPY(_d, _s, _sz, _type)                                      \
+    do {                                                                     \
+        const _Atomic _type *src_atomic = (_Atomic const _type *) (_s);      \
+        _type val = atomic_load_explicit(src_atomic, memory_order_relaxed);  \
+        _s += sizeof(_type);                                                 \
+        _Atomic _type *dst_atomic = (_Atomic _type *) (_d);                  \
+        atomic_store_explicit(dst_atomic, val, memory_order_relaxed);        \
+        _d += sizeof(_type);                                                 \
+        _sz -= sizeof(_type);                                                \
+    } while (0)
 
 static inline void atomic_memcpy(char *dst, const char *src, size_t sz)
 {
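
For orientation, a minimal reader/writer usage sketch against the API this diff touches (seqlock_init, seqlock_acquire_rd/seqlock_release_rd, seqlock_acquire_wr/seqlock_release_wr). The payload variable and the two helper functions are hypothetical and not part of the repository; the payload is itself _Atomic and accessed with relaxed operations so the reader's speculative load stays race-free:

#include <stdatomic.h>
#include <stdint.h>

#include "seqlock.h"

static seqlock_t lock;           /* assume seqlock_init(&lock) ran at startup */
static _Atomic uint64_t payload; /* hypothetical protected data */

uint64_t read_payload(void)
{
    seqlock_t seq;
    uint64_t snapshot;
    do {
        /* Wait for any writer to finish and capture the sequence number */
        seq = seqlock_acquire_rd(&lock);
        /* Speculatively read the protected data */
        snapshot = atomic_load_explicit(&payload, memory_order_relaxed);
        /* Retry if the sequence number changed, i.e. a writer intervened */
    } while (!seqlock_release_rd(&lock, seq));
    return snapshot;
}

void write_payload(uint64_t v)
{
    seqlock_acquire_wr(&lock); /* increment sequence, setting the writer flag */
    atomic_store_explicit(&payload, v, memory_order_relaxed);
    seqlock_release_wr(&lock); /* increment again, clearing the writer flag */
}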

seqlock/seqlock.h (+2 -1)

@@ -1,10 +1,11 @@
 #pragma once
 
+#include <stdatomic.h>
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 
-typedef uint32_t seqlock_t;
+typedef _Atomic(uint32_t) seqlock_t;
 
 /* Initialise a seqlock aka reader/writer synchronization */
 void seqlock_init(seqlock_t *sync);
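
The typedef change is what allows seqlock.c to apply C11 generic atomic functions to a seqlock_t * directly, since those functions are specified for _Atomic objects. A standalone illustration with a hypothetical peek() helper, not taken from the repository:

#include <stdatomic.h>
#include <stdint.h>

typedef _Atomic(uint32_t) seqlock_t;

/* With the _Atomic-qualified typedef, the C11 generic load applies directly */
uint32_t peek(const seqlock_t *sync)
{
    return atomic_load_explicit(sync, memory_order_relaxed);
}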
