#include <assert.h>
+ #include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
+ #include <stdlib.h>
#include <string.h>

#include "seqlock.h"

#define SEQLOCK_WRITER 1U

#if defined(__i386__) || defined(__x86_64__)
- #define spin_wait() __builtin_ia32_pause()
+ #define spin_wait() atomic_thread_fence(memory_order_seq_cst)
#elif defined(__aarch64__)
#define spin_wait() __asm__ __volatile__("isb\n")
#else
@@ -32,9 +34,9 @@ static inline int wfe(void)
static inline uint32_t ldx(const uint8_t *var, int mm)
{
    uint32_t old;
-     if (mm == __ATOMIC_ACQUIRE)
+     if (mm == memory_order_acquire)
        __asm volatile("ldaxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
-     else if (mm == __ATOMIC_RELAXED)
+     else if (mm == memory_order_relaxed)
        __asm volatile("ldxrb %w0, [%1]" : "=&r"(old) : "r"(var) : "memory");
    else
        abort();
@@ -43,7 +45,7 @@ static inline uint32_t ldx(const uint8_t *var, int mm)
#else /* generic */
#define SEVL() (void) 0
#define WFE() 1
- #define LDX(a, b) __atomic_load_n((a), (b))
+ #define LDX(a, b) atomic_load_explicit((a), (b))
#endif

#define UNLIKELY(x) __builtin_expect(!!(x), 0)
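
A note on types: `atomic_load_explicit` and friends operate on `_Atomic`-qualified objects, so this migration presumes the lock type itself is declared atomic. A minimal sketch of what the diff assumes seqlock.h declares (the actual header is not shown here):

    /* Assumed declaration, not part of this diff: */
    typedef _Atomic uint32_t seqlock_t;
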
@@ -57,7 +59,8 @@ static inline seqlock_t wait_for_no_writer(const seqlock_t *sync, int mo)
{
    seqlock_t l;
    SEVL(); /* Do SEVL early to avoid excessive loop alignment (NOPs) */
-     if (UNLIKELY(((l = __atomic_load_n(sync, mo)) & SEQLOCK_WRITER) != 0)) {
+     if (UNLIKELY(((l = atomic_load_explicit(sync, mo)) & SEQLOCK_WRITER) !=
+                  0)) {
        while (WFE() && ((l = LDX(sync, mo)) & SEQLOCK_WRITER) != 0)
            spin_wait();
    }
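
On the generic (non-AArch64) path the polling macros collapse into a plain load loop. A rough expansion of the wait under those definitions, for illustration only (not part of the change):

    /* SEVL() is a no-op, WFE() is always 1, and LDX is
     * atomic_load_explicit, so the wait reduces to: */
    seqlock_t l = atomic_load_explicit(sync, mo);
    while ((l & SEQLOCK_WRITER) != 0) {
        spin_wait(); /* back off, then poll again */
        l = atomic_load_explicit(sync, mo);
    }
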
@@ -69,35 +72,35 @@ seqlock_t seqlock_acquire_rd(const seqlock_t *sync)
{
    /* Wait for any present writer to go away */
    /* B: Synchronize with A */
-     return wait_for_no_writer(sync, __ATOMIC_ACQUIRE);
+     return wait_for_no_writer(sync, memory_order_acquire);
}

bool seqlock_release_rd(const seqlock_t *sync, seqlock_t prv)
{
    /* Enforce Load/Load order as if synchronizing with a store-release or
     * fence-release in another thread.
     */
-     __atomic_thread_fence(__ATOMIC_ACQUIRE);
+     atomic_thread_fence(memory_order_acquire);
    /* Test if sync remains unchanged => success */
-     return __atomic_load_n(sync, __ATOMIC_RELAXED) == prv;
+     return atomic_load_explicit(sync, memory_order_relaxed) == prv;
}
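
For context, the read side is meant to be used in a retry loop: take a snapshot of the sequence number, copy the protected data, and retry if the number moved. A usage sketch, with hypothetical `payload`, `lock`, `data`, and `snapshot` names:

    /* Hypothetical shared state (names are illustrative only): */
    struct payload { uint64_t a, b; };
    seqlock_t lock; /* assumed zero-initialized */
    struct payload data;

    /* Reader: */
    struct payload snapshot;
    seqlock_t prv;
    do {
        prv = seqlock_acquire_rd(&lock); /* waits out any active writer */
        atomic_memcpy((char *) &snapshot, (const char *) &data,
                      sizeof snapshot); /* tear-free, word-wise copy */
    } while (!seqlock_release_rd(&lock, prv)); /* retry if seqno changed */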

void seqlock_acquire_wr(seqlock_t *sync)
{
    seqlock_t l;
    do {
        /* Wait for any present writer to go away */
-         l = wait_for_no_writer(sync, __ATOMIC_RELAXED);
+         l = wait_for_no_writer(sync, memory_order_relaxed);
        /* Attempt to increment, setting writer flag */
    } while (
        /* C: Synchronize with A */
-         !__atomic_compare_exchange_n(sync, &l, l + SEQLOCK_WRITER,
-                                      /*weak=*/true, __ATOMIC_ACQUIRE,
-                                      __ATOMIC_RELAXED));
+         !atomic_compare_exchange_strong_explicit(
+             sync, (uint32_t *) &l, l + SEQLOCK_WRITER, memory_order_acquire,
+             memory_order_relaxed));
    /* Enforce Store/Store order as if synchronizing with a load-acquire or
     * fence-acquire in another thread.
     */
-     __atomic_thread_fence(__ATOMIC_RELEASE);
+     atomic_thread_fence(memory_order_release);
}
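
Two things are worth noting here. First, the builtin form requested a weak compare-exchange (`/*weak=*/true`), while the C11 replacement is the strong variant; a strong CAS cannot fail spuriously, so the surrounding retry loop remains correct. Second, the write side pairs with the reader sketch above (same hypothetical names):

    /* Writer: */
    struct payload next;
    /* ... fill in next ... */
    seqlock_acquire_wr(&lock); /* seqno becomes odd: readers will retry */
    atomic_memcpy((char *) &data, (const char *) &next, sizeof data);
    seqlock_release_wr(&lock); /* seqno becomes even again */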

void seqlock_release_wr(seqlock_t *sync)
@@ -110,17 +113,19 @@ void seqlock_release_wr(seqlock_t *sync)

    /* Increment, clearing writer flag */
    /* A: Synchronize with B and C */
-     __atomic_store_n(sync, cur + 1, __ATOMIC_RELEASE);
+     atomic_store_explicit(sync, cur + SEQLOCK_WRITER, memory_order_release);
}

- #define ATOMIC_COPY(_d, _s, _sz, _type)                                      \
-     ({                                                                       \
-         _type val = __atomic_load_n((const _type *) (_s), __ATOMIC_RELAXED); \
-         _s += sizeof(_type);                                                 \
-         __atomic_store_n((_type *) (_d), val, __ATOMIC_RELAXED);             \
-         _d += sizeof(_type);                                                 \
-         _sz -= sizeof(_type);                                                \
-     })
+ #define ATOMIC_COPY(_d, _s, _sz, _type)                                      \
+     do {                                                                     \
+         const _Atomic _type *src_atomic = (_Atomic const _type *) (_s);      \
+         _type val = atomic_load_explicit(src_atomic, memory_order_relaxed);  \
+         _s += sizeof(_type);                                                 \
+         _Atomic _type *dst_atomic = (_Atomic _type *) (_d);                  \
+         atomic_store_explicit(dst_atomic, val, memory_order_relaxed);        \
+         _d += sizeof(_type);                                                 \
+         _sz -= sizeof(_type);                                                \
+     } while (0)
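
The GNU statement expression `({ ... })` is also replaced by the standard `do { ... } while (0)` idiom; this is valid because callers use ATOMIC_COPY purely for its side effects on the cursor pointers and the remaining byte count, never for its value. A self-contained invocation sketch (hypothetical buffers):

    uint32_t src_words[2] = {1, 2}, dst_words[2];
    char *d = (char *) dst_words;
    const char *s = (const char *) src_words;
    size_t n = sizeof src_words;
    while (n >= sizeof(uint32_t))
        ATOMIC_COPY(d, s, n, uint32_t); /* 4 bytes per pass; d and s
                                           advance, n shrinks */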

static inline void atomic_memcpy(char *dst, const char *src, size_t sz)
{