 using utest::v1::Case;
 
-
 namespace {
 
 /* Lock-free operations will be much faster - keep runtime down */
-#if MBED_ATOMIC_INT_LOCK_FREE
-#define ADD_ITERATIONS (SystemCoreClock / 1000)
-#else
-#define ADD_ITERATIONS (SystemCoreClock / 8000)
-#endif
+#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
+#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)
 
-template <typename T>
-void add_incrementer(T *ptr)
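+// Scale the iteration count to each atomic object: lock-free objects
+// can afford far more iterations than ones that fall back to a
+// critical section, keeping the test runtime down.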
+template <typename A>
+static inline long add_iterations(A &a)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-    }
+    return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
 }
 
-template <typename T>
-void add_release_incrementer(T *ptr)
-{
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+template <typename A>
+struct add_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            ++(*ptr);
+        }
     }
-}
+};
 
-template <typename T>
-void sub_incrementer(T *ptr)
-{
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+template <typename A>
+struct add_release_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            ptr->fetch_add(1, mbed::memory_order_release);
+        }
     }
-}
+};
 
-template <typename T>
-void bitops_incrementer(T *ptr)
-{
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+template <typename A>
+struct sub_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
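+            // fetch_sub of -1 adds 1, keeping the total in step
+            // with the other incrementers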
+            ptr->fetch_sub(-1);
+        }
     }
-}
+};
 
-template <typename T>
-void weak_incrementer(T *ptr)
-{
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
-        do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+template <typename A>
+struct bitops_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
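+            // += does the increment; &= -1 and |= 0 are identities
+            // that exercise the and/or operators without changing
+            // the value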
+            (*ptr) += 1;
+            (*ptr) &= -1;
+            (*ptr) |= 0;
+        }
     }
-}
+};
 
-template <typename T>
-void strong_incrementer(T *ptr)
-{
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
-        do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+template <typename A>
+struct weak_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            typename A::value_type val = ptr->load();
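+            // compare_exchange_weak can fail spuriously; on failure
+            // the current value is reloaded into val, so just retry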
+            do {
+            } while (!ptr->compare_exchange_weak(val, val + 1));
+        }
     }
-}
+};
+
+template <typename A>
+struct strong_incrementer {
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            typename A::value_type val = ptr->load();
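+            // compare_exchange_strong fails only if another thread
+            // really changed the value; retry with the reloaded val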
+            do {
+            } while (!ptr->compare_exchange_strong(val, val + 1));
+        }
+    }
+};
+
 
 
 /*
@@ -100,32 +115,34 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template <typename T, void (*Fn)(T *)>
+template <typename T, template <typename A> class Fn>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        volatile Atomic<T> atomic2; // use volatile just to exercise the templates' volatile methods
         volatile T nonatomic2;
-    } data;
+    } data = { 0, { 0 }, { 1 }, 0 }; // test initialisation
+
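+    // layout check: Atomic<T> should be the same size as T, and the
+    // struct of four members should contain no padding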
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
 
-    data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
-    data.nonatomic2 = 0;
+    // test store
+    data.atomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
     Thread t2(osPriorityNormal, THREAD_STACK);
     Thread t3(osPriorityNormal, THREAD_STACK);
     Thread t4(osPriorityNormal, THREAD_STACK);
 
-    TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1)));
-    TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1)));
-    TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
-    TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
+    TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
+    TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
+    TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
+    TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
 
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
+    for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
         data.nonatomic1++;
         data.nonatomic2++;
     }
@@ -135,10 +152,83 @@ void test_atomic_add()
     t3.join();
     t4.join();
 
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
+}
+
+// This should fit into a uint32_t container, and there
+// will be 1 byte of padding to ignore.
+struct small {
+    uint8_t a;
+    uint8_t b;
+    uint8_t c;
+};
+
+// An 11-byte weird structure. Should work with critical sections.
+struct large {
+    uint8_t a;
+    uint8_t b;
+    uint8_t c;
+    uint8_t dummy[8];
+};
+
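+// Each thread CAS-updates a single member of the struct; the retry
+// loop must preserve concurrent changes to the other members.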
+template <typename A>
+void struct_incrementer_a(A *data)
+{
+    for (long i = add_iterations(*data); i > 0; i--) {
+        typename A::value_type curval = *data, newval;
+        do {
+            newval = curval;
+            newval.a++;
+        } while (!data->compare_exchange_weak(curval, newval));
+    }
+}
+
+template <typename A>
+void struct_incrementer_b(A *data)
+{
+    for (long i = add_iterations(*data); i > 0; i--) {
+        typename A::value_type curval = *data, newval;
+        do {
+            newval = curval;
+            newval.b++;
+        } while (!data->compare_exchange_weak(curval, newval));
+    }
+}
+
+template <typename T, size_t N>
+void test_atomic_struct()
+{
+    TEST_ASSERT_EQUAL(N, sizeof(Atomic<T>));
+
+    // Small structures don't have a value constructor implemented
+    Atomic<T> data;
+    atomic_init(&data, T{0, 0, 0});
+
+    Thread t1(osPriorityNormal, THREAD_STACK);
+    Thread t2(osPriorityNormal, THREAD_STACK);
+
+    TEST_ASSERT_EQUAL(osOK, t1.start(callback(struct_incrementer_a<Atomic<T> >, &data)));
+    TEST_ASSERT_EQUAL(osOK, t2.start(callback(struct_incrementer_b<Atomic<T> >, &data)));
+
+    for (long i = add_iterations(data); i > 0; i--) {
+        T curval = data, newval;
+        do {
+            newval = curval;
+            newval.c++;
+        } while (!data.compare_exchange_weak(curval, newval));
+    }
+
+    t1.join();
+    t2.join();
+
+    T final_val = data;
+
+    TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.a);
+    TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.b);
+    TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.c);
 }
 
 } // namespace
@@ -174,7 +264,9 @@ Case cases[] = {
     Case("Test atomic compare exchange strong 8-bit", test_atomic_add<uint8_t, strong_incrementer>),
     Case("Test atomic compare exchange strong 16-bit", test_atomic_add<uint16_t, strong_incrementer>),
    Case("Test atomic compare exchange strong 32-bit", test_atomic_add<uint32_t, strong_incrementer>),
-    Case("Test atomic compare exchange strong 64-bit", test_atomic_add<uint64_t, strong_incrementer>)
+    Case("Test atomic compare exchange strong 64-bit", test_atomic_add<uint64_t, strong_incrementer>),
+    Case("Test small atomic custom structure", test_atomic_struct<small, 4>),
+    Case("Test large atomic custom structure", test_atomic_struct<large, 11>)
 };
 
 utest::v1::Specification specification(test_setup, cases);