@@ -7,6 +7,7 @@
 #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
 
 #include <ksched.h>
+#include <sys/atomic.h>
 #include <spinlock.h>
 #include <kernel_arch_func.h>
 
@@ -48,19 +49,25 @@ void z_smp_release_global_lock(struct k_thread *thread);
  * treat this because the scheduler lock can't be released by the
  * switched-to thread, which is going to (obviously) be running its
  * own code and doesn't know it was switched out.
- *
- * Note: future SMP architectures may need a fence/barrier or cache
- * invalidation here. Current ones don't, and sadly Zephyr doesn't
- * have a framework for that yet.
  */
 static inline void z_sched_switch_spin(struct k_thread *thread)
 {
 #ifdef CONFIG_SMP
 	volatile void **shp = (void *)&thread->switch_handle;
 
 	while (*shp == NULL) {
-		k_busy_wait(1);
+		__asm__ __volatile__("" ::: "memory");
 	}
+	/* Read barrier: don't allow any subsequent loads in the
+	 * calling code to reorder before we saw switch_handle go
+	 * non-null.
+	 */
+#if defined(__GNUC__)
+	/* GCC-ism */
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+#else
+	atomic_thread_fence(memory_order_seq_cst);
+#endif
 #endif
 }
 
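Two notes on what this hunk does. Replacing `k_busy_wait(1)` with an empty `__asm__ __volatile__` statement turns the loop into a tight spin: the artificial 1 µs delay per iteration is gone, and the empty asm acts as a compiler barrier so unrelated memory accesses cannot be cached across iterations (the `*shp` load itself is already forced to be re-read by `volatile`). The new fence after the loop is the acquire half of a publish/observe protocol: the CPU that stores the handle is responsible for the release-side ordering, and the fence added in `do_swap()` below plays the analogous role for the NULL store before the scheduler lock is released. Here is a standalone sketch of that idiom, assuming plain C11 plus GCC-style inline asm; all names (`fake_thread`, `publish`, `observe`) are hypothetical, not Zephyr API, and it mirrors the kernel's volatile-plus-fence style rather than strict `_Atomic` usage:

```c
/* Sketch of the publish/observe idiom the fences implement.
 * Hypothetical names, not Zephyr API.
 */
#include <stdatomic.h>
#include <stddef.h>

struct fake_thread {
	int payload;                  /* data written before the handoff */
	void *volatile switch_handle; /* NULL until the switch completes */
};

/* Writer CPU: make the payload globally visible before publishing
 * the handle, so an observer that sees the handle also sees the data.
 */
static void publish(struct fake_thread *t, void *handle)
{
	t->payload = 42;
	atomic_thread_fence(memory_order_seq_cst); /* order the stores */
	t->switch_handle = handle;
}

/* Reader CPU: spin until the handle appears, then fence so later
 * loads can't be satisfied from before the moment the handle was
 * seen non-NULL, as z_sched_switch_spin() now does.
 */
static int observe(struct fake_thread *t)
{
	while (t->switch_handle == NULL) {
		__asm__ __volatile__("" ::: "memory"); /* compiler barrier */
	}
	atomic_thread_fence(memory_order_seq_cst); /* order the loads */
	return t->payload; /* reads 42, not a stale value */
}
```
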
@@ -152,8 +159,17 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		void *newsh = new_thread->switch_handle;
 
 		if (IS_ENABLED(CONFIG_SMP)) {
-			/* Active threads MUST have a null here */
+			/* Active threads must have a null here. And
+			 * it must be seen before the scheduler lock
+			 * is released!
+			 */
 			new_thread->switch_handle = NULL;
+#if defined(__GNUC__)
+			/* GCC-ism */
+			__atomic_thread_fence(__ATOMIC_SEQ_CST);
+#else
+			atomic_thread_fence(memory_order_seq_cst);
+#endif
 		}
 		k_spin_release(&sched_spinlock);
 		arch_switch(newsh, &old_thread->switch_handle);
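The `#if defined(__GNUC__)` split in both hunks selects between the GCC/Clang builtin `__atomic_thread_fence(__ATOMIC_SEQ_CST)` and the standard C11 `atomic_thread_fence(memory_order_seq_cst)`; both emit a full sequentially-consistent fence. One caveat worth flagging: the C11 spelling lives in `<stdatomic.h>`, and it is not obvious that the added `<sys/atomic.h>` include pulls that in, so the fallback branch may need its own include. A hypothetical wrapper that would collapse the repetition (the name `z_full_fence` is illustrative, not a Zephyr symbol):

```c
/* Hypothetical helper collapsing the repeated #if split above;
 * z_full_fence is an illustrative name, not a Zephyr symbol.
 */
#if defined(__GNUC__)
/* GCC/Clang builtin, available in freestanding builds */
#define z_full_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#else
#include <stdatomic.h> /* atomic_thread_fence(), memory_order_seq_cst */
#define z_full_fence() atomic_thread_fence(memory_order_seq_cst)
#endif
```

Both call sites would then reduce to a single `z_full_fence();` line.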