@@ -8,6 +8,7 @@
 
 #include <ksched.h>
 #include <zephyr/spinlock.h>
+#include <zephyr/sys/barrier.h>
 #include <kernel_arch_func.h>
 
 #ifdef CONFIG_STACK_SENTINEL
@@ -48,19 +49,20 @@ void z_smp_release_global_lock(struct k_thread *thread);
  * treat this because the scheduler lock can't be released by the
  * switched-to thread, which is going to (obviously) be running its
  * own code and doesn't know it was switched out.
- *
- * Note: future SMP architectures may need a fence/barrier or cache
- * invalidation here. Current ones don't, and sadly Zephyr doesn't
- * have a framework for that yet.
  */
 static inline void z_sched_switch_spin(struct k_thread *thread)
 {
 #ifdef CONFIG_SMP
 	volatile void **shp = (void *)&thread->switch_handle;
 
 	while (*shp == NULL) {
-		k_busy_wait(1);
+		arch_spin_relax();
 	}
+	/* Read barrier: don't allow any subsequent loads in the
+	 * calling code to reorder before we saw switch_handle go
+	 * non-null.
+	 */
+	barrier_dmem_fence_full();
 #endif
 }
 
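The hunk above is the classic spin-then-fence read side: keep polling until switch_handle becomes non-null, then fence so that nothing the caller loads afterwards can be satisfied from before that observation. As a rough standalone illustration only, the sketch below uses a C11 acquire fence in place of barrier_dmem_fence_full() (the real call is a full data memory fence), and the struct and function names are invented for the example rather than Zephyr API:

/* Acquire-side sketch (illustrative, not the kernel's code) */
#include <stdatomic.h>
#include <stddef.h>

struct fake_thread {
	void *switch_handle;	/* stand-in for k_thread::switch_handle */
};

static inline void switch_spin_sketch(struct fake_thread *thread)
{
	volatile void **shp = (volatile void **)&thread->switch_handle;

	/* Spin until the outgoing CPU publishes a non-NULL handle. */
	while (*shp == NULL) {
		/* arch_spin_relax() in the real code */
	}

	/* Acquire fence: loads issued by the caller after this point
	 * cannot be reordered before the load that saw switch_handle
	 * go non-null.
	 */
	atomic_thread_fence(memory_order_acquire);
}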
@@ -154,8 +156,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		void *newsh = new_thread->switch_handle;
 
 		if (IS_ENABLED(CONFIG_SMP)) {
-			/* Active threads MUST have a null here */
+			/* Active threads must have a null here. And
+			 * it must be seen before the scheduler lock
+			 * is released!
+			 */
 			new_thread->switch_handle = NULL;
+			barrier_dmem_fence_full(); /* write barrier */
 		}
 		k_spin_release(&sched_spinlock);
 		arch_switch(newsh, &old_thread->switch_handle);