* 2011-09-15 Bernard first version
* 2019-07-28 zdzn add smp support
* 2023-02-21 GuEe-GUI mov cpu ofw init to setup
+ * 2024-04-29 Shell Add generic ticket spinlock using C11 atomic
*/

#include <rthw.h>

@@ -55,65 +56,101 @@ rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
};
#endif /* RT_USING_SMART */

-static inline void arch_spin_lock(arch_spinlock_t *lock)
+/* in support of C11 atomic */
+#if __STDC_VERSION__ >= 201112L
+#include <stdatomic.h>
+
+union _spinlock
+{
+    _Atomic(rt_uint32_t) _value;
+    struct
+    {
+        _Atomic(rt_uint16_t) owner;
+        _Atomic(rt_uint16_t) next;
+    } ticket;
+};
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
-    unsigned int tmp;
-
-    asm volatile(
-        "   sevl\n"
-        "1: wfe\n"
-        "2: ldaxr   %w0, %1\n"
-        "   cbnz    %w0, 1b\n"
-        "   stxr    %w0, %w2, %1\n"
-        "   cbnz    %w0, 2b\n"
-        : "=&r" (tmp), "+Q" (lock->lock)
-        : "r" (1)
-        : "cc", "memory");
+    union _spinlock *lock = (void *)_lock;
+
+    /**
+     * Just a note that this store is an atomic operation, though on arm64 an
+     * aligned 32-bit store is atomic even without the atomic API.
+     */
+    atomic_store_explicit(&lock->_value, 0, memory_order_relaxed);
}

-static inline int arch_spin_trylock(arch_spinlock_t *lock)
+rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *_lock)
{
-    unsigned int tmp;
-
-    asm volatile(
-        "   ldaxr   %w0, %1\n"
-        "   cbnz    %w0, 1f\n"
-        "   stxr    %w0, %w2, %1\n"
-        "1:\n"
-        : "=&r" (tmp), "+Q" (lock->lock)
-        : "r" (1)
-        : "cc", "memory");
-
-    return !tmp;
+    rt_bool_t rc;
+    rt_uint32_t readonce;
+    union _spinlock temp;
+    union _spinlock *lock = (void *)_lock;
+
+    readonce = atomic_load_explicit(&lock->_value, memory_order_acquire);
+    temp._value = readonce;
+
+    if (temp.ticket.owner != temp.ticket.next)
+    {
+        rc = RT_FALSE;
+    }
+    else
+    {
+        temp.ticket.next += 1;
+        rc = atomic_compare_exchange_strong_explicit(
+            &lock->_value, &readonce, temp._value,
+            memory_order_acquire, memory_order_relaxed);
+    }
+    return rc;
}

-static inline void arch_spin_unlock(arch_spinlock_t *lock)
+rt_inline rt_base_t _load_acq_exclusive(_Atomic(rt_uint16_t) *halfword)
{
-    asm volatile(
-        "   stlr    %w1, %0\n"
-        : "=Q" (lock->lock) : "r" (0) : "memory");
+    rt_uint32_t old;
+    __asm__ volatile("ldaxrh %w0, [%1]"
+                     : "=&r"(old)
+                     : "r"(halfword)
+                     : "memory");
+    return old;
}

-void rt_hw_spin_lock_init(arch_spinlock_t *lock)
+rt_inline void _send_event_local(void)
{
-    lock->lock = 0;
+    __asm__ volatile("sevl");
}

-void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
+rt_inline void _wait_for_event(void)
{
-    arch_spin_lock(lock);
+    __asm__ volatile("wfe" ::: "memory");
}

-void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
+void rt_hw_spin_lock(rt_hw_spinlock_t *_lock)
{
-    arch_spin_unlock(lock);
+    union _spinlock *lock = (void *)_lock;
+    rt_uint16_t ticket =
+        atomic_fetch_add_explicit(&lock->ticket.next, 1, memory_order_relaxed);
+
+    if (atomic_load_explicit(&lock->ticket.owner, memory_order_acquire) !=
+        ticket)
+    {
+        _send_event_local();
+        do
+        {
+            _wait_for_event();
+        }
+        while (_load_acq_exclusive(&lock->ticket.owner) != ticket);
+    }
}

-rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *lock)
+void rt_hw_spin_unlock(rt_hw_spinlock_t *_lock)
{
-    return arch_spin_trylock(lock);
+    union _spinlock *lock = (void *)_lock;
+    atomic_fetch_add_explicit(&lock->ticket.owner, 1, memory_order_release);
}

+#endif
+
static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    // load in cpu_hw_ids in cpuid_to_hwid,
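
For readers new to ticket locks, the discipline the new code implements can be sketched in portable C11, without the RT-Thread types or the AArch64 SEVL/WFE parking used above. This is an illustrative sketch, not part of the commit; the names ticket_lock_t, ticket_lock and ticket_unlock are made up for the example. Each locker takes a ticket from next and waits until owner reaches it; unlock advances owner, so waiters acquire the lock in FIFO order.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for the union _spinlock above: owner is the ticket
 * currently being served, next is the next ticket to hand out. */
typedef struct
{
    _Atomic(uint16_t) owner;
    _Atomic(uint16_t) next;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
    /* take a ticket; relaxed is enough, the acquire is on the wait below */
    uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

    /* busy-wait until our ticket is served; the kernel version parks the
     * core with SEVL/WFE instead of spinning on a plain load */
    while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
        ;
}

static void ticket_unlock(ticket_lock_t *l)
{
    /* pass ownership to the next ticket in FIFO order */
    atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

Keeping both 16-bit halves in one 32-bit word is what lets rt_hw_spin_trylock read a consistent owner/next snapshot with a single load and claim the ticket with one compare-and-swap.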