
Commit e6a7cd3

[libcpu] add hw-thread_self

Committed May 13, 2024
Signed-off-by: Shell <[email protected]>
1 parent 74b60b7 commit e6a7cd3
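In short: this commit adds an optional hardware-backed "thread self" pointer. When ARCH_USING_HW_THREAD_SELF is enabled, the current thread pointer lives in a dedicated per-CPU system register (TPIDR_EL1 on AArch64) rather than only in struct rt_cpu, the generic cpuid moves from TPIDR_EL1 to TPIDRRO_EL0, and the scheduler's critical_switch_flag moves from the per-CPU structure into the per-thread scheduling context. The payoff, visible in src/thread.c and src/scheduler_mp.c below, is that rt_thread_self(), rt_enter_critical(), and rt_exit_critical() no longer need to disable interrupts on this path.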

File tree: 9 files changed, +144 −45 lines
include/rtdef.h (+3 −1)

@@ -731,8 +731,10 @@ struct rt_cpu
     struct rt_thread *current_thread;
 
     rt_uint8_t irq_switch_flag:1;
-    rt_uint8_t critical_switch_flag:1;
     rt_uint8_t sched_lock_flag:1;
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_uint8_t critical_switch_flag:1;
+#endif /* ARCH_USING_HW_THREAD_SELF */
 
     rt_uint8_t current_priority;
     rt_list_t priority_table[RT_THREAD_PRIORITY_MAX];

include/rtsched.h (+1 −0)

@@ -55,6 +55,7 @@ struct rt_sched_thread_ctx
     rt_uint8_t stat;                   /**< thread status */
     rt_uint8_t sched_flag_locked:1;    /**< calling thread have the scheduler locked */
     rt_uint8_t sched_flag_ttmr_set:1;  /**< thread timer is start */
+    rt_uint8_t critical_switch_flag:1; /**< critical switch pending */
 
 #ifdef RT_USING_SMP
     rt_uint8_t bind_cpu;               /**< thread is bind to cpu */
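Note: paired with the rtdef.h hunk above, this moves critical_switch_flag from the per-CPU struct rt_cpu into the per-thread rt_sched_thread_ctx whenever ARCH_USING_HW_THREAD_SELF is set; src/scheduler_mp.c below hides the two storage locations behind a single CRITICAL_SWITCH_FLAG() macro.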

libcpu/Kconfig (+6 −0)

@@ -14,6 +14,8 @@ if ARCH_ARMV8 && ARCH_CPU_64BIT
         default y
     config ARCH_USING_GENERIC_CPUID
         bool "Using generic cpuid implemenation"
+        select ARCH_USING_HW_THREAD_SELF
+        default y if RT_USING_OFW
         default n
 endmenu
 endif

@@ -270,3 +272,7 @@ config ARCH_HOST_SIMULATOR
 config ARCH_CPU_STACK_GROWS_UPWARD
     bool
     default n
+
+config ARCH_USING_HW_THREAD_SELF
+    bool
+    default n
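Note: ARCH_USING_GENERIC_CPUID now selects ARCH_USING_HW_THREAD_SELF, consistent with the register split in context_gcc.S below: the generic cpuid is kept in TPIDRRO_EL0 so that TPIDR_EL1 is free to hold the thread pointer. The new ARCH_USING_HW_THREAD_SELF symbol is a hidden bool defaulting to n, so ports that do not select it are unaffected.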

libcpu/aarch64/common/context_gcc.S (+16 −4)

@@ -27,15 +27,27 @@ rt_thread_switch_interrupt_flag: .zero 8
 #endif
 
 .text
+
+#ifdef ARCH_USING_GENERIC_CPUID
+.globl rt_hw_cpu_id_set
+#else /* !ARCH_USING_GENERIC_CPUID */
 .weak rt_hw_cpu_id_set
+#endif /* ARCH_USING_GENERIC_CPUID */
 .type rt_hw_cpu_id_set, @function
 rt_hw_cpu_id_set:
-    mrs x0, mpidr_el1       /* MPIDR_EL1: Multi-Processor Affinity Register */
+#ifndef RT_USING_OFW
+    mrs x0, mpidr_el1       /* MPIDR_EL1: Multi-Processor Affinity Register */
 #ifdef ARCH_ARM_CORTEX_A55
-    lsr x0, x0, #8
+    lsr x0, x0, #8
+#endif
+    and x0, x0, #15
+#endif /* !RT_USING_OFW */
+
+#ifdef ARCH_USING_GENERIC_CPUID
+    msr tpidrro_el0, x0
+#else
+    msr tpidr_el1, x0
 #endif
-    and x0, x0, #15
-    msr tpidr_el1, x0
     ret
 
 /*
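With this hunk, rt_hw_cpu_id_set becomes a strong global symbol when ARCH_USING_GENERIC_CPUID is on (weak otherwise), derives the id from MPIDR_EL1 only when RT_USING_OFW is off (under OFW the caller supplies the id in x0, as entry_point.S below shows), and stores it in TPIDRRO_EL0 rather than TPIDR_EL1 when the generic cpuid is in use, leaving TPIDR_EL1 for the thread pointer.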

libcpu/aarch64/common/cpu.c (+23 −0)

@@ -231,6 +231,29 @@ int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_o
 
 #endif /*RT_USING_SMP*/
 
+/**
+ * Generic hw-cpu-id
+ */
+#ifdef ARCH_USING_GENERIC_CPUID
+#if RT_CPUS_NR > 1
+
+int rt_hw_cpu_id(void)
+{
+    long cpuid;
+    __asm__ volatile("mrs %0, tpidrro_el0":"=r"(cpuid));
+    return cpuid;
+}
+
+#else
+
+int rt_hw_cpu_id(void)
+{
+    return 0;
+}
+
+#endif /* RT_CPUS_NR > 1 */
+#endif /* ARCH_USING_GENERIC_CPUID */
+
 /**
  * @addtogroup ARM CPU
  */
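For illustration, here is a minimal standalone sketch of the same register read; generic_cpuid() is a hypothetical name for this example, not an RT-Thread API. TPIDRRO_EL0 is readable at EL0 but writable only from EL1 and above, so the kernel can publish the id once and any context can fetch it with a single mrs, no lock or IRQ masking needed.

/* A hedged, AArch64-only sketch mirroring the accessor added above;
 * generic_cpuid() is a hypothetical name, not an RT-Thread API. */
#ifdef __aarch64__
#include <stdio.h>

static inline long generic_cpuid(void)
{
    long id;
    /* single system-register read; each core sees its own copy */
    __asm__ volatile("mrs %0, tpidrro_el0" : "=r"(id));
    return id;
}

int main(void)
{
    /* prints whatever the privileged side stored in the register */
    printf("tpidrro_el0 = %ld\n", generic_cpuid());
    return 0;
}
#endif /* __aarch64__ */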

libcpu/aarch64/common/cpuport.h (+14 −25)

@@ -27,31 +27,6 @@ typedef struct
     rt_uint32_t value;
 } rt_hw_spinlock_t;
 
-/**
- * Generic hw-cpu-id
- */
-#ifdef ARCH_USING_GENERIC_CPUID
-
-#if RT_CPUS_NR > 0
-
-rt_inline int rt_hw_cpu_id(void)
-{
-    long cpuid;
-    __asm__ volatile("mrs %0, tpidr_el1":"=r"(cpuid));
-    return cpuid;
-}
-
-#else
-
-rt_inline int rt_hw_cpu_id(void)
-{
-    return 0;
-}
-
-#endif /* RT_CPUS_NR > 1 */
-
-#endif /* ARCH_USING_GENERIC_CPUID */
-
 #endif /* RT_USING_SMP */
 
 #define rt_hw_barrier(cmd, ...) \

@@ -106,5 +81,19 @@ rt_inline int __rt_ffs(int value)
 }
 
 #endif /* RT_USING_CPU_FFS */
+#ifdef ARCH_USING_HW_THREAD_SELF
+rt_inline struct rt_thread *rt_hw_thread_self(void)
+{
+    struct rt_thread *thread;
+    __asm__ volatile ("mrs %0, tpidr_el1":"=r"(thread));
+
+    return thread;
+}
+
+rt_inline void rt_hw_thread_set_self(struct rt_thread *thread)
+{
+    __asm__ volatile ("msr tpidr_el1, %0"::"r"(thread));
+}
+#endif /* ARCH_USING_HW_THREAD_SELF */
 
 #endif /*CPUPORT_H__*/
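The pair is deliberately asymmetric: rt_hw_thread_set_self() is called by the scheduler once per context switch, with interrupts masked (see rt_sched_post_ctx_switch in src/scheduler_mp.c below), while rt_hw_thread_self() may be called from any context and costs one register read. TPIDR_EL1 now holds a thread pointer rather than a cpu id, which is why the old tpidr_el1-based rt_hw_cpu_id() removed above reappears in cpu.c reading tpidrro_el0 instead.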

libcpu/aarch64/cortex-a/entry_point.S (+6 −4)

@@ -95,6 +95,8 @@ _start:
     /* Save cpu stack */
     get_phy stack_top, .boot_cpu_stack_top
     /* Save cpu id temp */
+    msr tpidrro_el0, xzr
+    /* Save thread self */
     msr tpidr_el1, xzr
 
     bl init_cpu_el

@@ -149,11 +151,11 @@ _secondary_cpu_entry:
 
     /* Get cpu id success */
     sub x0, x2, #1
-    msr tpidr_el1, x0       /* Save cpu id global */
-#else
-    bl rt_hw_cpu_id_set
-    mrs x0, tpidr_el1
+
+    /* Save cpu id global */
 #endif /* RT_USING_OFW */
+    bl rt_hw_cpu_id_set
+    bl rt_hw_cpu_id
 
     /* Set current cpu's stack top */
     sub x0, x0, #1

src/scheduler_mp.c (+68 −10)

@@ -99,6 +99,14 @@ static struct rt_spinlock _mp_scheduler_lock;
         rt_hw_local_irq_enable(level); \
     } while (0)
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag)
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
+
 static rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */

@@ -749,15 +757,15 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
         /* leaving critical region of global context since we can't schedule */
         SCHEDULER_CONTEXT_UNLOCK(pcpu);
 
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         error = -RT_ESCHEDLOCKED;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* pick the highest runnable thread, and pass the control to it */
         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);

@@ -828,7 +836,7 @@ void rt_schedule(void)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);

@@ -837,7 +845,7 @@ void rt_schedule(void)
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         /**

@@ -912,13 +920,13 @@ void rt_scheduler_do_irq_switch(void *context)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         /* flush critical & irq switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         SCHEDULER_CONTEXT_LOCK(pcpu);

@@ -1056,6 +1064,9 @@ void rt_sched_post_ctx_switch(struct rt_thread *thread)
     }
     /* safe to access since irq is masked out */
     pcpu->current_thread = thread;
+#ifdef ARCH_USING_HW_THREAD_SELF
+    rt_hw_thread_set_self(thread);
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 
 #ifdef RT_DEBUGING_CRITICAL

@@ -1101,9 +1112,11 @@ RTM_EXPORT(rt_exit_critical_safe);
  */
 rt_base_t rt_enter_critical(void)
 {
-    rt_base_t level;
     rt_base_t critical_level;
     struct rt_thread *current_thread;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */

@@ -1125,6 +1138,20 @@ rt_base_t rt_enter_critical(void)
     /* enable interrupt */
     rt_hw_local_irq_enable(level);
 
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        /* scheduler unavailable */
+        return -RT_EINVAL;
+    }
+
+    /* critical for local cpu */
+    RT_SCHED_CTX(current_thread).critical_lock_nest++;
+    critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return critical_level;
 }
 RTM_EXPORT(rt_enter_critical);

@@ -1134,9 +1161,11 @@ RTM_EXPORT(rt_enter_critical);
  */
 void rt_exit_critical(void)
 {
-    rt_base_t level;
     struct rt_thread *current_thread;
     rt_bool_t need_resched;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */

@@ -1157,8 +1186,8 @@ void rt_exit_critical(void)
     if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
     {
         /* is there any scheduling request unfinished? */
-        need_resched = pcpu->critical_switch_flag;
-        pcpu->critical_switch_flag = 0;
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* enable interrupt */
         rt_hw_local_irq_enable(level);

@@ -1174,6 +1203,35 @@ void rt_exit_critical(void)
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
     }
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        return;
+    }
+
+    /* the necessary memory barrier is done on irq_(dis|en)able */
+    RT_SCHED_CTX(current_thread).critical_lock_nest--;
+
+    /* may need a rescheduling */
+    if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
+    {
+        /* is there any scheduling request unfinished? */
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
+
+        if (need_resched)
+            rt_schedule();
+    }
+    else
+    {
+        /* each exit_critical is strictly corresponding to an enter_critical */
+        RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
+    }
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 RTM_EXPORT(rt_exit_critical);
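To see the shape of the logic CRITICAL_SWITCH_FLAG participates in, here is a simplified, portable sketch with hypothetical names (plain C stand-ins, not RT-Thread APIs): a nesting counter plus a pending flag, where a schedule requested inside a critical section is deferred and flushed at the outermost exit. The real code additionally handles the scheduler's own nesting level, per-CPU state, and SMP locking.

/* Hedged, portable sketch of the defer-and-flush pattern above;
 * names are hypothetical, not RT-Thread APIs. */
#include <stdio.h>
#include <stdbool.h>

/* stand-in for RT_SCHED_CTX(current_thread) */
struct sched_ctx
{
    int  critical_lock_nest;
    bool critical_switch_flag;
};

static struct sched_ctx cur;

static void enter_critical(void)
{
    cur.critical_lock_nest++;
}

static void request_schedule(void)
{
    if (cur.critical_lock_nest > 0)
    {
        /* scheduler is locked: record the request and defer it */
        cur.critical_switch_flag = true;
    }
    else
    {
        printf("schedule immediately\n");
    }
}

static void exit_critical(void)
{
    if (--cur.critical_lock_nest == 0 && cur.critical_switch_flag)
    {
        cur.critical_switch_flag = false;
        printf("run the deferred schedule\n"); /* rt_schedule() in the real code */
    }
}

int main(void)
{
    enter_critical();
    enter_critical();
    request_schedule(); /* deferred: we are inside a critical section */
    exit_critical();    /* still nested, nothing happens */
    exit_critical();    /* outermost exit flushes the flag */
    return 0;
}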

src/thread.c (+7 −1)

@@ -356,12 +356,18 @@ RTM_EXPORT(rt_thread_init);
 rt_thread_t rt_thread_self(void)
 {
 #ifdef RT_USING_SMP
-    rt_base_t lock;
     rt_thread_t self;
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+    self = rt_hw_thread_self();
+#else /* !ARCH_USING_HW_THREAD_SELF */
+    rt_base_t lock;
+
     lock = rt_hw_local_irq_disable();
     self = rt_cpu_self()->current_thread;
     rt_hw_local_irq_enable(lock);
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return self;
 
 #else /* !RT_USING_SMP */
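A hedged, portable sketch of why the new fast path is safe (hypothetical names, not RT-Thread code): hw_self stands in for TPIDR_EL1. Each core owns a private copy of that register, and the scheduler updates it while interrupts are masked during the context switch, so reading "self" is a single load with no window in which a reschedule could hand back a stale current_thread; the fallback path keeps the irq-disable bracket for exactly that reason.

/* Hedged, portable emulation of the mechanism; names are hypothetical. */
#include <stdio.h>

struct thread { const char *name; };

static struct thread *hw_self; /* per-CPU register stand-in (TPIDR_EL1) */

static void hw_thread_set_self(struct thread *t) { hw_self = t; }
static struct thread *hw_thread_self(void)       { return hw_self; }

int main(void)
{
    struct thread main_thread = { "main" };

    hw_thread_set_self(&main_thread);              /* scheduler: once per switch */
    printf("self = %s\n", hw_thread_self()->name); /* reader: one load, no irq mask */
    return 0;
}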
