@@ -99,6 +99,14 @@ static struct rt_spinlock _mp_scheduler_lock;
         rt_hw_local_irq_enable(level); \
     } while (0)
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag)
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
+
 static rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
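Everything below hinges on this accessor: with ARCH_USING_HW_THREAD_SELF defined, the pending-reschedule flag lives in the current thread's scheduling context instead of the per-CPU block, so it can be read and written without first resolving which CPU is running (which would itself require masking interrupts). A minimal stand-alone model of the two expansions; the structs here are illustrative stand-ins, not the kernel's real rt_cpu and rt_thread definitions:

    /* Toy model of the two CRITICAL_SWITCH_FLAG expansions. */
    #include <stdio.h>

    struct sched_ctx { int critical_switch_flag; };  /* per-thread storage */
    struct cpu       { int critical_switch_flag; };  /* per-CPU storage */
    struct thread    { struct sched_ctx sched; };

    #define RT_SCHED_CTX(thr) ((thr)->sched)

    #ifdef ARCH_USING_HW_THREAD_SELF
    /* thread-local: no need to know which CPU we are on */
    #define CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
    #else
    /* per-CPU: only valid while the caller cannot migrate */
    #define CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag)
    #endif

    int main(void)
    {
        struct cpu    c = {0};
        struct thread t = {{0}};

        CRITICAL_SWITCH_FLAG(&c, &t) = 1;
        printf("cpu=%d thread=%d\n", c.critical_switch_flag,
               t.sched.critical_switch_flag);
        return 0;
    }

Built with -DARCH_USING_HW_THREAD_SELF the assignment lands in the thread; without it, in the CPU block.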
@@ -749,15 +757,15 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
         /* leaving critical region of global context since we can't schedule */
         SCHEDULER_CONTEXT_UNLOCK(pcpu);
 
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         error = -RT_ESCHEDLOCKED;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* pick the highest runnable thread, and pass the control to it */
         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
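rt_sched_unlock_n_resched() makes the deferral explicit: when the caller still holds the local scheduler (critical_lock_nest > 1), the switch request is parked in CRITICAL_SWITCH_FLAG and the call returns -RT_ESCHEDLOCKED; the switch is replayed once the outermost critical section exits. A hedged sketch of a kernel-internal caller, assuming the rt_sched_lock() and rt_sched_insert_thread() helpers declared alongside this function; the error handling is illustrative:

    /* Sketch: wake a thread and reschedule, tolerating deferral. */
    static void wake_and_maybe_switch(struct rt_thread *target)
    {
        rt_sched_lock_level_t slvl;
        rt_err_t err;

        rt_sched_lock(&slvl);            /* lock the scheduler */
        rt_sched_insert_thread(target);  /* make target runnable */

        err = rt_sched_unlock_n_resched(slvl);
        if (err == -RT_ESCHEDLOCKED)
        {
            /* someone above us still holds rt_enter_critical();
             * the flag is set and the outermost rt_exit_critical()
             * will perform the switch */
        }
    }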
@@ -828,7 +836,7 @@ void rt_schedule(void)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
 
@@ -837,7 +845,7 @@ void rt_schedule(void)
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         /**
@@ -912,13 +920,13 @@ void rt_scheduler_do_irq_switch(void *context)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
    {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         /* flush critical & irq switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         SCHEDULER_CONTEXT_LOCK(pcpu);
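All three substitution sites in rt_schedule() and rt_scheduler_do_irq_switch() follow one pattern: critical_lock_nest > 1 means a caller above the scheduler's own critical section still holds the lock, so the request is recorded instead of performed. A toy model of that decision, with plain ints standing in for the kernel state:

    /* Toy model of the defer-or-switch decision. */
    #include <stdio.h>

    static int critical_lock_nest;    /* bumped by each enter_critical */
    static int critical_switch_flag;  /* i.e. CRITICAL_SWITCH_FLAG(...) */

    static void schedule_model(void)
    {
        if (critical_lock_nest > 1)
        {
            critical_switch_flag = 1;  /* caller holds the lock: defer */
        }
        else
        {
            critical_switch_flag = 0;  /* flush: a switch happens now */
            printf("context switch here\n");
        }
    }

    int main(void)
    {
        critical_lock_nest = 2;  /* as seen inside a user critical section */
        schedule_model();        /* defers, flag stays pending */
        printf("pending=%d\n", critical_switch_flag);

        critical_lock_nest = 1;  /* only the scheduler's own nesting */
        schedule_model();        /* switches and clears the flag */
        return 0;
    }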
@@ -1056,6 +1064,9 @@ void rt_sched_post_ctx_switch(struct rt_thread *thread)
     }
     /* safe to access since irq is masked out */
     pcpu->current_thread = thread;
+#ifdef ARCH_USING_HW_THREAD_SELF
+    rt_hw_thread_set_self(thread);
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 
 #ifdef RT_DEBUGING_CRITICAL
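rt_sched_post_ctx_switch() must now publish the incoming thread through the hardware channel as well, since rt_hw_thread_self() is what rt_enter_critical() and rt_exit_critical() consult from here on. The port supplies the two hooks; a plausible AArch64 sketch is below, using the spare software thread-ID register TPIDR_EL1. This is an assumption about how a port could do it, not a quote of RT-Thread's libcpu code:

    /* Hypothetical AArch64 hw-thread-self hooks (illustrative only). */
    #include <rtthread.h>

    struct rt_thread *rt_hw_thread_self(void)
    {
        struct rt_thread *thread;
        /* TPIDR_EL1 is assumed to be reserved for the kernel's use */
        __asm__ volatile ("mrs %0, tpidr_el1" : "=r"(thread));
        return thread;
    }

    void rt_hw_thread_set_self(struct rt_thread *thread)
    {
        __asm__ volatile ("msr tpidr_el1, %0" : : "r"(thread));
    }

Reading one core-local register is cheaper than indexing the per-CPU table, which is why the fast paths below can avoid masking interrupts.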
@@ -1101,9 +1112,11 @@ RTM_EXPORT(rt_exit_critical_safe);
 */
 rt_base_t rt_enter_critical(void)
 {
-    rt_base_t level;
     rt_base_t critical_level;
     struct rt_thread *current_thread;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1125,6 +1138,20 @@ rt_base_t rt_enter_critical(void)
     /* enable interrupt */
     rt_hw_local_irq_enable(level);
 
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        /* scheduler unavailable */
+        return -RT_EINVAL;
+    }
+
+    /* critical for local cpu */
+    RT_SCHED_CTX(current_thread).critical_lock_nest++;
+    critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return critical_level;
 }
 RTM_EXPORT(rt_enter_critical);
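In the hw-self branch, entering a critical section is just an increment on the current thread's own context: no IRQ mask, no per-CPU lookup. Usage stays exactly as before; a short sketch using only the APIs visible in this file (rt_exit_critical_safe() is exported just above this function):

    /* Usage sketch: a balanced preemption-free section. */
    #include <rtthread.h>

    static void touch_shared_state(void)
    {
        rt_base_t critical_level;

        critical_level = rt_enter_critical();  /* nestable; returns depth */
        /* ... data shared with other threads on this CPU; no preemption ... */
        rt_exit_critical_safe(critical_level); /* flags unbalanced exits in debug builds */
    }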
@@ -1134,9 +1161,11 @@ RTM_EXPORT(rt_enter_critical);
 */
 void rt_exit_critical(void)
 {
-    rt_base_t level;
     struct rt_thread *current_thread;
     rt_bool_t need_resched;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1157,8 +1186,8 @@ void rt_exit_critical(void)
     if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
     {
         /* is there any scheduling request unfinished? */
-        need_resched = pcpu->critical_switch_flag;
-        pcpu->critical_switch_flag = 0;
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
@@ -1174,6 +1203,35 @@ void rt_exit_critical(void)
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
     }
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        return;
+    }
+
+    /* the necessary memory barrier is done on irq_(dis|en)able */
+    RT_SCHED_CTX(current_thread).critical_lock_nest--;
+
+    /* may need a rescheduling */
+    if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
+    {
+        /* is there any scheduling request unfinished? */
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
+
+        if (need_resched)
+            rt_schedule();
+    }
+    else
+    {
+        /* each exit_critical is strictly corresponding to an enter_critical */
+        RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
+    }
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 RTM_EXPORT(rt_exit_critical);
 
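The exit path closes the loop: whichever rt_exit_critical() drops the nest count to zero picks up the parked CRITICAL_SWITCH_FLAG and replays the reschedule exactly once. A sketch of the round trip; wake_higher_prio() is a hypothetical helper standing in for anything that readies a higher-priority thread (rt_sem_release(), rt_thread_resume(), and so on):

    /* Sketch: a reschedule requested inside a critical section
     * is deferred, then replayed on the final exit. */
    #include <rtthread.h>

    extern void wake_higher_prio(void);  /* hypothetical helper */

    static void deferred_resched_demo(void)
    {
        rt_enter_critical();

        wake_higher_prio();  /* rt_schedule() runs, sees nest > 1,
                              * sets CRITICAL_SWITCH_FLAG, returns */

        /* still running here: the switch was deferred, not lost */

        rt_exit_critical();  /* nest hits 0, flag found set, so
                              * rt_schedule() finally switches away */
    }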