Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 3d4093f

Browse files
Committed May 27, 2024
scheduler: add cpu object for UP scheduler
Also, maintain the rt_current_thread in cpu object on UP scheduler.
1 parent 54ddf8c commit 3d4093f

File tree

6 files changed

+74
-41
lines changed

6 files changed

+74
-41
lines changed
 

‎include/rtdef.h

+10-1
Original file line numberDiff line numberDiff line change
@@ -765,10 +765,19 @@ struct rt_cpu
765765
struct rt_cpu_usage_stats cpu_stat;
766766
#endif
767767
};
768-
typedef struct rt_cpu *rt_cpu_t;
768+
769+
#else /* !RT_USING_SMP */
770+
struct rt_cpu
{
    /* Thread currently running on this (single) cpu; read by rt_thread_self()
     * and updated by the UP scheduler on a context switch. */
    struct rt_thread *current_thread;
};
769774

770775
#endif /* RT_USING_SMP */
771776

777+
typedef struct rt_cpu *rt_cpu_t;
778+
/* Noted: As API to reject writing to this variable from application codes */
779+
#define rt_current_thread rt_thread_self()
780+
772781
struct rt_thread;
773782

774783
#ifdef RT_USING_SMART

‎include/rtthread.h

+6-3
Original file line numberDiff line numberDiff line change
@@ -669,6 +669,12 @@ rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg);
669669
void rt_interrupt_enter(void);
670670
void rt_interrupt_leave(void);
671671

672+
/**
673+
* CPU object
674+
*/
675+
struct rt_cpu *rt_cpu_self(void);
676+
struct rt_cpu *rt_cpu_index(int index);
677+
672678
#ifdef RT_USING_SMP
673679

674680
/*
@@ -679,9 +685,6 @@ rt_base_t rt_cpus_lock(void);
679685
void rt_cpus_unlock(rt_base_t level);
680686
void rt_cpus_lock_status_restore(struct rt_thread *thread);
681687

682-
struct rt_cpu *rt_cpu_self(void);
683-
struct rt_cpu *rt_cpu_index(int index);
684-
685688
#ifdef RT_USING_DEBUG
686689
rt_base_t rt_cpu_get_id(void);
687690
#else /* !RT_USING_DEBUG */

‎src/cpu_up.c

+26
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,14 @@
66
* Change Logs:
77
* Date Author Notes
88
* 2024-04-19 Shell Fixup UP irq spinlock
9+
* 2024-05-22 Shell Add UP cpu object and
10+
* maintain the rt_current_thread inside it
911
*/
1012
#include <rthw.h>
1113
#include <rtthread.h>
1214

15+
static struct rt_cpu _cpu;
16+
1317
/**
1418
* @brief Initialize a static spinlock object.
1519
*
@@ -80,3 +84,25 @@ void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
8084
rt_exit_critical_safe(critical_level);
8185
rt_hw_interrupt_enable(level);
8286
}
87+
88+
/**
89+
* @brief This fucntion will return current cpu object.
90+
*
91+
* @return Return a pointer to the current cpu object.
92+
*/
93+
struct rt_cpu *rt_cpu_self(void)
94+
{
95+
return &_cpu;
96+
}
97+
98+
/**
99+
* @brief This fucntion will return the cpu object corresponding to index.
100+
*
101+
* @param index is the index of target cpu object.
102+
*
103+
* @return Return a pointer to the cpu object corresponding to index.
104+
*/
105+
struct rt_cpu *rt_cpu_index(int index)
106+
{
107+
return index == 0 ? &_cpu : RT_NULL;
108+
}

‎src/scheduler_mp.c

-17
Original file line numberDiff line numberDiff line change
@@ -1331,22 +1331,5 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
13311331
return RT_EOK;
13321332
}
13331333

1334-
/**
 * @brief Return the thread currently running on the local cpu (SMP build).
 *
 * @return Pointer to the running thread's control block.
 */
rt_thread_t rt_sched_thread_self(void)
{
#ifdef ARCH_USING_HW_THREAD_SELF
    /* Architecture provides a fast hardware-backed lookup of the running thread. */
    return rt_hw_thread_self();

#else /* !ARCH_USING_HW_THREAD_SELF */
    rt_thread_t self;
    rt_base_t lock;

    /* Mask local interrupts so this cpu cannot reschedule between reading
     * the cpu object and its current_thread field. */
    lock = rt_hw_local_irq_disable();
    self = rt_cpu_self()->current_thread;
    rt_hw_local_irq_enable(lock);

    return self;
#endif /* ARCH_USING_HW_THREAD_SELF */
}
1350-
13511334
/**@}*/
13521335
/**@endcond*/

‎src/scheduler_up.c

+16-19
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@ rt_uint8_t rt_thread_ready_table[32];
4848

4949
extern volatile rt_uint8_t rt_interrupt_nest;
5050
static rt_int16_t rt_scheduler_lock_nest;
51-
struct rt_thread *rt_current_thread = RT_NULL;
5251
rt_uint8_t rt_current_priority;
5352

5453
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
@@ -175,7 +174,7 @@ void rt_system_scheduler_start(void)
175174

176175
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
177176

178-
rt_current_thread = to_thread;
177+
rt_cpu_self()->current_thread = to_thread;
179178

180179
rt_sched_remove_thread(to_thread);
181180
RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
@@ -203,6 +202,8 @@ void rt_schedule(void)
203202
rt_base_t level;
204203
struct rt_thread *to_thread;
205204
struct rt_thread *from_thread;
205+
/* using local variable to avoid unnecessary function call */
206+
struct rt_thread *curr_thread = rt_thread_self();
206207

207208
/* disable interrupt */
208209
level = rt_hw_interrupt_disable();
@@ -219,28 +220,29 @@ void rt_schedule(void)
219220

220221
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
221222

222-
if ((RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
223+
if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
223224
{
224-
if (RT_SCHED_PRIV(rt_current_thread).current_priority < highest_ready_priority)
225+
if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
225226
{
226-
to_thread = rt_current_thread;
227+
to_thread = curr_thread;
227228
}
228-
else if (RT_SCHED_PRIV(rt_current_thread).current_priority == highest_ready_priority && (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
229+
else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
230+
&& (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
229231
{
230-
to_thread = rt_current_thread;
232+
to_thread = curr_thread;
231233
}
232234
else
233235
{
234236
need_insert_from_thread = 1;
235237
}
236238
}
237239

238-
if (to_thread != rt_current_thread)
240+
if (to_thread != curr_thread)
239241
{
240242
/* if the destination thread is not the same as current thread */
241243
rt_current_priority = (rt_uint8_t)highest_ready_priority;
242-
from_thread = rt_current_thread;
243-
rt_current_thread = to_thread;
244+
from_thread = curr_thread;
245+
rt_cpu_self()->current_thread = to_thread;
244246

245247
RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
246248

@@ -282,11 +284,11 @@ void rt_schedule(void)
282284
#ifdef RT_USING_SIGNALS
283285
/* check stat of thread for signal */
284286
level = rt_hw_interrupt_disable();
285-
if (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
287+
if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
286288
{
287289
extern void rt_thread_handle_sig(rt_bool_t clean_state);
288290

289-
RT_SCHED_CTX(rt_current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
291+
RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
290292

291293
rt_hw_interrupt_enable(level);
292294

@@ -310,8 +312,8 @@ void rt_schedule(void)
310312
}
311313
else
312314
{
313-
rt_sched_remove_thread(rt_current_thread);
314-
RT_SCHED_CTX(rt_current_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(rt_current_thread).stat & ~RT_THREAD_STAT_MASK);
315+
rt_sched_remove_thread(curr_thread);
316+
RT_SCHED_CTX(curr_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(curr_thread).stat & ~RT_THREAD_STAT_MASK);
315317
}
316318
}
317319
}
@@ -564,10 +566,5 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
564566
return -RT_EINVAL;
565567
}
566568

567-
rt_thread_t rt_sched_thread_self(void)
568-
{
569-
return rt_current_thread;
570-
}
571-
572569
/**@}*/
573570
/**@endcond*/

‎src/thread.c

+16-1
Original file line numberDiff line numberDiff line change
@@ -355,7 +355,22 @@ RTM_EXPORT(rt_thread_init);
355355
*/
356356
rt_thread_t rt_thread_self(void)
357357
{
358-
return rt_sched_thread_self();
358+
#ifndef RT_USING_SMP
359+
return rt_cpu_self()->current_thread;
360+
361+
#elif defined (ARCH_USING_HW_THREAD_SELF)
362+
return rt_hw_thread_self();
363+
364+
#else /* !ARCH_USING_HW_THREAD_SELF */
365+
rt_thread_t self;
366+
rt_base_t lock;
367+
368+
lock = rt_hw_local_irq_disable();
369+
self = rt_cpu_self()->current_thread;
370+
rt_hw_local_irq_enable(lock);
371+
372+
return self;
373+
#endif /* ARCH_USING_HW_THREAD_SELF */
359374
}
360375
RTM_EXPORT(rt_thread_self);
361376

0 commit comments

Comments
 (0)
Please sign in to comment.