Commit b13607b

polarvid authored and rcitach committed
feat: arm64: generic implementation of vector irq (RT-Thread#9336)
feat: overall implementation of vector irq

This patch generalizes the IRQ handling on UP/MP systems by adding the
`rt_hw_irq_exit()` and `rt_hw_vector_irq_sched()` APIs.

Changes:

- Added `rt_hw_irq_exit()` and `rt_hw_vector_irq_sched()` APIs for unified IRQ management.
- Refactored assembly code for both UP and MP systems to use the new IRQ handling flow.
- Removed redundant code and optimized exception handling paths.

Signed-off-by: Shell <[email protected]>
1 parent 3a40e58 commit b13607b
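In practical terms, the patch keeps the architecture-neutral bookkeeping in the
shared vector_irq entry and moves only the scheduling decision into the
per-configuration rt_hw_vector_irq_sched() hook, with rt_hw_irq_exit() serving
as the common register-restore tail. A minimal C-level sketch of the resulting
call order follows; the sequence and the void *eframe parameter are taken from
the assembly in this commit, while the wrapper function and the zero-argument
rt_hw_trap_irq() prototype are assumptions for illustration only.

/* Illustrative C rendering of the unified IRQ entry flow; the real code is
 * the AArch64 assembly in the diffs below. */
void rt_hw_vector_irq_sched(void *eframe);   /* prototype stated in the patch */
void rt_hw_trap_irq(void);                   /* assumed prototype             */

static void vector_irq_flow(void *eframe)    /* eframe = sp after SAVE_IRQ_CONTEXT */
{
    rt_interrupt_enter();               /* trace IRQ nesting level            */

    /* SAVE_USER_CTX: save the lwp user-thread context if taken from EL0     */

    rt_hw_trap_irq();                   /* dispatch the IRQ (frame is in x0)  */

    /* RESTORE_USER_CTX: undo the save above, if it happened                 */

    rt_interrupt_leave();               /* drop IRQ nesting level             */

    rt_hw_vector_irq_sched(eframe);     /* per-configuration scheduling hook  */

    /* control then reaches rt_hw_irq_exit, the shared restore-and-eret tail */
}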

File tree: 7 files changed, +97 -109 lines

libcpu/aarch64/common/include/asm-generic.h (+8)

@@ -23,6 +23,14 @@
     .cfi_endproc;   \
     .size name, .-name;
 
+#define TRACE_SYMBOL(name)
+
+.macro NEVER_RETURN
+#ifdef RT_USING_DEBUG
+    b       .
+#endif /* RT_USING_DEBUG */
+.endm
+
 .macro GET_THREAD_SELF, dst:req
 #ifdef ARCH_USING_HW_THREAD_SELF
     mrs     x0, tpidr_el1
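The new NEVER_RETURN marker only emits code in debug builds: under
RT_USING_DEBUG it expands to "b .", an endless self-branch, so a context
switch that unexpectedly falls through shows up as a visible hang instead of
running whatever bytes follow. A rough C analogue of the same idea
(hypothetical macro, illustration only) would be:

/* Debug builds spin forever at a point that must never be reached;
 * release builds emit nothing, matching the assembly macro above. */
#ifdef RT_USING_DEBUG
#define NEVER_RETURN()  do { for (;;) { } } while (0)
#else
#define NEVER_RETURN()  do { } while (0)
#endif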

libcpu/aarch64/common/include/vector_gcc.h (+19 -51)

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2024-03-28     Shell        Move vector handling codes from context_gcc.S
+ * 2024-04-08     Shell        Optimizing exception switch between u-space/kernel,
  */
 
 #ifndef __ARM64_INC_VECTOR_H__
@@ -45,8 +46,6 @@
     mrs     x2, elr_el1
 
     stp     x2, x3, [sp, #-0x10]!
-
-    mov     x0, sp      /* Move SP into X0 for saving. */
 .endm
 
 #ifdef RT_USING_SMP
@@ -55,60 +54,29 @@
 #include "../up/context_gcc.h"
 #endif
 
-.macro RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH
-    /* the SP is already ok */
-    ldp     x2, x3, [sp], #0x10     /* SPSR and ELR. */
-
-    tst     x3, #0x1f
-    msr     spsr_el1, x3
-    msr     elr_el1, x2
-
-    ldp     x29, x30, [sp], #0x10
-    msr     sp_el0, x29
-    ldp     x28, x29, [sp], #0x10
-    msr     fpcr, x28
-    msr     fpsr, x29
-    ldp     x28, x29, [sp], #0x10
-    ldp     x26, x27, [sp], #0x10
-    ldp     x24, x25, [sp], #0x10
-    ldp     x22, x23, [sp], #0x10
-    ldp     x20, x21, [sp], #0x10
-    ldp     x18, x19, [sp], #0x10
-    ldp     x16, x17, [sp], #0x10
-    ldp     x14, x15, [sp], #0x10
-    ldp     x12, x13, [sp], #0x10
-    ldp     x10, x11, [sp], #0x10
-    ldp     x8, x9, [sp], #0x10
-    ldp     x6, x7, [sp], #0x10
-    ldp     x4, x5, [sp], #0x10
-    ldp     x2, x3, [sp], #0x10
-    ldp     x0, x1, [sp], #0x10
-    RESTORE_FPU sp
+.macro SAVE_USER_CTX, eframex, tmpx
 #ifdef RT_USING_SMART
-    beq     arch_ret_to_user
-#endif
-    eret
-.endm
-
-.macro SAVE_USER_CTX
-    mrs     x1, spsr_el1
-    and     x1, x1, 0xf
-    cmp     x1, xzr
-
-    bne     1f
-    bl      lwp_uthread_ctx_save
-    ldp     x0, x1, [sp]
+    mrs     \tmpx, spsr_el1
+    and     \tmpx, \tmpx, 0xf
+    cbz     \tmpx, 1f
+    b       2f
 1:
+    mov     x0, \eframex
+    bl      lwp_uthread_ctx_save
+2:
+#endif /* RT_USING_SMART */
 .endm
 
-.macro RESTORE_USER_CTX, ctx
-    ldr     x1, [\ctx, #CONTEXT_OFFSET_SPSR_EL1]
-    and     x1, x1, 0x1f
-    cmp     x1, xzr
-
-    bne     1f
-    bl      lwp_uthread_ctx_restore
+.macro RESTORE_USER_CTX, eframex, tmpx
+#ifdef RT_USING_SMART
+    ldr     \tmpx, [\eframex, #CONTEXT_OFFSET_SPSR_EL1]
+    and     \tmpx, \tmpx, 0x1f
+    cbz     \tmpx, 1f
+    b       2f
 1:
+    bl      lwp_uthread_ctx_restore
+2:
+#endif /* RT_USING_SMART */
 .endm
 
 #endif /* __ARM64_INC_VECTOR_H__ */
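The reworked SAVE_USER_CTX/RESTORE_USER_CTX macros now take the exception
frame and a scratch register as parameters and only call into the lwp layer
when the saved SPSR_EL1 mode bits indicate the exception came from EL0
(user space). A rough C analogue of the SAVE_USER_CTX check, with an assumed
spsr argument and helper name, illustration only:

/* The lwp user-thread context is saved only when SPSR_EL1.M[3:0] == 0,
 * i.e. the exception was taken from EL0; kernel-mode entries skip the call. */
static void save_user_ctx(void *eframe, unsigned long spsr)
{
#ifdef RT_USING_SMART
    if ((spsr & 0xf) == 0)              /* came from user space (EL0) */
        lwp_uthread_ctx_save(eframe);   /* same hook the macro calls  */
#endif
}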

libcpu/aarch64/common/mp/context_gcc.S (+2)

@@ -129,5 +129,7 @@ rt_hw_context_switch_interrupt:
     b       _context_switch_exit
 
 _context_switch_exit:
+    .local _context_switch_exit
+
     clrex
     RESTORE_CONTEXT_SWITCH

libcpu/aarch64/common/mp/vector_gcc.S (+9 -27)

@@ -12,41 +12,23 @@
 #define __ASSEMBLY__
 #endif
 
-#include "../include/vector_gcc.h"
+#include "vector_gcc.h"
 #include "context_gcc.h"
 
 .section .text
 
-.globl vector_fiq
 vector_fiq:
+    .globl vector_fiq
     b       .
 
 .globl rt_hw_irq_exit
 
-START_POINT(vector_irq)
-    SAVE_IRQ_CONTEXT
-    stp     x0, x1, [sp, #-0x10]!   /* X0 is thread sp */
-
-    bl      rt_interrupt_enter
-    ldp     x0, x1, [sp]
-
-#ifdef RT_USING_SMART
-    SAVE_USER_CTX
-#endif /* RT_USING_SMART */
-
-    bl      rt_hw_trap_irq
-
-#ifdef RT_USING_SMART
-    ldp     x0, x1, [sp]
-    RESTORE_USER_CTX x0
-#endif /* RT_USING_SMART */
-
-    bl      rt_interrupt_leave
+/**
+ * void rt_hw_vector_irq_sched(void *eframe)
+ * @brief do IRQ scheduling
+ */
+rt_hw_vector_irq_sched:
+    .globl rt_hw_vector_irq_sched
 
-    ldp     x0, x1, [sp], #0x10
     bl      rt_scheduler_do_irq_switch
-
-rt_hw_irq_exit:
-    RESTORE_IRQ_CONTEXT
-
-START_POINT_END(vector_irq)
+    b       rt_hw_irq_exit

libcpu/aarch64/common/up/context_gcc.S (+3)

@@ -45,6 +45,7 @@ rt_hw_context_switch_to:
     clrex
     ldr     x0, [x0]
     RESTORE_CONTEXT_SWITCH x0
+    NEVER_RETURN
 
 /*
  * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
@@ -62,6 +63,7 @@ rt_hw_context_switch:
     ldr     x0, [x1]            // get new task stack pointer
 
     RESTORE_CONTEXT_SWITCH x0
+    NEVER_RETURN
 
 .globl rt_thread_switch_interrupt_flag
 .globl rt_interrupt_from_thread
@@ -115,3 +117,4 @@ rt_hw_context_switch_interrupt_do:
     ldr     x0, [x4]            // get new task's stack pointer
 
     RESTORE_CONTEXT_SWITCH x0
+    NEVER_RETURN

libcpu/aarch64/common/up/vector_gcc.S (+13 -16)

@@ -22,28 +22,24 @@
 
 .section .text
 
+vector_fiq:
     .align 8
     .globl vector_fiq
-vector_fiq:
+
     SAVE_IRQ_CONTEXT
     bl      rt_hw_trap_fiq
     RESTORE_IRQ_CONTEXT
 
-.globl rt_interrupt_enter
-.globl rt_interrupt_leave
 .globl rt_thread_switch_interrupt_flag
-.globl rt_interrupt_from_thread
-.globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt_do
 
+/**
+ * void rt_hw_vector_irq_sched(void *eframe)
+ * @brief do IRQ scheduling
+ */
+rt_hw_vector_irq_sched:
+    .globl rt_hw_vector_irq_sched
     .align 8
-    .globl vector_irq
-vector_irq:
-    SAVE_IRQ_CONTEXT
-
-    bl      rt_interrupt_enter
-    bl      rt_hw_trap_irq
-    bl      rt_interrupt_leave
 
 /**
  * if rt_thread_switch_interrupt_flag set, jump to
@@ -52,12 +48,13 @@ vector_irq:
     ldr     x1, =rt_thread_switch_interrupt_flag
     ldr     x2, [x1]
     cmp     x2, #1
-    b.ne    vector_irq_exit
+    bne     1f
 
-    mov     x2, #0 // clear flag
+    /* clear flag */
+    mov     x2, #0
     str     x2, [x1]
 
     bl      rt_hw_context_switch_interrupt_do
 
-vector_irq_exit:
-    RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH
+1:
+    b       rt_hw_irq_exit
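Both configurations now expose the same rt_hw_vector_irq_sched(void *eframe)
entry point but resolve it differently: the MP variant hands the frame to
rt_scheduler_do_irq_switch, while the UP variant checks
rt_thread_switch_interrupt_flag and performs the deferred switch itself. A
hypothetical C rendering of the two bodies (the real ones are the assembly
above, and both fall through to the shared rt_hw_irq_exit tail):

/* Hypothetical C rendering of the per-configuration scheduling hook. */
void rt_hw_vector_irq_sched(void *eframe)
{
#ifdef RT_USING_SMP
    /* MP: let the scheduler decide whether to switch away from the
     * interrupted context, using the saved exception frame. */
    rt_scheduler_do_irq_switch(eframe);
#else
    /* UP: honor a switch request raised while the IRQ was being handled. */
    if (rt_thread_switch_interrupt_flag == 1)
    {
        rt_thread_switch_interrupt_flag = 0;     /* clear the flag */
        rt_hw_context_switch_interrupt_do();
    }
#endif
}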

libcpu/aarch64/common/vector_gcc.S (+43 -15)

@@ -67,32 +67,60 @@ system_vectors:
     b       vector_serror
 
 #include "include/vector_gcc.h"
+#define EFRAMEX x19
 
 START_POINT(vector_exception)
     SAVE_IRQ_CONTEXT
-    stp     x0, x1, [sp, #-0x10]!
-#ifdef RT_USING_SMART
-    SAVE_USER_CTX
-#endif
+    mov     EFRAMEX, sp
+
+    SAVE_USER_CTX EFRAMEX, x0
 
+    mov     x0, EFRAMEX
     bl      rt_hw_trap_exception
-#ifdef RT_USING_SMART
-    ldp     x0, x1, [sp]
-    RESTORE_USER_CTX x0
-#endif
+    RESTORE_USER_CTX EFRAMEX, x0
 
-    ldp     x0, x1, [sp], #0x10
-    RESTORE_IRQ_CONTEXT_WITHOUT_MMU_SWITCH
+    RESTORE_IRQ_CONTEXT
 START_POINT_END(vector_exception)
 
 START_POINT(vector_serror)
     SAVE_IRQ_CONTEXT
+    mov     EFRAMEX, sp
 
-#ifdef RT_USING_SMART
-    SAVE_USER_CTX
-#endif
+    SAVE_USER_CTX EFRAMEX, x0
 
-    stp     x0, x1, [sp, #-0x10]!
+    mov     x0, EFRAMEX
     bl      rt_hw_trap_serror
-    b .
+
+    RESTORE_USER_CTX EFRAMEX, x0
+
+    NEVER_RETURN
 START_POINT_END(vector_serror)
+
+START_POINT(vector_irq)
+    SAVE_IRQ_CONTEXT
+    mov     EFRAMEX, sp
+
+    /* trace IRQ level */
+    bl      rt_interrupt_enter
+
+    SAVE_USER_CTX EFRAMEX, x0
+
+    /* handline IRQ */
+    mov     x0, EFRAMEX
+    bl      rt_hw_trap_irq
+
+    RESTORE_USER_CTX EFRAMEX, x0
+
+    /* restore IRQ level */
+    bl      rt_interrupt_leave
+
+    mov     x0, EFRAMEX
+    bl      rt_hw_vector_irq_sched
+
+    b       rt_hw_irq_exit
+START_POINT_END(vector_irq)
+
+rt_hw_irq_exit:
+    .globl rt_hw_irq_exit
+
+    RESTORE_IRQ_CONTEXT
