
Commit dacd176

carlocaione authored and nashif committed

aarch64: userspace: Implement syscalls

This patch adds the code managing the syscalls. The privileged stack is set
up before jumping into the real syscall.

Signed-off-by: Carlo Caione <[email protected]>

1 parent f2995bc · commit dacd176

8 files changed: +211 -1 lines changed

arch/arm/core/aarch64/Kconfig

Lines changed: 3 additions & 0 deletions

@@ -64,6 +64,9 @@ config AARCH64_IMAGE_HEADER
 	  This option enables standard ARM64 boot image header used by Linux
 	  and understood by loaders such as u-boot or Xen xl tool.
 
+config PRIVILEGED_STACK_SIZE
+	default 4096
+
 if CPU_CORTEX_A
 
 config ARMV8_A_NS

arch/arm/core/aarch64/switch.S

Lines changed: 5 additions & 0 deletions

@@ -111,6 +111,11 @@ SECTION_FUNC(TEXT, z_arm64_sync_exc)
 	cmp	x1, #_SVC_CALL_RUNTIME_EXCEPT
 	beq	oops
 
+#ifdef CONFIG_USERSPACE
+	cmp	x1, #_SVC_CALL_SYSTEM_CALL
+	beq	z_arm64_do_syscall
+#endif
+
 #ifdef CONFIG_IRQ_OFFLOAD
 	cmp	x1, #_SVC_CALL_IRQ_OFFLOAD
 	beq	offload
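
As a reading aid, the cmp/beq chain in this hunk behaves like a C switch on
the SVC immediate (already extracted from ESR_EL1 into x1 earlier in
z_arm64_sync_exc). A sketch with hypothetical DEMO_ constants mirroring the
_SVC_CALL_* IDs from syscall.h:

#include <stdint.h>

/* Mirror the _SVC_CALL_* values defined in syscall.h by this patch */
#define DEMO_SVC_IRQ_OFFLOAD	1
#define DEMO_SVC_RUNTIME_EXCEPT	2
#define DEMO_SVC_SYSTEM_CALL	3

/* Each case stands for one cmp/beq pair in the assembly above */
void demo_svc_dispatch(uint64_t svc_imm)
{
	switch (svc_imm) {
	case DEMO_SVC_RUNTIME_EXCEPT:
		/* beq oops */
		break;
	case DEMO_SVC_SYSTEM_CALL:
		/* beq z_arm64_do_syscall, compiled in only with
		 * CONFIG_USERSPACE=y
		 */
		break;
	case DEMO_SVC_IRQ_OFFLOAD:
		/* beq offload, compiled in only with CONFIG_IRQ_OFFLOAD=y */
		break;
	}
}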

arch/arm/core/aarch64/thread.c

Lines changed: 4 additions & 0 deletions

@@ -54,6 +54,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	}
 
 	pInitCtx->tpidrro_el0 = 0x0;
+	thread->arch.priv_stack_start = 0;
 #else
 	pInitCtx->elr = (uint64_t)z_thread_entry;
 #endif
@@ -82,6 +83,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	z_arch_esf_t *pInitCtx;
 	uintptr_t stack_ptr;
 
+	/* Set up the privileged stack */
+	_current->arch.priv_stack_start = (uint64_t)(_current->stack_obj);
+
 	/* Reset the stack pointer to the base discarding any old context */
 	stack_ptr = Z_STACK_PTR_ALIGN(_current->stack_info.start +
 				      _current->stack_info.size -

arch/arm/core/aarch64/userspace.S

Lines changed: 55 additions & 0 deletions

@@ -85,6 +85,61 @@ abv_fail:
 	mov	x0, #-1
 	ret
 
+/*
+ * System call entry point.
+ */
+
+GTEXT(z_arm64_do_syscall)
+SECTION_FUNC(TEXT, z_arm64_do_syscall)
+	/* Recover the syscall parameters from the ESF */
+	ldp	x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
+	ldp	x2, x3, [sp, ___esf_t_x2_x3_OFFSET]
+	ldp	x4, x5, [sp, ___esf_t_x4_x5_OFFSET]
+
+	/* Recover the syscall ID */
+	ldr	x8, [sp, ___esf_t_x8_x9_OFFSET]
+
+	/* Check whether the ID is valid */
+	ldr	x9, =K_SYSCALL_LIMIT
+	cmp	x8, x9
+	blo	valid_syscall_id
+	ldr	x8, =K_SYSCALL_BAD
+
+valid_syscall_id:
+	ldr	x9, =_k_syscall_table
+	ldr	x9, [x9, x8, lsl #3]
+
+	/* Recover the privileged stack */
+#ifdef CONFIG_SMP
+	get_cpu	x10, x8
+	ldr	x10, [x10, #___cpu_t_current_OFFSET]
+#else
+	ldr	x10, =_kernel
+	ldr	x10, [x10, #_kernel_offset_to_current]
+#endif
+	ldr	x10, [x10, #_thread_offset_to_priv_stack_start]
+	add	x10, x10, #CONFIG_PRIVILEGED_STACK_SIZE
+
+	/* Save the original SP on the privileged stack */
+	mov	x11, sp
+	mov	sp, x10
+	str	x11, [sp, #-16]!
+
+	/* Jump into the syscall */
+	msr	daifclr, #(DAIFSET_IRQ_BIT)
+	blr	x9
+	msr	daifset, #(DAIFSET_IRQ_BIT)
+
+	/* Restore the original SP containing the ESF */
+	ldr	x11, [sp], #16
+	mov	sp, x11
+
+	/* Save the return value into the ESF */
+	str	x0, [sp, ___esf_t_x0_x1_OFFSET]
+
+	/* Return from exception */
+	b	z_arm64_exit_exc
+
 /*
  * Routine to jump into userspace
  *
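
For readers less fluent in AArch64 assembly, here is a minimal,
self-contained C model of the dispatch logic above; every demo_ name is a
hypothetical stand-in, not a Zephyr symbol. It mirrors the ID check against
K_SYSCALL_LIMIT, the redirect of invalid IDs to a bad-syscall handler, and
the indirect call through the table; the one thing C cannot express is the
SP switch onto the per-thread privileged stack that the assembly performs
around "blr x9".

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the generated table and limits */
typedef uintptr_t (*demo_syscall_fn)(uintptr_t a1, uintptr_t a2);

static uintptr_t demo_bad_syscall(uintptr_t a1, uintptr_t a2)
{
	(void)a1;
	(void)a2;
	return (uintptr_t)-1;	/* plays the role of the K_SYSCALL_BAD entry */
}

static uintptr_t demo_sys_add(uintptr_t a1, uintptr_t a2)
{
	return a1 + a2;
}

#define DEMO_SYSCALL_LIMIT 1U	/* plays the role of K_SYSCALL_LIMIT */

static demo_syscall_fn demo_table[] = { demo_sys_add };

/* Mirrors z_arm64_do_syscall: validate the ID, fetch the handler from the
 * table, call it with the arguments recovered from the ESF, and hand the
 * result back (x0 in the real code)
 */
static uintptr_t demo_do_syscall(uint64_t id, uintptr_t a1, uintptr_t a2)
{
	demo_syscall_fn fn = (id < DEMO_SYSCALL_LIMIT) ? demo_table[id]
						       : demo_bad_syscall;
	return fn(a1, a2);
}

int main(void)
{
	printf("%lu\n", (unsigned long)demo_do_syscall(0, 2, 3)); /* 5 */
	printf("%ld\n", (long)demo_do_syscall(9, 2, 3));  /* -1: bad ID */
	return 0;
}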

arch/arm/core/offsets/offsets_aarch64.c

Lines changed: 4 additions & 0 deletions

@@ -29,6 +29,10 @@
 #include <kernel_arch_data.h>
 #include <kernel_offsets.h>
 
+#ifdef CONFIG_USERSPACE
+GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
+#endif
+
 GEN_NAMED_OFFSET_SYM(_callee_saved_t, x19, x19_x20);
 GEN_NAMED_OFFSET_SYM(_callee_saved_t, x21, x21_x22);
 GEN_NAMED_OFFSET_SYM(_callee_saved_t, x23, x23_x24);

arch/arm/include/aarch64/offsets_short_arch.h

Lines changed: 5 additions & 0 deletions

@@ -9,4 +9,9 @@
 
 #include <offsets.h>
 
+#ifdef CONFIG_USERSPACE
+#define _thread_offset_to_priv_stack_start \
+	(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
+#endif
+
 #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_ */
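
A side note on the summed-offset idiom above: adding the offset of .arch
within the thread struct to the offset of .priv_stack_start within the arch
struct yields the same constant as offsetof on the nested field, which is
exactly the displacement the assembly uses. A compile-time sketch with
hypothetical demo_ types (not the real Zephyr structs):

#include <stddef.h>

/* Hypothetical stand-ins for struct k_thread and struct _thread_arch */
struct demo_thread_arch {
	unsigned long priv_stack_start;
};

struct demo_thread {
	char other_members[64];	/* whatever precedes .arch in the thread */
	struct demo_thread_arch arch;
};

/* The macro above builds the same constant the compiler would compute */
_Static_assert(offsetof(struct demo_thread, arch) +
	       offsetof(struct demo_thread_arch, priv_stack_start) ==
	       offsetof(struct demo_thread, arch.priv_stack_start),
	       "summed offsets reach the nested field");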

include/arch/arm/aarch64/syscall.h

Lines changed: 132 additions & 0 deletions

@@ -19,6 +19,7 @@
 #define _SVC_CALL_CONTEXT_SWITCH	0
 #define _SVC_CALL_IRQ_OFFLOAD		1
 #define _SVC_CALL_RUNTIME_EXCEPT	2
+#define _SVC_CALL_SYSTEM_CALL		3
 
 #ifdef CONFIG_USERSPACE
 #ifndef _ASMLANGUAGE
@@ -31,6 +32,137 @@
 extern "C" {
 #endif
 
+/*
+ * Syscall invocation macros. arm-specific machine constraints used to ensure
+ * args land in the proper registers.
+ */
+static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
+					     uintptr_t arg3, uintptr_t arg4,
+					     uintptr_t arg5, uintptr_t arg6,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r1 __asm__("x1") = arg2;
+	register uint64_t r2 __asm__("x2") = arg3;
+	register uint64_t r3 __asm__("x3") = arg4;
+	register uint64_t r4 __asm__("x4") = arg5;
+	register uint64_t r5 __asm__("x5") = arg6;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r4), "r" (r5), "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
+					     uintptr_t arg3, uintptr_t arg4,
+					     uintptr_t arg5,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r1 __asm__("x1") = arg2;
+	register uint64_t r2 __asm__("x2") = arg3;
+	register uint64_t r3 __asm__("x3") = arg4;
+	register uint64_t r4 __asm__("x4") = arg5;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r4), "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
+					     uintptr_t arg3, uintptr_t arg4,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r1 __asm__("x1") = arg2;
+	register uint64_t r2 __asm__("x2") = arg3;
+	register uint64_t r3 __asm__("x3") = arg4;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
+					     uintptr_t arg3,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r1 __asm__("x1") = arg2;
+	register uint64_t r2 __asm__("x2") = arg3;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r1 __asm__("x1") = arg2;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
+					     uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0") = arg1;
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r8)
+			 : "memory");
+	return ret;
+}
+
+static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
+{
+	register uint64_t ret __asm__("x0");
+	register uint64_t r8 __asm__("x8") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r8)
+			 : "memory");
+
+	return ret;
+}
+
 static inline bool arch_is_user_context(void)
 {
 	uint64_t tpidrro_el0;
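
To illustrate how these invocation routines are meant to be used: a
user-mode syscall wrapper loads the arguments into x0-x5 and the call ID
into x8, then traps via "svc #_SVC_CALL_SYSTEM_CALL" into
z_arm64_do_syscall. A minimal hypothetical sketch (DEMO_SYSCALL_ID and
demo_syscall are stand-ins; real wrappers are generated with real
K_SYSCALL_* IDs):

/* Hypothetical wrapper, illustration only; not part of the patch */
#define DEMO_SYSCALL_ID 42	/* stand-in for a generated K_SYSCALL_* ID */

static inline uintptr_t demo_syscall(uintptr_t a, uintptr_t b)
{
	/* a -> x0, b -> x1, ID -> x8; the return value comes back in x0,
	 * written into the ESF by z_arm64_do_syscall before the
	 * exception return
	 */
	return arch_syscall_invoke2(a, b, DEMO_SYSCALL_ID);
}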

include/arch/arm/aarch64/thread.h

Lines changed: 3 additions & 1 deletion

@@ -40,7 +40,9 @@ struct _callee_saved {
 typedef struct _callee_saved _callee_saved_t;
 
 struct _thread_arch {
-	/* empty */
+#ifdef CONFIG_USERSPACE
+	uint64_t priv_stack_start;
+#endif
 };
 
 typedef struct _thread_arch _thread_arch_t;
