
Commit 1c047c9

Andy Gross authored and andrewboie committed
arm: userspace: Add ARM userspace infrastructure
This patch adds support for userspace on ARM architectures: arch-specific calls for transitioning threads to user mode, system calls, and the associated handlers.

Signed-off-by: Andy Gross <[email protected]>
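For context, a minimal sketch of how an application exercises this path, using the generic Zephyr thread API (K_USER is the same option flag _new_thread() checks in thread.c below; the stack size and priority are illustrative only):

    #include <zephyr.h>

    K_THREAD_STACK_DEFINE(user_stack, 1024);
    static struct k_thread user_thread;

    /* Entry point runs unprivileged; kernel services are reached
     * through the SVC-based system call path added in swap.S.
     */
    static void user_entry(void *p1, void *p2, void *p3)
    {
    }

    void spawn_user_thread(void)
    {
        /* K_USER makes the initial PC point at _arch_user_mode_enter
         * instead of _thread_entry, so privileges are dropped before
         * user_entry runs.
         */
        k_thread_create(&user_thread, user_stack,
                        K_THREAD_STACK_SIZEOF(user_stack),
                        user_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(7), K_USER, K_NO_WAIT);
    }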
1 parent 9ccdcb9 commit 1c047c9

14 files changed: +542 −44 lines

arch/arm/core/CMakeLists.txt (+1)

@@ -16,6 +16,7 @@ zephyr_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_sources_ifdef(CONFIG_CPLUSPLUS __aeabi_atexit.c)
 zephyr_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
 zephyr_sources_ifdef(CONFIG_CPU_CORTEX_M0 irq_relay.S)
+zephyr_sources_ifdef(CONFIG_USERSPACE userspace.S)
 
 add_subdirectory_ifdef(CONFIG_CPU_CORTEX_M cortex_m)
 add_subdirectory_ifdef(CONFIG_CPU_HAS_MPU cortex_m/mpu)

arch/arm/core/Kconfig (+1 −9)

@@ -23,7 +23,7 @@ config CPU_CORTEX_M
 	select HAS_FLASH_LOAD_OFFSET
 	select HAS_DTS
 	select ARCH_HAS_STACK_PROTECTION if ARM_CORE_MPU
-	select ARCH_HAS_USERSPACE if ARM_USERSPACE
+	select ARCH_HAS_USERSPACE if ARM_CORE_MPU
 	help
 	  This option signifies the use of a CPU of the Cortex-M family.

@@ -42,14 +42,6 @@ config ARM_STACK_PROTECTION
 	  This option enables MPU stack guard to cause a system fatal error
 	  if the bounds of the current process stack are overflowed.
 
-config ARM_USERSPACE
-	bool
-	default n
-	help
-	  This option enables APIs to drop a thread's privileges, supporting
-	  user-level threads that are protected from each other and from
-	  crashing the kernel.
-
 menu "Architectue Floating Point Options"
 	depends on CPU_HAS_FPU

arch/arm/core/cortex_m/mpu/arm_core_mpu.c (+11 −3)

@@ -23,10 +23,18 @@
  */
 void configure_mpu_stack_guard(struct k_thread *thread)
 {
+	u32_t guard_size = MPU_GUARD_ALIGN_AND_SIZE;
+#if defined(CONFIG_USERSPACE)
+	u32_t guard_start = thread->arch.priv_stack_start ?
+			    (u32_t)thread->arch.priv_stack_start :
+			    (u32_t)thread->stack_obj;
+#else
+	u32_t guard_start = thread->stack_info.start;
+#endif
+
 	arm_core_mpu_disable();
-	arm_core_mpu_configure(THREAD_STACK_GUARD_REGION,
-			       thread->stack_info.start - MPU_GUARD_ALIGN_AND_SIZE,
-			       thread->stack_info.size);
+	arm_core_mpu_configure(THREAD_STACK_GUARD_REGION, guard_start,
+			       guard_size);
 	arm_core_mpu_enable();
 }
 #endif
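The net effect of this change, as a rough layout sketch (assuming the guard region is programmed no-access, as with the pre-existing stack guard; proportions not to scale):

    higher addresses
    +------------------------------+
    |     usable thread stack      |
    +------------------------------+ <- guard_start + MPU_GUARD_ALIGN_AND_SIZE
    |   MPU guard (access fault)   |
    +------------------------------+ <- guard_start: priv_stack_start when the
    lower addresses                     thread has a privileged stack, else the
                                        base of the stack object

The guard now starts at the base of the relevant stack (privileged stack, stack object, or stack_info.start) instead of just below stack_info.start, and the region is sized as the guard itself rather than the whole stack.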

arch/arm/core/fatal.c (+11)

@@ -87,3 +87,14 @@ void _do_kernel_oops(const NANO_ESF *esf)
 {
 	_NanoFatalErrorHandler(esf->r0, esf);
 }
+
+FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+{
+	u32_t *ssf_contents = ssf_ptr;
+	NANO_ESF oops_esf = { 0 };
+
+	oops_esf.pc = ssf_contents[3];
+
+	_do_kernel_oops(&oops_esf);
+	CODE_UNREACHABLE;
+}

arch/arm/core/offsets/offsets.c (+1)

@@ -30,6 +30,7 @@ GEN_OFFSET_SYM(_thread_arch_t, basepri);
 GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
 
 #ifdef CONFIG_USERSPACE
+GEN_OFFSET_SYM(_thread_arch_t, mode);
 GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
 #endif

arch/arm/core/swap.S (+77 −2)

@@ -23,6 +23,7 @@ GTEXT(__swap)
 GTEXT(__svc)
 GTEXT(__pendsv)
 GTEXT(_do_kernel_oops)
+GTEXT(_arm_do_syscall)
 GDATA(_k_neg_eagain)
 
 GDATA(_kernel)

@@ -176,12 +177,24 @@ _thread_irq_disabled:
 #endif /* CONFIG_MPU_STACK_GUARD */
 
 #ifdef CONFIG_USERSPACE
+    /* restore mode */
+    ldr r0, [r2, #_thread_offset_to_mode]
+    mrs r3, CONTROL
+    bic r3, #1
+    orr r3, r0
+    msr CONTROL, r3
+
     /* r2 contains k_thread */
     add r0, r2, #0
     push {r2, lr}
     blx configure_mpu_mem_domain
     pop {r2, lr}
-#endif /* CONFIG_USERSPACE */
+
+    add r0, r2, #0
+    push {r2, lr}
+    blx configure_mpu_user_context
+    pop {r2, lr}
+#endif
 
     /* load callee-saved + psp from thread */
     add r0, r2, #_thread_offset_to_callee_saved

@@ -268,7 +281,6 @@ _oops:
  */
 
 SECTION_FUNC(TEXT, __svc)
-
     tst lr, #0x4      /* did we come from thread mode ? */
     ite eq            /* if zero (equal), came from handler mode */
     mrseq r0, MSP     /* handler mode, stack frame is on MSP */

@@ -283,10 +295,26 @@ SECTION_FUNC(TEXT, __svc)
      * 0: context switch
      * 1: irq_offload (if configured)
      * 2: kernel panic or oops (software generated fatal exception)
+     * 3: System call
      * Planned implementation of system calls for memory protection will
      * expand this case.
      */
     ands r1, #0xff
+#if CONFIG_USERSPACE
+    mrs r2, CONTROL
+
+    cmp r1, #3
+    beq _do_syscall
+
+    /*
+     * check that we are privileged before invoking other SVCs
+     * oops if we are unprivileged
+     */
+    tst r2, #0x1
+    bne _oops
+
+    cmp r1, #0
+#endif
    beq _context_switch
 
    cmp r1, #2

@@ -324,6 +352,46 @@ _oops:
    blx _do_kernel_oops
    pop {pc}
 
+#if CONFIG_USERSPACE
+/*
+ * System call will set up a jump to the _arm_do_syscall function
+ * when the SVC returns via the bx lr.
+ *
+ * There is some trickery involved here because we have to preserve
+ * the original LR value so that we can return back to the caller of
+ * the SVC.
+ *
+ * On SVC exception, the stack looks like the following:
+ * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
+ * r5 - r6 - call id - saved LR
+ *
+ */
+_do_syscall:
+    ldr r1, [r0, #24]    /* grab address of PC from stack frame */
+    str r1, [r0, #44]    /* store address to use for LR after syscall */
+    ldr r1, =_arm_do_syscall
+    str r1, [r0, #24]    /* overwrite the PC to point to _arm_do_syscall */
+
+    /* validate syscall limit, only set priv mode if valid */
+    ldr ip, =_SYSCALL_LIMIT
+    ldr r1, [r0, #40]
+    cmp r1, ip
+    blt valid_syscall_id
+
+    /* bad syscall id. Set arg0 to bad id and set call_id to SYSCALL_BAD */
+    str r1, [r0, #0]
+    ldr r1, =_SYSCALL_BAD
+    str r1, [r0, #40]
+
+valid_syscall_id:
+    /* set mode to privileged, r2 still contains value from CONTROL */
+    bic r2, #1
+    msr CONTROL, r2
+
+    /* return from SVC to the modified PC - _arm_do_syscall */
+    bx lr
+#endif
+
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

@@ -381,6 +449,13 @@ SECTION_FUNC(TEXT, __swap)
    ldr r2, [r1, #_kernel_offset_to_current]
    str r0, [r2, #_thread_offset_to_basepri]
 
+#ifdef CONFIG_USERSPACE
+    mrs r0, CONTROL
+    movs r3, #1
+    ands r0, r3
+    str r0, [r2, #_thread_offset_to_mode]
+#endif
+
    /*
     * Set __swap()'s default return code to -EAGAIN. This eliminates the need
     * for the timeout code to set it itself.
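The offsets hard-coded in _do_syscall correspond to the frame described in the comment block: eight registers stacked by the exception hardware, followed by four words pushed by the system call stub. As a C view of that layout (struct and field names are illustrative; only the offsets come from the code above):

    struct svc_frame {        /* r0 points here on SVC entry */
        u32_t r0;             /* #0  - arg0; bad id is written back here */
        u32_t r1;             /* #4  */
        u32_t r2;             /* #8  */
        u32_t r3;             /* #12 */
        u32_t r12;            /* #16 */
        u32_t lr;             /* #20 */
        u32_t pc;             /* #24 - redirected to _arm_do_syscall */
        u32_t xpsr;           /* #28 */
        /* pushed by the syscall stub before the SVC */
        u32_t r5;             /* #32 */
        u32_t r6;             /* #36 */
        u32_t call_id;        /* #40 - checked against _SYSCALL_LIMIT */
        u32_t saved_lr;       /* #44 - receives the caller's PC */
    };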

arch/arm/core/thread.c (+50 −3)

@@ -19,6 +19,10 @@
 #include <string.h>
 #endif /* CONFIG_INIT_STACKS */
 
+#ifdef CONFIG_USERSPACE
+extern u8_t *_k_priv_stack_find(void *obj);
+#endif
+
 /**
  *
  * @brief Initialize a new thread from its stack space

@@ -58,16 +62,33 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 
 	_ASSERT_VALID_PRIO(priority, pEntry);
 
+#if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	char *stackEnd = pStackMem + stackSize - MPU_GUARD_ALIGN_AND_SIZE;
+#else
 	char *stackEnd = pStackMem + stackSize;
+#endif
 	struct __esf *pInitCtx;
-	_new_thread_init(thread, pStackMem, stackSize, priority, options);
 
-	/* carve the thread entry struct from the "base" of the stack */
+	_new_thread_init(thread, pStackMem, stackEnd - pStackMem, priority,
+			 options);
 
+	/* carve the thread entry struct from the "base" of the stack */
 	pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd -
 						     sizeof(struct __esf)));
 
-	pInitCtx->pc = ((u32_t)_thread_entry) & 0xfffffffe;
+#if CONFIG_USERSPACE
+	if (options & K_USER) {
+		pInitCtx->pc = (u32_t)_arch_user_mode_enter;
+	} else {
+		pInitCtx->pc = (u32_t)_thread_entry;
+	}
+#else
+	pInitCtx->pc = (u32_t)_thread_entry;
+#endif
+
+	/* force ARM mode by clearing LSB of address */
+	pInitCtx->pc &= 0xfffffffe;
+
 	pInitCtx->a1 = (u32_t)pEntry;
 	pInitCtx->a2 = (u32_t)parameter1;
 	pInitCtx->a3 = (u32_t)parameter2;

@@ -78,6 +99,12 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->callee_saved.psp = (u32_t)pInitCtx;
 	thread->arch.basepri = 0;
 
+#if CONFIG_USERSPACE
+	thread->arch.mode = 0;
+	thread->arch.priv_stack_start = 0;
+	thread->arch.priv_stack_size = 0;
+#endif
+
 	/* swap_return_value can contain garbage */
 
 	/*

@@ -94,3 +121,23 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread_monitor_init(thread);
 #endif
 }
+
+#ifdef CONFIG_USERSPACE
+
+FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
+					 void *p1, void *p2, void *p3)
+{
+
+	/* Set up privileged stack before entering user mode */
+	_current->arch.priv_stack_start =
+		(u32_t)_k_priv_stack_find(_current->stack_obj);
+	_current->arch.priv_stack_size =
+		(u32_t)CONFIG_PRIVILEGED_STACK_SIZE;
+
+	_arm_userspace_enter(user_entry, p1, p2, p3,
+			     (u32_t)_current->stack_info.start,
+			     _current->stack_info.size);
+	CODE_UNREACHABLE;
+}
+
+#endif
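_arch_user_mode_enter() is also the backend for the second route into user mode: a thread that starts privileged and later drops its privileges. A minimal sketch using the architecture-independent wrapper (k_thread_user_mode_enter() lives in the core kernel, outside this diff):

    #include <zephyr.h>

    static void user_main(void *p1, void *p2, void *p3)
    {
        /* runs with CONTROL.nPRIV set; see the mode handling in swap.S */
    }

    static void thread_entry(void *p1, void *p2, void *p3)
    {
        /* any privileged setup happens here; the transition is one-way */
        k_thread_user_mode_enter(user_main, NULL, NULL, NULL);
        /* never reached: _arm_userspace_enter() does not return */
    }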
