Skip to content

Commit 074ce88

Browse files
Charles E. Youse and andrewboie
Charles E. Youse authored and andrewboie committed
arch/x86: (Intel64) migrate from __swap to z_arch_switch()
The latter primitive is required for SMP. Signed-off-by: Charles E. Youse <[email protected]>
1 parent 32fc239 commit 074ce88

File tree

4 files changed

+43
-46
lines changed

4 files changed

+43
-46
lines changed

arch/x86/Kconfig

+2
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,8 @@ config X86_LONGMODE
4545
prompt "Run in long (64-bit) mode"
4646
default n
4747
select 64BIT
48+
select USE_SWITCH_SUPPORTED
49+
select USE_SWITCH
4850

4951
config MAX_IRQ_LINES
5052
int "Number of IRQ lines"

arch/x86/core/intel64/locore.S

+39-41
Original file line numberDiff line numberDiff line change
@@ -174,25 +174,24 @@ mxcsr: .long X86_MXCSR_SANE
174174
#endif
175175

176176
/*
177-
* XXX: describe __swap, __resume, stacks
177+
* void z_arch_switch(void *switch_to, void **switched_from);
178+
*
179+
* Note that switch_handle for us is simply a pointer to the containing
180+
* 'struct k_thread', thus:
181+
*
182+
* RDI = (struct k_thread *) switch_to
183+
* RSI = (struct k_thread **) switched_from
178184
*/
179185

180-
.globl _k_neg_eagain /* from errno.c: int _k_neg_eagain = -EAGAIN; */
181-
182-
.globl __swap
183-
__swap:
184-
movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
185-
movq ___cpu_t_current_OFFSET(%rsi), %rsi
186+
.globl z_arch_switch
187+
z_arch_switch:
188+
movq (%rsi), %rsi
186189

187190
andb $~X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
188191

189-
movl _k_neg_eagain, %eax
190-
movl %eax, _thread_offset_to_rax(%rsi)
191192
popq %rax
192193
movq %rax, _thread_offset_to_rip(%rsi)
193194
movq %rsp, _thread_offset_to_rsp(%rsi)
194-
movl %edi, %edi /* N.B.: zero extend */
195-
movq %rdi, _thread_offset_to_rflags(%rsi)
196195
movq %rbx, _thread_offset_to_rbx(%rsi)
197196
movq %rbp, _thread_offset_to_rbp(%rsi)
198197
movq %r12, _thread_offset_to_r12(%rsi)
@@ -207,40 +206,36 @@ __swap:
207206
/*
208207
* Entry:
209208
* RSP = top of _interrupt_stack
209+
* RDI = (struct k_thread *) thread to resume
210210
*/
211211

212212
__resume:
213-
movq $_kernel, %rdi
214-
movq _kernel_offset_to_ready_q_cache(%rdi), %rsi
215-
movq %gs:__x86_tss64_t_cpu_OFFSET, %rdi
216-
movq %rsi, ___cpu_t_current_OFFSET(%rdi)
217-
218213
pushq $X86_KERNEL_DS_64 /* SS */
219-
pushq _thread_offset_to_rsp(%rsi) /* RSP */
220-
pushq _thread_offset_to_rflags(%rsi) /* RFLAGS */
214+
pushq _thread_offset_to_rsp(%rdi) /* RSP */
215+
pushq _thread_offset_to_rflags(%rdi) /* RFLAGS */
221216
pushq $X86_KERNEL_CS_64 /* CS */
222-
pushq _thread_offset_to_rip(%rsi) /* RIP */
217+
pushq _thread_offset_to_rip(%rdi) /* RIP */
223218

224-
movq _thread_offset_to_rbx(%rsi), %rbx
225-
movq _thread_offset_to_rbp(%rsi), %rbp
226-
movq _thread_offset_to_r12(%rsi), %r12
227-
movq _thread_offset_to_r13(%rsi), %r13
228-
movq _thread_offset_to_r14(%rsi), %r14
229-
movq _thread_offset_to_r15(%rsi), %r15
230-
movq _thread_offset_to_rax(%rsi), %rax
219+
movq _thread_offset_to_rbx(%rdi), %rbx
220+
movq _thread_offset_to_rbp(%rdi), %rbp
221+
movq _thread_offset_to_r12(%rdi), %r12
222+
movq _thread_offset_to_r13(%rdi), %r13
223+
movq _thread_offset_to_r14(%rdi), %r14
224+
movq _thread_offset_to_r15(%rdi), %r15
225+
movq _thread_offset_to_rax(%rdi), %rax
231226

232-
testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
227+
testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rdi)
233228
jz 1f
234229

235-
fxrstor _thread_offset_to_sse(%rsi)
236-
movq _thread_offset_to_rcx(%rsi), %rcx
237-
movq _thread_offset_to_rdx(%rsi), %rdx
238-
movq _thread_offset_to_rdi(%rsi), %rdi
239-
movq _thread_offset_to_r8(%rsi), %r8
240-
movq _thread_offset_to_r9(%rsi), %r9
241-
movq _thread_offset_to_r10(%rsi), %r10
242-
movq _thread_offset_to_r11(%rsi), %r11
243-
movq _thread_offset_to_rsi(%rsi), %rsi /* do last :-) */
230+
fxrstor _thread_offset_to_sse(%rdi)
231+
movq _thread_offset_to_rcx(%rdi), %rcx
232+
movq _thread_offset_to_rdx(%rdi), %rdx
233+
movq _thread_offset_to_rsi(%rdi), %rsi
234+
movq _thread_offset_to_r8(%rdi), %r8
235+
movq _thread_offset_to_r9(%rdi), %r9
236+
movq _thread_offset_to_r10(%rdi), %r10
237+
movq _thread_offset_to_r11(%rdi), %r11
238+
movq _thread_offset_to_rdi(%rdi), %rdi /* do last :-) */
244239

245240
1: iretq
246241

@@ -506,15 +501,18 @@ irq_dispatch:
506501
movl %eax, (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
507502
#endif
508503

509-
#ifdef CONFIG_STACK_SENTINEL
510-
call z_check_stack_sentinel
511-
#endif
512504
movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
513505
cli
514506
addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
515507
decl ___cpu_t_nested_OFFSET(%rsi)
516-
/* if not nested, exit via __resume (might change threads) */
517-
jz __resume
508+
jnz irq_exit_nested
509+
510+
/* not nested; ask the scheduler who's up next and resume it */
511+
512+
movq ___cpu_t_current_OFFSET(%rsi), %rdi
513+
call z_get_next_switch_handle
514+
movq %rax, %rdi
515+
jmp __resume
518516

519517
irq_exit_nested:
520518
fxrstor (%rsp)

arch/x86/core/intel64/thread.c

+1
Original file line numberDiff line numberDiff line change
@@ -32,4 +32,5 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
3232
x86_sse_init(thread);
3333

3434
thread->arch.flags = X86_THREAD_FLAG_ALL;
35+
thread->switch_handle = thread;
3536
}

arch/x86/include/intel64/kernel_arch_func.h

+1-5
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,7 @@
88

99
#ifndef _ASMLANGUAGE
1010

11-
static ALWAYS_INLINE void
12-
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
13-
{
14-
thread->callee_saved.rax = value;
15-
}
11+
extern void z_arch_switch(void *switch_to, void **switched_from);
1612

1713
static inline void kernel_arch_init(void)
1814
{

0 commit comments

Comments (0)