@@ -174,25 +174,24 @@ mxcsr: .long X86_MXCSR_SANE
#endif

/*
- * XXX: describe __swap, __resume, stacks
+ * void z_arch_switch(void *switch_to, void **switched_from);
+ *
+ * Note that switch_handle for us is simply a pointer to the containing
+ * 'struct k_thread', thus:
+ *
+ * RDI = (struct k_thread *) switch_to
+ * RSI = (struct k_thread **) switched_from
 */

-.globl _k_neg_eagain	/* from errno.c: int _k_neg_eagain = -EAGAIN; */
-
-.globl __swap
-__swap:
-	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
-	movq ___cpu_t_current_OFFSET(%rsi), %rsi
+.globl z_arch_switch
+z_arch_switch:
+	movq (%rsi), %rsi

	andb $~X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)

-	movl _k_neg_eagain, %eax
-	movl %eax, _thread_offset_to_rax(%rsi)
	popq %rax
	movq %rax, _thread_offset_to_rip(%rsi)
	movq %rsp, _thread_offset_to_rsp(%rsi)
-	movl %edi, %edi		/* N.B.: zero extend */
-	movq %rdi, _thread_offset_to_rflags(%rsi)
	movq %rbx, _thread_offset_to_rbx(%rsi)
	movq %rbp, _thread_offset_to_rbp(%rsi)
	movq %r12, _thread_offset_to_r12(%rsi)
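The comment in the hunk above fully specifies the register contract, so here is a rough C-level sketch of how a switch-based scheduler would invoke the new entry point. This is illustrative only: the z_arch_switch() prototype comes from the comment, while the example_switch_out() wrapper, its argument names, and the assumption that 'struct k_thread' exposes a switch_handle member (as on other switch-based Zephyr ports) are assumptions, not part of this patch.

    void z_arch_switch(void *switch_to, void **switched_from);

    /* Hypothetical caller, not part of this patch. */
    static inline void example_switch_out(struct k_thread *old_thread,
                                          struct k_thread *new_thread)
    {
            /* On this arch a switch handle is just the thread pointer, so
             * the outgoing slot must already hold old_thread: the very
             * first instruction, "movq (%rsi), %rsi", dereferences it to
             * find the thread whose context gets saved.
             */
            z_arch_switch(new_thread, &old_thread->switch_handle);
    }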
@@ -207,40 +206,36 @@ __swap:
/*
 * Entry:
 * RSP = top of _interrupt_stack
+ * RDI = (struct k_thread *) thread to resume
 */

__resume:
-	movq $_kernel, %rdi
-	movq _kernel_offset_to_ready_q_cache(%rdi), %rsi
-	movq %gs:__x86_tss64_t_cpu_OFFSET, %rdi
-	movq %rsi, ___cpu_t_current_OFFSET(%rdi)
-
	pushq $X86_KERNEL_DS_64			/* SS */
-	pushq _thread_offset_to_rsp(%rsi)	/* RSP */
-	pushq _thread_offset_to_rflags(%rsi)	/* RFLAGS */
+	pushq _thread_offset_to_rsp(%rdi)	/* RSP */
+	pushq _thread_offset_to_rflags(%rdi)	/* RFLAGS */
	pushq $X86_KERNEL_CS_64			/* CS */
-	pushq _thread_offset_to_rip(%rsi)	/* RIP */
+	pushq _thread_offset_to_rip(%rdi)	/* RIP */

-	movq _thread_offset_to_rbx(%rsi), %rbx
-	movq _thread_offset_to_rbp(%rsi), %rbp
-	movq _thread_offset_to_r12(%rsi), %r12
-	movq _thread_offset_to_r13(%rsi), %r13
-	movq _thread_offset_to_r14(%rsi), %r14
-	movq _thread_offset_to_r15(%rsi), %r15
-	movq _thread_offset_to_rax(%rsi), %rax
+	movq _thread_offset_to_rbx(%rdi), %rbx
+	movq _thread_offset_to_rbp(%rdi), %rbp
+	movq _thread_offset_to_r12(%rdi), %r12
+	movq _thread_offset_to_r13(%rdi), %r13
+	movq _thread_offset_to_r14(%rdi), %r14
+	movq _thread_offset_to_r15(%rdi), %r15
+	movq _thread_offset_to_rax(%rdi), %rax

-	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
+	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rdi)
	jz 1f

-	fxrstor _thread_offset_to_sse(%rsi)
-	movq _thread_offset_to_rcx(%rsi), %rcx
-	movq _thread_offset_to_rdx(%rsi), %rdx
-	movq _thread_offset_to_rdi(%rsi), %rdi
-	movq _thread_offset_to_r8(%rsi), %r8
-	movq _thread_offset_to_r9(%rsi), %r9
-	movq _thread_offset_to_r10(%rsi), %r10
-	movq _thread_offset_to_r11(%rsi), %r11
-	movq _thread_offset_to_rsi(%rsi), %rsi	/* do last :-) */
+	fxrstor _thread_offset_to_sse(%rdi)
+	movq _thread_offset_to_rcx(%rdi), %rcx
+	movq _thread_offset_to_rdx(%rdi), %rdx
+	movq _thread_offset_to_rsi(%rdi), %rsi
+	movq _thread_offset_to_r8(%rdi), %r8
+	movq _thread_offset_to_r9(%rdi), %r9
+	movq _thread_offset_to_r10(%rdi), %r10
+	movq _thread_offset_to_r11(%rdi), %r11
+	movq _thread_offset_to_rdi(%rdi), %rdi	/* do last :-) */

1:	iretq

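__resume now indexes the incoming thread through RDI and hand-builds the five-quadword frame that iretq consumes. For reference, this is the layout being pushed, shown as a plain C struct; the struct name is made up, only the field order is architectural:

    #include <stdint.h>

    /* What iretq pops, lowest address (top of stack) first; __resume
     * pushes these in reverse order, SS first and RIP last.
     */
    struct iret_frame {
            uint64_t rip;    /* pushq _thread_offset_to_rip(%rdi)    */
            uint64_t cs;     /* pushq $X86_KERNEL_CS_64              */
            uint64_t rflags; /* pushq _thread_offset_to_rflags(%rdi) */
            uint64_t rsp;    /* pushq _thread_offset_to_rsp(%rdi)    */
            uint64_t ss;     /* pushq $X86_KERNEL_DS_64              */
    };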
@@ -506,15 +501,18 @@ irq_dispatch:
	movl %eax, (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
#endif

-#ifdef CONFIG_STACK_SENTINEL
-	call z_check_stack_sentinel
-#endif
	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
	cli
	addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
	decl ___cpu_t_nested_OFFSET(%rsi)
-	/* if not nested, exit via __resume (might change threads) */
-	jz __resume
+	jnz irq_exit_nested
+
+	/* not nested; ask the scheduler who's up next and resume it */
+
+	movq ___cpu_t_current_OFFSET(%rsi), %rdi
+	call z_get_next_switch_handle
+	movq %rax, %rdi
+	jmp __resume

irq_exit_nested:
	fxrstor (%rsp)
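In C terms, the new non-nested exit path in the hunk above amounts to the following. This is a sketch of the control flow only: the helper name irq_exit_not_nested_sketch() is invented, __resume is really a jump target rather than a callable function, and z_get_next_switch_handle()'s void pointers are written as thread pointers since that is what switch handles are on this architecture.

    /* Pseudo-C for the tail of irq_dispatch once the nesting count
     * reaches zero: let the scheduler pick the next thread, which may
     * well be the one that was interrupted, then restore it.
     */
    static void irq_exit_not_nested_sketch(void)
    {
            struct k_thread *interrupted = _current_cpu->current;
            struct k_thread *next = z_get_next_switch_handle(interrupted);

            __resume(next); /* jmp __resume with RDI = next; never returns */
    }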