x86: fix trampoline stack clobber
We need to lock interrupts before setting the thread's stack pointer to the trampoline stack. Otherwise, we could unexpectedly take an interrupt on this stack instead of the thread stack as intended.

The specific problem happens at the end of the interrupt, when we switch back to the thread stack and call swap. Doing this on a per-cpu trampoline stack instead of the thread stack causes data corruption.

Fixes: #24869

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
7ede80be46
commit
dac61f450d
@@ -110,14 +110,14 @@ SECTION_FUNC(TEXT, z_x86_trampoline_to_user_always)
 	/* Stash EDI, need a free register */
 	pushl	%edi
 
-	/* Store old stack pointer and switch to trampoline stack */
-	movl	%esp, %edi
-	movl	$z_trampoline_stack_end, %esp
-
-	/* Lock IRQs until we get out, we don't want anyone else using the
-	 * trampoline stack
+	/* Store old stack pointer and switch to trampoline stack.
+	 * Lock IRQs before changing stack pointer to the trampoline stack,
+	 * we don't want any interrupts also using the trampoline stack
+	 * during this time.
 	 */
+	movl	%esp, %edi
 	cli
+	movl	$z_trampoline_stack_end, %esp
 
 	/* Copy context */
 	pushl	20(%edi)	/* SS */
|
Loading…
Reference in New Issue