arch/intel64: add SMP support

Modified various intel64 architecture files (context switch, interrupt
handling, and signal delivery) to support SMP.

Signed-off-by: p-szafonimateusz <p-szafonimateusz@xiaomi.com>
This commit is contained in:
p-szafonimateusz 2024-04-03 11:53:46 +02:00 committed by Xiang Xiao
parent 8d4681a190
commit 8220b169f3
6 changed files with 250 additions and 20 deletions

View File

@ -45,14 +45,6 @@
# include <arch/intel64/irq.h>
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Types
****************************************************************************/
/****************************************************************************
* Public Data
****************************************************************************/
@ -103,7 +95,22 @@ extern "C"
*
****************************************************************************/
#define up_cpu_index() (0)
#ifdef CONFIG_SMP
/* Return the index of the currently executing CPU.
 *
 * The GS segment base is presumably set up (per CPU) to point at that
 * CPU's struct intel64_cpu_s, so a GS-relative load at the offset of the
 * 'id' member reads this CPU's index without any locking or function
 * call -- TODO confirm GS base setup against the CPU init code.
 */
static inline_function int up_cpu_index(void)
{
unsigned long cpu;
/* Equivalent to: cpu = *(unsigned long *)(GS_base + offsetof(id)).
 * "%c1" emits the bare constant offset; "=rm" lets the compiler pick a
 * register or memory destination for the result.
 */
asm volatile(
"\tmovq %%gs:(%c1), %0\n"
: "=rm" (cpu)
: "i" (offsetof(struct intel64_cpu_s, id))
:);
return cpu;
}
#else
/* Single-CPU build: the CPU index is always zero */
# define up_cpu_index() (0)
#endif
/****************************************************************************
* Inline functions

View File

@ -92,7 +92,17 @@ void up_exit(int status)
addrenv_switch(tcb);
#endif
/* Restore the cpu lock */
restore_critical_section();
/* Then switch contexts */
x86_64_fullcontextrestore(tcb->xcp.regs);
/* x86_64_fullcontextrestore() should not return but could if the software
* interrupts are disabled.
*/
PANIC();
}

View File

@ -57,6 +57,8 @@
void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
{
int cpu;
/* Update scheduler parameters */
nxsched_suspend_scheduler(rtcb);
@ -107,6 +109,17 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
nxsched_resume_scheduler(tcb);
/* Restore the cpu lock */
restore_critical_section();
/* Record the new "running" task. g_running_tasks[] is only used by
* assertion logic for reporting crashes.
*/
cpu = this_cpu();
g_running_tasks[cpu] = current_task(cpu);
/* Then switch contexts */
x86_64_fullcontextrestore(tcb->xcp.regs);

View File

@ -100,6 +100,10 @@ static uint64_t *common_handler(int irq, uint64_t *regs)
*/
g_running_tasks[this_cpu()] = this_task();
/* Restore the cpu lock */
restore_critical_section();
}
/* If a context switch occurred while processing the interrupt then

View File

@ -70,6 +70,7 @@
*
****************************************************************************/
#ifndef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver);
@ -109,7 +110,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
* Hmmm... there looks like a latent bug here: The following logic
* would fail in the strange case where we are in an interrupt
* handler, the thread is signalling itself, but a context switch
* to another task has occurred so that g_current_regs does not
* to another task has occurred so that CURRENT_REGS does not
* refer to the thread of this_task()!
*/
@ -121,7 +122,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
*/
tcb->xcp.saved_rip = up_current_regs()[REG_RIP];
tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
tcb->xcp.saved_rsp = up_current_regs()[REG_RSP];
tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS];
/* Then set up to vector to the trampoline with interrupts
@ -165,3 +166,156 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
}
}
}
#else /* !CONFIG_SMP */
/* SMP version of up_schedule_sigaction():
 *
 * Arrange for the signal delivery trampoline (x86_64_sigdeliver) to run
 * for 'tcb'.  Three cases are handled: the task is signaling itself from
 * non-interrupt context (deliver immediately); the task is running on
 * this or another CPU (redirect its saved return state to the
 * trampoline, pausing the other CPU first when needed); or the task is
 * not running (patch the context saved in the TCB).
 *
 * The original RIP/RSP/RFLAGS are stashed in tcb->xcp.saved_* so the
 * trampoline can restore them after the handler returns.
 */

void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
int cpu;
int me;
sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);
/* Refuse to handle nested signal actions */
if (tcb->xcp.sigdeliver == NULL)
{
tcb->xcp.sigdeliver = sigdeliver;
/* First, handle some special cases when the signal is being delivered
 * to a task that is currently executing on any CPU.
 */
sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(),
up_current_regs());
if (tcb->task_state == TSTATE_TASK_RUNNING)
{
me = this_cpu();
cpu = tcb->cpu;
/* CASE 1: We are not in an interrupt handler and a task is
 * signaling itself for some reason.
 */
if (cpu == me && !up_current_regs())
{
/* In this case just deliver the signal now.
 * REVISIT: Signal handler will run in a critical section!
 */
sigdeliver(tcb);
tcb->xcp.sigdeliver = NULL;
}
/* CASE 2: The task that needs to receive the signal is running.
 * This could happen if the task is running on another CPU OR if
 * we are in an interrupt handler and the task is running on this
 * CPU. In the former case, we will have to PAUSE the other CPU
 * first. But in either case, we will have to modify the return
 * state as well as the state in the TCB.
 */
else
{
/* If we are signaling a task running on the other CPU, we
 * have to PAUSE the other CPU.
 */
if (cpu != me)
{
/* Pause the CPU */
up_cpu_pause(cpu);
/* Wait while the pause request is pending */
while (up_cpu_pausereq(cpu))
{
}
/* Now tcb on the other CPU can be accessed safely */
/* Copy tcb->xcp.regs to tcb->xcp.saved_*. These will be
 * restored by the signal trampoline after the signal has
 * been delivered.
 */
tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP];
tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS];
/* Then set up to vector to the trampoline with interrupts
 * disabled
 */
tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver;
tcb->xcp.regs[REG_RFLAGS] = 0;
}
else
{
/* tcb is running on the same CPU */
/* Save the return RIP, RSP and RFLAGS from the interrupt
 * return context.  These will be restored by the signal
 * trampoline after the signals have been delivered.
 */
tcb->xcp.saved_rip = up_current_regs()[REG_RIP];
tcb->xcp.saved_rsp = up_current_regs()[REG_RSP];
tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS];
/* Then set up to vector to the trampoline with interrupts
 * disabled
 */
up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver;
up_current_regs()[REG_RFLAGS] = 0;
/* And make sure that the saved context in the TCB
 * is the same as the interrupt return context.
 */
x86_64_savestate(tcb->xcp.regs);
}
/* NOTE: If the task runs on another CPU(cpu), adjusting
 * global IRQ controls will be done in the pause handler
 * on the CPU(cpu) by taking a critical section.
 * If the task is scheduled on this CPU(me), do nothing
 * because this CPU already took a critical section
 */
/* RESUME the other CPU if it was PAUSED */
if (cpu != me)
{
up_cpu_resume(cpu);
}
}
}
/* Otherwise, we are (1) signaling a task is not running from an
 * interrupt handler or (2) we are not in an interrupt handler and the
 * running task is signaling some other non-running task.
 */
else
{
/* Save the return RIP, RSP and RFLAGS from the context saved in
 * the TCB.  These will be restored by the signal trampoline after
 * the signals have been delivered.
 */
tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP];
tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS];
/* Then set up to vector to the trampoline with interrupts
 * disabled
 */
tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver;
tcb->xcp.regs[REG_RFLAGS] = 0;
}
}
}
#endif /* CONFIG_SMP */

View File

@ -54,10 +54,24 @@
void x86_64_sigdeliver(void)
{
struct tcb_s *rtcb = this_task();
sig_deliver_t sighandler;
uint64_t regs_area[XCPTCONTEXT_REGS + 2];
uint64_t *regs;
#ifdef CONFIG_SMP
/* In the SMP case, we must terminate the critical section while the signal
* handler executes, but we also need to restore the irqcount when the
* we resume the main thread of the task.
*/
int16_t saved_irqcount;
#endif
board_autoled_on(LED_SIGNAL);
sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
DEBUGASSERT(rtcb->xcp.sigdeliver != NULL);
/* Align regs to 16 byte boundary for SSE instructions. */
regs = (uint64_t *)(((uint64_t)(regs_area) + 15) & (~(uint64_t)15));
@ -70,17 +84,24 @@ void x86_64_sigdeliver(void)
x86_64_copystate(regs, rtcb->xcp.regs);
/* Grab a copy of the signal handler function pointer before there is any
 * possibility of being switched out.
#ifdef CONFIG_SMP
/* In the SMP case, up_schedule_sigaction(0) will have incremented
* 'irqcount' in order to force us into a critical section. Save the
* pre-incremented irqcount.
*/
ASSERT(rtcb->xcp.sigdeliver != NULL);
sighandler = rtcb->xcp.sigdeliver;
saved_irqcount = rtcb->irqcount;
DEBUGASSERT(saved_irqcount >= 0);
board_autoled_on(LED_SIGNAL);
/* Now we need call leave_critical_section() repeatedly to get the irqcount
* to zero, freeing all global spinlocks that enforce the critical section.
*/
sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
while (rtcb->irqcount > 0)
{
leave_critical_section((uint8_t)regs[REG_RFLAGS]);
}
#endif /* CONFIG_SMP */
#ifndef CONFIG_SUPPRESS_INTERRUPTS
/* Then make sure that interrupts are enabled. Signal handlers must always
@ -92,7 +113,7 @@ void x86_64_sigdeliver(void)
/* Deliver the signals */
sighandler(rtcb);
((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
/* Output any debug messages BEFORE restoring errno (because they may
* alter errno), then disable interrupts again and restore the original
@ -100,7 +121,12 @@ void x86_64_sigdeliver(void)
*/
sinfo("Resuming\n");
#ifdef CONFIG_SMP
enter_critical_section();
#else
up_irq_save();
#endif
/* Modify the saved return state with the actual saved values in the
* TCB. This depends on the fact that nested signal handling is
@ -117,6 +143,22 @@ void x86_64_sigdeliver(void)
regs[REG_RFLAGS] = rtcb->xcp.saved_rflags;
rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */
#ifdef CONFIG_SMP
/* Restore the saved 'irqcount' and recover the critical section
* spinlocks.
*
* REVISIT: irqcount should be one from the above call to
* enter_critical_section(). Could the saved_irqcount be zero? That
* would be a problem.
*/
DEBUGASSERT(rtcb->irqcount == 1);
while (rtcb->irqcount < saved_irqcount)
{
enter_critical_section();
}
#endif
/* Then restore the correct state for this thread of execution. */
board_autoled_off(LED_SIGNAL);