diff --git a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c
index 4abfac25b1..55ffbf9385 100644
--- a/arch/arm/src/armv7-a/arm_cpupause.c
+++ b/arch/arm/src/armv7-a/arm_cpupause.c
@@ -59,7 +59,7 @@
  * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
  *
  * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
- *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_wait[m].
+ *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
  * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
  *    (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
  *    blocks CPUm in the interrupt handler.
diff --git a/arch/sim/src/up_internal.h b/arch/sim/src/up_internal.h
index ac0a2269ab..9facb343af 100644
--- a/arch/sim/src/up_internal.h
+++ b/arch/sim/src/up_internal.h
@@ -204,7 +204,7 @@ extern volatile int g_uart_data_available;
  * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
  *
  * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
- *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_wait[m].
+ *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
  * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
  *    (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
  *    blocks CPUm in the interrupt handler.
diff --git a/arch/sim/src/up_simsmp.c b/arch/sim/src/up_simsmp.c
index 5b24f6015c..8a1469976d 100644
--- a/arch/sim/src/up_simsmp.c
+++ b/arch/sim/src/up_simsmp.c
@@ -83,7 +83,7 @@ static pthread_t g_sim_cputhread[CONFIG_SMP_NCPUS];
  * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
  *
  * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
- *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_wait[m].
+ *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
  * 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
  *    (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
  *    blocks CPUm in the interrupt handler.
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index cabe791587..96fb3abb1b 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -104,10 +104,8 @@ static uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
 ****************************************************************************/
 
 #ifdef CONFIG_SMP
-static void irq_waitlock(void)
+static inline void irq_waitlock(int cpu)
 {
-  int cpu = this_cpu();
-
   /* Duplicate the spin_lock() logic from spinlock.c, but adding the check
    * for the deadlock condition.
    */
@@ -262,10 +260,10 @@ irqstate_t enter_critical_section(void)
       if ((g_cpu_irqset & (1 << cpu)) == 0)
         {
           /* Wait until we can get the spinlock (meaning that we are
-           * no longer in the critical section).
+           * no longer blocked by the critical section).
            */
 
-          irq_waitlock();
+          irq_waitlock(cpu);
         }
 
       /* In any event, the nesting count is now one */
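
For readers unfamiliar with the pause handshake these comments describe, the stand-alone sketch below illustrates steps 1 and 2 of the protocol using POSIX threads and C11 atomics. It is not the NuttX code: tas_lock()/tas_unlock() are simplified stand-ins for the kernel spin_lock()/spin_unlock(), g_pause_req[] stands in for the inter-processor pause interrupt, and cpu_pause(), cpu_resume() and pause_handler() are hypothetical counterparts of up_cpu_pause(), up_cpu_resume() and the architecture's pause interrupt handler.

/* Hypothetical sketch of the two-spinlock pause handshake; not NuttX code. */

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 2

/* Minimal test-and-set lock standing in for the kernel spinlock.  No owner
 * is recorded, so (as in the protocol) one CPU may lock it and a different
 * CPU may unlock it.
 */

typedef atomic_flag tas_lock_t;

static void tas_lock(tas_lock_t *l)
{
  while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
    {
      /* Busy-wait until the flag is cleared by some CPU */
    }
}

static void tas_unlock(tas_lock_t *l)
{
  atomic_flag_clear_explicit(l, memory_order_release);
}

static tas_lock_t g_cpu_wait[NCPUS];
static tas_lock_t g_cpu_paused[NCPUS];
static atomic_int g_pause_req[NCPUS];  /* Stands in for the pause interrupt */

/* Step 2: what "CPUm" runs when it notices the pause request */

static void pause_handler(int me)
{
  tas_unlock(&g_cpu_paused[me]);       /* (1) Unblock the pausing CPU */
  tas_lock(&g_cpu_wait[me]);           /* (2) Park here until resumed */
  tas_unlock(&g_cpu_wait[me]);
  printf("CPU%d resumed\n", me);
}

/* Step 1: "CPUn" pauses CPUm */

static void cpu_pause(int m)
{
  tas_lock(&g_cpu_wait[m]);            /* Keeps CPUm parked once it stops */
  tas_lock(&g_cpu_paused[m]);          /* CPUm clears this when parked */
  atomic_store(&g_pause_req[m], 1);    /* "Deliver" the pause interrupt */

  tas_lock(&g_cpu_paused[m]);          /* Spin until CPUm has parked */
  tas_unlock(&g_cpu_paused[m]);
  printf("CPU%d is paused\n", m);
}

/* Resume CPUm (the step not shown in the hunks above) */

static void cpu_resume(int m)
{
  tas_unlock(&g_cpu_wait[m]);
}

static void *cpu1_main(void *arg)
{
  (void)arg;
  while (!atomic_exchange(&g_pause_req[1], 0))
    {
      /* "CPU1" runs until the pause request arrives */
    }

  pause_handler(1);
  return NULL;
}

int main(void)
{
  pthread_t t;

  for (int i = 0; i < NCPUS; i++)
    {
      atomic_flag_clear(&g_cpu_wait[i]);
      atomic_flag_clear(&g_cpu_paused[i]);
    }

  pthread_create(&t, NULL, cpu1_main, NULL);

  cpu_pause(1);
  usleep(1000);                        /* CPU1 is known to be stopped here */
  cpu_resume(1);

  pthread_join(t, NULL);
  return 0;
}

Under this model, the pausing CPU must spin on g_cpu_paused[m], which CPUm releases once parked, not on g_cpu_wait[m], which the pausing CPU itself holds until resume time; that is the comment error corrected by this patch.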