SMP: Update some comments; trivial improvement by inlining static function.

This commit is contained in:
Gregory Nutt 2016-11-22 16:48:57 -06:00
parent d95b8f64f5
commit f90525a5d1
4 changed files with 6 additions and 8 deletions

View File

@ -59,7 +59,7 @@
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_wait[m].
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt; it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.

View File

@ -204,7 +204,7 @@ extern volatile int g_uart_data_available;
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_wait[m].
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt; it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.

View File

@ -83,7 +83,7 @@ static pthread_t g_sim_cputhread[CONFIG_SMP_NCPUS];
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_wait[m].
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt; it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.

View File

@ -104,10 +104,8 @@ static uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
****************************************************************************/
#ifdef CONFIG_SMP
static void irq_waitlock(void)
static inline void irq_waitlock(int cpu)
{
int cpu = this_cpu();
/* Duplicate the spin_lock() logic from spinlock.c, but adding the check
* for the deadlock condition.
*/
@ -262,10 +260,10 @@ irqstate_t enter_critical_section(void)
if ((g_cpu_irqset & (1 << cpu)) == 0)
{
/* Wait until we can get the spinlock (meaning that we are
* no longer in the critical section).
* no longer blocked by the critical section).
*/
irq_waitlock();
irq_waitlock(cpu);
}
/* In any event, the nesting count is now one */