From 3b2aea204c950035e1189f471b91ae817b239cc7 Mon Sep 17 00:00:00 2001
From: Abdelatif Guettouche
Date: Thu, 16 Sep 2021 16:52:46 +0200
Subject: [PATCH] sched/irq/irq_csection.c: Fix typos and correct some
 comments.

Signed-off-by: Abdelatif Guettouche
---
 include/nuttx/irq.h      |  2 +-
 sched/irq/irq_csection.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 60ab8bd1bd..ba43f42385 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -162,7 +162,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
  *   instrumentation):
  *
  *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
- *   specific counter is increment to indicate that the thread has IRQs
+ *   specific counter is incremented to indicate that the thread has IRQs
  *   disabled and to support nested calls to enter_critical_section().
  *
  *   NOTE: Most architectures do not support disabling all CPUs from one
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 4c7622107d..f922fd1681 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -66,7 +66,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  * Name: irq_waitlock
  *
  * Description:
- *   Spin to get g_irq_waitlock, handling a known deadlock condition:
+ *   Spin to get g_cpu_irqlock, handling a known deadlock condition:
  *
  *   A deadlock may occur if enter_critical_section is called from an
  *   interrupt handler.  Suppose:
@@ -92,7 +92,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  *   section.  Since it is spinning with interrupts disabled, CPUm cannot
  *   process the pending pause interrupt, causing the deadlock.
  *
- *   This function detects this deadlock condition while spinning with \
+ *   This function detects this deadlock condition while spinning with
  *   interrupts disabled.
  *
  * Input Parameters:
@@ -131,7 +131,7 @@ static bool irq_waitlock(int cpu)
            */
 
 #ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
-          /* Notify that we are waiting for a spinlock */
+          /* Notify that we have aborted the wait for the spinlock */
 
           sched_note_spinabort(tcb, &g_cpu_irqlock);
 #endif
@@ -161,7 +161,7 @@ static bool irq_waitlock(int cpu)
  *
  * Description:
  *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
- *   specific counter is increment to indicate that the thread has IRQs
+ *   specific counter is incremented to indicate that the thread has IRQs
  *   disabled and to support nested calls to enter_critical_section().
  *
  ****************************************************************************/
@@ -253,7 +253,7 @@ try_again:
 
       else
         {
-          /* Make sure that the g_cpu_irqlock() was not already set
+          /* Make sure that the g_cpu_irqset was not already set
            * by previous logic on this CPU that was executed by the
            * interrupt handler.  We know that the bit in g_cpu_irqset
            * for this CPU was zero on entry into the interrupt handler,
@@ -270,8 +270,8 @@ try_again_in_irq:
               if (!irq_waitlock(cpu))
                 {
                   /* We are in a deadlock condition due to a pending
-                   * pause request interrupt request.  Break the
-                   * deadlock by handling the pause interrupt now.
+                   * pause request interrupt.  Break the deadlock by
+                   * handling the pause request now.
                    */
 
                   DEBUGVERIFY(up_cpu_paused(cpu));
@@ -360,7 +360,7 @@ try_again_in_irq:
               goto try_again;
             }
 
-          /* The set the lock count to 1.
+          /* Then set the lock count to 1.
            *
            * Interrupts disables must follow a stacked order.  We
            * cannot other context switches to re-order the enabling
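
For readers unfamiliar with the deadlock that irq_waitlock() guards against, here is a minimal, self-contained C sketch of the spin-then-abort pattern the corrected comments describe. It is not the NuttX implementation: pause_pending() is a hypothetical stand-in for the real pause-request check, and the lock is a plain C11 atomic flag rather than g_cpu_irqlock.

/* Illustrative sketch only (not NuttX code): spin for a global IRQ lock,
 * but abort the wait when a pause request is pending for this CPU so the
 * caller can service it and retry, breaking the deadlock described above.
 */

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag g_lock = ATOMIC_FLAG_INIT;

static bool pause_pending(int cpu)
{
  (void)cpu;      /* Hypothetical: would query a per-CPU pause-request flag */
  return false;
}

static bool try_irqlock(int cpu)
{
  /* Spin until the lock is acquired or a pause request arrives */

  while (atomic_flag_test_and_set_explicit(&g_lock, memory_order_acquire))
    {
      if (pause_pending(cpu))
        {
          return false;   /* Abort; caller handles the pause, then retries */
        }
    }

  return true;            /* Lock acquired */
}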