From 8439296a50f69d95768eb232e3ad5f4b8d674ab9 Mon Sep 17 00:00:00 2001
From: hujun5
Date: Wed, 20 Dec 2023 20:09:51 +0800
Subject: [PATCH] irq: inline restore_critical_section

reason:
In SMP mode, restore_critical_section() runs on every context switch. To
reduce the time spent in context switching, inline it. Since
restore_critical_section() is small and is called from only one location,
inlining it does not increase the size of the image.

Signed-off-by: hujun5
---
 include/nuttx/irq.h      | 14 ++++++++++++-
 sched/irq/irq_csection.c | 43 ----------------------------------------
 sched/sched/sched.h      | 10 ++++++++++
 3 files changed, 23 insertions(+), 44 deletions(-)

diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index b281eb9a41..ed89b46c95 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -312,7 +312,19 @@ void leave_critical_section(irqstate_t flags) noinstrument_function;
 ****************************************************************************/
 
 #ifdef CONFIG_SMP
-void restore_critical_section(void);
+#  define restore_critical_section() \
+  do { \
+    FAR struct tcb_s *tcb; \
+    int me = this_cpu(); \
+    tcb = current_task(me); \
+    if (tcb->irqcount <= 0) \
+      { \
+        if ((g_cpu_irqset & (1 << me)) != 0) \
+          { \
+            cpu_irqlock_clear(); \
+          } \
+      } \
+  } while (0)
 #else
 #  define restore_critical_section()
 #endif
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 7909f58cd7..fb23acab7b 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -644,47 +644,4 @@ inline_function void leave_critical_section_nonirq(irqstate_t flags)
   up_irq_restore(flags);
 }
 #endif
-
-/****************************************************************************
- * Name: restore_critical_section
- *
- * Description:
- *   Restore the critical_section
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-void restore_critical_section(void)
-{
-  /* NOTE: The following logic for adjusting global IRQ controls were
-   * derived from nxsched_add_readytorun() and sched_removedreadytorun()
-   * Here, we only handles clearing logic to defer unlocking IRQ lock
-   * followed by context switching.
-   */
-
-  FAR struct tcb_s *tcb;
-  int me = this_cpu();
-
-  /* Adjust global IRQ controls. If irqcount is greater than zero,
-   * then this task/this CPU holds the IRQ lock
-   */
-
-  tcb = current_task(me);
-  DEBUGASSERT(g_cpu_nestcount[me] <= 0);
-  if (tcb->irqcount <= 0)
-    {
-      if ((g_cpu_irqset & (1 << me)) != 0)
-        {
-          cpu_irqlock_clear();
-        }
-    }
-}
-#endif /* CONFIG_SMP */
-
 #endif /* CONFIG_IRQCOUNT */
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index 660a997fc6..d255d04bdf 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -291,6 +291,16 @@ extern volatile clock_t g_cpuload_total;
 
 extern volatile cpu_set_t g_cpu_lockset;
 
+/* This is the spinlock that enforces critical sections when interrupts are
+ * disabled.
+ */
+
+extern volatile spinlock_t g_cpu_irqlock;
+
+/* Used to keep track of which CPU(s) hold the IRQ lock. */
+
+extern volatile cpu_set_t g_cpu_irqset;
+
 /* Used to lock tasklist to prevent from concurrent access */
 
 extern volatile spinlock_t g_cpu_tasklistlock;
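
Note (illustration only, not part of the patch to apply): the new macro uses the usual do { ... } while (0) inlining idiom so the expansion behaves as a single statement after an if/else and avoids a call/return on the context-switch path. The stand-alone sketch below models the same "clear this CPU's bit once irqcount reaches zero" logic; mock_tcb, mock_restore_critical_section, and the plain unsigned bitmask are simplified stand-ins, not the NuttX definitions.

/* Minimal stand-alone model of the do/while(0) macro-inlining pattern.
 * All types and globals below are simplified stand-ins for illustration;
 * they are not the NuttX definitions.
 */

#include <assert.h>
#include <stdio.h>

struct mock_tcb                      /* stand-in for struct tcb_s */
{
  int irqcount;                      /* critical-section nesting depth */
};

static unsigned int g_cpu_irqset;    /* bit n set => CPU n holds the IRQ lock */

static void cpu_irqlock_clear(void)  /* stand-in: would release the spinlock */
{
  g_cpu_irqset = 0;
}

/* Same shape as the new macro: expands to one statement, no call overhead */

#define mock_restore_critical_section(tcb, me) \
  do { \
    if ((tcb)->irqcount <= 0) \
      { \
        if ((g_cpu_irqset & (1u << (me))) != 0) \
          { \
            cpu_irqlock_clear(); \
          } \
      } \
  } while (0)

int main(void)
{
  struct mock_tcb tcb = { .irqcount = 0 };
  int me = 0;

  g_cpu_irqset = 1u << me;               /* pretend CPU 0 holds the lock */
  mock_restore_critical_section(&tcb, me);
  assert(g_cpu_irqset == 0);             /* released: irqcount was 0 */

  tcb.irqcount = 1;                      /* still inside a critical section */
  g_cpu_irqset = 1u << me;
  mock_restore_critical_section(&tcb, me);
  assert(g_cpu_irqset != 0);             /* kept: irqcount > 0 */

  printf("ok\n");
  return 0;
}

One behavioral difference between the macro and the removed out-of-line function is that the DEBUGASSERT(g_cpu_nestcount[me] <= 0) check is dropped in the macro form.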