irq: inline restore_critical_section

reason:
In SMP mode, restore_critical_section is executed whenever a context
switch occurs. To reduce the time taken for context switching, we inline
the restore_critical_section function. Since restore_critical_section is
small and is called from only one location, inlining it does not increase
the size of the image.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun5 2023-12-20 20:09:51 +08:00 committed by Xiang Xiao
parent c9eef2d697
commit 8439296a50
3 changed files with 23 additions and 44 deletions
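
A minimal, generic sketch of the trade-off described in the commit message
(hypothetical names, not NuttX code): turning a small helper with a single
call site into a macro expands the body in place, removing call/return
overhead on the hot path while still emitting the code only once, so the
image does not grow.

/* Hypothetical illustration; restore_helper and g_counter are made up. */

static volatile int g_counter;

static void restore_helper(void)  /* out-of-line: call + return overhead */
{
  g_counter++;
}

#define restore_helper_inline() \
  do \
    { \
      g_counter++;  /* same body, expanded at the call site */ \
    } \
  while (0)

int main(void)
{
  restore_helper();         /* costs a function call */
  restore_helper_inline();  /* no call; body emitted here, once */
  return 0;
}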


@@ -312,7 +312,19 @@ void leave_critical_section(irqstate_t flags) noinstrument_function;
 ****************************************************************************/
 
 #ifdef CONFIG_SMP
-void restore_critical_section(void);
+#  define restore_critical_section() \
+  do { \
+      FAR struct tcb_s *tcb; \
+      int me = this_cpu(); \
+      tcb = current_task(me); \
+      if (tcb->irqcount <= 0) \
+        { \
+          if ((g_cpu_irqset & (1 << me)) != 0) \
+            { \
+              cpu_irqlock_clear(); \
+            } \
+        } \
+    } while (0)
 #else
 #  define restore_critical_section()
 #endif
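
A note on the do { ... } while (0) wrapper used in the new macro, with a
self-contained illustration (stmt_a and stmt_b are hypothetical stand-ins,
not from the commit): the wrapper makes a multi-statement macro expand to
exactly one statement, so it stays correct under if/else where a bare
statement list would not.

static void stmt_a(void) {}
static void stmt_b(void) {}

#define RESTORE_BAD()   stmt_a(); stmt_b()
#define RESTORE_GOOD()  do { stmt_a(); stmt_b(); } while (0)

void example(int switching)
{
  if (switching)
    RESTORE_GOOD();  /* one statement; the trailing ';' terminates it */
  else
    stmt_a();

  /* RESTORE_BAD() in the same position would leave stmt_b() outside the
   * 'if' and make the 'else' a syntax error.
   */
}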


@@ -644,47 +644,4 @@ inline_function void leave_critical_section_nonirq(irqstate_t flags)
   up_irq_restore(flags);
 }
 #endif
-
-/****************************************************************************
- * Name: restore_critical_section
- *
- * Description:
- *   Restore the critical_section
- *
- * Input Parameters:
- *   None
- *
- * Returned Value:
- *   None
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-void restore_critical_section(void)
-{
-  /* NOTE: The following logic for adjusting global IRQ controls were
-   * derived from nxsched_add_readytorun() and sched_removedreadytorun()
-   * Here, we only handles clearing logic to defer unlocking IRQ lock
-   * followed by context switching.
-   */
-
-  FAR struct tcb_s *tcb;
-  int me = this_cpu();
-
-  /* Adjust global IRQ controls. If irqcount is greater than zero,
-   * then this task/this CPU holds the IRQ lock
-   */
-
-  tcb = current_task(me);
-  DEBUGASSERT(g_cpu_nestcount[me] <= 0);
-
-  if (tcb->irqcount <= 0)
-    {
-      if ((g_cpu_irqset & (1 << me)) != 0)
-        {
-          cpu_irqlock_clear();
-        }
-    }
-}
-#endif /* CONFIG_SMP */
 #endif /* CONFIG_IRQCOUNT */
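
The removed function (and the macro that replaces it) tests
(g_cpu_irqset & (1 << me)) != 0, treating g_cpu_irqset as a bitmask with
one bit per CPU that holds the IRQ lock; note that the macro keeps this
clearing logic but drops the DEBUGASSERT(g_cpu_nestcount[me] <= 0) check.
A simplified, hypothetical model of the bitmask bookkeeping (not the NuttX
implementation, which also manages the underlying spinlock):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t g_mask;    /* stand-in for g_cpu_irqset */

static void note_locked(int cpu)    /* CPU 'cpu' takes the IRQ lock */
{
  g_mask |= (uint32_t)1 << cpu;
}

static void note_unlocked(int cpu)  /* CPU 'cpu' releases it */
{
  g_mask &= ~((uint32_t)1 << cpu);
}

int main(void)
{
  note_locked(1);
  printf("cpu1 holds the lock: %d\n", (g_mask & (1 << 1)) != 0);  /* 1 */
  note_unlocked(1);
  printf("cpu1 holds the lock: %d\n", (g_mask & (1 << 1)) != 0);  /* 0 */
  return 0;
}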


@@ -291,6 +291,16 @@ extern volatile clock_t g_cpuload_total;
 extern volatile cpu_set_t g_cpu_lockset;
 
+/* This is the spinlock that enforces critical sections when interrupts are
+ * disabled.
+ */
+
+extern volatile spinlock_t g_cpu_irqlock;
+
+/* Used to keep track of which CPU(s) hold the IRQ lock. */
+
+extern volatile cpu_set_t g_cpu_irqset;
+
 /* Used to lock tasklist to prevent from concurrent access */
 
 extern volatile spinlock_t g_cpu_tasklistlock;
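
These extern declarations must now be visible wherever the
restore_critical_section() macro is expanded: unlike the old out-of-line
function, whose references were resolved inside a single .c file, a macro
body is compiled at every call site, so each symbol it names needs a
declaration in scope there. A minimal single-file illustration
(hypothetical names):

extern volatile unsigned int g_mask;  /* declaration travels with the macro */

#define clear_my_bit(cpu)  do { g_mask &= ~(1u << (cpu)); } while (0)

volatile unsigned int g_mask = 0x3;   /* definition lives in one .c file */

int main(void)
{
  clear_my_bit(0);     /* expands here; needs g_mask declared in scope */
  return (int)g_mask;  /* exits with 2 */
}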