Revert "sched/spinlock: remove nesting spinlock support"

This reverts commit 5aa13bc490.
This commit is contained in:
chenrun1 2024-10-12 21:20:58 +08:00 committed by archer
parent 505adfa277
commit 9e81f5efac
2 changed files with 33 additions and 6 deletions

View File

@@ -151,6 +151,10 @@ void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);
 extern volatile spinlock_t g_irq_spin;
+
+/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
+
+extern volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
 /****************************************************************************
  * Name: up_testset
  *
@@ -527,7 +531,14 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
   if (NULL == lock)
     {
-      spin_lock_wo_note(&g_irq_spin);
+      int me = this_cpu();
+      if (0 == g_irq_spin_count[me])
+        {
+          spin_lock_wo_note(&g_irq_spin);
+        }
+
+      g_irq_spin_count[me]++;
+      DEBUGASSERT(0 != g_irq_spin_count[me]);
     }
   else
     {
@@ -546,7 +557,10 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
  * Description:
  *   If SMP is enabled:
  *     If the argument lock is not specified (i.e. NULL),
- *     disable local interrupts and take the global spinlock (g_irq_spin).
+ *     disable local interrupts and take the global spinlock (g_irq_spin)
+ *     if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
+ *     counter on the CPU is incremented to allow nested calls and return
+ *     the interrupt state.
  *
  *     If the argument lock is specified,
  *     disable local interrupts and take the lock spinlock and return
@@ -684,7 +698,14 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
 {
   if (NULL == lock)
     {
-      spin_unlock_wo_note(&g_irq_spin);
+      int me = this_cpu();
+      DEBUGASSERT(0 < g_irq_spin_count[me]);
+      g_irq_spin_count[me]--;
+
+      if (0 == g_irq_spin_count[me])
+        {
+          spin_unlock_wo_note(&g_irq_spin);
+        }
     }
   else
     {
@@ -702,9 +723,11 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
  *
  * Description:
  *   If SMP is enabled:
- *     If the argument lock is not specified (i.e. NULL), release the
- *     spinlock (g_irq_spin) and restore the interrupt state as it was
- *     prior to the previous call to spin_lock_irqsave(NULL).
+ *     If the argument lock is not specified (i.e. NULL),
+ *     decrement the call counter (g_irq_spin_count[cpu]) and if it
+ *     decrements to zero then release the spinlock (g_irq_spin) and
+ *     restore the interrupt state as it was prior to the previous call to
+ *     spin_lock_irqsave(NULL).
  *
  *     If the argument lock is specified, release the lock and
  *     restore the interrupt state as it was prior to the previous call to

View File

@@ -43,6 +43,10 @@
 volatile spinlock_t g_irq_spin = SP_UNLOCKED;
+
+/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
+
+volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
 #ifdef CONFIG_RW_SPINLOCK
 /* Used for access control */