spinlock: inline irqsaved spinlock
Reference pull request: #12599

Signed-off-by: chao an <anchao@lixiang.com>
parent 500ebd6498
commit 555dab3da3
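The hunks below move spin_lock_irqsave(), spin_unlock_irqrestore() and their _wo_note variants out of the C implementation file and into the spinlock header as static inline_function definitions, which also requires exposing g_irq_spin and g_irq_spin_count as globals. As a quick orientation, here is a minimal usage sketch of the API being inlined; the lock, counter and function names below are hypothetical and not part of this change:

    /* Illustrative usage sketch (hypothetical names): protect a small piece
     * of driver state against other CPUs and against interrupt handlers on
     * the local CPU.
     */

    #include <stdint.h>

    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    static spinlock_t g_my_lock = SP_UNLOCKED;
    static uint32_t g_my_count;

    void my_driver_update(void)
    {
      irqstate_t flags;

      /* Disable local interrupts and take the caller-specific lock */

      flags = spin_lock_irqsave(&g_my_lock);

      g_my_count++;              /* protected section */

      /* Release the lock and restore the saved interrupt state */

      spin_unlock_irqrestore(&g_my_lock, flags);
    }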
@@ -159,6 +159,18 @@ void sched_note_spinlock_unlock(FAR volatile spinlock_t *spinlock);
# define sched_note_spinlock_unlock(spinlock)
#endif

/****************************************************************************
 * Public Data Types
 ****************************************************************************/

/* Used for access control */

extern volatile spinlock_t g_irq_spin;

/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */

extern volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];

/****************************************************************************
 * Name: up_testset
 *
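The extern declarations above exist so the inline wrappers defined later in this header can reach the global IRQ spinlock and its per-CPU nesting counter. A minimal sketch of the nesting behavior they support, with hypothetical caller functions:

    /* Illustrative nesting sketch (outer() and inner() are hypothetical).
     * Passing NULL selects the global g_irq_spin; g_irq_spin_count[cpu]
     * records the per-CPU nesting depth so that only the outermost call
     * actually takes and releases the lock.
     */

    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    static void inner(void)
    {
      irqstate_t flags = spin_lock_irqsave(NULL);   /* depth 1 -> 2, no lock */

      /* ... access data already protected by g_irq_spin ... */

      spin_unlock_irqrestore(NULL, flags);          /* depth 2 -> 1 */
    }

    void outer(void)
    {
      irqstate_t flags = spin_lock_irqsave(NULL);   /* depth 0 -> 1, locks   */

      inner();

      spin_unlock_irqrestore(NULL, flags);          /* depth 1 -> 0, unlocks */
    }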
@@ -508,6 +520,43 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)

#define spin_initialize(l,s) do { *(l) = (s); } while (0)

/****************************************************************************
 * Name: spin_lock_irqsave_wo_note
 *
 * Description:
 *   This function is the no-trace version of spin_lock_irqsave()
 *
 ****************************************************************************/

#if defined(CONFIG_SPINLOCK)
static inline_function
irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      int me = up_cpu_index();
      if (0 == g_irq_spin_count[me])
        {
          spin_lock_wo_note(&g_irq_spin);
        }

      g_irq_spin_count[me]++;
      DEBUGASSERT(0 != g_irq_spin_count[me]);
    }
  else
    {
      spin_lock_wo_note(lock);
    }

  return ret;
}
#else
# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
#endif

/****************************************************************************
 * Name: spin_lock_irqsave
 *
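The _wo_note variant above performs the same NULL-lock/nesting logic but skips the sched_note_spinlock_*() hooks; presumably it is meant for code paths, such as the trace machinery itself, that must not emit further notes while taking a lock. A sketch under that assumption (the lock and function are hypothetical):

    /* Illustrative sketch (hypothetical names): a path inside the trace
     * machinery takes its lock with the _wo_note variants so that the
     * locking itself does not generate additional notes.
     */

    #include <stddef.h>

    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    static spinlock_t g_note_buf_lock = SP_UNLOCKED;

    void my_note_buffer_add(FAR const void *note, size_t len)
    {
      irqstate_t flags = spin_lock_irqsave_wo_note(&g_note_buf_lock);

      /* ... copy 'note' (len bytes) into a shared ring buffer ... */

      spin_unlock_irqrestore_wo_note(&g_note_buf_lock, flags);
    }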
@@ -542,19 +591,62 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
 ****************************************************************************/

#if defined(CONFIG_SPINLOCK)
irqstate_t spin_lock_irqsave(FAR spinlock_t *lock);
static inline_function
irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags;

  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock_lock(lock);

  /* Lock without trace note */

  flags = spin_lock_irqsave_wo_note(lock);

  /* Notify that we have the spinlock */

  sched_note_spinlock_locked(lock);

  return flags;
}
#else
# define spin_lock_irqsave(l) ((void)(l), up_irq_save())
#endif

/****************************************************************************
 * Name: spin_lock_irqsave_wo_note
 * Name: spin_unlock_irqrestore_wo_note
 *
 * Description:
 *   This function is the no-trace version of spin_unlock_irqrestore()
 *
 ****************************************************************************/

#if defined(CONFIG_SPINLOCK)
irqstate_t spin_lock_irqsave_wo_note(FAR spinlock_t *lock);
static inline_function
void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
                                    irqstate_t flags)
{
  if (NULL == lock)
    {
      int me = up_cpu_index();
      DEBUGASSERT(0 < g_irq_spin_count[me]);
      g_irq_spin_count[me]--;

      if (0 == g_irq_spin_count[me])
        {
          spin_unlock_wo_note(&g_irq_spin);
        }
    }
  else
    {
      spin_unlock_wo_note(lock);
    }

  up_irq_restore(flags);
}
#else
# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
#endif

/****************************************************************************
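With the hunk above, the traced wrappers become thin shells: spin_lock_irqsave() just brackets spin_lock_irqsave_wo_note() with the sched_note hooks. The header's own description names the intended use case, protecting data shared with interrupt handlers; here is a hedged sketch of that pattern with a hypothetical device driver:

    /* Illustrative sketch (hypothetical device driver): the same spinlock
     * taken from thread context and from an interrupt handler, which is the
     * data-protection pattern the irqsave/irqrestore wrappers are meant for.
     */

    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    static spinlock_t g_dev_lock = SP_UNLOCKED;
    static int g_dev_pending;

    int my_dev_interrupt(int irq, FAR void *context, FAR void *arg)
    {
      irqstate_t flags = spin_lock_irqsave(&g_dev_lock);

      g_dev_pending++;
      spin_unlock_irqrestore(&g_dev_lock, flags);
      return 0;
    }

    int my_dev_take_pending(void)
    {
      irqstate_t flags = spin_lock_irqsave(&g_dev_lock);
      int ret = g_dev_pending;

      g_dev_pending = 0;
      spin_unlock_irqrestore(&g_dev_lock, flags);
      return ret;
    }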
@@ -587,21 +679,22 @@ irqstate_t spin_lock_irqsave_wo_note(FAR spinlock_t *lock);
 ****************************************************************************/

#if defined(CONFIG_SPINLOCK)
void spin_unlock_irqrestore(FAR spinlock_t *lock, irqstate_t flags);
static inline_function
void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
                            irqstate_t flags)
{
  /* Unlock without trace note */

  spin_unlock_irqrestore_wo_note(lock, flags);

  /* Notify that we are unlocking the spinlock */

  sched_note_spinlock_unlock(lock);
}
#else
# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
#endif

/****************************************************************************
 * Name: spin_unlock_irqrestore_wo_note
 ****************************************************************************/

#if defined(CONFIG_SPINLOCK)
void spin_unlock_irqrestore_wo_note(FAR spinlock_t *lock, irqstate_t flags);
#else
# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
#endif

#if defined(CONFIG_RW_SPINLOCK)

/****************************************************************************
@@ -39,11 +39,11 @@

/* Used for access control */

static volatile spinlock_t g_irq_spin = SP_UNLOCKED;
volatile spinlock_t g_irq_spin = SP_UNLOCKED;

/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */

static volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];

#ifdef CONFIG_RW_SPINLOCK
/* Used for access control */
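Dropping static from g_irq_spin and g_irq_spin_count (matched by the extern declarations added to the header above) is required because the inline wrappers now reference these variables from every file that includes the header. A minimal sketch of that linkage pattern, with hypothetical file and symbol names:

    /* Illustrative linkage sketch (my_header.h, my_source.c and the names
     * are hypothetical): a variable referenced by an inline function defined
     * in a header must have external linkage, with exactly one definition in
     * a source file -- the same split used for g_irq_spin here.
     */

    /* my_header.h */

    #include <nuttx/compiler.h>        /* for inline_function */

    extern volatile int g_shared_state;

    static inline_function int my_get_state(void)
    {
      return g_shared_state;
    }

    /* my_source.c */

    volatile int g_shared_state = 0;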
@@ -60,166 +60,6 @@ static volatile uint8_t g_irq_rwspin_count[CONFIG_SMP_NCPUS];
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: spin_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     disable local interrupts and take the global spinlock (g_irq_spin)
 *     if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
 *     counter on the CPU is incremented to allow nested call and return
 *     the interrupt state.
 *
 *     If the argument lock is specified,
 *     disable local interrupts and take the given lock and return the
 *     interrupt state.
 *
 *     NOTE: This API is very simple to protect data (e.g. H/W register
 *     or internal data structure) in SMP mode. But do not use this API
 *     with kernel APIs which suspend a caller thread. (e.g. nxsem_wait)
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_spin is used
 *          and can be nested. Otherwise, nested call for the same lock
 *          would cause a deadlock
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to spin_lock_irqsave(lock);
 *
 ****************************************************************************/

irqstate_t spin_lock_irqsave(spinlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      int me = up_cpu_index();
      if (0 == g_irq_spin_count[me])
        {
          spin_lock(&g_irq_spin);
        }

      g_irq_spin_count[me]++;
      DEBUGASSERT(0 != g_irq_spin_count[me]);
    }
  else
    {
      spin_lock(lock);
    }

  return ret;
}

/****************************************************************************
 * Name: spin_lock_irqsave_wo_note
 ****************************************************************************/

irqstate_t spin_lock_irqsave_wo_note(spinlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      int me = up_cpu_index();
      if (0 == g_irq_spin_count[me])
        {
          spin_lock_wo_note(&g_irq_spin);
        }

      g_irq_spin_count[me]++;
      DEBUGASSERT(0 != g_irq_spin_count[me]);
    }
  else
    {
      spin_lock_wo_note(lock);
    }

  return ret;
}

/****************************************************************************
 * Name: spin_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     decrement the call counter (g_irq_spin_count[cpu]) and if it
 *     decrements to zero then release the spinlock (g_irq_spin) and
 *     restore the interrupt state as it was prior to the previous call to
 *     spin_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and restore
 *     the interrupt state as it was prior to the previous call to
 *     spin_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_spin is used.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to spin_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void spin_unlock_irqrestore(spinlock_t *lock, irqstate_t flags)
{
  if (NULL == lock)
    {
      int me = up_cpu_index();
      DEBUGASSERT(0 < g_irq_spin_count[me]);
      g_irq_spin_count[me]--;

      if (0 == g_irq_spin_count[me])
        {
          spin_unlock(&g_irq_spin);
        }
    }
  else
    {
      spin_unlock(lock);
    }

  up_irq_restore(flags);
}

/****************************************************************************
 * Name: spin_unlock_irqrestore_wo_note
 ****************************************************************************/

void spin_unlock_irqrestore_wo_note(spinlock_t *lock, irqstate_t flags)
{
  if (NULL == lock)
    {
      int me = up_cpu_index();
      DEBUGASSERT(0 < g_irq_spin_count[me]);
      g_irq_spin_count[me]--;

      if (0 == g_irq_spin_count[me])
        {
          spin_unlock_wo_note(&g_irq_spin);
        }
    }
  else
    {
      spin_unlock_wo_note(lock);
    }

  up_irq_restore(flags);
}

#ifdef CONFIG_RW_SPINLOCK

/****************************************************************************