spinlock: Implement read-write spinlock

spinlock.c: Implement a read-write spinlock. Multiple readers can take the
lock simultaneously, but only one writer can take it at a time.

irq_spinlock.c: Align with g_irq_spin_count. If the lock argument is NULL,
the caller takes the global lock (g_irq_rwspin), and, like
spin_lock_irqsave(), the call supports nesting on the same CPU: a CPU that
already holds the write lock may call write_lock_irqsave() again.

Signed-off-by: TaiJu Wu <tjwu1217@gmail.com>
Co-authored-by: David Sidrane <David.Sidrane@Nscdg.com>
This commit is contained in:
parent 8bdb78b446
commit 68a4d3df7e
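As an overview of the API added by this commit, here is a minimal usage sketch (illustrative only, not part of the diff; g_my_rwlock, g_my_data, reader() and writer() are hypothetical names). Several readers may hold the lock at once, while a writer excludes everyone:

#include <nuttx/spinlock.h>

/* Hypothetical shared data protected by a read-write spinlock */

static rwlock_t g_my_rwlock = RW_SP_UNLOCKED;
static int g_my_data;

static int reader(void)
{
  int value;

  read_lock(&g_my_rwlock);     /* Many readers may enter concurrently */
  value = g_my_data;
  read_unlock(&g_my_rwlock);
  return value;
}

static void writer(int value)
{
  write_lock(&g_my_rwlock);    /* Excludes all readers and other writers */
  g_my_data = value;
  write_unlock(&g_my_rwlock);
}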
@@ -32,6 +32,13 @@
#include <nuttx/irq.h>

#ifdef CONFIG_RW_SPINLOCK
typedef int32_t rwlock_t;
#define RW_SP_UNLOCKED 0
#define RW_SP_READ_LOCKED 1
#define RW_SP_WRITE_LOCKED -1
#endif

#ifndef CONFIG_SPINLOCK
# define SP_UNLOCKED 0 /* The Un-locked state */
# define SP_LOCKED 1 /* The Locked state */
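The rwlock_t value doubles as a reader count: RW_SP_UNLOCKED (0) means the lock is free, a positive value is the number of readers currently holding it, and RW_SP_WRITE_LOCKED (-1) means a writer holds it. An illustrative sketch of the transitions implied by these constants:

/* Value of the rwlock_t (illustrative):
 *
 *    0 (RW_SP_UNLOCKED)      --read_lock-->     1  --read_lock-->  2 ...
 *    n (n readers)           --read_unlock-->   n - 1
 *    0 (RW_SP_UNLOCKED)      --write_lock-->   -1 (RW_SP_WRITE_LOCKED)
 *   -1 (RW_SP_WRITE_LOCKED)  --write_unlock-->  0 (RW_SP_UNLOCKED)
 */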
@@ -402,7 +409,7 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
 * Name: spin_lock_irqsave
 *
 * Description:
- *   If SMP is are enabled:
+ *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     disable local interrupts and take the global spinlock (g_irq_spin)
 *     if the call counter (g_irq_spin_count[cpu]) equals to 0. Then the
@@ -492,4 +499,313 @@ void spin_unlock_irqrestore_wo_note(FAR spinlock_t *lock, irqstate_t flags);
# define spin_unlock_irqrestore_wo_note(l, f) up_irq_restore(f)
#endif

#ifdef CONFIG_RW_SPINLOCK

/****************************************************************************
 * Name: rwlock_init
 *
 * Description:
 *   Initialize a non-reentrant spinlock object to its initial,
 *   unlocked state.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to be initialized.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

#define rwlock_init(l) do { *(l) = RW_SP_UNLOCKED; } while(0)

/****************************************************************************
 * Name: read_lock
 *
 * Description:
 *   If this task does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and takes one read reference
 *   (increments the lock value by one).
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void read_lock(FAR volatile rwlock_t *lock);

/****************************************************************************
 * Name: read_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try to get the
 *   lock.
 *
 *   This implementation is non-reentrant and takes one read reference
 *   (increments the lock value by one).
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

bool read_trylock(FAR volatile rwlock_t *lock);

/****************************************************************************
 * Name: read_unlock
 *
 * Description:
 *   Release one read reference on a non-reentrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void read_unlock(FAR volatile rwlock_t *lock);
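A short, hypothetical example of the read-side calls declared above (the sensor structure, g_sensor_lock and try_sample() are invented for illustration):

#include <nuttx/spinlock.h>

struct sensor_data_s           /* Hypothetical shared structure */
{
  int raw;
};

static rwlock_t g_sensor_lock = RW_SP_UNLOCKED;
static struct sensor_data_s g_sensor_data;

static bool try_sample(FAR struct sensor_data_s *out)
{
  if (!read_trylock(&g_sensor_lock))   /* Fails only while write-locked */
    {
      return false;
    }

  *out = g_sensor_data;                /* Other readers may run concurrently */
  read_unlock(&g_sensor_lock);
  return true;
}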
/****************************************************************************
 * Name: write_lock
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and sets the lock value to
 *   RW_SP_WRITE_LOCKED, excluding both readers and writers.
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void write_lock(FAR volatile rwlock_t *lock);

/****************************************************************************
 * Name: write_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try to get the
 *   lock.
 *
 *   This implementation is non-reentrant and sets the lock value to
 *   RW_SP_WRITE_LOCKED, excluding both readers and writers.
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

bool write_trylock(FAR volatile rwlock_t *lock);

/****************************************************************************
 * Name: write_unlock
 *
 * Description:
 *   Release the write lock on a non-reentrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void write_unlock(FAR volatile rwlock_t *lock);
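And the matching write side, again as a hedged sketch (update_config() and its data are hypothetical):

#include <nuttx/spinlock.h>

static rwlock_t g_config_lock = RW_SP_UNLOCKED;   /* Hypothetical lock */
static int g_config_value;

static bool update_config(int value)
{
  /* write_trylock() fails if any reader or another writer holds the lock */

  if (!write_trylock(&g_config_lock))
    {
      return false;
    }

  g_config_value = value;
  write_unlock(&g_config_lock);
  return true;
}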
/****************************************************************************
 * Name: read_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL), disable local
 *     interrupts and take the global read-write spinlock (g_irq_rwspin)
 *     and increase g_irq_rwspin.
 *
 *     If the argument lock is specified,
 *     disable local interrupts, take the lock spinlock and return
 *     the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. H/W registers
 *     or an internal data structure) in SMP mode.  But do not use this API
 *     with kernel APIs which suspend the caller thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *          used and the call can be nested. Otherwise, a nested call for
 *          the same lock would cause a deadlock.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to read_lock_irqsave(lock);
 *
 ****************************************************************************/

#if defined(CONFIG_SMP)
irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#else
# define read_lock_irqsave(l) ((void)(l), up_irq_save())
#endif
/****************************************************************************
 * Name: read_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     decrement the reader count (g_irq_rwspin) and restore the interrupt
 *     state as it was prior to the previous call to read_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     read_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock  - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *           used.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to read_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#if defined(CONFIG_SMP)
void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
# define read_unlock_irqrestore(l, f) up_irq_restore(f)
#endif
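A sketch of the read-side irqsave variants declared above. Passing NULL uses the global read-write spinlock, so no dedicated lock object is needed (g_shared_counter and read_counter() are hypothetical):

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static int g_shared_counter;   /* Hypothetical data with no dedicated lock */

static int read_counter(void)
{
  irqstate_t flags;
  int value;

  flags = read_lock_irqsave(NULL);    /* Global g_irq_rwspin, IRQs disabled */
  value = g_shared_counter;
  read_unlock_irqrestore(NULL, flags);

  return value;
}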
/****************************************************************************
 * Name: write_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     disable local interrupts and take the global spinlock (g_irq_rwspin)
 *     if the call counter (g_irq_rwspin_count[cpu]) equals to 0. Then
 *     the counter on the CPU is incremented to allow nested calls and
 *     the interrupt state is returned.
 *
 *     If the argument lock is specified,
 *     disable local interrupts, take the lock spinlock and return
 *     the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. H/W registers
 *     or an internal data structure) in SMP mode.  But do not use this API
 *     with kernel APIs which suspend the caller thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *          used and the call can be nested. Otherwise, a nested call for
 *          the same lock would cause a deadlock.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to write_lock_irqsave(lock);
 *
 ****************************************************************************/

#if defined(CONFIG_SMP)
irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#else
# define write_lock_irqsave(l) ((void)(l), up_irq_save())
#endif
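Because write_lock_irqsave(NULL) keeps a per-CPU nesting count, the same CPU may take the global write lock again while already holding it. A hypothetical sketch (g_table, update_entry() and update_all() are made-up names):

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static int g_table[8];   /* Hypothetical data guarded by the global lock */

static void update_entry(int index, int value)
{
  irqstate_t flags = write_lock_irqsave(NULL);
  g_table[index] = value;
  write_unlock_irqrestore(NULL, flags);
}

static void update_all(int value)
{
  irqstate_t flags = write_lock_irqsave(NULL);   /* Outer write lock */
  int i;

  for (i = 0; i < 8; i++)
    {
      update_entry(i, value);   /* Nested write_lock_irqsave(NULL) is safe */
    }

  write_unlock_irqrestore(NULL, flags);
}

Note that this nesting only works with the NULL (global) lock; nesting write_lock_irqsave() on the same caller-specific lock would deadlock.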
/****************************************************************************
 * Name: write_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     decrement the call counter (g_irq_rwspin_count[cpu]) and if it
 *     decrements to zero then release the spinlock (g_irq_rwspin) and
 *     restore the interrupt state as it was prior to the previous call to
 *     write_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     write_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock  - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *           used.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to write_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

#if defined(CONFIG_SMP)
void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
# define write_unlock_irqrestore(l, f) up_irq_restore(f)
#endif

#endif /* CONFIG_RW_SPINLOCK */
#endif /* __INCLUDE_NUTTX_SPINLOCK_H */
@@ -318,6 +318,14 @@ config TICKET_SPINLOCK

endif # SPINLOCK

config RW_SPINLOCK
	bool "Support read-write spinlocks"
	default y
	---help---
		Spinlocks are split into read and write locks.
		Readers can take the read lock simultaneously, but only one
		writer can take the write lock.

config IRQCHAIN
	bool "Enable multi handler sharing a IRQ"
	default n
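To pick up the new option in a board configuration, something like the following could go in the defconfig (illustrative only; CONFIG_RW_SPINLOCK defaults to y, and the primitives it builds on are only compiled when spinlock support, CONFIG_SPINLOCK, is available on the target):

CONFIG_RW_SPINLOCK=y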
@@ -45,6 +45,17 @@ static volatile spinlock_t g_irq_spin = SP_UNLOCKED;

static volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];

#ifdef CONFIG_RW_SPINLOCK
/* Used for access control */

static volatile rwlock_t g_irq_rwspin = RW_SP_UNLOCKED;

/* Handles nested calls to write_lock_irqsave and write_unlock_irqrestore */

static volatile uint8_t g_irq_rwspin_count[CONFIG_SMP_NCPUS];

#endif

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -209,4 +220,202 @@ void spin_unlock_irqrestore_wo_note(spinlock_t *lock, irqstate_t flags)
  up_irq_restore(flags);
}

#ifdef CONFIG_RW_SPINLOCK

/****************************************************************************
 * Name: read_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     If the 'lock' argument is not specified (i.e. NULL), disable local
 *     interrupts and take the global read-write spinlock (g_irq_rwspin)
 *     and increase g_irq_rwspin.
 *
 *     If the 'lock' argument is specified,
 *     disable local interrupts, take the lock spinlock and return
 *     the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. H/W registers
 *     or an internal data structure) in SMP mode.  Do not use this API
 *     with kernel APIs which suspend the caller thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *          used and the call can be nested. Otherwise, a nested call for
 *          the same lock would cause a deadlock.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to read_lock_irqsave(lock);
 *
 ****************************************************************************/

irqstate_t read_lock_irqsave(FAR rwlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      read_lock(&g_irq_rwspin);
    }
  else
    {
      read_lock(lock);
    }

  return ret;
}

/****************************************************************************
 * Name: read_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     decrement the reader count (g_irq_rwspin) and restore the interrupt
 *     state as it was prior to the previous call to read_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     read_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock  - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *           used.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to read_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void read_unlock_irqrestore(rwlock_t *lock, irqstate_t flags)
{
  if (NULL == lock)
    {
      read_unlock(&g_irq_rwspin);
    }
  else
    {
      read_unlock(lock);
    }

  up_irq_restore(flags);
}

/****************************************************************************
 * Name: write_lock_irqsave
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     disable local interrupts and take the global spinlock (g_irq_rwspin)
 *     if the call counter (g_irq_rwspin_count[cpu]) equals to 0. Then
 *     the counter on the CPU is incremented to allow nested calls and
 *     the interrupt state is returned.
 *
 *     If the argument lock is specified,
 *     disable local interrupts, take the lock spinlock and return
 *     the interrupt state.
 *
 *     NOTE: This API is a simple way to protect data (e.g. H/W registers
 *     or an internal data structure) in SMP mode.  But do not use this API
 *     with kernel APIs which suspend the caller thread (e.g. nxsem_wait).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_save().
 *
 * Input Parameters:
 *   lock - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *          used and the call can be nested. Otherwise, a nested call for
 *          the same lock would cause a deadlock.
 *
 * Returned Value:
 *   An opaque, architecture-specific value that represents the state of
 *   the interrupts prior to the call to write_lock_irqsave(lock);
 *
 ****************************************************************************/

irqstate_t write_lock_irqsave(rwlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      int me = this_cpu();
      if (0 == g_irq_rwspin_count[me])
        {
          write_lock(&g_irq_rwspin);
        }

      g_irq_rwspin_count[me]++;
      DEBUGASSERT(0 != g_irq_rwspin_count[me]);
    }
  else
    {
      write_lock(lock);
    }

  return ret;
}
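To make the nesting bookkeeping above concrete, an illustrative trace of the NULL-lock path on one CPU n:

/* g_irq_rwspin_count[n]   action
 *
 *   0 -> 1                write_lock_irqsave(NULL) takes g_irq_rwspin
 *   1 -> 2                nested write_lock_irqsave(NULL); lock already held
 *   2 -> 1                write_unlock_irqrestore(NULL, flags); lock kept
 *   1 -> 0                write_unlock_irqrestore(NULL, flags) releases
 *                         g_irq_rwspin
 */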
/****************************************************************************
 * Name: write_unlock_irqrestore
 *
 * Description:
 *   If SMP is enabled:
 *     If the argument lock is not specified (i.e. NULL),
 *     decrement the call counter (g_irq_rwspin_count[cpu]) and if it
 *     decrements to zero then release the spinlock (g_irq_rwspin) and
 *     restore the interrupt state as it was prior to the previous call to
 *     write_lock_irqsave(NULL).
 *
 *     If the argument lock is specified, release the lock and
 *     restore the interrupt state as it was prior to the previous call to
 *     write_lock_irqsave(lock).
 *
 *   If SMP is not enabled:
 *     This function is equivalent to up_irq_restore().
 *
 * Input Parameters:
 *   lock  - Caller specific spinlock. If specified NULL, g_irq_rwspin is
 *           used.
 *
 *   flags - The architecture-specific value that represents the state of
 *           the interrupts prior to the call to write_lock_irqsave(lock);
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void write_unlock_irqrestore(rwlock_t *lock, irqstate_t flags)
{
  if (NULL == lock)
    {
      int me = this_cpu();
      DEBUGASSERT(0 < g_irq_rwspin_count[me]);
      g_irq_rwspin_count[me]--;

      if (0 == g_irq_rwspin_count[me])
        {
          write_unlock(&g_irq_rwspin);
        }
    }
  else
    {
      write_unlock(lock);
    }

  up_irq_restore(flags);
}
#endif /* CONFIG_RW_SPINLOCK */
#endif /* CONFIG_SMP */
@@ -32,7 +32,7 @@
#include <nuttx/sched_note.h>
#include <arch/irq.h>

-#ifdef CONFIG_TICKET_SPINLOCK
+#if defined(CONFIG_TICKET_SPINLOCK) || defined(CONFIG_RW_SPINLOCK)
# include <stdatomic.h>
#endif
@@ -446,4 +446,230 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
}
#endif

#ifdef CONFIG_RW_SPINLOCK

/****************************************************************************
 * Name: read_lock
 *
 * Description:
 *   If this task does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and takes one read reference
 *   (increments the lock value by one).
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void read_lock(FAR volatile rwlock_t *lock)
{
  while (true)
    {
      rwlock_t old = atomic_load(lock);

      if (old <= RW_SP_WRITE_LOCKED)
        {
          DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
          SP_DSB();
          SP_WFE();
        }
      else if (atomic_compare_exchange_strong(lock, &old, old + 1))
        {
          break;
        }
    }

  SP_DMB();
}
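The retry loop above is needed because atomic_compare_exchange_strong() can fail when another CPU changes the lock between the atomic_load() and the exchange; while a writer holds the lock the reader waits with SP_WFE() until SP_SEV() is issued by an unlock path. An illustrative interleaving of two readers (no writer involved):

/* CPU0: read_lock(&lock)     lock: 0 -> 1
 * CPU1: read_lock(&lock)     lock: 1 -> 2   (readers do not exclude each other)
 * CPU0: read_unlock(&lock)   lock: 2 -> 1
 * CPU1: read_unlock(&lock)   lock: 1 -> 0   (SP_SEV() wakes any spinning writer)
 */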
/****************************************************************************
 * Name: read_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try to get the
 *   lock.
 *
 *   This implementation is non-reentrant and takes one read reference
 *   (increments the lock value by one).
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

bool read_trylock(FAR volatile rwlock_t *lock)
{
  while (true)
    {
      rwlock_t old = atomic_load(lock);

      if (old <= RW_SP_WRITE_LOCKED)
        {
          DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
          return false;
        }
      else if (atomic_compare_exchange_strong(lock, &old, old + 1))
        {
          break;
        }
    }

  SP_DMB();
  return true;
}

/****************************************************************************
 * Name: read_unlock
 *
 * Description:
 *   Release one read reference on a non-reentrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void read_unlock(FAR volatile rwlock_t *lock)
{
  DEBUGASSERT(atomic_load(lock) >= RW_SP_READ_LOCKED);

  SP_DMB();
  atomic_fetch_add(lock, -1);
  SP_DSB();
  SP_SEV();
}

/****************************************************************************
 * Name: write_lock
 *
 * Description:
 *   If this task does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is non-reentrant and sets the lock value to
 *   RW_SP_WRITE_LOCKED, excluding both readers and writers.
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void write_lock(FAR volatile rwlock_t *lock)
{
  rwlock_t zero = RW_SP_UNLOCKED;

  while (!atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
    {
      /* A failed compare-exchange stores the observed value in 'zero';
       * reset the expected value so we only ever swap from UNLOCKED.
       */

      zero = RW_SP_UNLOCKED;
      SP_DSB();
      SP_WFE();
    }

  SP_DMB();
}
/****************************************************************************
 * Name: write_trylock
 *
 * Description:
 *   If this task does not already hold the spinlock, then try to get the
 *   lock.
 *
 *   This implementation is non-reentrant and sets the lock value to
 *   RW_SP_WRITE_LOCKED, excluding both readers and writers.
 *
 *   Readers have priority over writers: while a reader holds the lock,
 *   a new reader can also take the lock, but a writer cannot.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   false - Failure, the spinlock was already locked
 *   true  - Success, the spinlock was successfully locked
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

bool write_trylock(FAR volatile rwlock_t *lock)
{
  rwlock_t zero = RW_SP_UNLOCKED;

  if (atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
    {
      SP_DMB();
      return true;
    }

  SP_DSB();
  return false;
}

/****************************************************************************
 * Name: write_unlock
 *
 * Description:
 *   Release the write lock on a non-reentrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void write_unlock(FAR volatile rwlock_t *lock)
{
  /* Ensure this CPU already holds the write lock */

  DEBUGASSERT(atomic_load(lock) == RW_SP_WRITE_LOCKED);

  SP_DMB();
  atomic_store(lock, RW_SP_UNLOCKED);
  SP_DSB();
  SP_SEV();
}

#endif /* CONFIG_RW_SPINLOCK */
#endif /* CONFIG_SPINLOCK */