sched/sched: Extend the last global lock change to work with the lc823450-xgevk, which does not support the atomic fetch-add but does support disabling interprocessor interrupts. Disabling interprocessor interrupts will also guarantee that the TCB address calculation is atomic.

Gregory Nutt 2018-02-05 13:32:09 -06:00
parent 37c9b3d54a
commit b884fb9fed
3 changed files with 32 additions and 12 deletions
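
The heart of the change is a fallback chain for protecting a non-atomic operation: if the architecture can globally disable interrupts (CONFIG_ARCH_GLOBAL_IRQDISABLE), do that; otherwise, if it has an atomic fetch-add (CONFIG_ARCH_HAVE_FETCHADD), bracket the operation with up_fetchadd16()/up_fetchsub16() on g_global_lockcount. The following is a minimal sketch of that pattern using the interfaces that appear in the hunks below; it is a simplified illustration, not the literal sched_lock() code, and g_example_lockcount is a stand-in name.

#include <nuttx/config.h>
#include <nuttx/irq.h>    /* up_irq_save(), up_irq_restore() */
#include <nuttx/arch.h>   /* up_fetchadd16(), up_fetchsub16() */

static volatile int16_t g_example_lockcount;  /* Stand-in for g_global_lockcount */

static void protected_operation(void)
{
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
  /* Disabling interrupts (including interprocessor interrupts) is enough to
   * make the operation below atomic; it also covers the TCB address
   * calculation mentioned in the commit message.
   */

  irqstate_t flags = up_irq_save();

  /* ... non-atomic operation ... */

  up_irq_restore(flags);

#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
  /* Otherwise bracket the operation with an atomic increment/decrement of a
   * global lock count so that other CPUs treat the scheduler as locked in
   * the meantime.
   */

  (void)up_fetchadd16(&g_example_lockcount, 1);

  /* ... non-atomic operation ... */

  (void)up_fetchsub16(&g_example_lockcount, 1);
#endif
}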


@@ -615,7 +615,7 @@ bool irq_cpu_locked(int cpu)
return false;
}
#ifdef CONFIG_ARCH_HAVE_FETCHADD
#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
/* If the global lockcount has been incremented then simply return true */
if (g_global_lockcount > 0)


@@ -374,7 +374,7 @@ extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
extern volatile spinlock_t g_cpu_tasklistlock SP_SECTION;
#ifdef CONFIG_ARCH_HAVE_FETCHADD
#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
/* This is part of the sched_lock() logic to handle atomic operations when
* locking the scheduler.
*/
@@ -451,7 +451,7 @@ int sched_cpu_pause(FAR struct tcb_s *tcb);
irqstate_t sched_tasklist_lock(void);
void sched_tasklist_unlock(irqstate_t lock);
#ifdef CONFIG_ARCH_HAVE_FETCHADD
#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
# define sched_islocked_global() \
(spin_islocked(&g_cpu_schedlock) || g_global_lockcount > 0)
#else
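
The sched_islocked_global() macro shown above is what keeps the fetch-add fallback working: a non-zero g_global_lockcount makes the rest of the scheduler treat the system as locked even before g_cpu_schedlock has actually been taken. A hypothetical caller (illustrative only, not quoted from the NuttX sources) would check it like this before allowing preemption:

  if (sched_islocked_global() || irq_cpu_locked(this_cpu()))
    {
      /* The scheduler is locked on some CPU; queue the task as pending
       * rather than pre-empting the running task now.
       */
    }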


@@ -43,6 +43,9 @@
#include <sched.h>
#include <assert.h>
#include <arch/irq.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/sched_note.h>
@@ -116,7 +119,7 @@ volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
volatile spinlock_t g_cpu_locksetlock SP_SECTION;
volatile cpu_set_t g_cpu_lockset SP_SECTION;
#ifdef CONFIG_ARCH_HAVE_FETCHADD
#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
/* This is part of the sched_lock() logic to handle atomic operations when
* locking the scheduler.
*/
@@ -152,15 +155,30 @@ volatile int16_t g_global_lockcount;
int sched_lock(void)
{
FAR struct tcb_s *rtcb;
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
irqstate_t flags;
#endif
int cpu;
/* The following operation is non-atomic unless CONFIG_ARCH_HAVE_FETCHADD
* defined.
/* The following operation is non-atomic unless CONFIG_ARCH_GLOBAL_IRQDISABLE
* or CONFIG_ARCH_HAVE_FETCHADD is defined.
*/
#ifdef CONFIG_ARCH_HAVE_FETCHADD
DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX); /* Not atomic! */
(void)up_fetchadd16(&g_global_lockcount, 1);
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
/* If the CPU supports suppression of interprocessor interrupts, then simply
* disabling interrupts will provide sufficient protection for the following
* operation.
*/
flags = up_irq_save();
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
/* If the CPU supports an atomic fetch add operation, then we can use the
* global lockcount to assure that the following operation is atomic.
*/
DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX); /* Not atomic! */
(void)up_fetchadd16(&g_global_lockcount, 1);
#endif
/* This operation is safe if CONFIG_ARCH_HAVE_FETCHADD is defined.  NOTE
@@ -234,9 +252,11 @@ int sched_lock(void)
TSTATE_TASK_PENDING);
}
#ifdef CONFIG_ARCH_HAVE_FETCHADD
DEBUGASSERT(g_global_lockcount > 0);
(void)up_fetchsub16(&g_global_lockcount, 1);
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
up_irq_restore(flags);
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
DEBUGASSERT(g_global_lockcount > 0);
(void)up_fetchsub16(&g_global_lockcount, 1);
#endif
return OK;
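
None of this changes how the scheduler lock is used; the configuration-dependent protection is internal to sched_lock(), and callers still rely on the usual pairing (usage sketch):

  sched_lock();       /* Prevent this task from being pre-empted */

  /* ... access data shared with other tasks on this CPU ... */

  sched_unlock();     /* Re-enable pre-emption */

Preferring the CONFIG_ARCH_GLOBAL_IRQDISABLE path when both options are available has the side benefit called out in the commit message: with interrupts (including interprocessor interrupts) disabled, the TCB address calculation inside sched_lock() is guaranteed to be atomic as well.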