diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index a688b8b44c..bc32f37f8e 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -615,7 +615,7 @@ bool irq_cpu_locked(int cpu)
       return false;
     }
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
+#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
   /* If the global lockcount has been incremented then simply return true */
 
   if (g_global_lockcount > 0)
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index 0c1c681e9e..dca36d1f6c 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -374,7 +374,7 @@
 extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
 extern volatile spinlock_t g_cpu_tasklistlock SP_SECTION;
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
+#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
 /* This is part of the sched_lock() logic to handle atomic operations when
  * locking the scheduler.
  */
@@ -451,7 +451,7 @@ int sched_cpu_pause(FAR struct tcb_s *tcb);
 irqstate_t sched_tasklist_lock(void);
 void sched_tasklist_unlock(irqstate_t lock);
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
+#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
 #  define sched_islocked_global() \
      (spin_islocked(&g_cpu_schedlock) || g_global_lockcount > 0)
 #else
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 3722e4a3a1..1f2d3b9a15 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -43,6 +43,9 @@
 #include 
 #include 
 
+#include 
+
+#include 
 #include 
 #include 
@@ -116,7 +119,7 @@ volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
 volatile spinlock_t g_cpu_locksetlock SP_SECTION;
 volatile cpu_set_t g_cpu_lockset SP_SECTION;
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
+#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
 /* This is part of the sched_lock() logic to handle atomic operations when
  * locking the scheduler.
  */
@@ -152,15 +155,30 @@ volatile int16_t g_global_lockcount;
 int sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
+#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
+  irqstate_t flags;
+#endif
   int cpu;
 
-  /* The following operation is non-atomic unless CONFIG_ARCH_HAVE_FETCHADD
-   * defined.
+  /* The following operation is non-atomic unless CONFIG_ARCH_GLOBAL_IRQDISABLE
+   * or CONFIG_ARCH_HAVE_FETCHADD is defined.
    */
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
-  DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX); /* Not atomic! */
-  (void)up_fetchadd16(&g_global_lockcount, 1);
+#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
+  /* If the CPU supports suppression of interprocessor interrupts, then simply
+   * disabling interrupts will provide sufficient protection for the following
+   * operation.
+   */
+
+  flags = up_irq_save();
+
+#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
+  /* If the CPU supports an atomic fetch add operation, then we can use the
+   * global lockcount to assure that the following operation is atomic.
+   */
+
+  DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX); /* Not atomic! */
+  (void)up_fetchadd16(&g_global_lockcount, 1);
 #endif
 
   /* This operation is save if CONFIG_ARCH_HAVE_FETCHADD is defined.  NOTE
@@ -234,9 +252,11 @@ int sched_lock(void)
                                  TSTATE_TASK_PENDING);
         }
 
-#ifdef CONFIG_ARCH_HAVE_FETCHADD
-  DEBUGASSERT(g_global_lockcount > 0);
-  (void)up_fetchsub16(&g_global_lockcount, 1);
+#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
+  up_irq_restore(flags);
+#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
+  DEBUGASSERT(g_global_lockcount > 0);
+  (void)up_fetchsub16(&g_global_lockcount, 1);
 #endif
 
   return OK;
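
For context, below is a minimal sketch (not part of the patch, and not the actual NuttX sources) of the protection pattern this change establishes inside sched_lock(): when CONFIG_ARCH_GLOBAL_IRQDISABLE is available it takes precedence and a plain up_irq_save()/up_irq_restore() critical section guards the non-atomic bookkeeping; otherwise, with CONFIG_ARCH_HAVE_FETCHADD, the bookkeeping is bracketed by up_fetchadd16()/up_fetchsub16() on g_global_lockcount, which irq_cpu_locked() also consults. The helper name protected_sched_lock_step() and the exact include list are illustrative assumptions.

/* Illustrative sketch only -- not the real sched_lock().  Assumes the NuttX
 * symbols referenced in the diff (irqstate_t, up_irq_save(), up_irq_restore(),
 * up_fetchadd16(), up_fetchsub16(), DEBUGASSERT(), g_global_lockcount) and
 * that <nuttx/irq.h> and <nuttx/arch.h> declare them.
 */

#include <nuttx/config.h>

#include <stdint.h>
#include <assert.h>

#include <nuttx/irq.h>
#include <nuttx/arch.h>

extern volatile int16_t g_global_lockcount;  /* Defined in sched_lock.c */

/* Hypothetical helper showing the selection order the patch establishes */

static void protected_sched_lock_step(void)
{
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
  /* Preferred path: interrupts (including interprocessor interrupts) can be
   * suppressed, so a simple critical section protects the non-atomic
   * scheduler-lock bookkeeping.
   */

  irqstate_t flags = up_irq_save();

  /* ... non-atomic scheduler-lock bookkeeping would go here ... */

  up_irq_restore(flags);

#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
  /* Fallback path: atomically bump the global lock count so that other CPUs
   * (see irq_cpu_locked()) treat the scheduler as locked while the
   * bookkeeping runs, then drop the count again.
   */

  DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX);
  (void)up_fetchadd16(&g_global_lockcount, 1);

  /* ... non-atomic scheduler-lock bookkeeping would go here ... */

  DEBUGASSERT(g_global_lockcount > 0);
  (void)up_fetchsub16(&g_global_lockcount, 1);

#else
  /* Neither option is available; the bookkeeping remains non-atomic, as
   * noted in the comment in sched_lock().
   */
#endif
}

Note that the #elif ordering is what makes CONFIG_ARCH_GLOBAL_IRQDISABLE win when both options are configured, which is also why the #ifdef CONFIG_ARCH_HAVE_FETCHADD guards elsewhere in the patch gain the !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE) condition.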