diff --git a/sched/semaphore/spinlock.c b/sched/semaphore/spinlock.c index 349cdec57f..fc46f05aae 100644 --- a/sched/semaphore/spinlock.c +++ b/sched/semaphore/spinlock.c @@ -405,8 +405,15 @@ void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu, #ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS cpu_set_t prev; #endif + irqstate_t flags; - /* First, get the 'setlock' spinlock */ + /* Disable local interrupts to prevent being re-entered from an interrupt + * on the same CPU. This does not affect the behavior on other CPUs. + */ + + flags = up_irq_save(); + + /* Then, get the 'setlock' spinlock */ spin_lock(setlock); @@ -427,9 +434,10 @@ void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu, } #endif - /* Release the 'setlock' */ + /* Release the 'setlock' and restore local interrupts */ spin_unlock(setlock); + up_irq_restore(flags); } /**************************************************************************** @@ -456,6 +464,13 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu, #ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS cpu_set_t prev; #endif + irqstate_t flags; + + /* Disable local interrupts to prevent being re-entered from an interrupt + * on the same CPU. This does not affect the behavior on other CPUs. + */ + + flags = up_irq_save(); /* First, get the 'setlock' spinlock */ @@ -480,9 +495,10 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu, } #endif - /* Release the 'setlock' */ + /* Release the 'setlock' and restore local interrupts */ spin_unlock(setlock); + up_irq_restore(flags); } #endif /* CONFIG_SPINLOCK */