diff --git a/configs b/configs
index ebc3a71916..ff5400544e 160000
--- a/configs
+++ b/configs
@@ -1 +1 @@
-Subproject commit ebc3a719168c1e7524a64498cd6d9312f26d36b0
+Subproject commit ff5400544e8ab499fcf20a1a88ebec05c9cbd2e9
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 34e77f3b1c..d8e671d977 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -181,22 +181,36 @@ void leave_critical_section(irqstate_t flags)
           sched_note_csection(rtcb, false);
 #endif

-          /* Release the spinlock to allow other access. */
-
-          g_cpu_irqset &= ~(1 << this_cpu());
-          rtcb->irqcount = 0;
-          spin_unlock(g_cpu_irqlock);
-
-          /* Release any ready-to-run tasks that have collected in
-           * g_pendingtasks if the scheduler is not locked.
-           *
-           * NOTE: This operation has a very high likelihood of causing
-           * this task to be switched out!
+          /* Decrement our count on the lock.  If all CPUs have released,
+           * then unlock the spinlock.
            */

-          if (g_pendingtasks.head != NULL && rtcb->lockcount <= 0)
+          rtcb->irqcount = 0;
+          g_cpu_irqset &= ~(1 << this_cpu());
+
+          /* Have all CPUs released the lock? */
+
+          if (g_cpu_irqset == 0)
             {
-              up_release_pending();
+              /* Unlock the IRQ spinlock */
+
+              spin_unlock(g_cpu_irqlock);
+
+              /* Check if there are pending tasks and that pre-emption is
+               * also enabled.
+               */
+
+              if (g_pendingtasks.head != NULL && !spin_islocked(&g_cpu_schedlock))
+                {
+                  /* Release any ready-to-run tasks that have collected in
+                   * g_pendingtasks if the scheduler is not locked.
+                   *
+                   * NOTE: This operation has a very high likelihood of causing
+                   * this task to be switched out!
+                   */
+
+                  up_release_pending();
+                }
             }
         }
     }
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index 2157e01ea0..a0d1dc822c 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -113,7 +113,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * is now the new active task!
        */

-      ASSERT(rtcb->lockcount == 0 && btcb->flink != NULL);
+      ASSERT(!spin_islocked(&g_cpu_schedlock) && btcb->flink != NULL);

       btcb->task_state = TSTATE_TASK_RUNNING;
       btcb->flink->task_state = TSTATE_TASK_READYTORUN;
@@ -341,7 +341,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        */

       next = (FAR struct tcb_s *)btcb->flink;
-      ASSERT(!rtcb->lockcount && next != NULL);
+      ASSERT(!spin_islocked(&g_cpu_schedlock) && next != NULL);

       if ((next->flags & TCB_FLAG_CPU_ASSIGNED) != 0)
         {
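
Review note: the core of this change is that leave_critical_section() now defers
releasing g_cpu_irqlock until the last CPU clears its bit in g_cpu_irqset, and it
gates up_release_pending() on the global g_cpu_schedlock rather than on the
releasing task's per-task lockcount. Below is a minimal, single-threaded C sketch
of that bookkeeping only, not NuttX code: the helpers cpu_enter()/cpu_leave() and
the stand-in globals are hypothetical, and an atomic_flag stands in for the real
spinlock without attempting real SMP synchronization.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag g_irqlock = ATOMIC_FLAG_INIT; /* stand-in for g_cpu_irqlock */
static unsigned int g_irqset;                    /* stand-in for g_cpu_irqset */
static bool g_schedlocked;                       /* stand-in for spin_islocked(&g_cpu_schedlock) */
static int g_npending = 1;                       /* stand-in for g_pendingtasks */

/* Hypothetical helper: CPU 'cpu' enters the critical section */

static void cpu_enter(int cpu)
{
  if (g_irqset == 0)
    {
      /* First CPU in: take the IRQ spinlock */

      while (atomic_flag_test_and_set(&g_irqlock))
        {
        }
    }

  g_irqset |= (1u << cpu);
}

/* Hypothetical helper: CPU 'cpu' leaves the critical section */

static void cpu_leave(int cpu)
{
  g_irqset &= ~(1u << cpu);

  /* Have all CPUs released the lock? */

  if (g_irqset == 0)
    {
      /* Unlock the IRQ spinlock */

      atomic_flag_clear(&g_irqlock);

      /* Release pending tasks only if pre-emption is also enabled */

      if (g_npending > 0 && !g_schedlocked)
        {
          printf("CPU%d: released %d pending task(s)\n", cpu, g_npending);
          g_npending = 0;
        }
    }
}

int main(void)
{
  cpu_enter(0);
  cpu_enter(1);
  cpu_leave(0); /* g_irqset still nonzero: the lock stays held */
  cpu_leave(1); /* last bit cleared: unlock, then release pending */
  return 0;
}

The same reasoning explains the two ASSERT changes: with multiple CPUs, whether
pre-emption is disabled is a system-wide property reflected by g_cpu_schedlock,
so testing spin_islocked(&g_cpu_schedlock) is the correct check, not the
lockcount of whichever task happens to be running on the current CPU.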