SMP: Enforce this rule: Tasks which are normally restored when sched_unlock() is called must remain pending (1) if we are in a critical section, i.e., g_cpu_irqlock is locked, or (2) other CPUs still have pre-emption disabled, i.e., g_cpu_schedlock is locked. In those cases, the release of the pending tasks must be deferred until those conditions are met.
This commit is contained in:
parent
d45a81d643
commit
849a5dc2a9
|
@ -13466,5 +13466,13 @@
|
||||||
(2016-12-24).
|
(2016-12-24).
|
||||||
* STM32 F4: Allow dma in 1 bit mode in STM32F4xxx. From David Sidrane
|
* STM32 F4: Allow dma in 1 bit mode in STM32F4xxx. From David Sidrane
|
||||||
(2016-12-24).
|
(2016-12-24).
|
||||||
|
* termios.h: Fix CRTSCTS define to include input and output flow.
|
||||||
|
From Lorenz Meier (2016-12-26).
|
||||||
|
* SMP: Enforce this rule: Tasks which are normally restored when
|
||||||
|
sched_unlock() is called must remain pending (1) if we are in a
|
||||||
|
critical section, i.e., g_cpu_irqlock is locked, or (2) other CPUs
|
||||||
|
still have pre-emption disabled, i.e., g_cpu_schedlock is locked. In
|
||||||
|
those cases, the release of the pending tasks must be deferred until
|
||||||
|
those conditions are met (2016-12-26).
|
||||||
|
|
||||||
7.20 2017-xx-xx Gregory Nutt <gnutt@nuttx.org>
|
7.20 2017-xx-xx Gregory Nutt <gnutt@nuttx.org>
|
||||||
|
|
|
@ -513,12 +513,9 @@ void leave_critical_section(irqstate_t flags)
|
||||||
if (!spin_islocked(&g_cpu_irqlock))
|
if (!spin_islocked(&g_cpu_irqlock))
|
||||||
{
|
{
|
||||||
/* Check if there are pending tasks and that pre-emption
|
/* Check if there are pending tasks and that pre-emption
|
||||||
* is also enabled.
|
* is also enabled. This is necessary because we may have
|
||||||
*
|
* deferred the up_release_pending() call in sched_unlock()
|
||||||
* REVISIT: Is there an issue here? up_release_pending()
|
* because we were within a critical section then.
|
||||||
* must be called from within a critical section but here
|
|
||||||
* we have just left the critical section. At least we
|
|
||||||
* still have interrupts disabled on this CPU.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (g_pendingtasks.head != NULL &&
|
if (g_pendingtasks.head != NULL &&
|
||||||
|
@ -529,9 +526,6 @@ void leave_critical_section(irqstate_t flags)
|
||||||
*
|
*
|
||||||
* NOTE: This operation has a very high likelihood of
|
* NOTE: This operation has a very high likelihood of
|
||||||
* causing this task to be switched out!
|
* causing this task to be switched out!
|
||||||
*
|
|
||||||
* REVISIT: Should this not be done while we are in the
|
|
||||||
* critical section.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
up_release_pending();
|
up_release_pending();
|
||||||
|
|
|
@ -44,6 +44,7 @@
|
||||||
#include <nuttx/arch.h>
|
#include <nuttx/arch.h>
|
||||||
#include <nuttx/sched_note.h>
|
#include <nuttx/sched_note.h>
|
||||||
|
|
||||||
|
#include "irq/irq.h"
|
||||||
#include "sched/sched.h"
|
#include "sched/sched.h"
|
||||||
|
|
||||||
/****************************************************************************
|
/****************************************************************************
|
||||||
|
@ -113,16 +114,30 @@ int sched_unlock(void)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Release any ready-to-run tasks that have collected in
|
/* Release any ready-to-run tasks that have collected in
|
||||||
* g_pendingtasks. In the SMP case, the scheduler remains
|
* g_pendingtasks.
|
||||||
* locked if interrupts are disabled.
|
|
||||||
*
|
*
|
||||||
* NOTE: This operation has a very high likelihood of causing
|
* NOTE: This operation has a very high likelihood of causing
|
||||||
* this task to be switched out!
|
* this task to be switched out!
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
if (!spin_islocked(&g_cpu_schedlock) && g_pendingtasks.head != NULL)
|
/* In the SMP case, the tasks remain pending (1) if we are
|
||||||
|
* in a critical section, i.e., g_cpu_irqlock is locked, or (2)
|
||||||
|
* other CPUs still have pre-emption disabled, i.e.,
|
||||||
|
* g_cpu_schedlock is locked. In those cases, the release of the
|
||||||
|
* pending tasks must be deferred until those conditions are met.
|
||||||
|
*/
|
||||||
|
|
||||||
|
if (!spin_islocked(&g_cpu_schedlock) &&
|
||||||
|
!spin_islocked(&g_cpu_irqlock) &&
|
||||||
|
g_pendingtasks.head != NULL)
|
||||||
#else
|
#else
|
||||||
|
/* In the single CPU case, decrementing irqcount to zero is
|
||||||
|
* sufficient to release the pending tasks. Further, in that
|
||||||
|
* configuration, critical sections and pre-emption can operate
|
||||||
|
* fully independently.
|
||||||
|
*/
|
||||||
|
|
||||||
if (g_pendingtasks.head != NULL)
|
if (g_pendingtasks.head != NULL)
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue