/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <drivers/timer/system_timer.h>
#include <wait_q.h>
#include <power/power.h>
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>

LOG_MODULE_DECLARE(os);

#ifdef CONFIG_TICKLESS_IDLE_THRESH
#define IDLE_THRESH CONFIG_TICKLESS_IDLE_THRESH
#else
#define IDLE_THRESH 1
#endif

/* Fallback idle spin loop for SMP platforms without a working IPI */
#if (defined(CONFIG_SMP) && !defined(CONFIG_SCHED_IPI_SUPPORTED))
#define SMP_FALLBACK 1
#else
#define SMP_FALLBACK 0
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
 * Used to allow the _sys_suspend() implementation to control notification
 * of the event that caused exit from kernel idling after PM operations.
 */
unsigned char sys_pm_idle_exit_notify;

/* LCOV_EXCL_START
 * These are almost certainly overridden and in any event do nothing
 */
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
void __attribute__((weak)) _sys_resume(void)
{
}
#endif

#if defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES)
void __attribute__((weak)) _sys_resume_from_deep_sleep(void)
{
}
#endif
/* LCOV_EXCL_STOP */

#endif /* CONFIG_SYS_POWER_MANAGEMENT */
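
/*
 * Illustrative only: an SoC or board layer normally overrides the weak
 * stubs above with strong definitions. A minimal sketch, where
 * soc_clock_restore() is a hypothetical helper:
 *
 *	void _sys_resume(void)
 *	{
 *		soc_clock_restore();	// hypothetical: undo low-power setup
 *	}
 */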

/**
 *
 * @brief Indicate that kernel is idling in tickless mode
 *
 * Sets the kernel data structure idle field to either a positive value or
 * K_FOREVER.
 *
 * @param ticks the number of ticks to idle
 *
 * @return N/A
 */
#if !SMP_FALLBACK
static void set_kernel_idle_time_in_ticks(int32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
	_kernel.idle = ticks;
#endif
}

static void sys_power_save_idle(void)
{
	int32_t ticks = z_get_next_timeout_expiry();

	/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
	 * that the system should not enter a tickless idle for
	 * periods less than that. This seems... silly, given that it
	 * saves no power and does not improve latency. But it's an
	 * API we need to honor...
	 */
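	/* For example, with CONFIG_TICKLESS_IDLE_THRESH=4, a computed
	 * timeout of 2 ticks is clamped to 1 below, so the next regular
	 * tick fires rather than entering a 2-tick tickless idle.
	 */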
#ifdef CONFIG_SYS_CLOCK_EXISTS
	z_set_timeout_expiry((ticks < IDLE_THRESH) ? 1 : ticks, true);
#endif

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_SLEEP_STATES) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES))

	sys_pm_idle_exit_notify = 1U;

	/*
	 * Call the suspend hook function of the SoC interface to allow
	 * entry into a low power state. The function returns
	 * SYS_POWER_STATE_ACTIVE if a low power state was not entered, in
	 * which case the kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own
	 * idle processing in those cases, i.e. it skips k_cpu_idle(). The
	 * kernel's idle processing re-enables interrupts, which is essential
	 * for the kernel's scheduling logic.
	 */
	if (_sys_suspend(ticks) == SYS_POWER_STATE_ACTIVE) {
		sys_pm_idle_exit_notify = 0U;
		k_cpu_idle();
	}
#else
	k_cpu_idle();
#endif
}
#endif /* !SMP_FALLBACK */
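
/*
 * Illustrative sketch of the SoC-side contract described in
 * sys_power_save_idle() above. This is not part of this file: the
 * soc_enter_low_power() helper, the 100-tick threshold, and the choice
 * of SYS_POWER_STATE_SLEEP_1 are all hypothetical and depend on what
 * the SoC actually supports.
 *
 *	enum power_states _sys_suspend(int32_t ticks)
 *	{
 *		// Entered with interrupts disabled.
 *		if (ticks != K_FOREVER && ticks < 100) {
 *			// Too short to be worth it: the kernel will do
 *			// normal idle processing via k_cpu_idle().
 *			return SYS_POWER_STATE_ACTIVE;
 *		}
 *		soc_enter_low_power(ticks);	// hypothetical SoC routine
 *		// Re-enable interrupts (arch-specific) before returning,
 *		// since the kernel skips k_cpu_idle() in this case.
 *		return SYS_POWER_STATE_SLEEP_1;
 *	}
 */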

void z_sys_power_save_idle_exit(int32_t ticks)
{
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
	/* Some CPU low power states require notification at the ISR
	 * level to allow any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts. This can
	 * be disabled by calling _sys_pm_idle_exit_notification_disable().
	 * Alternatively it can simply be ignored if not required.
	 */
	if (sys_pm_idle_exit_notify) {
		_sys_resume();
	}
#endif

	z_clock_idle_exit();
}
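
/*
 * Per the comment in z_sys_power_save_idle_exit() above, an SoC's
 * _sys_suspend() implementation that has no use for the _sys_resume()
 * notification can suppress it by calling:
 *
 *	_sys_pm_idle_exit_notification_disable();
 */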
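
/* If the idle thread runs at a cooperative (negative) priority it is
 * never preempted, so it must explicitly yield after each pass so that
 * other ready threads get a chance to run.
 */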
#if K_IDLE_PRIO < 0
#define IDLE_YIELD_IF_COOP() k_yield()
#else
#define IDLE_YIELD_IF_COOP() do { } while (false)
#endif

void idle(void *p1, void *unused2, void *unused3)
{
	struct _cpu *cpu = p1;

	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	/* record timestamp when idling begins */

	extern uint32_t z_timestamp_idle;

	z_timestamp_idle = k_cycle_get_32();
#endif

	while (true) {
		/* Lock interrupts to atomically check if to_abort is
		 * non-NULL, and if so clear it
		 */
		int key = arch_irq_lock();
		struct k_thread *to_abort = cpu->pending_abort;

		if (to_abort) {
			cpu->pending_abort = NULL;
			arch_irq_unlock(key);

			/* Safe to unlock interrupts here. We've atomically
			 * checked and stashed cpu->pending_abort into a
			 * stack variable. If we get preempted here and
			 * another thread aborts, cpu->pending_abort will get
			 * set again and we'll handle it when the loop
			 * iteration continues below.
			 */
			LOG_DBG("idle %p aborting thread %p",
				_current, to_abort);

			z_thread_single_abort(to_abort);

			/* We have to invoke the scheduler now. If we got
			 * here, the idle thread preempted everything else
			 * in order to abort the thread, and we now need to
			 * figure out what to do next; it's not necessarily
			 * the case that there are no other runnable threads.
			 */
			z_reschedule_unlocked();
			continue;
		}
		arch_irq_unlock(key);
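		/* On SMP without IPI support, another CPU cannot wake
		 * this one when a thread becomes ready, so instead of
		 * sleeping we poll: busy-wait briefly, then yield.
		 */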
#if SMP_FALLBACK
		k_busy_wait(100);
		k_yield();
#else
		(void)arch_irq_lock();
		sys_power_save_idle();
		IDLE_YIELD_IF_COOP();
#endif
	}
}