/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/device.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000
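/* TIMER_STOPPED is stored in last_load while the counter is shut off for
 * an indefinite idle period. It lies outside the 24-bit counter range, so
 * it can never be a real LOAD value, and sys_clock_idle_exit() uses it to
 * detect that the timer must be re-enabled.
 */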

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
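/* Illustrative sizing (assumed example values, not taken from Kconfig):
 * with a 48 MHz core clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100,
 * CYC_PER_TICK is 480000, MAX_TICKS is (0x00ffffff / 480000) - 1 = 33 and
 * MAX_CYCLES is about 15.8 million cycles, i.e. roughly a third of a
 * second between forced wrap interrupts in tickless mode.
 */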

/* Minimum cycles in the future to try to program.  Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit.  So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked.  Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))
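/* Example with the assumed numbers above: CYC_PER_TICK/16 = 30000 > 1024,
 * so MIN_DELAY would be 30000 cycles, about 625 us at 48 MHz, which is the
 * shortest LOAD period this driver will ever program.
 */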

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

static struct k_spinlock lock;

static uint32_t last_load;

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
#define cycle_t uint64_t
#else
#define cycle_t uint32_t
#endif

/*
 * This local variable holds the amount of SysTick HW cycles elapsed
 * and it is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the SysTick
 * HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the amount of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bit values on 32-bit systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;

/*
 * This local variable holds the amount of elapsed HW cycles due to
 * SysTick timer wraps ('overflows'). It is used in the calculation in
 * the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;
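/* Informal summary of the bookkeeping above: the cycle count reported to
 * the rest of the kernel at any instant is cycle_count + elapsed(), the
 * ticks still owed to the kernel are (cycle_count - announced_cycles) /
 * CYC_PER_TICK, and overflow_cyc accumulates whole last_load periods
 * until it is folded into cycle_count.
 */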

/* This internal function calculates the amount of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter has been
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, that
 * holds the amount of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must be clearing the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - ISR must be clearing the 'overflow_cyc' counter.
 * - no more than one counter-wrap has occurred between
 *     - the timer reset or the last time the function was called
 *     - and until the current call of the function is completed.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps at zero automatically,
	 * setting the COUNTFLAG field of the CTRL register when it
	 * does.  Reading the control register automatically clears
	 * that field.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}

/* Called out of platform assembly, not hooked via IRQ_CONNECT... */
void sys_clock_isr(void *arg)
{
	ARG_UNUSED(arg);
	uint32_t dcycles;
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the amount of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

	if (TICKLESS) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after sys_clock_set_timeout()
		 * returns; in that case we shall not increment the cycle_count
		 * because the value has already been updated before the LOAD
		 * re-program.
		 *
		 * We can assess if this is the case by inspecting COUNTFLAG.
		 */

		dcycles = cycle_count - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		sys_clock_announce(1);
	}
	z_arm_int_exit();
}
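
/* Kernel hook: program the next timeout. With CONFIG_TICKLESS_KERNEL the
 * SysTick LOAD value is recomputed here; otherwise the periodic tick keeps
 * running and the only action taken is stopping the counter when the
 * kernel is allowed to idle indefinitely.
 */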
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second.  If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires.  Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
		delay -= unannounced;
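		/* Worked example with illustrative numbers (CYC_PER_TICK of
		 * 480000, unannounced = 100000, ticks = 2 after clamping):
		 * delay = 960000, then 1060000 after adding unannounced,
		 * rounds up to 1440000, and ends up as 1340000 once
		 * unannounced is subtracted again -- so the interrupt lands
		 * exactly 3 ticks after the last announced tick boundary.
		 */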
		delay = MAX(delay, MIN_DELAY);

		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add elapsed cycles while computing the new load to cycle_count.
	 *
	 * Note that comparing val1 and val2 is normally not good enough to
	 * guess if the counter wrapped during this interval. Indeed if val1 is
	 * close to LOAD, then there is little chance of catching val2 between
	 * val1 and LOAD after a wrap. COUNTFLAG should be checked in addition.
	 * But since the load computation is faster than MIN_DELAY, we
	 * don't need to worry about this case.
	 */
	if (val1 < val2) {
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
	k_spin_unlock(&lock, key);
#endif
}
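
/* Return the number of whole ticks elapsed since the last call to
 * sys_clock_announce(). Always 0 in non-tickless builds, where every
 * tick is announced from the ISR.
 */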
uint32_t sys_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t unannounced = cycle_count - announced_cycles;
	uint32_t cyc = elapsed() + unannounced;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}
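
/* Return the current 32-bit cycle count: the cycles already folded into
 * cycle_count plus whatever has elapsed on the hardware since.
 */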
uint32_t sys_clock_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = cycle_count;

	ret += elapsed();
	k_spin_unlock(&lock, key);
	return ret;
}
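
/* 64-bit variant of the cycle counter read; only built when cycle_count
 * itself is kept as a 64-bit value.
 */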
#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
uint64_t sys_clock_cycle_get_64(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t ret = cycle_count + elapsed();

	k_spin_unlock(&lock, key);
	return ret;
}
#endif
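
/* Called when the kernel leaves idle: if the counter was stopped for an
 * indefinite idle period (last_load == TIMER_STOPPED), re-enable it.
 */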
void sys_clock_idle_exit(void)
{
	if (last_load == TIMER_STOPPED) {
		SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
	}
}
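
/* Stop the SysTick counter entirely. */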
void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}
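
/* One-time driver init: set the SysTick interrupt priority, program a
 * one-tick period and start the counter from the CPU clock with its
 * interrupt enabled.
 */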
static int sys_clock_driver_init(void)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK - 1;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);