/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <xtensa_rtos.h>

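/* TIMER_IRQ resolves to the CCOMPARE interrupt of the Xtensa internal
 * timer selected by CONFIG_XTENSA_TIMER_ID (i.e. XCHAL_TIMERn_INTERRUPT).
 * CYC_PER_TICK is the number of CCOUNT cycles per kernel tick, MAX_TICKS
 * the largest tick count that still fits in the 32 bit cycle counter, and
 * MIN_DELAY the minimum number of cycles in the future a new CCOMPARE
 * value may be placed so the hardware cannot miss the match.
 */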
#define TIMER_IRQ UTIL_CAT(XCHAL_TIMER, \
			   UTIL_CAT(CONFIG_XTENSA_TIMER_ID, _INTERRUPT))

#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((0xffffffffu - CYC_PER_TICK) / CYC_PER_TICK)
#define MIN_DELAY 1000

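/* last_count holds the CCOUNT value at the most recently announced tick
 * boundary; lock serializes access to it between the ISR and the
 * timeout/elapsed APIs.
 */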
static struct k_spinlock lock;
static unsigned int last_count;

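/* Program the CCOMPARE register of the configured timer, arming the next
 * timer interrupt for when CCOUNT reaches val.
 */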
static void set_ccompare(u32_t val)
{
	__asm__ volatile ("wsr.CCOMPARE" STRINGIFY(CONFIG_XTENSA_TIMER_ID) " %0"
			  :: "r"(val));
}

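/* Read the free-running CCOUNT cycle counter. */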
static u32_t ccount(void)
{
	u32_t val;

	__asm__ volatile ("rsr.CCOUNT %0" : "=r"(val));
	return val;
}

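/* Timer interrupt handler: compute how many whole ticks have elapsed
 * since last_count, advance last_count by that amount, re-arm CCOMPARE
 * one tick out when running in strictly ticked mode (or under the QEMU
 * workaround), and announce the elapsed ticks to the kernel.
 */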
static void ccompare_isr(void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t curr = ccount();
	u32_t dticks = (curr - last_count) / CYC_PER_TICK;

	last_count += dticks * CYC_PER_TICK;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL) ||
	    IS_ENABLED(CONFIG_QEMU_TICKLESS_WORKAROUND)) {
		u32_t next = last_count + CYC_PER_TICK;

		if ((s32_t)(next - curr) < MIN_DELAY) {
			next += CYC_PER_TICK;
		}
		set_ccompare(next);
	}

	k_spin_unlock(&lock, key);
	z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}

/* The legacy Xtensa platform code handles the timer interrupt via a
 * special path and must find it via this name.  Remove once ASM2 is
 * pervasive.
 */
#ifndef CONFIG_XTENSA_ASM2
void timer_int_handler(void *arg)
{
	ccompare_isr(arg);
}
#endif

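/* Hook the CCOMPARE interrupt, arm the first tick, and enable the IRQ. */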
int z_clock_driver_init(struct device *device)
{
	IRQ_CONNECT(TIMER_IRQ, 0, ccompare_isr, 0, 0);
	set_ccompare(ccount() + CYC_PER_TICK);
	irq_enable(TIMER_IRQ);
	return 0;
}

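/* Program the next timer interrupt 'ticks' ticks past the last announced
 * tick boundary.  Only compiled in for tickless operation (without the
 * QEMU workaround); otherwise the ISR re-arms itself every tick and this
 * call is a no-op.
 */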
void z_clock_set_timeout(s32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
	ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
	ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t curr = ccount(), cyc;

	/* Round up to next tick boundary */
	cyc = ticks * CYC_PER_TICK + (curr - last_count) + (CYC_PER_TICK - 1);
	cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK;
	cyc += last_count;

	if ((cyc - curr) < MIN_DELAY) {
		cyc += CYC_PER_TICK;
	}

	set_ccompare(cyc);
	k_spin_unlock(&lock, key);
#endif
}

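/* Report how many whole ticks have elapsed since the last announcement.
 * Always zero in ticked mode, where the ISR announces every tick itself.
 */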
u32_t z_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = (ccount() - last_count) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return ret;
}

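/* The kernel's 32 bit cycle counter is CCOUNT itself. */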
u32_t z_timer_cycle_get_32(void)
{
	return ccount();
}

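/* Per-CPU timer bring-up for SMP: arm this core's CCOMPARE and enable
 * its timer interrupt.
 */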
#ifdef CONFIG_SMP
void smp_timer_init(void)
{
	set_ccompare(ccount() + CYC_PER_TICK);
	irq_enable(TIMER_IRQ);
}
#endif