/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <kernel_arch_interface.h>
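
/*
 * Spin for the requested number of microseconds without yielding the
 * CPU. One of three mechanisms is selected at build time: an
 * architecture-provided arch_busy_wait(), a loop on the 32-bit system
 * cycle counter, or a crude NOP loop used when no system clock is
 * available.
 */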
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	arch_busy_wait(usec_to_wait);
#elif defined(CONFIG_SYS_CLOCK_EXISTS)
	uint32_t start_cycles = k_cycle_get_32();
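	/* k_us_to_cyc_ceil32() rounds up, so the wait is never shorter
	 * than requested (at the resolution of the cycle counter).
	 */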
	uint32_t cycles_to_wait = k_us_to_cyc_ceil32(usec_to_wait);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	/*
	 * Crude busy loop for the purpose of being able to configure out
	 * system timer support.
	 */
	unsigned int loops_per_usec = CONFIG_BUSYWAIT_CPU_LOOPS_PER_USEC;
	unsigned int loops = loops_per_usec * usec_to_wait;
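
	/* arch_nop() gives each iteration a nonzero, architecture-defined
	 * cost; the resulting delay is only as accurate as the configured
	 * loops-per-microsecond value.
	 */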
	while (loops-- > 0) {
		arch_nop();
	}
#endif

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
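/*
 * Syscall verification handler: busy-waiting takes no pointers or
 * kernel object references, so there is nothing to validate and the
 * call is forwarded directly to the implementation.
 */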
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
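/* Generated syscall marshalling stub for k_busy_wait() */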
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */