/*
 * Copyright (c) 2020 Lexmark International, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
|
|
|
|
|
|
|
#include <tracing_user.h>
|
|
|
|
#include <kernel_internal.h>
|
2022-05-06 17:12:04 +08:00
|
|
|
#include <zephyr/kernel_structs.h>
|
2020-09-01 22:12:09 +08:00
|
|
|
#include <ksched.h>
|
|
|
|
|
/* Per-CPU ISR nesting depth, maintained by sys_trace_isr_enter()/sys_trace_isr_exit().
 * All reads and writes happen with interrupts locked (irq_lock()), so no
 * further synchronization is needed.
 */
static int nested_interrupts[CONFIG_MP_MAX_NUM_CPUS];
|
2020-09-01 22:12:09 +08:00
|
|
|
|
/*
 * Weak no-op defaults for the user tracing hooks. An application provides a
 * strong definition for any hook it wants to receive; hooks left unimplemented
 * cost only a call to an empty function.
 */
void __weak sys_trace_thread_create_user(struct k_thread *thread) {}
void __weak sys_trace_thread_abort_user(struct k_thread *thread) {}
void __weak sys_trace_thread_suspend_user(struct k_thread *thread) {}
void __weak sys_trace_thread_resume_user(struct k_thread *thread) {}
void __weak sys_trace_thread_name_set_user(struct k_thread *thread) {}
void __weak sys_trace_thread_switched_in_user(struct k_thread *thread) {}
void __weak sys_trace_thread_switched_out_user(struct k_thread *thread) {}
void __weak sys_trace_thread_info_user(struct k_thread *thread) {}
void __weak sys_trace_thread_sched_ready_user(struct k_thread *thread) {}
void __weak sys_trace_thread_pend_user(struct k_thread *thread) {}
void __weak sys_trace_thread_priority_set_user(struct k_thread *thread, int prio) {}
|
2022-01-13 10:55:15 +08:00
|
|
|
void __weak sys_trace_isr_enter_user(int nested_interrupts) {}
|
|
|
|
void __weak sys_trace_isr_exit_user(int nested_interrupts) {}
|
2020-09-01 22:12:09 +08:00
|
|
|
/* Weak no-op default for the idle-entry tracing hook. */
void __weak sys_trace_idle_user(void) {}
|
|
|
|
|
2022-04-26 05:39:28 +08:00
|
|
|
/* Kernel tracing entry point: forward thread-creation events to the
 * (possibly application-overridden) user hook.
 */
void sys_trace_thread_create(struct k_thread *thread)
{
	sys_trace_thread_create_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-abort events to the user hook. */
void sys_trace_thread_abort(struct k_thread *thread)
{
	sys_trace_thread_abort_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-suspend events to the user hook. */
void sys_trace_thread_suspend(struct k_thread *thread)
{
	sys_trace_thread_suspend_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-resume events to the user hook. */
void sys_trace_thread_resume(struct k_thread *thread)
{
	sys_trace_thread_resume_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-name-set events to the user hook. */
void sys_trace_thread_name_set(struct k_thread *thread)
{
	sys_trace_thread_name_set_user(thread);
}
|
|
|
|
|
2020-09-01 22:12:09 +08:00
|
|
|
/* Kernel tracing entry point: report that a thread was switched in.
 * Runs with interrupts locked so the current-thread pointer and the
 * per-CPU nesting counter cannot change underneath us.
 */
void sys_trace_k_thread_switched_in(void)
{
	/* FIXME: Limitation of the current x86 EFI console implementation. */
#if !defined(CONFIG_X86_EFI_CONSOLE) && !defined(CONFIG_UART_CONSOLE)
	unsigned int key = irq_lock();

	/* A context switch must not be reported from inside an ISR. */
	__ASSERT_NO_MSG(nested_interrupts[_current_cpu->id] == 0);
	/* Can't use k_current_get as thread base and z_tls_current might be incorrect */
	sys_trace_thread_switched_in_user(z_current_get());

	irq_unlock(key);
#endif
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: report that the current thread is being
 * switched out. Mirrors sys_trace_k_thread_switched_in(); runs with
 * interrupts locked for the same reasons.
 */
void sys_trace_k_thread_switched_out(void)
{
#if !defined(CONFIG_X86_EFI_CONSOLE) && !defined(CONFIG_UART_CONSOLE)
	unsigned int key = irq_lock();

	/* A context switch must not be reported from inside an ISR. */
	__ASSERT_NO_MSG(nested_interrupts[_current_cpu->id] == 0);
	/* Can't use k_current_get as thread base and z_tls_current might be incorrect */
	sys_trace_thread_switched_out_user(z_current_get());

	irq_unlock(key);
#endif
}
|
|
|
|
|
2022-04-26 05:39:28 +08:00
|
|
|
/* Kernel tracing entry point: forward thread-info events to the user hook. */
void sys_trace_thread_info(struct k_thread *thread)
{
	sys_trace_thread_info_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward priority-set events (with the new
 * priority) to the user hook.
 */
void sys_trace_thread_sched_priority_set(struct k_thread *thread, int prio)
{
	sys_trace_thread_priority_set_user(thread, prio);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-ready events to the user hook. */
void sys_trace_thread_sched_ready(struct k_thread *thread)
{
	sys_trace_thread_sched_ready_user(thread);
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward thread-pend events to the user hook. */
void sys_trace_thread_pend(struct k_thread *thread)
{
	sys_trace_thread_pend_user(thread);
}
|
|
|
|
|
2020-09-01 22:12:09 +08:00
|
|
|
void sys_trace_isr_enter(void)
|
|
|
|
{
|
2022-07-13 23:11:05 +08:00
|
|
|
unsigned int key = irq_lock();
|
2022-01-13 10:55:15 +08:00
|
|
|
_cpu_t *curr_cpu = _current_cpu;
|
|
|
|
|
|
|
|
sys_trace_isr_enter_user(nested_interrupts[curr_cpu->id]);
|
|
|
|
nested_interrupts[curr_cpu->id]++;
|
2020-09-01 22:12:09 +08:00
|
|
|
|
|
|
|
irq_unlock(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
void sys_trace_isr_exit(void)
|
|
|
|
{
|
2022-07-13 23:11:05 +08:00
|
|
|
unsigned int key = irq_lock();
|
2022-01-13 10:55:15 +08:00
|
|
|
_cpu_t *curr_cpu = _current_cpu;
|
|
|
|
|
|
|
|
nested_interrupts[curr_cpu->id]--;
|
|
|
|
sys_trace_isr_exit_user(nested_interrupts[curr_cpu->id]);
|
2020-09-01 22:12:09 +08:00
|
|
|
|
|
|
|
irq_unlock(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Kernel tracing entry point: forward idle-entry events to the user hook. */
void sys_trace_idle(void)
{
	sys_trace_idle_user();
}
|