systemview: add native support using tracing hooks

Add the needed hooks as a tracing subsystem that can be enabled in any
application.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif, 2018-04-05 21:06:33 -04:00
commit 483910ab4b (parent a2248782a2)
32 changed files with 296 additions and 38 deletions
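To try the new subsystem, an application only needs to turn on tracing plus a backend in its project configuration. A minimal prj.conf sketch, assuming a board whose Kconfig sets HAS_SEGGER_RTT:

# enable the kernel tracing hooks and the SystemView backend
CONFIG_TRACING=y
CONFIG_SEGGER_SYSTEMVIEW=y

Since CONFIG_SEGGER_SYSTEMVIEW selects TRACING (along with THREAD_MONITOR and THREAD_STACK_INFO, see the Kconfig hunk below), the explicit CONFIG_TRACING=y line is redundant but harmless.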

View File

@@ -37,7 +37,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_TRACING
push_s blink
-jl sys_trace_idle
+jl z_sys_trace_idle
pop_s blink
#endif
@@ -58,7 +58,7 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push_s blink
-jl sys_trace_idle
+jl z_sys_trace_idle
pop_s blink
#endif

View File

@@ -333,13 +333,13 @@ rirq_path:
#endif
#if defined(CONFIG_TRACING)
-GTEXT(sys_trace_isr_enter)
+GTEXT(z_sys_trace_isr_enter)
.macro log_interrupt_k_event
clri r0 /* do not interrupt event logger operations */
push_s r0
push_s blink
-jl sys_trace_isr_enter
+jl z_sys_trace_isr_enter
pop_s blink
pop_s r0
seti r0

View File

@@ -115,7 +115,7 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_TRACING
push {lr}
-bl sys_trace_idle
+bl z_sys_trace_idle
pop {r0}
mov lr, r0
#endif
@@ -159,7 +159,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push {lr}
-bl sys_trace_idle
+bl z_sys_trace_idle
pop {r1}
mov lr, r1
#endif

View File

@@ -179,7 +179,7 @@ void _arch_isr_direct_pm(void)
void _arch_isr_direct_header(void)
{
-sys_trace_isr_enter();
+z_sys_trace_isr_enter();
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)

View File

@@ -48,7 +48,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
#endif
#ifdef CONFIG_TRACING
-bl sys_trace_isr_enter
+bl z_sys_trace_isr_enter
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT

View File

@@ -46,7 +46,7 @@ SECTION_FUNC(TEXT, __pendsv)
#ifdef CONFIG_TRACING
/* Register the context switch */
push {lr}
-bl sys_trace_thread_switched_in
+bl z_sys_trace_thread_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0}
mov lr, r0

View File

@@ -86,7 +86,7 @@ void _enter_irq(u32_t ipending)
while (ipending) {
struct _isr_table_entry *ite;
-sys_trace_isr_enter();
+z_sys_trace_isr_enter();
index = find_lsb_set(ipending) - 1;
ipending &= ~(1 << index);

View File

@@ -13,7 +13,7 @@ GTEXT(__swap)
GTEXT(_thread_entry_wrapper)
/* imports */
-GTEXT(sys_trace_thread_switched_in)
+GTEXT(z_sys_trace_thread_switched_in)
GTEXT(_k_neg_eagain)
/* unsigned int __swap(unsigned int key)
@@ -85,7 +85,7 @@ SECTION_FUNC(exception.other, __swap)
stw r4, _thread_offset_to_retval(r11)
#if CONFIG_TRACING
-call sys_trace_thread_switched_in
+call z_sys_trace_thread_switched_in
/* restore caller-saved r10 */
movhi r10, %hi(_kernel)
ori r10, r10, %lo(_kernel)

View File

@@ -35,7 +35,7 @@
*/
void k_cpu_idle(void)
{
-sys_trace_idle();
+z_sys_trace_idle();
posix_irq_full_unlock();
posix_halt_cpu();
}
@@ -62,6 +62,6 @@ void k_cpu_idle(void)
void k_cpu_atomic_idle(unsigned int imask)
{
-sys_trace_idle();
+z_sys_trace_idle();
posix_atomic_halt_cpu(imask);
}

View File

@@ -50,7 +50,7 @@ unsigned int __swap(unsigned int key)
_kernel.current->callee_saved.retval = -EAGAIN;
/* retval may be modified with a call to _set_thread_return_value() */
-sys_trace_thread_switched_in();
+z_sys_trace_thread_switched_in();
posix_thread_status_t *ready_thread_ptr =
(posix_thread_status_t *)

View File

@@ -22,8 +22,8 @@ GTEXT(_is_next_thread_current)
GTEXT(_get_next_ready_thread)
#ifdef CONFIG_TRACING
-GTEXT(sys_trace_thread_switched_in)
-GTEXT(sys_trace_isr_enter)
+GTEXT(z_sys_trace_thread_switched_in)
+GTEXT(z_sys_trace_isr_enter)
#endif
#ifdef CONFIG_IRQ_OFFLOAD
@@ -225,7 +225,7 @@ on_irq_stack:
call_irq:
#ifdef CONFIG_TRACING
-call sys_trace_isr_enter
+call z_sys_trace_isr_enter
#endif
/* Get IRQ causing interrupt */
@@ -300,7 +300,7 @@ reschedule:
call _update_time_slice_before_swap
#endif
#if CONFIG_TRACING
-call sys_trace_thread_switched_in
+call z_sys_trace_thread_switched_in
#endif
/* Get reference to _kernel */
la t0, _kernel

View File

@@ -12,7 +12,7 @@
static ALWAYS_INLINE void pulpino_idle(unsigned int key)
{
-sys_trace_idle();
+z_sys_trace_idle();
/* unlock interrupts */
irq_unlock(key);

View File

@@ -13,7 +13,7 @@
static ALWAYS_INLINE void riscv_idle(unsigned int key)
{
-sys_trace_idle();
+z_sys_trace_idle();
/* unlock interrupts */
irq_unlock(key);

View File

@@ -45,7 +45,7 @@ extern u64_t __idle_time_stamp; /* timestamp when CPU went idle */
void k_cpu_idle(void)
{
_int_latency_stop();
-sys_trace_idle();
+z_sys_trace_idle();
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
__idle_time_stamp = (u64_t)k_cycle_get_32();
#endif
@@ -76,7 +76,7 @@ void k_cpu_idle(void)
void k_cpu_atomic_idle(unsigned int imask)
{
_int_latency_stop();
-sys_trace_idle();
+z_sys_trace_idle();
__asm__ volatile (
"sti\n\t"

View File

@@ -152,7 +152,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
call _int_latency_start
#endif
-call sys_trace_isr_enter
+call z_sys_trace_isr_enter
popl %edx
popl %eax

View File

@@ -62,7 +62,7 @@ void _arch_irq_direct_pm(void)
void _arch_isr_direct_header(void)
{
_int_latency_start();
-sys_trace_isr_enter();
+z_sys_trace_isr_enter();
/* We're not going to unlock IRQs, but we still need to increment this
* so that _is_in_isr() works

View File

@@ -144,7 +144,7 @@ SECTION_FUNC(TEXT, __swap)
#ifdef CONFIG_TRACING
/* Register the context switch */
push %edx
-call sys_trace_thread_switched_in
+call z_sys_trace_thread_switched_in
pop %edx
#endif
movl _kernel_offset_to_ready_q_cache(%edi), %eax

View File

@@ -16,7 +16,7 @@
*/
void k_cpu_idle(void)
{
-sys_trace_idle();
+z_sys_trace_idle();
__asm__ volatile ("waiti 0");
}
/*
@@ -28,7 +28,7 @@ void k_cpu_idle(void)
*/
void k_cpu_atomic_idle(unsigned int key)
{
-sys_trace_idle();
+z_sys_trace_idle();
__asm__ volatile ("waiti 0\n\t"
"wsr.ps %0\n\t"
"rsync" :: "a"(key));

View File

@@ -78,9 +78,9 @@ __swap:
#ifdef CONFIG_TRACING
/* Register the context switch */
#ifdef __XTENSA_CALL0_ABI__
-call0 sys_trace_thread_switched_in
+call0 z_sys_trace_thread_switched_in
#else
-call4 sys_trace_thread_switched_in
+call4 z_sys_trace_thread_switched_in
#endif
#endif
/* _thread := _kernel.ready_q.cache */

View File

@@ -172,9 +172,9 @@
* We just saved all registers.
*/
#ifdef __XTENSA_CALL0_ABI__
-call0 sys_trace_isr_enter
+call0 z_sys_trace_isr_enter
#else
-call4 sys_trace_isr_enter
+call4 z_sys_trace_isr_enter
#endif
#endif

View File

@@ -12,6 +12,9 @@ if HAS_SEGGER_RTT
config SEGGER_SYSTEMVIEW
bool "Segger SystemView support"
select RTT_CONSOLE
+select THREAD_MONITOR
+select THREAD_STACK_INFO
+select TRACING
config SEGGER_RTT_MAX_NUM_UP_BUFFERS
int "Maximum number of up-buffers"

View File

@@ -116,7 +116,7 @@ extern void _arch_isr_direct_header(void);
extern void _IntExit(void);
#ifdef CONFIG_TRACING
-extern void sys_trace_isr_exit_to_scheduler(void);
+extern void z_sys_trace_isr_exit_to_scheduler(void);
#endif
static inline void _arch_isr_direct_footer(int maybe_swap)
@@ -124,7 +124,7 @@ static inline void _arch_isr_direct_footer(int maybe_swap)
if (maybe_swap) {
#ifdef CONFIG_TRACING
-sys_trace_isr_exit_to_scheduler();
+z_sys_trace_isr_exit_to_scheduler();
#endif
_IntExit();
}

View File

@@ -25,6 +25,10 @@ void z_sys_trace_isr_exit_to_scheduler(void);
void z_sys_trace_thread_switched_in(void);
#endif
+#ifdef CONFIG_SEGGER_SYSTEMVIEW
+#include "tracing_sysview.h"
+#else
/**
* @brief Called before a thread has been selected to run
*/
@@ -119,3 +123,4 @@ void z_sys_trace_thread_switched_in(void);
#define z_sys_trace_thread_switched_in()
#endif
+#endif
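The split between the sys_trace_*() macros and the z_sys_trace_*() functions is the core of the hook mechanism: the macros are backend-provided (or empty when no backend is selected), while the z_ prefixed names are real linkable symbols that assembly code can branch to. A rough sketch of the pattern, mirroring what sysview.c further down does for the ISR-enter hook:

/* With CONFIG_SEGGER_SYSTEMVIEW, tracing_sysview.h defines the hook, e.g.
 * #define sys_trace_isr_enter() SEGGER_SYSVIEW_RecordEnterISR()
 * and sysview.c provides the callable wrapper:
 */
void z_sys_trace_isr_enter(void)
{
	sys_trace_isr_enter(); /* expands to the SystemView call */
}

Architecture code built with CONFIG_TRACING, such as the ARM _isr_wrapper above, can then simply do "bl z_sys_trace_isr_enter" without knowing which backend provides the macro.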

View File

@@ -32,10 +32,6 @@ extern void _check_stack_sentinel(void);
void _smp_reacquire_global_lock(struct k_thread *thread);
void _smp_release_global_lock(struct k_thread *thread);
-#ifdef CONFIG_TRACING
-extern void sys_trace_thread_switched_out(void);
-#endif
/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

View File

@@ -2,3 +2,5 @@ zephyr_sources_ifdef(
CONFIG_OPENOCD_SUPPORT
openocd.c
)
+add_subdirectory(tracing)

View File

@@ -15,6 +15,11 @@ config DEBUG
only disables optimization, more debugging variants can be selected
from here to allow more debugging.
+config TRACING
+bool "Enabling Tracing"
+help
+Enable system tracing. This requires a backend such as SEGGER
+Systemview to be enabled as well.
config ASAN
bool "Build with address sanitizer"
depends on ARCH_POSIX

View File

@@ -0,0 +1,7 @@
zephyr_include_directories_ifdef(CONFIG_TRACING include)
zephyr_sources_ifdef(
CONFIG_SEGGER_SYSTEMVIEW
sysview_config.c
sysview.c
)

View File

@@ -0,0 +1,15 @@
/*
* Copyright (c) 2018 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef SYSVIEW_ZEPHYR_H
#define SYSVIEW_ZEPHYR_H
#include <systemview/SEGGER_SYSVIEW.h>
/* Services provided to SYSVIEW by Zephyr */
extern const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI;
#endif

View File

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2018 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _TRACE_SYSVIEW_H
#define _TRACE_SYSVIEW_H
#include <kernel.h>
#include <kernel_structs.h>
#include <init.h>
#include <systemview/SEGGER_SYSVIEW.h>
#include <systemview/Global.h>
#include "SEGGER_SYSVIEW_Zephyr.h"
#ifndef CONFIG_SMP
extern k_tid_t const _idle_thread;
#endif
static inline int is_idle_thread(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == _idle_thread;
#endif
}
static inline void _sys_trace_thread_switched_in(void)
{
struct k_thread *thread;
thread = k_current_get();
if (is_idle_thread(thread)) {
SEGGER_SYSVIEW_OnIdle();
} else {
SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
}
}
#define sys_trace_thread_switched_in() _sys_trace_thread_switched_in()
#define sys_trace_thread_switched_out() SEGGER_SYSVIEW_OnTaskStopExec()
#define sys_trace_isr_enter() SEGGER_SYSVIEW_RecordEnterISR()
#define sys_trace_isr_exit() SEGGER_SYSVIEW_RecordExitISR()
#define sys_trace_isr_exit_to_scheduler() \
SEGGER_SYSVIEW_RecordExitISRToScheduler()
#define sys_trace_thread_priority_set(thread)
static inline void sys_trace_thread_info(struct k_thread *thread)
{
char name[20];
snprintk(name, sizeof(name), "T%xE%x", (uintptr_t)thread,
(uintptr_t)&thread->entry);
SEGGER_SYSVIEW_TASKINFO Info;
Info.TaskID = (u32_t)(uintptr_t)thread;
Info.sName = name;
Info.Prio = thread->base.prio;
Info.StackBase = thread->stack_info.start;
Info.StackSize = thread->stack_info.size;
SEGGER_SYSVIEW_SendTaskInfo(&Info);
}
#define sys_trace_thread_create(thread) \
do { \
SEGGER_SYSVIEW_OnTaskCreate((u32_t)(uintptr_t)thread); \
sys_trace_thread_info(thread); \
} while (0)
#define sys_trace_thread_abort(thread)
#define sys_trace_thread_suspend(thread)
#define sys_trace_thread_resume(thread)
#define sys_trace_thread_ready(thread) \
SEGGER_SYSVIEW_OnTaskStartReady((u32_t)(uintptr_t)thread)
#define sys_trace_thread_pend(thread) \
SEGGER_SYSVIEW_OnTaskStopReady((u32_t)(uintptr_t)thread, 3 << 3)
#define sys_trace_void(id) SEGGER_SYSVIEW_RecordVoid(id)
#define sys_trace_idle() SEGGER_SYSVIEW_OnIdle()
#define sys_trace_end_call(id) SEGGER_SYSVIEW_RecordEndCall(id)
#endif /* _TRACE_SYSVIEW_H */
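Beyond the thread and ISR hooks, the header also exposes sys_trace_void(id) and sys_trace_end_call(id), which map onto SEGGER's generic RecordVoid/RecordEndCall events and pair naturally with the numeric IDs listed in the SystemView description file further down in this commit. A purely illustrative sketch (the SYS_TRACE_ID_MUTEX_LOCK macro and the wrapper function are hypothetical, not part of the commit):

/* 35 matches the "35 MUTEX_LOCK" entry in the description file */
#define SYS_TRACE_ID_MUTEX_LOCK 35

void traced_mutex_lock(struct k_mutex *mutex, s32_t timeout)
{
	sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);     /* marks the call start */
	(void)k_mutex_lock(mutex, timeout);
	sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); /* marks the call end */
}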

View File

@@ -0,0 +1,88 @@
#include <zephyr.h>
#include <kernel_structs.h>
#include <init.h>
#include <systemview/SEGGER_SYSVIEW.h>
#include <systemview/Global.h>
#include "SEGGER_SYSVIEW_Zephyr.h"
static u32_t interrupt;
u32_t sysview_get_timestamp(void)
{
return k_cycle_get_32();
}
u32_t sysview_get_interrupt(void)
{
return interrupt;
}
void z_sys_trace_idle(void)
{
sys_trace_idle();
}
void z_sys_trace_isr_enter(void)
{
sys_trace_isr_enter();
}
void z_sys_trace_isr_exit_to_scheduler(void)
{
sys_trace_isr_exit_to_scheduler();
}
void z_sys_trace_thread_switched_in(void)
{
sys_trace_thread_switched_in();
}
static void send_task_list_cb(void)
{
struct k_thread *thread;
for (thread = _kernel.threads; thread; thread = thread->next_thread) {
char name[20];
if (is_idle_thread(thread)) {
continue;
}
snprintk(name, sizeof(name), "T%xE%x", (uintptr_t)thread,
(uintptr_t)&thread->entry);
SEGGER_SYSVIEW_SendTaskInfo(&(SEGGER_SYSVIEW_TASKINFO) {
.TaskID = (u32_t)(uintptr_t)thread,
.sName = name,
.StackSize = thread->stack_info.size,
.StackBase = thread->stack_info.start,
.Prio = thread->base.prio,
});
}
}
static U64 get_time_cb(void)
{
return (U64)k_cycle_get_32();
}
const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI = {
get_time_cb,
send_task_list_cb,
};
static int sysview_init(struct device *arg)
{
ARG_UNUSED(arg);
SEGGER_SYSVIEW_Conf();
SEGGER_SYSVIEW_Start();
return 0;
}
SYS_INIT(sysview_init, PRE_KERNEL_1, 0);

View File

@@ -0,0 +1,6 @@
33 MUTEX_INIT
34 MUTEX_UNLOCK
35 MUTEX_LOCK
36 SEMAPHORE_INIT
37 SEMAPHORE_GIVE
38 SEMAPHORE_TAKE

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2018 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <systemview/SEGGER_SYSVIEW.h>
#include "SEGGER_SYSVIEW_Zephyr.h"
static void _cbSendSystemDesc(void)
{
SEGGER_SYSVIEW_SendSysDesc("N=ZephyrSysView");
SEGGER_SYSVIEW_SendSysDesc("D=" CONFIG_BOARD " "
CONFIG_SOC_SERIES " " CONFIG_ARCH);
SEGGER_SYSVIEW_SendSysDesc("O=Zephyr");
}
void SEGGER_SYSVIEW_Conf(void)
{
SEGGER_SYSVIEW_Init(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
&SYSVIEW_X_OS_TraceAPI, _cbSendSystemDesc);
#if defined(CONFIG_PHYS_RAM_ADDR) /* x86 */
SEGGER_SYSVIEW_SetRAMBase(CONFIG_PHYS_RAM_ADDR);
#elif defined(CONFIG_SRAM_BASE_ADDRESS) /* arm, default */
SEGGER_SYSVIEW_SetRAMBase(CONFIG_SRAM_BASE_ADDRESS);
#else
/* Setting RAMBase is just an optimization: this value is subtracted
* from all pointers in order to save bandwidth. It's not an error
* if a platform does not set this value.
*/
#endif
}