/*
 * Copyright (c) 2017 Intel Corporation
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#define THREAD_INFO_UNIMPLEMENTED 0xffffffff

enum {
	THREAD_INFO_OFFSET_VERSION,
	THREAD_INFO_OFFSET_K_CURR_THREAD,
	THREAD_INFO_OFFSET_K_THREADS,
	THREAD_INFO_OFFSET_T_ENTRY,
	THREAD_INFO_OFFSET_T_NEXT_THREAD,
	THREAD_INFO_OFFSET_T_STATE,
	THREAD_INFO_OFFSET_T_USER_OPTIONS,
	THREAD_INFO_OFFSET_T_PRIO,
	THREAD_INFO_OFFSET_T_STACK_PTR,
	THREAD_INFO_OFFSET_T_NAME,
	THREAD_INFO_OFFSET_T_ARCH,
	THREAD_INFO_OFFSET_T_PREEMPT_FLOAT,
	THREAD_INFO_OFFSET_T_COOP_FLOAT,
	THREAD_INFO_OFFSET_T_ARM_EXC_RETURN,
	THREAD_INFO_OFFSET_T_ARC_RELINQUISH_CAUSE,
};

#if CONFIG_MP_MAX_NUM_CPUS > 1
#error "This code doesn't work properly with multiple CPUs enabled"
#endif

/* Forward-compatibility notes: 1) Only append items to this table; otherwise
 * debugger plugin versions that expect fewer items will read garbage values.
 * 2) Avoid incompatible changes that alter the interpretation of existing
 * items. If such a change is unavoidable, increment THREAD_INFO_OFFSET_VERSION
 * and submit a patch so that debugger plugins can handle both the old and the
 * new scheme.
 * Only version 1 is backward compatible with version 0.
 * (An illustrative reader sketch follows this table.)
 */
__attribute__((used, section(".dbg_thread_info")))
const size_t _kernel_thread_info_offsets[] = {
	/* Version 0 starts */
	[THREAD_INFO_OFFSET_VERSION] = 1,
	[THREAD_INFO_OFFSET_K_CURR_THREAD] = offsetof(struct _cpu, current),
	[THREAD_INFO_OFFSET_K_THREADS] = offsetof(struct z_kernel, threads),
	[THREAD_INFO_OFFSET_T_ENTRY] = offsetof(struct k_thread, entry),
	[THREAD_INFO_OFFSET_T_NEXT_THREAD] = offsetof(struct k_thread,
						      next_thread),
	[THREAD_INFO_OFFSET_T_STATE] = offsetof(struct _thread_base,
						thread_state),
	[THREAD_INFO_OFFSET_T_USER_OPTIONS] = offsetof(struct _thread_base,
						       user_options),
	[THREAD_INFO_OFFSET_T_PRIO] = offsetof(struct _thread_base, prio),
#if defined(CONFIG_ARM64)
	/* We are assuming that the SP of interest is SP_EL1 */
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp_elx),
#elif defined(CONFIG_ARM)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.psp),
#elif defined(CONFIG_ARC)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.rsp),
#else
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.esp),
#endif
#elif defined(CONFIG_MIPS)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_NIOS2)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_RISCV)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_SPARC)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.o6),
#elif defined(CONFIG_ARCH_POSIX)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.thread_status),
#elif defined(CONFIG_XTENSA)
	/* Xtensa does not store stack pointers inside thread objects.
	 * The registers are saved on the thread stack, so there is no
	 * fixed offset this entry could point to. Mark it as
	 * unimplemented to avoid the #warning below.
	 */
	[THREAD_INFO_OFFSET_T_STACK_PTR] = THREAD_INFO_UNIMPLEMENTED,
#else
	/* Use a special value so that OpenOCD knows that obtaining the stack
	 * pointer is not possible on this particular architecture.
	 */
#warning Please define THREAD_INFO_OFFSET_T_STACK_PTR for this architecture
	[THREAD_INFO_OFFSET_T_STACK_PTR] = THREAD_INFO_UNIMPLEMENTED,
#endif
	/* Version 0 ends */

	[THREAD_INFO_OFFSET_T_NAME] = offsetof(struct k_thread, name),
	[THREAD_INFO_OFFSET_T_ARCH] = offsetof(struct k_thread, arch),
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && defined(CONFIG_ARM)
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							preempt_float),
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#elif defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && defined(CONFIG_ARM64)
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							saved_fp_context),
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#elif defined(CONFIG_FPU) && defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							sse),
#else
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							preempFloatReg),
#endif
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#else
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#endif

	/* The version is still 1, but the existence of the following elements
	 * must be checked against _kernel_thread_info_num_offsets.
	 */
#ifdef CONFIG_ARM_STORE_EXC_RETURN
	/* ARM overwrites the LSB of the Link Register on the stack when
	 * this option is enabled. If this offset is not
	 * THREAD_INFO_UNIMPLEMENTED, the LSB needs to be restored from
	 * mode_exc_return (see the illustrative sketch below).
	 */
	[THREAD_INFO_OFFSET_T_ARM_EXC_RETURN] = offsetof(struct _thread_arch,
							 mode_exc_return),
#else
	[THREAD_INFO_OFFSET_T_ARM_EXC_RETURN] = THREAD_INFO_UNIMPLEMENTED,
#endif /* CONFIG_ARM_STORE_EXC_RETURN */
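	/* Illustrative only, not part of this file's contract: a host-side
	 * debugger plugin might perform the restore described above roughly
	 * like this, where stacked_lr and exc_return are hypothetical values
	 * it has already read out of target memory:
	 *
	 *	stacked_lr = (stacked_lr & ~1UL) | (exc_return & 1UL);
	 */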
#if defined(CONFIG_ARC)
	[THREAD_INFO_OFFSET_T_ARC_RELINQUISH_CAUSE] = offsetof(struct _thread_arch,
							       relinquish_cause),
#else
	[THREAD_INFO_OFFSET_T_ARC_RELINQUISH_CAUSE] = THREAD_INFO_UNIMPLEMENTED,
#endif /* CONFIG_ARC */
};
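
/* A minimal, uncompiled reader sketch (the one referenced in the
 * forward-compatibility notes above). It assumes a debugger has already
 * copied this table into a host-side offsets[] array; read_target_word()
 * and thread_addr are hypothetical host-side names, not Zephyr APIs:
 *
 *	size_t off = offsets[THREAD_INFO_OFFSET_T_PRIO];
 *	if (off != THREAD_INFO_UNIMPLEMENTED) {
 *		prio = read_target_word(thread_addr + off);
 *	}
 *
 * Indices past THREAD_INFO_OFFSET_T_COOP_FLOAT are valid only when they
 * are below _kernel_thread_info_num_offsets, per the version-1 note.
 */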

extern const size_t __attribute__((alias("_kernel_thread_info_offsets")))
	_kernel_openocd_offsets;

__attribute__((used, section(".dbg_thread_info")))
const size_t _kernel_thread_info_num_offsets = ARRAY_SIZE(_kernel_thread_info_offsets);

extern const size_t __attribute__((alias("_kernel_thread_info_num_offsets")))
	_kernel_openocd_num_offsets;

__attribute__((used, section(".dbg_thread_info")))
const uint8_t _kernel_thread_info_size_t_size = (uint8_t)sizeof(size_t);

extern const uint8_t __attribute__((alias("_kernel_thread_info_size_t_size")))
	_kernel_openocd_size_t_size;
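
/* A hedged sketch of the host-side parse order these three symbols are
 * meant to support (sym() and read_u8()/read_uint() are hypothetical
 * debugger helpers, not real OpenOCD APIs): the plugin reads the entry
 * width first, then the count, then the entries themselves:
 *
 *	uint8_t width = read_u8(sym("_kernel_thread_info_size_t_size"));
 *	uint64_t n = read_uint(sym("_kernel_thread_info_num_offsets"), width);
 *	for (uint64_t i = 0; i < n; i++) {
 *		offsets[i] = read_uint(sym("_kernel_thread_info_offsets") + i * width,
 *				       width);
 *	}
 */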