/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
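/* Sentinel value stored in table entries that cannot be provided on the
 * current architecture or configuration.
 */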
#define THREAD_INFO_UNIMPLEMENTED 0xffffffff
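/* Indices into the _kernel_thread_info_offsets[] table below; debugger
 * plugins are expected to use the same numbering when reading it.
 */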
enum {
	THREAD_INFO_OFFSET_VERSION,
	THREAD_INFO_OFFSET_K_CURR_THREAD,
	THREAD_INFO_OFFSET_K_THREADS,
	THREAD_INFO_OFFSET_T_ENTRY,
	THREAD_INFO_OFFSET_T_NEXT_THREAD,
	THREAD_INFO_OFFSET_T_STATE,
	THREAD_INFO_OFFSET_T_USER_OPTIONS,
	THREAD_INFO_OFFSET_T_PRIO,
	THREAD_INFO_OFFSET_T_STACK_PTR,
	THREAD_INFO_OFFSET_T_NAME,
	THREAD_INFO_OFFSET_T_ARCH,
	THREAD_INFO_OFFSET_T_PREEMPT_FLOAT,
	THREAD_INFO_OFFSET_T_COOP_FLOAT,
};
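/* Presumably because the table has no notion of per-CPU data, this code is
 * limited to single-CPU configurations.
 */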
#if CONFIG_MP_NUM_CPUS > 1
#error "This code doesn't work properly with multiple CPUs enabled"
#endif
/* Forward-compatibility notes: 1) Only append items to this table; otherwise
 * debugger plugin versions that expect fewer items will read garbage values.
 * 2) Avoid incompatible changes that affect the interpretation of existing
 * items. But if you have to make them, increment THREAD_INFO_OFFSET_VERSION
 * and submit a patch so that debugger plugins can deal with both the old and
 * new scheme.
 * Only version 1 is backward compatible with version 0.
 */
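/* Illustration (not part of this file): a debugger plugin that has read this
 * table from target memory can locate per-thread fields with simple pointer
 * arithmetic, e.g. roughly
 *
 *     prio_addr = thread_addr + offsets[THREAD_INFO_OFFSET_T_PRIO];
 *
 * assuming, as the mix of k_thread and _thread_base offsets below implies,
 * that the thread base data sits at the start of struct k_thread. Entries
 * past "Version 0 ends" must first be bounds-checked against
 * _kernel_thread_info_num_offsets.
 */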
__attribute__((used, section(".dbg_thread_info")))
size_t _kernel_thread_info_offsets[] = {
	/* Version 0 starts */
	[THREAD_INFO_OFFSET_VERSION] = 1,
	[THREAD_INFO_OFFSET_K_CURR_THREAD] = offsetof(struct _cpu, current),
	[THREAD_INFO_OFFSET_K_THREADS] = offsetof(struct z_kernel, threads),
	[THREAD_INFO_OFFSET_T_ENTRY] = offsetof(struct k_thread, entry),
	[THREAD_INFO_OFFSET_T_NEXT_THREAD] = offsetof(struct k_thread,
						      next_thread),
	[THREAD_INFO_OFFSET_T_STATE] = offsetof(struct _thread_base,
						thread_state),
	[THREAD_INFO_OFFSET_T_USER_OPTIONS] = offsetof(struct _thread_base,
						       user_options),
	[THREAD_INFO_OFFSET_T_PRIO] = offsetof(struct _thread_base, prio),
#if defined(CONFIG_ARM64)
	/* We are assuming that the SP of interest is SP_EL1 */
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp_elx),
#elif defined(CONFIG_ARM)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.psp),
#elif defined(CONFIG_ARC)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.rsp),
#else
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.esp),
#endif
#elif defined(CONFIG_NIOS2)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_RISCV)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.sp),
#elif defined(CONFIG_SPARC)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.o6),
#elif defined(CONFIG_ARCH_POSIX)
	[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
						    callee_saved.thread_status),
#else
	/* Use a special value so that OpenOCD knows that obtaining the stack
	 * pointer is not possible on this particular architecture.
	 */
#warning Please define THREAD_INFO_OFFSET_T_STACK_PTR for this architecture
	[THREAD_INFO_OFFSET_T_STACK_PTR] = THREAD_INFO_UNIMPLEMENTED,
#endif
	/* Version 0 ends */

	[THREAD_INFO_OFFSET_T_NAME] = offsetof(struct k_thread, name),
	[THREAD_INFO_OFFSET_T_ARCH] = offsetof(struct k_thread, arch),
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && defined(CONFIG_ARM)
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							preempt_float),
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#elif defined(CONFIG_FPU) && defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							sse),
#else
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = offsetof(struct _thread_arch,
							preempFloatReg),
#endif
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#else
	[THREAD_INFO_OFFSET_T_PREEMPT_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
	[THREAD_INFO_OFFSET_T_COOP_FLOAT] = THREAD_INFO_UNIMPLEMENTED,
#endif
	/* Version is still 1, but the existence of the elements after the
	 * "Version 0 ends" marker must be checked with
	 * _kernel_thread_info_num_offsets.
	 */
};
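/* Aliases kept so that debugger plugins that still look up the original
 * _kernel_openocd_* symbol names continue to work.
 */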
extern size_t __attribute__((alias("_kernel_thread_info_offsets")))
	_kernel_openocd_offsets;
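/* Number of entries present in this build's table; plugins check it before
 * reading entries added after version 0 (see the note at the end of the
 * table).
 */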
__attribute__((used, section(".dbg_thread_info")))
size_t _kernel_thread_info_num_offsets = ARRAY_SIZE(_kernel_thread_info_offsets);
extern size_t __attribute__((alias("_kernel_thread_info_num_offsets")))
	_kernel_openocd_num_offsets;
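/* Width of a table entry (size_t) on this target, so the debugger knows how
 * many bytes to read per entry.
 */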
__attribute__((used, section(".dbg_thread_info")))
uint8_t _kernel_thread_info_size_t_size = (uint8_t)sizeof(size_t);
extern uint8_t __attribute__((alias("_kernel_thread_info_size_t_size")))
	_kernel_openocd_size_t_size;