/* zephyr/arch/arm64/include/kernel_arch_func.h */
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (ARM64)
*
* This file contains private kernel function definitions and various
* other definitions for the ARM Cortex-A processor architecture family.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtains structure offset values via "absolute symbols"
* in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
/**
 * @brief Architecture-specific kernel initialization hook.
 *
 * Intentionally empty: no ARM64-specific setup is needed here at the
 * point the kernel calls this hook.
 */
static ALWAYS_INLINE void arch_kernel_init(void)
{
}
/**
 * @brief Perform a context switch between threads.
 *
 * Delegates the switch to z_arm64_call_svc(), which issues an SVC so
 * the actual register save/restore happens in the exception handler.
 *
 * @param switch_to Switch handle of the thread to switch to
 * @param switched_from Location where the outgoing thread's switch
 *        handle is stored
 */
static inline void arch_switch(void *switch_to, void **switched_from)
{
	/* Redundant trailing "return;" removed: this is a void function. */
	z_arm64_call_svc(switch_to, switched_from);
}
extern void z_arm64_fatal_error(z_arch_esf_t *esf, unsigned int reason);
/* Drop to EL0 to start executing a user thread; sp_el0 is presumably the
 * initial EL0 stack pointer -- NOTE(review): confirm against the assembly
 * implementation.
 */
extern void z_arm64_userspace_enter(z_arch_esf_t *esf, uintptr_t sp_el0);
/* Install a new TTBR0 value -- presumably the page-table base for user
 * mappings; verify against the MMU code.
 */
extern void z_arm64_set_ttbr0(uintptr_t ttbr0);
/* NOTE(review): name suggests an IPI to propagate page-table updates to
 * other CPUs -- confirm with the caller.
 */
extern void z_arm64_ptable_ipi(void);
#ifdef CONFIG_FPU_SHARING
/* FPU-sharing support helpers (flush local FPU state / request a flush on
 * another CPU via IPI -- semantics per the names, to be confirmed).
 */
void z_arm64_flush_local_fpu(void);
void z_arm64_flush_fpu_ipi(unsigned int cpu);
#endif
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_ */