/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Private kernel definitions (ARM64)
 *
 * This file contains private kernel function definitions and various
 * other definitions for the ARM Cortex-A processor architecture family.
 *
 * This file is also included by assembly language files which must #define
 * _ASMLANGUAGE before including this header file. Note that kernel
 * assembly source files obtain structure offset values via "absolute symbols"
 * in the offsets.o module.
 */

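/*
 * As a rough sketch of the note above (the include path here is illustrative,
 * not prescribed by this header), an assembly source would define _ASMLANGUAGE
 * before pulling in the header:
 *
 *   #define _ASMLANGUAGE
 *   #include <kernel_arch_func.h>
 *
 * so that the C-only declarations below are compiled out.
 */
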
#ifndef ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_

#include <kernel_arch_data.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE

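/*
 * Architecture-specific hook invoked during early kernel initialization.
 * Nothing extra is needed on ARM64, hence the empty body.
 */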
static ALWAYS_INLINE void arch_kernel_init(void)
{
}

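/*
 * Request a context switch to the thread identified by 'switch_to', storing
 * the outgoing thread's switch handle through 'switched_from'. On ARM64 the
 * switch is performed by taking an SVC exception (z_arm64_call_svc()), so the
 * register context is saved and restored by the exception handling code.
 */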
static inline void arch_switch(void *switch_to, void **switched_from)
{
	z_arm64_call_svc(switch_to, switched_from);

	return;
}

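/* Architecture-specific fatal error handler, taking the exception stack
 * frame and the kernel fatal error reason code.
 */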
extern void z_arm64_fatal_error(z_arch_esf_t *esf, unsigned int reason);
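/* Drop the current thread into user mode. 'sp_el0' is the user stack pointer
 * to install; once in user mode, exceptions and syscalls are serviced on
 * SP_EL1, which points into the privileged portion of the thread stack, so a
 * corrupted user SP cannot break the exception handling code.
 */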
extern void z_arm64_userspace_enter(z_arch_esf_t *esf, uintptr_t sp_el0);
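/* Program TTBR0 (lower VA range translation table base) with 'ttbr0'. */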
extern void z_arm64_set_ttbr0(uintptr_t ttbr0);
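/* Notify the other CPUs, via an IPI, that the page tables have been updated. */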
extern void z_arm64_ptable_ipi(void);

#ifdef CONFIG_FPU_SHARING
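/* Flush the FPU context currently held by this CPU, or ask CPU 'cpu' to do
 * the same via an IPI (only available with CONFIG_FPU_SHARING).
 */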
void z_arm64_flush_local_fpu(void);
void z_arm64_flush_fpu_ipi(unsigned int cpu);
#endif

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_ARM64_INCLUDE_KERNEL_ARCH_FUNC_H_ */