/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 ARM Ltd.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>

#define DAIF_PROCCTX		0
#define DAIF_PROCCTX_NOIRQ	(PSR_I_BIT | PSR_F_BIT)
#define DAIF_ERRCTX		(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
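
/*
 * A quick orientation (illustrative): these constants are PSTATE.DAIF bit
 * masks, in the same format local_daif_restore() consumes, so each names
 * the exception context it establishes:
 *
 *   DAIF_PROCCTX        - nothing masked: normal process context
 *   DAIF_PROCCTX_NOIRQ  - IRQs/FIQs masked; SError and debug still taken
 *   DAIF_ERRCTX         - all but debug masked, for SError handling
 *   DAIF_MASK           - every DAIF exception masked
 */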

/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
	WARN_ON(system_has_prio_mask_debugging() &&
		(read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
						    GIC_PRIO_PSR_I_SET)));

	asm volatile(
		"msr	daifset, #0xf		// local_daif_mask\n"
		:
		:
		: "memory");

	/* Don't really care for a dsb here, we don't intend to enable IRQs */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	trace_hardirqs_off();
}
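
/*
 * Usage sketch (hypothetical caller, not part of this header): code that
 * must not take *any* exception, e.g. a CPU quiescing itself, masks
 * everything and never returns to a context that needs exceptions again.
 */
#if 0	/* illustrative only */
static void example_cpu_quiesce(void)
{
	local_daif_mask();	/* D, A, I and F all set; PMR raised too */

	/* ... final teardown that must not be interrupted ... */
}
#endif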

static inline unsigned long local_daif_save_flags(void)
{
	unsigned long flags;

	flags = read_sysreg(daif);

	if (system_uses_irq_prio_masking()) {
		/* If IRQs are masked with PMR, reflect it in the flags */
		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
			flags |= PSR_I_BIT | PSR_F_BIT;
	}

	return flags;
}
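
/*
 * Note (illustrative): under pseudo-NMI, IRQs can be masked through the
 * GIC PMR while PSTATE.I stays clear. Folding PSR_I_BIT | PSR_F_BIT into
 * the returned value above means callers see one uniform flags format,
 * whichever masking mechanism is in force.
 */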

static inline unsigned long local_daif_save(void)
{
	unsigned long flags;

	flags = local_daif_save_flags();

	local_daif_mask();

	return flags;
}
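
/*
 * Usage sketch (hypothetical caller): the usual save/mask/restore pair
 * around a section that must run with all exceptions masked.
 * local_daif_restore() is defined further down in this header.
 */
#if 0	/* illustrative only */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = local_daif_save();	/* remember old DAIF, then mask */

	/* ... code that must not take IRQ/FIQ/SError/debug ... */

	local_daif_restore(flags);	/* put DAIF (and PMR) back */
}
#endif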

static inline void local_daif_restore(unsigned long flags)
{
	bool irq_disabled = flags & PSR_I_BIT;

	WARN_ON(system_has_prio_mask_debugging() &&
		(read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));

	if (!irq_disabled) {
		trace_hardirqs_on();

		if (system_uses_irq_prio_masking()) {
			gic_write_pmr(GIC_PRIO_IRQON);
			pmr_sync();
		}
	} else if (system_uses_irq_prio_masking()) {
		u64 pmr;

		if (!(flags & PSR_A_BIT)) {
			/*
			 * If interrupts are disabled but we can take
			 * asynchronous errors, we can take NMIs
			 */
			flags &= ~(PSR_I_BIT | PSR_F_BIT);
			pmr = GIC_PRIO_IRQOFF;
		} else {
			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
		}

		/*
		 * There has been concern that the write to daif
		 * might be reordered before this write to PMR.
		 * From the ARM ARM DDI 0487D.a, section D1.7.1
		 * "Accessing PSTATE fields":
		 *   Writes to the PSTATE fields have side-effects on
		 *   various aspects of the PE operation. All of these
		 *   side-effects are guaranteed:
		 *     - Not to be visible to earlier instructions in
		 *       the execution stream.
		 *     - To be visible to later instructions in the
		 *       execution stream.
		 *
		 * Also, writes to PMR are self-synchronizing, so no
		 * interrupt with a lower priority than PMR is signaled
		 * to the PE after the write.
		 *
		 * So we don't need additional synchronization here.
		 */
		gic_write_pmr(pmr);
	}

	write_sysreg(flags, daif);

	if (irq_disabled)
		trace_hardirqs_off();
}
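
/*
 * Usage sketch (hypothetical caller): an asymmetric restore. Passing
 * DAIF_PROCCTX_NOIRQ leaves PSR_A_BIT clear, so under pseudo-NMI this
 * takes the !(flags & PSR_A_BIT) branch above: IRQs stay masked via
 * PMR = GIC_PRIO_IRQOFF while SErrors and NMIs become possible again.
 */
#if 0	/* illustrative only */
static void example_unmask_all_but_irq(void)
{
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
#endif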

/*
 * Called by synchronous exception handlers to restore the DAIF bits that were
 * modified by taking an exception.
 */
static inline void local_daif_inherit(struct pt_regs *regs)
{
	unsigned long flags = regs->pstate & DAIF_MASK;

	if (interrupts_enabled(regs))
		trace_hardirqs_on();

	if (system_uses_irq_prio_masking())
		gic_write_pmr(regs->pmr_save);

	/*
	 * We can't use local_daif_restore(regs->pstate) here as
	 * system_has_prio_mask_debugging() won't restore the I bit if it can
	 * use the pmr instead.
	 */
	write_sysreg(flags, daif);
}
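
/*
 * Usage sketch (hypothetical handler, names assumed): a synchronous
 * exception handler re-inheriting the interrupted context's maskings
 * before doing any real work.
 */
#if 0	/* illustrative only */
static void example_el1_sync_handler(struct pt_regs *regs)
{
	/* Run with the same DAIF/PMR state as the interrupted code. */
	local_daif_inherit(regs);

	/* ... handle the exception ... */
}
#endif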
#endif