/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/exception.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <asm/esr.h>
#include <asm/ptrace.h>

#include <linux/interrupt.h>

#define __exception_irq_entry __irq_entry
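
/*
 * Build an ESR_ELx-format SError syndrome from a deferred syndrome
 * recorded in DISR_EL1: the exception class is set to SERROR, and the
 * syndrome bits that are preserved depend on DISR_EL1.IDS.
 */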
static inline unsigned long disr_to_esr(u64 disr)
{
	unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (disr & DISR_EL1_ESR_MASK);
	else
		esr |= (disr & ESR_ELx_ISS_MASK);

	return esr;
}

asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs);
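
/*
 * C handlers for the architectural exception vectors, named
 * <el+sp>_<regsize>_<type>_handler to match the corresponding entry
 * assembly, where <el+sp> is el1t, el1h, or el0t, <regsize> is 64 or
 * 32, and <type> is sync, irq, fiq, or error.
 */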
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);

asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);

asmlinkage void call_on_irq_stack(struct pt_regs *regs,
				  void (*func)(struct pt_regs *));
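/* Called from the ret_to_user path in the entry assembly. */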
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
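
/*
 * ESR_ELx is a 64-bit register, so exception syndromes are passed to
 * the handlers below as unsigned long (a 64-bit quantity on LP64),
 * matching FAR_ELx.
 */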
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
void do_el0_undef(struct pt_regs *regs, unsigned long esr);
void do_el1_undef(struct pt_regs *regs, unsigned long esr);
void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
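
/*
 * Debug exception handlers, called directly from the entry code now
 * that each debug exception type has its own entry path rather than
 * being routed through do_debug_exception().
 */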
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void do_breakpoint(unsigned long esr, struct pt_regs *regs);
void do_watchpoint(unsigned long addr, unsigned long esr,
		   struct pt_regs *regs);
#else
static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
static inline void do_watchpoint(unsigned long addr, unsigned long esr,
				 struct pt_regs *regs) {}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
void do_bkpt32(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
void do_sme_acc(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs);
void do_el0_sys(unsigned long esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_el0_cp15(unsigned long esr, struct pt_regs *regs);
int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_el1_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void do_signal(struct pt_regs *regs);

void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);

#endif /* __ASM_EXCEPTION_H */