/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <linux/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Provide a wxN alias for each wN register so that we can paste an xN
 * reference after a 'w' to obtain the 32-bit version.
 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr
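
	/*
	 * Illustrative usage (editor's example, not part of the original
	 * header): a macro that receives a 64-bit register name such as x8
	 * in \reg can refer to its 32-bit half by pasting the argument after
	 * 'w', e.g.
	 *
	 *	mov	w\reg, #0	// expands to "mov wx8, #0", i.e. "mov w8, #0"
	 */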

	.macro	disable_daif
	msr	daifset, #0xf
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_EL1_SS
	msr	mdscr_el1, \tmp
	isb	// Take effect before a subsequent clear of DAIF.D
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_EL1_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro	clearbhb
	hint	#22
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
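
	/*
	 * Illustrative usage (editor's example, not part of the original
	 * header): combine two 32-bit halves, zero-extended in x0 and x1,
	 * into x2:
	 *
	 *	regs_to_64	x2, x0, x1
	 *
	 * As noted above, which operand supplies the high half depends on
	 * CONFIG_CPU_BIG_ENDIAN.
	 */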

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
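
	/*
	 * Illustrative usage (editor's example with a hypothetical symbol
	 * 'foo_var'): take the address of foo_var, load it, and store an
	 * updated value back:
	 *
	 *	adr_l	x0, foo_var		// x0 = &foo_var
	 *	ldr_l	w1, foo_var, x2		// w1 = foo_var, x2 is scratch
	 *	str_l	w1, foo_var, x2		// foo_var = w1, x2 is scratch
	 */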

	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm

	.macro	set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, \src
alternative_else
	msr	tpidr_el2, \src
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	get_this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm
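
	/*
	 * Illustrative usage (editor's example with a hypothetical per-cpu
	 * variable 'foo_pcpu'):
	 *
	 *	adr_this_cpu	x0, foo_pcpu, x1	// x0 = this_cpu_ptr(&foo_pcpu)
	 *	ldr_this_cpu	x2, foo_pcpu, x3	// x2 = *this_cpu_ptr(&foo_pcpu)
	 */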

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 * tcr: register with the TCR_ELx value to be updated
 * pos: IPS or PS bitfield position
 * tmp{0,1}: temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
#ifdef CONFIG_ARM64_LPA2
alternative_if_not ARM64_HAS_VA52
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
alternative_else_nop_endif
#endif
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

	.macro	__dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \addr
alternative_else
	dc	civac, \addr
alternative_endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end) with dcache line size explicitly provided.
 *
 * op: operation passed to dc instruction
 * domain: domain used in dsb instruction
 * start: starting virtual address of the region
 * end: end virtual address of the region
 * linesz: dcache line size
 * fixup: optional label to branch to on user fault
 * Corrupts: start, end, tmp
 */
	.macro	dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
	sub	\tmp, \linesz, #1
	bic	\start, \start, \tmp
.Ldcache_op\@:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \start	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \start	// dc cvadp
	.else
	dc	\op, \start
	.endif
	.endif
	.endif
	.endif
	add	\start, \start, \linesz
	cmp	\start, \end
	b.lo	.Ldcache_op\@
	dsb	\domain

	_cond_uaccess_extable .Ldcache_op\@, \fixup
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end)
 *
 * op: operation passed to dc instruction
 * domain: domain used in dsb instruction
 * start: starting virtual address of the region
 * end: end virtual address of the region
 * fixup: optional label to branch to on user fault
 * Corrupts: start, end, tmp1, tmp2
 */
	.macro	dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
	.endm
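
	/*
	 * Illustrative usage (editor's example): clean and invalidate the
	 * region [x0, x1) to the point of coherency, using x2/x3 as scratch:
	 *
	 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
	 *
	 * The optional fixup label is omitted here since the example operates
	 * on kernel memory.
	 */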

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 * start, end: virtual addresses describing the region
 * fixup: optional label to branch to on user fault
 * Corrupts: tmp1, tmp2
 */
	.macro	invalidate_icache_by_line start, end, tmp1, tmp2, fixup
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
.Licache_op\@:
	ic	ivau, \tmp2			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	.Licache_op\@
	dsb	ish
	isb

	_cond_uaccess_extable .Licache_op\@, \fixup
	.endm

/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
	.macro	load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr \tmp1, \pgtbl
	offset_ttbr1 \tmp1, \tmp2
	msr	ttbr1_el1, \tmp1
	isb
	.endm

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the tlb, switch the ttbr to a zero page when we invalidate the old
 * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 */
	.macro	break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	tlbi	vmalle1
	dsb	nsh
	load_ttbr1 \page_table, \tmp, \tmp2
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	ubfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	9000f				// Skip if no PMU present or IMP_DEF
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro	copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
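
	/*
	 * Illustrative usage (editor's example): copy one page from the
	 * address in x1 to the address in x0, clobbering x2-x9 as scratch:
	 *
	 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 *
	 * Both pointers are advanced by PAGE_SIZE on return.
	 */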

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
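
	/*
	 * Illustrative usage (editor's example): build a 64-bit constant in
	 * x0 at assembly time; the macro emits between two and four
	 * movz/movk instructions depending on the value:
	 *
	 *	mov_q	x0, 0x0000ffffffffffff
	 */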

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * If the kernel is built for 52-bit virtual addressing but the hardware only
 * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
 * but we have to add an offset so that the TTBR1 address corresponds with the
 * pgdir entry that covers the lowest 48-bit addressable VA.
 *
 * Note that this trick is only used for LVA/64k pages - LPA2/4k pages use an
 * additional paging level, and on LPA2/16k pages, we would end up with a root
 * level table with only 2 entries, which is suboptimal in terms of TLB
 * utilization, so there we fall back to 47 bits of translation if LPA2 is not
 * supported.
 *
 * orr is used as it can cover the immediate value (and is idempotent).
 * ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
	mrs	\tmp, tcr_el1
	and	\tmp, \tmp, #TCR_T1SZ_MASK
	cmp	\tmp, #TCR_T1SZ(VA_BITS_MIN)
	orr	\tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
	csel	\ttbr, \tmp, \ttbr, eq
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * phys: physical address, preserved
 * ttbr: returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro	frame_push, regcount:req, extra
	__frame	st, \regcount, \extra
	.endm

	/*
	 * frame_pop - Pop the callee saved registers from the stack that were
	 *             pushed in the most recent call to frame_push, as well
	 *             as x29/x30 and any extra stack space that may have been
	 *             allocated.
	 */
	.macro	frame_pop
	__frame	ld
	.endm
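
	/*
	 * Illustrative usage (editor's example): an assembly function that
	 * needs four callee-saved registers (x19-x22) plus 16 bytes of local
	 * stack space could bracket its body with:
	 *
	 *	frame_push	4, 16
	 *	...			// body may clobber x19-x22
	 *	frame_pop
	 *	ret
	 */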

	.macro	__frame_regs, reg1, reg2, op, num
	.if	.Lframe_regcount == \num
	\op\()r	\reg1, [sp, #(\num + 1) * 8]
	.elseif	.Lframe_regcount > \num
	\op\()p	\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro	__frame, op, regcount, extra=0
	.ifc	\op, st
	.if	(\regcount) < 0 || (\regcount) > 10
	.error	"regcount should be in the range [0 ... 10]"
	.endif
	.if	((\extra) % 16) != 0
	.error	"extra should be a multiple of 16 bytes"
	.endif
	.ifdef	.Lframe_regcount
	.if	.Lframe_regcount != -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set	.Lframe_regcount, \regcount
	.set	.Lframe_extra, \extra
	.set	.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp	x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov	x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc	\op, ld
	.if	.Lframe_regcount == -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	ldp	x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set	.Lframe_regcount, -1
	.endif
	.endm

/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
	.macro	set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	.endm

	.macro	set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
	.endm

	.macro	set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
	.endm

	/*
	 * Check whether asm code should yield as soon as it is able. This is
	 * the case if we are currently running in task context, and the
	 * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
	 * is stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro	cond_yield, lbl:req, tmp:req, tmp2
#ifdef CONFIG_PREEMPT_VOLUNTARY
	get_current_task \tmp
	ldr	\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can. The preempt_count field will
	 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
	 * catch this case too.
	 */
	cbz	\tmp, \lbl
#endif
	.endm
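
	/*
	 * Illustrative usage (editor's example): inside a long-running loop,
	 * branch to a local label that saves any live state and yields,
	 * using x8/x9 as scratch:
	 *
	 *	cond_yield	3f, x8, x9
	 *	...
	 * 3:	// save state, yield, then resume
	 */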

/*
 * Branch Target Identifier (BTI)
 */
	.macro	bti, targets
	.equ	.L__bti_targets_c, 34
	.equ	.L__bti_targets_j, 36
	.equ	.L__bti_targets_jc, 38
	hint	#.L__bti_targets_\targets
	.endm
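
	/*
	 * Illustrative usage (editor's example): mark a branch target that
	 * may be reached via an indirect call:
	 *
	 *	bti	c
	 */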

/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0		5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND	0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI	(1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC	(1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT	\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.macro	emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align	3
	.long	2f - 1f
	.long	6f - 3f
	.long	NT_GNU_PROPERTY_TYPE_0
1:	.string	"GNU"
2:
	.align	3
3:	.long	GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long	5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long	\feat
5:
	.align	3
6:
	.popsection
	.endm

#else
	.macro	emit_aarch64_feature_1_and, feat=0
	.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

	.macro	__mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro	mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Saves/restores x0-x3 to the stack */
	.macro	__mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro	mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif /* __ASM_ASSEMBLER_H */