mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-03 15:55:38 +00:00

Speculation attacks against some high-performance processors can make use of branch history to influence future speculation.

When taking an exception from user-space, a sequence of branches or a firmware call overwrites or invalidates the branch history. The sequence of branches is added to the vectors, and should appear before the first indirect branch.

For systems using KPTI the sequence is added to the kpti trampoline, where it has a free register as the exit from the trampoline is via a 'ret'. For systems not using KPTI, the same register tricks are used to free up a register in the vectors.

For the firmware call, arch-workaround-3 clobbers 4 registers, so there is no choice but to save them to the EL1 stack. This only happens for entry from EL0, so if we take an exception due to the stack access, it will not become re-entrant.

For KVM, the existing branch-predictor-hardening vectors are used. When a spectre version of these vectors is in use, the firmware call is sufficient to mitigate against Spectre-BHB. For the non-spectre versions, the sequence of branches is added to the indirect vector.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
101 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

/*
 * Number of EL2 vector slots, matching the four entries of
 * enum arm64_hyp_spectre_vector below.
 */
#define BP_HARDEN_EL2_SLOTS 4

/*
 * Size of the bp-hardening hyp vector copies. The direct (slot 0) vector
 * is not part of the copies, hence "- 1".
 * NOTE(review): assumes each slot is SZ_2K — confirm against the layout of
 * __bp_harden_hyp_vecs.
 */
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)

#ifndef __ASSEMBLY__

#include <linux/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>
/*
 * Watch out, ordering is important here.
 *
 * NOTE(review): callers appear to rely on the numeric ordering
 * (UNAFFECTED < MITIGATED < VULNERABLE) when combining states —
 * confirm against users before reordering.
 */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

/* Forward declaration: only a pointer is needed (see spectre_v4 API below). */
struct task_struct;
/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
	/*
	 * Take exceptions directly to __kvm_hyp_vector. This must be
	 * 0 so that it is used by default when mitigations are not needed.
	 */
	HYP_VECTOR_DIRECT,

	/*
	 * Bounce via a slot in the hypervisor text mapping of
	 * __bp_harden_hyp_vecs, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_DIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page.
	 */
	HYP_VECTOR_INDIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_INDIRECT,
};
/* Callback run to harden the branch predictor (implementation CPU-specific). */
typedef void (*bp_hardening_cb_t)(void);

/*
 * Per-CPU mitigation state: which hyp vector slot this CPU should use,
 * and the hardening callback (NULL if none) to invoke on kernel entry.
 */
struct bp_hardening_data {
	enum arm64_hyp_spectre_vector slot;
	bp_hardening_cb_t fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
static inline void arm64_apply_bp_hardening(void)
|
|
{
|
|
struct bp_hardening_data *d;
|
|
|
|
if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
|
|
return;
|
|
|
|
d = this_cpu_ptr(&bp_hardening_data);
|
|
if (d->fn)
|
|
d->fn();
|
|
}
|
|
|
|
/* Spectre-v2. */
enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

/* Spectre-v3a. */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

/* Spectre-v4, including the per-task mitigation control. */
enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

/* Spectre-BHB (branch history). */
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_SPECTRE_H */