Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
smccc/kvm_guest: Enable errata based on implementation CPUs
Retrieve any migration target implementation CPUs using the hypercall and enable associated errata.

Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250221140229.12588-6-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 86edf6bdcf
parent c8c2647e69

5 changed files with 114 additions and 5 deletions
arch/arm64/include/asm/cputype.h

@@ -276,6 +276,13 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
 	return _model == model && rv >= rv_min && rv <= rv_max;
 }
 
+struct target_impl_cpu {
+	u64 midr;
+	u64 revidr;
+	u64 aidr;
+};
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
 bool is_midr_in_range_list(struct midr_range const *ranges);
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
arch/arm64/include/asm/hypervisor.h

@@ -6,6 +6,7 @@
 
 void kvm_init_hyp_services(void);
 bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
 
 #ifdef CONFIG_ARM_PKVM_GUEST
 void pkvm_init_hyp_services(void);
arch/arm64/kernel/cpu_errata.c

@@ -14,10 +14,34 @@
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+	if (target_impl_cpu_num || !num || !impl_cpus)
+		return false;
+
+	target_impl_cpu_num = num;
+	target_impl_cpus = impl_cpus;
+	return true;
+}
+
 static inline bool is_midr_in_range(struct midr_range const *range)
 {
-	return midr_is_cpu_model_range(read_cpuid_id(), range->model,
-				       range->rv_min, range->rv_max);
+	int i;
+
+	if (!target_impl_cpu_num)
+		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+					       range->rv_min, range->rv_max);
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+					    range->model,
+					    range->rv_min, range->rv_max))
+			return true;
+	}
+	return false;
 }
 
 bool is_midr_in_range_list(struct midr_range const *ranges)

@@ -47,9 +71,20 @@ __is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 {
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return __is_affected_midr_range(entry, read_cpuid_id(),
-					read_cpuid(REVIDR_EL1));
+	int i;
+
+	if (!target_impl_cpu_num) {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		return __is_affected_midr_range(entry, read_cpuid_id(),
+						read_cpuid(REVIDR_EL1));
+	}
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+					     target_impl_cpus[i].midr))
+			return true;
+	}
+	return false;
 }
 
 static bool __maybe_unused
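The cpu_errata.c changes above alter the matching policy: once a target-implementation list has been registered, an erratum is treated as applicable if any CPU in the migration-target set falls inside the affected MIDR model/revision range, rather than only the CPU the guest is currently running on. The stand-alone C sketch below re-states that "match any target" check outside the kernel; the MIDR field masks mirror the architectural MIDR_EL1 layout, and the example MIDR values and erratum range are invented for illustration.

/*
 * Illustration only: a stand-alone re-statement of the "match any
 * migration-target CPU" check added to cpu_errata.c above.  The MIDR
 * field masks mirror the architectural MIDR_EL1 layout; the MIDR
 * values and the erratum range below are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_REVISION_MASK	0x0000000fu	/* Revision     [3:0]   */
#define MIDR_PARTNUM_MASK	0x0000fff0u	/* PartNum      [15:4]  */
#define MIDR_ARCH_MASK		0x000f0000u	/* Architecture [19:16] */
#define MIDR_VARIANT_MASK	0x00f00000u	/* Variant      [23:20] */
#define MIDR_IMPL_MASK		0xff000000u	/* Implementer  [31:24] */
#define MIDR_CPU_MODEL_MASK	(MIDR_IMPL_MASK | MIDR_ARCH_MASK | MIDR_PARTNUM_MASK)

/* Same shape as the model/revision-range helper the errata code calls. */
static bool midr_in_model_range(uint32_t midr, uint32_t model,
				uint32_t rv_min, uint32_t rv_max)
{
	uint32_t m  = midr & MIDR_CPU_MODEL_MASK;
	uint32_t rv = midr & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK);

	return m == model && rv >= rv_min && rv <= rv_max;
}

int main(void)
{
	/* Hypothetical migration-target pool: r1p1 and r0p0 of one part. */
	const uint32_t target_midrs[] = { 0x411fd491u, 0x410fd490u };
	/* Hypothetical erratum affecting r0p0..r1p0 of that same part. */
	const uint32_t model  = 0x410fd490u & MIDR_CPU_MODEL_MASK;
	const uint32_t rv_min = 0;			/* r0p0 */
	const uint32_t rv_max = (1u << 20) | 0;		/* r1p0 */
	bool affected = false;

	/* Enable the workaround if ANY CPU the guest may run on matches. */
	for (size_t i = 0; i < sizeof(target_midrs) / sizeof(target_midrs[0]); i++) {
		if (midr_in_model_range(target_midrs[i], model, rv_min, rv_max)) {
			affected = true;
			break;
		}
	}

	printf("erratum workaround %s\n", affected ? "enabled" : "not needed");
	return 0;
}

With the r0p0 entry present in the hypothetical pool, the workaround is enabled even though the other entry (r1p1) lies outside the affected range.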
arch/arm64/kernel/cpufeature.c

@@ -86,6 +86,7 @@
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
+#include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>

@@ -3680,6 +3681,7 @@ unsigned long cpu_get_elf_hwcap3(void)
 
 static void __init setup_boot_cpu_capabilities(void)
 {
+	kvm_arm_target_impl_cpu_init();
 	/*
 	 * The boot CPU's feature register values have been recorded. Detect
 	 * boot cpucaps and local cpucaps for the boot CPU, then enable and
drivers/firmware/smccc/kvm_guest.c

@@ -6,8 +6,11 @@
 #include <linux/bitmap.h>
 #include <linux/cache.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/string.h>
 
+#include <uapi/linux/psci.h>
+
 #include <asm/hypervisor.h>
 
 static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };

@@ -51,3 +54,64 @@ bool kvm_arm_hyp_service_available(u32 func_id)
 	return test_bit(func_id, __kvm_arm_hyp_services);
 }
 EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+	int i;
+	u32 ver;
+	u64 max_cpus;
+	struct arm_smccc_res res;
+	struct target_impl_cpu *target;
+
+	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+		return;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+			     0, &res);
+	if (res.a0 != SMCCC_RET_SUCCESS)
+		return;
+
+	/* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+	ver = lower_32_bits(res.a1);
+	if (PSCI_VERSION_MAJOR(ver) != 1) {
+		pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+			PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+		return;
+	}
+
+	if (!res.a2) {
+		pr_warn("No target implementation CPUs specified\n");
+		return;
+	}
+
+	max_cpus = res.a2;
+	target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+	if (!target) {
+		pr_warn("Not enough memory for struct target_impl_cpu\n");
+		return;
+	}
+
+	for (i = 0; i < max_cpus; i++) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+				     i, &res);
+		if (res.a0 != SMCCC_RET_SUCCESS) {
+			pr_warn("Discovering target implementation CPUs failed\n");
+			goto mem_free;
+		}
+		target[i].midr = res.a1;
+		target[i].revidr = res.a2;
+		target[i].aidr = res.a3;
+	};
+
+	if (!cpu_errata_set_target_impl(max_cpus, target)) {
+		pr_warn("Failed to set target implementation CPUs\n");
+		goto mem_free;
+	}
+
+	pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+	return;
+
+mem_free:
+	memblock_free(target, sizeof(*target) * max_cpus);
+}
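One detail worth noting in the hunk above: the DISCOVER_IMPL_VER hypercall returns its interface version in the lower 32 bits of res.a1, packed like an SMCCC/PSCI version word (major in the upper 16 bits, minor in the lower 16), which is why the new uapi/linux/psci.h include and the PSCI_VERSION_MAJOR()/PSCI_VERSION_MINOR() macros are used and only major version 1 is accepted. A minimal sketch of that decoding, using a made-up version word:

/*
 * Illustration only: decoding of the version word checked above.
 * The 0x00010003 value is made up; the layout (major in bits [31:16],
 * minor in bits [15:0]) is what the PSCI_VERSION_* macros decode.
 */
#include <stdint.h>
#include <stdio.h>

#define VERSION_MAJOR_SHIFT	16
#define VERSION_MINOR_MASK	((1u << VERSION_MAJOR_SHIFT) - 1)
#define VERSION_MAJOR(v)	((v) >> VERSION_MAJOR_SHIFT)
#define VERSION_MINOR(v)	((v) & VERSION_MINOR_MASK)

int main(void)
{
	uint32_t ver = 0x00010003u;	/* hypothetical: version 1.3 */

	if (VERSION_MAJOR(ver) != 1)
		printf("unsupported target CPU implementation version v%u.%u\n",
		       VERSION_MAJOR(ver), VERSION_MINOR(ver));
	else
		printf("accepted target CPU implementation version v%u.%u\n",
		       VERSION_MAJOR(ver), VERSION_MINOR(ver));
	return 0;
}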