// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

unsigned int __ro_after_init kvm_host_sve_max_vl;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int __ro_after_init kvm_sve_max_vl;

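/*
 * Set up the host-wide SVE vector length limits that KVM will offer to
 * guests.
 */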
int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();
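		/*
		 * Also record the host's own maximum VL and mirror it into
		 * the nVHE hypervisor's copy of the variable.
		 */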
		kvm_host_sve_max_vl = sve_max_vl();
		kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}

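/*
 * Finalize configuration of a deferred vCPU feature. Only
 * KVM_ARM_VCPU_SVE currently requires finalization.
 */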
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

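/*
 * Unshare the vCPU structure and its SVE state from the hypervisor,
 * then free the vCPU's dynamically allocated state.
 */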
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	kfree(vcpu->arch.ccsidr);
}

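/* Zero the SVE register storage; the configured vector length is kept. */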
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to their reset values
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	bool loaded;
	u32 pstate;

	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

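	/*
	 * If the vCPU is currently loaded (vcpu->cpu != -1), put it first so
	 * that the reset below operates on the memory-backed copies of its
	 * registers; it is loaded again at the end of the function.
	 */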
	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
			kvm_vcpu_enable_sve(vcpu);
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
	    vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
		kvm_vcpu_enable_ptrauth(vcpu);

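	/*
	 * Pick the reset PSTATE: AArch32 SVC mode for 32-bit EL1 guests,
	 * EL2h when nested virtualisation is enabled, EL1h otherwise.
	 */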
	if (vcpu_el1_is_32bit(vcpu))
		pstate = VCPU_RESET_PSTATE_SVC;
	else if (vcpu_has_nv(vcpu))
		pstate = VCPU_RESET_PSTATE_EL2;
	else
		pstate = VCPU_RESET_PSTATE_EL1;

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	kvm_timer_vcpu_reset(vcpu);

	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

u32 kvm_get_pa_bits(struct kvm *kvm)
{
	/* Fixed limit until we can configure ID_AA64MMFR0.PARange */
	return kvm_ipa_limit;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

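/*
 * Derive kvm_ipa_limit from the sanitised ID_AA64MMFR0_EL1.PARange, capping
 * it to 48 bits when LPA2 cannot be used, and verify that the host PAGE_SIZE
 * is supported at stage-2.
 */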
int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits for 4K and 16K page size is only supported
	 * when LPA2 is available. So if we have LPA2, enable it, else cap to 48
	 * bits, in case it's reported as larger on the system.
	 */
	if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}