/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_HYPERV_H
#define __KVM_X86_VMX_HYPERV_H
#include <linux/kvm_host.h>
#include "vmcs12.h"
#include "vmx.h"
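/*
 * Sentinel values for the guest-physical address of the Hyper-V enlightened
 * VMCS (eVMCS): EVMPTR_INVALID means L1 is not using an eVMCS, while
 * EVMPTR_MAP_PENDING means an eVMCS GPA is known but the page has not been
 * mapped into KVM yet (e.g. after restoring nested state).
 */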
#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)
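/*
 * Outcome of an "enlightened VMPTRLD", i.e. of acquiring the eVMCS pointer
 * ahead of nested VM-entry (see nested_vmx_handle_enlightened_vmptrld()).
 */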
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};
#ifdef CONFIG_KVM_HYPERV
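/*
 * A "valid" eVMCS pointer references a mapped, usable eVMCS; a pointer that
 * is merely "set" (see evmptr_is_set()) may still be pending a map.  The
 * nested_vmx_is_evmptr12_*() wrappers apply these checks to the eVMCS that
 * L1 uses in place of an ordinary vmcs12.
 */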
static inline bool evmptr_is_valid(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}
static inline bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
{
	return evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
}
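/* "Set" also covers EVMPTR_MAP_PENDING, i.e. a known but unmapped eVMCS. */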
static inline bool evmptr_is_set(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID;
}
static inline bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
{
	return evmptr_is_set(vmx->nested.hv_evmcs_vmptr);
}
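/* Host virtual address of the currently mapped eVMCS, NULL if unmapped. */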
static inline struct hv_enlightened_vmcs *nested_vmx_evmcs(struct vcpu_vmx *vmx)
{
	return vmx->nested.hv_evmcs;
}
static inline bool guest_cpu_cap_has_evmcs(struct kvm_vcpu *vcpu)
{
	/*
	 * eVMCS is exposed to the guest if Hyper-V is enabled in CPUID and
	 * eVMCS has been explicitly enabled by userspace.
	 */
	return vcpu->arch.hyperv_enabled &&
	       to_vmx(vcpu)->nested.enlightened_vmcs_enabled;
}
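/* Read the eVMCS GPA from the VP assist page; EVMPTR_INVALID on failure. */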
u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
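/* eVMCS version range supported for the vCPU, 0 if eVMCS is not enabled. */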
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
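/*
 * Enable eVMCS for the vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS); on success,
 * *vmcs_version is set to the supported eVMCS version range.
 */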
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
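/* Filter L1's reads of the VMX control MSRs to hide controls unsupported with eVMCS. */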
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
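/* Check vmcs12 controls for consistency with eVMCS limitations, 0 on success. */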
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
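/* True if L1 has enabled the Hyper-V "L2 TLB flush" enlightenment for this vCPU. */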
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
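/* Inject the synthetic VM-exit Hyper-V uses to signal a completed L2 TLB flush. */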
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
#else
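/* CONFIG_KVM_HYPERV=n stubs: no eVMCS is ever set, valid, or mapped. */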
static inline bool evmptr_is_valid(u64 evmptr)
{
	return false;
}
static inline bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
{
	return false;
}
static inline bool evmptr_is_set(u64 evmptr)
{
	return false;
}
static inline bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
{
	return false;
}
static inline struct hv_enlightened_vmcs *nested_vmx_evmcs(struct vcpu_vmx *vmx)
{
	return NULL;
}
#endif
#endif /* __KVM_X86_VMX_HYPERV_H */