Merge tag 'kvm-x86-mmio-6.17' of https://github.com/kvm-x86/linux into HEAD

KVM MMIO Stale Data mitigation cleanup for 6.17

Rework KVM's mitigation for the MMIO Stale Data vulnerability to track
whether or not a vCPU has access to (host) MMIO based on the MMU that will be
used when running in the guest.  The current approach doesn't actually detect
whether or not a guest has access to MMIO, and is prone to false negatives (and
to a lesser extent, false positives), as KVM_DEV_VFIO_FILE_ADD is optional, and
obviously only covers VFIO devices.
Paolo Bonzini 2025-07-28 11:04:27 -04:00
commit f05efcfe07
6 changed files with 67 additions and 8 deletions
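
As a rough illustration of the scheme described in the merge message above (a standalone userspace sketch, not kernel code): a per-root flag records whether a host-MMIO mapping was created under that root, a VM-wide flag serves as the fallback for roots with no backing shadow page, and VM-entry consults the flag to decide whether CPU buffers need to be cleared. The names below (track_host_mmio, needs_cpu_buffer_clear, the structs) are simplified stand-ins for kvm_track_host_mmio_mapping() and kvm_vcpu_can_access_host_mmio() in the hunks that follow.

/*
 * Standalone sketch with hypothetical names: models the per-root
 * has_mapped_host_mmio flag with a VM-wide fallback, and the check
 * performed when deciding whether to clear CPU buffers on VM-entry.
 * Not the actual KVM implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct mmu_root { bool has_mapped_host_mmio; };
struct vm       { bool has_mapped_host_mmio; };
struct vcpu     {
	struct vm *vm;
	struct mmu_root *root;	/* NULL if the current root has no shadow page */
};

/* Analogue of kvm_track_host_mmio_mapping(): prefer the root-scoped flag. */
static void track_host_mmio(struct vcpu *v)
{
	if (v->root)
		v->root->has_mapped_host_mmio = true;
	else
		v->vm->has_mapped_host_mmio = true;
}

/* Analogue of kvm_vcpu_can_access_host_mmio(): consulted for run flags. */
static bool needs_cpu_buffer_clear(struct vcpu *v)
{
	if (v->root)
		return v->root->has_mapped_host_mmio;
	return v->vm->has_mapped_host_mmio;
}

int main(void)
{
	struct vm vm = { .has_mapped_host_mmio = false };
	struct mmu_root root = { .has_mapped_host_mmio = false };
	struct vcpu vcpu = { .vm = &vm, .root = &root };

	printf("clear buffers on entry? %d\n", needs_cpu_buffer_clear(&vcpu)); /* 0 */
	track_host_mmio(&vcpu);	/* a host-MMIO SPTE is created under this root */
	printf("clear buffers on entry? %d\n", needs_cpu_buffer_clear(&vcpu)); /* 1 */
	return 0;
}

Keeping the flag per-root means a vCPU only pays the buffer-clearing cost while it is running on a root that actually maps host MMIO; vCPUs using other roots keep the fast path.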

arch/x86/include/asm/kvm_host.h

@@ -1465,6 +1465,7 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
bool has_mapped_host_mmio;
bool guest_can_read_msr_platform_info;
bool exception_payload_enabled;

arch/x86/kvm/mmu/mmu_internal.h

@@ -103,6 +103,9 @@ struct kvm_mmu_page {
int root_count;
refcount_t tdp_mmu_root_count;
};
bool has_mapped_host_mmio;
union {
/* These two members aren't used for TDP MMU */
struct {

arch/x86/kvm/mmu/spte.c

@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
return spte;
}
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,35 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
E820_TYPE_RAM);
}
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
{
/*
* Determining if a PFN is host MMIO is relatively expensive. Cache the
* result locally (in the sole caller) to avoid doing the full query
* multiple times when creating a single SPTE.
*/
if (*is_host_mmio < 0)
*is_host_mmio = __kvm_is_mmio_pfn(pfn);
return *is_host_mmio;
}
static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
if (root)
WRITE_ONCE(root->has_mapped_host_mmio, true);
else
WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
/*
* Force vCPUs to exit and flush CPU buffers if the vCPU is using the
* affected root(s).
*/
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
}
/*
* Returns true if the SPTE needs to be updated atomically due to having bits
* that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +191,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
{
int level = sp->role.level;
u64 spte = SPTE_MMU_PRESENT_MASK;
int is_host_mmio = -1;
bool wrprot = false;
/*
@@ -209,13 +239,15 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
if (kvm_x86_ops.get_mt_mask)
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
kvm_is_mmio_pfn(pfn, &is_host_mmio));
if (host_writable)
spte |= shadow_host_writable_mask;
else
pte_access &= ~ACC_WRITE_MASK;
if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
spte |= shadow_me_value;
spte |= (u64)pfn << PAGE_SHIFT;
@@ -260,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
}
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
!kvm_vcpu_can_access_host_mmio(vcpu) &&
kvm_is_mmio_pfn(pfn, &is_host_mmio))
kvm_track_host_mmio_mapping(vcpu);
*new_spte = spte;
return wrprot;
}

arch/x86/kvm/mmu/spte.h

@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
}
static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
if (root)
return READ_ONCE(root->has_mapped_host_mmio);
return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
}
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
{
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&

arch/x86/kvm/vmx/run_flags.h

@@ -2,10 +2,12 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

arch/x86/kvm/vmx/vmx.c

@@ -75,6 +75,8 @@
#include "vmx_onhyperv.h"
#include "posted_intr.h"
#include "mmu/spte.h"
MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
MODULE_LICENSE("GPL");
@@ -961,6 +963,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
flags |= VMX_RUN_SAVE_SPEC_CTRL;
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
return flags;
}
@@ -7288,7 +7294,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
(flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx);