KVM: x86: Fully defer to vendor code to decide how to force immediate exit
Now that vmx->req_immediate_exit is used only in the scope of vmx_vcpu_run(), use force_immediate_exit to detect that KVM should usurp the VMX preemption timer to force a VM-Exit, and let vendor code fully handle forcing a VM-Exit.

Opportunistically drop __kvm_request_immediate_exit() and just have vendor code call smp_send_reschedule() directly. SVM already does this when injecting an event while also trying to single-step an IRET, i.e. it's not exactly secret knowledge that KVM uses a reschedule IPI to force an exit.

Link: https://lore.kernel.org/r/20240110012705.506918-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Parent: 7b3d1bbf8d
Commit: 0ec3d6d1f1
6 changed files with 19 additions and 36 deletions
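Before the per-file diff, here is a stand-alone sketch of the decision this patch pushes down into vendor code. It is an illustrative user-space model, not kernel code: vendor_vcpu_run(), arm_preemption_timer() and send_reschedule_ipi() are hypothetical stand-ins for vmx_vcpu_run()/svm_vcpu_run(), vmx_update_hv_timer() and smp_send_reschedule(), and printf() stands in for the real hardware programming.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a vCPU; only the field the sketch needs. */
struct vcpu { int cpu; };

static bool enable_preemption_timer = true;	/* stand-in for the VMX module param */

/* Models vmx_update_hv_timer(): a zero-value preemption timer forces an exit. */
static void arm_preemption_timer(struct vcpu *v, bool force_immediate_exit)
{
	if (force_immediate_exit)
		printf("VMX: VMX_PREEMPTION_TIMER_VALUE = 0 -> exit right after VM-Entry\n");
	else
		printf("VMX: program (or soft-disable) the preemption timer normally\n");
}

/* Models smp_send_reschedule(): the pending IPI yields a VM-Exit right after entry. */
static void send_reschedule_ipi(struct vcpu *v)
{
	printf("reschedule IPI to CPU %d -> immediate VM-Exit\n", v->cpu);
}

/* After this patch, vendor code alone decides how to honor force_immediate_exit. */
static void vendor_vcpu_run(struct vcpu *v, bool force_immediate_exit, bool is_vmx)
{
	if (is_vmx && enable_preemption_timer)
		arm_preemption_timer(v, force_immediate_exit);
	else if (force_immediate_exit)
		send_reschedule_ipi(v);		/* SVM, or VMX without the timer */
}

int main(void)
{
	struct vcpu v = { .cpu = 3 };

	vendor_vcpu_run(&v, true, true);	/* VMX path */
	vendor_vcpu_run(&v, true, false);	/* SVM path */
	return 0;
}

The diff below implements exactly this split: common x86 code only passes force_immediate_exit down, and the request_immediate_exit hook plus the vmx->req_immediate_exit bookkeeping disappear.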
arch/x86/include/asm/kvm-x86-ops.h

@@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP(request_immediate_exit)
 KVM_X86_OP(sched_in)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
arch/x86/include/asm/kvm_host.h

@@ -1732,8 +1732,6 @@ struct kvm_x86_ops {
 			       struct x86_exception *exception);
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
-
 	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
 
 	/*
@@ -2239,7 +2237,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
 
 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 				     u32 size);
arch/x86/kvm/svm/svm.c

@@ -4140,9 +4140,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 		 * is enough to force an immediate vmexit.
 		 */
 		disable_nmi_singlestep(svm);
-		smp_send_reschedule(vcpu->cpu);
+		force_immediate_exit = true;
 	}
 
+	if (force_immediate_exit)
+		smp_send_reschedule(vcpu->cpu);
+
 	pre_svm_run(vcpu);
 
 	sync_lapic_to_cr8(vcpu);
@@ -4995,8 +4998,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_intercept = svm_check_intercept,
 	.handle_exit_irqoff = svm_handle_exit_irqoff,
 
-	.request_immediate_exit = __kvm_request_immediate_exit,
-
 	.sched_in = svm_sched_in,
 
 	.nested_ops = &svm_nested_ops,
arch/x86/kvm/vmx/vmx.c

@@ -49,6 +49,8 @@
 #include <asm/spec-ctrl.h>
 #include <asm/vmx.h>
 
+#include <trace/events/ipi.h>
+
 #include "capabilities.h"
 #include "cpuid.h"
 #include "hyperv.h"
@@ -1281,8 +1283,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	u16 fs_sel, gs_sel;
 	int i;
 
-	vmx->req_immediate_exit = false;
-
 	/*
 	 * Note that guest MSRs to be saved/restored can also be changed
 	 * when guest state is loaded. This happens when guest transitions
@@ -5988,7 +5988,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
+						   bool force_immediate_exit)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -6004,7 +6005,7 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
 	 * If the timer expired because KVM used it to force an immediate exit,
 	 * then mission accomplished.
 	 */
-	if (vmx->req_immediate_exit)
+	if (force_immediate_exit)
 		return EXIT_FASTPATH_EXIT_HANDLED;
 
 	/*
@@ -7166,13 +7167,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tscl;
 	u32 delta_tsc;
 
-	if (vmx->req_immediate_exit) {
+	if (force_immediate_exit) {
 		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
 	} else if (vmx->hv_deadline_tsc != -1) {
@@ -7225,7 +7226,8 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
 	barrier_nospec();
 }
 
-static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
+					     bool force_immediate_exit)
 {
 	/*
 	 * If L2 is active, some VMX preemption timer exits can be handled in
@@ -7239,7 +7241,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_MSR_WRITE:
 		return handle_fastpath_set_msr_irqoff(vcpu);
 	case EXIT_REASON_PREEMPTION_TIMER:
-		return handle_fastpath_preemption_timer(vcpu);
+		return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
 	default:
 		return EXIT_FASTPATH_NONE;
 	}
@@ -7382,7 +7384,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 	vmx_passthrough_lbr_msrs(vcpu);
 
 	if (enable_preemption_timer)
-		vmx_update_hv_timer(vcpu);
+		vmx_update_hv_timer(vcpu, force_immediate_exit);
+	else if (force_immediate_exit)
+		smp_send_reschedule(vcpu->cpu);
 
 	kvm_wait_lapic_expire(vcpu);
 
@@ -7446,7 +7450,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
-	return vmx_exit_handlers_fastpath(vcpu);
+	return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
 }
 
 static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
@@ -7926,11 +7930,6 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }
 
-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-	to_vmx(vcpu)->req_immediate_exit = true;
-}
-
 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
 				  struct x86_instruction_info *info)
 {
@@ -8383,8 +8382,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.request_immediate_exit = vmx_request_immediate_exit,
-
 	.sched_in = vmx_sched_in,
 
 	.cpu_dirty_log_size = PML_ENTITY_NUM,
@@ -8644,7 +8641,6 @@ static __init int hardware_setup(void)
 	if (!enable_preemption_timer) {
 		vmx_x86_ops.set_hv_timer = NULL;
 		vmx_x86_ops.cancel_hv_timer = NULL;
-		vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
 	}
 
 	kvm_caps.supported_mce_cap |= MCG_LMCE_P;
arch/x86/kvm/vmx/vmx.h

@@ -332,8 +332,6 @@ struct vcpu_vmx {
 	unsigned int ple_window;
 	bool ple_window_dirty;
 
-	bool req_immediate_exit;
-
 	/* Support for PML */
 #define PML_ENTITY_NUM 512
 	struct page *pml_pg;
arch/x86/kvm/x86.c

@@ -10667,12 +10667,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 	static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
 }
 
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-	smp_send_reschedule(vcpu->cpu);
-}
-EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
-
 /*
  * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
@@ -10922,10 +10916,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}
 
-	if (req_immediate_exit) {
+	if (req_immediate_exit)
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-		static_call(kvm_x86_request_immediate_exit)(vcpu);
-	}
 
 	fpregs_assert_state_consistent();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))