2019-06-04 10:11:32 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2015-06-19 15:45:05 +02:00
|
|
|
/*
|
|
|
|
* KVM PMU support for Intel CPUs
|
|
|
|
*
|
|
|
|
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Avi Kivity <avi@redhat.com>
|
|
|
|
* Gleb Natapov <gleb@redhat.com>
|
|
|
|
*/
|
KVM: x86: Unify pr_fmt to use module name for all KVM modules
Define pr_fmt using KBUILD_MODNAME for all KVM x86 code so that printks
use consistent formatting across common x86, Intel, and AMD code. In
addition to providing consistent print formatting, using KBUILD_MODNAME,
e.g. kvm_amd and kvm_intel, allows referencing SVM and VMX (and SEV and
SGX and ...) as technologies without generating weird messages, and
without causing naming conflicts with other kernel code, e.g. "SEV: ",
"tdx: ", "sgx: " etc.. are all used by the kernel for non-KVM subsystems.
Opportunistically move away from printk() for prints that need to be
modified anyways, e.g. to drop a manual "kvm: " prefix.
Opportunistically convert a few SGX WARNs that are similarly modified to
WARN_ONCE; in the very unlikely event that the WARNs fire, odds are good
that they would fire repeatedly and spam the kernel log without providing
unique information in each print.
Note, defining pr_fmt yields undesirable results for code that uses KVM's
printk wrappers, e.g. vcpu_unimpl(). But, that's a pre-existing problem
as SVM/kvm_amd already defines a pr_fmt, and thankfully use of KVM's
wrappers is relatively limited in KVM x86 code.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-Id: <20221130230934.1014142-35-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2022-11-30 23:09:18 +00:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2015-06-19 15:45:05 +02:00
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kvm_host.h>
|
|
|
|
#include <linux/perf_event.h>
|
2025-04-30 22:42:41 -07:00
|
|
|
#include <asm/msr.h>
|
2015-06-19 15:45:05 +02:00
|
|
|
#include <asm/perf_event.h>
|
|
|
|
#include "x86.h"
|
|
|
|
#include "cpuid.h"
|
|
|
|
#include "lapic.h"
|
2019-11-13 16:17:20 -08:00
|
|
|
#include "nested.h"
|
2015-06-19 15:45:05 +02:00
|
|
|
#include "pmu.h"
|
2024-10-30 12:00:33 -07:00
|
|
|
#include "tdx.h"
|
2015-06-19 15:45:05 +02:00
|
|
|
|
2024-01-09 15:02:28 -08:00
|
|
|
/*
|
|
|
|
* Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
|
|
|
|
* to encode the "type" of counter to read, i.e. this is not a "base". And to
|
|
|
|
* further confuse things, non-architectural PMUs use bit 31 as a flag for
|
|
|
|
* "fast" reads, whereas the "type" is an explicit value.
|
|
|
|
*/
|
2024-01-09 15:02:31 -08:00
|
|
|
#define INTEL_RDPMC_GP 0
|
2024-01-09 15:02:28 -08:00
|
|
|
#define INTEL_RDPMC_FIXED INTEL_PMC_FIXED_RDPMC_BASE
|
2023-06-06 18:02:03 -07:00
|
|
|
|
2024-01-09 15:02:30 -08:00
|
|
|
#define INTEL_RDPMC_TYPE_MASK GENMASK(31, 16)
|
|
|
|
#define INTEL_RDPMC_INDEX_MASK GENMASK(15, 0)
|
2015-06-19 15:45:05 +02:00
|
|
|
|
2020-05-29 15:43:45 +08:00
|
|
|
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
|
2015-06-19 15:45:05 +02:00
|
|
|
|
2024-10-30 12:00:33 -07:00
|
|
|
static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (is_td_vcpu(vcpu))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &to_vmx(vcpu)->lbr_desc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (is_td_vcpu(vcpu))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &to_vmx(vcpu)->lbr_desc.records;
|
|
|
|
}
|
|
|
|
|
|
|
|
#pragma GCC poison to_vmx
|
|
|
|
|
2015-06-19 15:45:05 +02:00
|
|
|
/*
 * Update the fixed counter control MSR and queue reprogramming for every
 * fixed counter whose control field actually changed.
 */
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	u64 prev_ctrl = pmu->fixed_ctr_ctrl;
	struct kvm_pmc *pmc;
	int idx;

	pmu->fixed_ctr_ctrl = data;

	for (idx = 0; idx < pmu->nr_arch_fixed_counters; idx++) {
		/* Skip counters whose per-counter control bits are unchanged. */
		if (fixed_ctrl_field(prev_ctrl, idx) ==
		    fixed_ctrl_field(data, idx))
			continue;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);

		__set_bit(KVM_FIXED_PMC_BASE_IDX + idx, pmu->pmc_in_use);
		kvm_pmu_request_counter_reprogram(pmc);
	}
}
|
|
|
|
|
2019-10-27 18:52:40 +08:00
|
|
|
/*
 * Translate the guest's RDPMC ECX value into a kvm_pmc, or NULL to signal
 * that RDPMC should fail (inject #GP).  On success, *mask is narrowed to
 * the selected counter class's bitmask so the caller returns only valid
 * counter bits.
 */
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	unsigned int type = idx & INTEL_RDPMC_TYPE_MASK;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;
	unsigned int num_counters;
	u64 bitmask;

	/*
	 * The encoding of ECX for RDPMC is different for architectural versus
	 * non-architectural PMUs (PMUs with version '0').  For architectural
	 * PMUs, bits 31:16 specify the PMC type and bits 15:0 specify the PMC
	 * index.  For non-architectural PMUs, bit 31 is a "fast" flag, and
	 * bits 30:0 specify the PMC index.
	 *
	 * Yell and reject attempts to read PMCs for a non-architectural PMU,
	 * as KVM doesn't support such PMUs.
	 */
	if (WARN_ON_ONCE(!pmu->version))
		return NULL;

	/*
	 * General Purpose (GP) PMCs are supported on all PMUs, and fixed PMCs
	 * are supported on all architectural PMUs, i.e. on all virtual PMUs
	 * supported by KVM.  Note, KVM only emulates fixed PMCs for PMU v2+,
	 * but the type itself is still valid, i.e. let RDPMC fail due to
	 * accessing a non-existent counter.  Reject attempts to read all other
	 * types, which are unknown/unsupported.
	 */
	switch (type) {
	case INTEL_RDPMC_FIXED:
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
		bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
		break;
	case INTEL_RDPMC_GP:
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
		bitmask = pmu->counter_bitmask[KVM_PMC_GP];
		break;
	default:
		return NULL;
	}

	/* Bits 15:0 hold the index within the selected counter class. */
	idx &= INTEL_RDPMC_INDEX_MASK;
	if (idx >= num_counters)
		return NULL;

	*mask &= bitmask;
	/* array_index_nospec() clamps idx to block speculative OOB access. */
	return &counters[array_index_nospec(idx, num_counters)];
}
|
|
|
|
|
2021-02-02 09:32:35 -05:00
|
|
|
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
|
2020-05-29 15:43:45 +08:00
|
|
|
{
|
KVM: x86: Replace (almost) all guest CPUID feature queries with cpu_caps
Switch all queries (except XSAVES) of guest features from guest CPUID to
guest capabilities, i.e. replace all calls to guest_cpuid_has() with calls
to guest_cpu_cap_has().
Keep guest_cpuid_has() around for XSAVES, but subsume its helper
guest_cpuid_get_register() and add a compile-time assertion to prevent
using guest_cpuid_has() for any other feature. Add yet another comment
for XSAVE to explain why KVM is allowed to query its raw guest CPUID.
Opportunistically drop the unused guest_cpuid_clear(), as there should be
no circumstance in which KVM needs to _clear_ a guest CPUID feature now
that everything is tracked via cpu_caps. E.g. KVM may need to _change_
a feature to emulate dynamic CPUID flags, but KVM should never need to
clear a feature in guest CPUID to prevent it from being used by the guest.
Delete the last remnants of the governed features framework, as the lone
holdout was vmx_adjust_secondary_exec_control()'s divergent behavior for
governed vs. ungoverned features.
Note, replacing guest_cpuid_has() checks with guest_cpu_cap_has() when
computing reserved CR4 bits is a nop when viewed as a whole, as KVM's
capabilities are already incorporated into the calculation, i.e. if a
feature is present in guest CPUID but unsupported by KVM, its CR4 bit
was already being marked as reserved, checking guest_cpu_cap_has() simply
double-stamps that it's a reserved bit.
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20241128013424.4096668-51-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
2024-11-27 17:34:17 -08:00
|
|
|
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
|
2021-02-02 09:32:35 -05:00
|
|
|
return 0;
|
2020-05-29 15:43:45 +08:00
|
|
|
|
2021-02-02 09:32:35 -05:00
|
|
|
return vcpu->arch.perf_capabilities;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
|
2020-05-29 15:43:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Look up a GP counter via its full-width MSR alias (MSR_IA32_PMC0 base);
 * NULL if full-width writes aren't enabled for the guest.
 */
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);

	return NULL;
}
|
|
|
|
|
2024-10-30 12:00:33 -07:00
|
|
|
static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
	/* TDX vCPUs never support LBRs; otherwise require a consistent model. */
	return !is_td_vcpu(vcpu) && cpuid_model_is_consistent(vcpu);
}
|
|
|
|
|
|
|
|
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (is_td_vcpu(vcpu))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return !!vcpu_to_lbr_records(vcpu)->nr;
|
|
|
|
}
|
|
|
|
|
2021-02-01 13:10:34 +08:00
|
|
|
/*
 * Check whether @index is one of the LBR MSRs (SELECT, TOS, or a FROM/TO/INFO
 * record within the vCPU's LBR stack).
 */
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *lbr;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return false;

	lbr = vcpu_to_lbr_records(vcpu);

	if (index == MSR_LBR_SELECT || index == MSR_LBR_TOS)
		return true;

	if (index >= lbr->from && index < lbr->from + lbr->nr)
		return true;

	if (index >= lbr->to && index < lbr->to + lbr->nr)
		return true;

	/* INFO MSRs exist only on some implementations (lbr->info != 0). */
	return lbr->info && index >= lbr->info && index < lbr->info + lbr->nr;
}
|
|
|
|
|
2022-06-11 00:57:52 +00:00
|
|
|
/*
 * Report whether @msr is a PMU MSR that KVM emulates for this vCPU, based on
 * the virtual PMU's configuration and the guest's advertised capabilities.
 */
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 perf_capabilities;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		/* Fixed counter controls exist only with a global ctrl MSR. */
		return kvm_pmu_has_perf_global_ctrl(pmu);
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS_ENABLE is valid iff a PEBS record format is exposed. */
		ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
		break;
	case MSR_IA32_DS_AREA:
		/* DS_AREA requires the Debug Store feature in guest CPUID. */
		ret = guest_cpu_cap_has(vcpu, X86_FEATURE_DS);
		break;
	case MSR_PEBS_DATA_CFG:
		/* DATA_CFG requires PEBS baseline and record format > 3. */
		perf_capabilities = vcpu_get_perf_capabilities(vcpu);
		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
		break;
	default:
		/*
		 * Fall back to per-counter lookups: GP counters (legacy and
		 * full-width aliases), event selectors, fixed counters, and
		 * the LBR MSR ranges.
		 */
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}
|
|
|
|
|
2019-10-27 18:52:41 +08:00
|
|
|
/*
 * Map an MSR index to its kvm_pmc, trying fixed counters, event selectors,
 * and GP counters in turn; NULL if the MSR doesn't name a counter.
 */
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	if (!pmc)
		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	if (!pmc)
		pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}
|
|
|
|
|
2021-02-01 13:10:33 +08:00
|
|
|
static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
|
|
|
|
|
2024-10-30 12:00:33 -07:00
|
|
|
if (!lbr_desc)
|
|
|
|
return;
|
|
|
|
|
2021-02-01 13:10:33 +08:00
|
|
|
if (lbr_desc->event) {
|
|
|
|
perf_event_release_kernel(lbr_desc->event);
|
|
|
|
lbr_desc->event = NULL;
|
|
|
|
vcpu_to_pmu(vcpu)->event_count--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create (or reuse) the kernel perf event that claims the LBR facility on
 * behalf of the guest.  Returns 0 on success or if an event already exists,
 * or a negative errno from perf_event_create_kernel_counter() on failure.
 */
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimum efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record guest branches behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate host perf
	 *   should schedule the event without a real HW counter but a fake
	 *   one; check is_guest_lbr_event() and
	 *   __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack
	 *   event, which helps KVM to save/restore guest LBR records
	 *   during host context switches and reduces quite a lot overhead,
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	/* Should be unreachable: TDX vCPUs (NULL desc) don't support LBRs. */
	if (WARN_ON_ONCE(!lbr_desc))
		return 0;

	/* Event already exists; just (re)mark the virtual LBR PMC in use. */
	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}
|
|
|
|
|
2021-02-01 13:10:34 +08:00
|
|
|
/*
 * It's safe to access LBR msrs from guest when they have not
 * been passthrough since the host would help restore or reset
 * the LBR msrs records when the guest LBR event is scheduled in.
 *
 * Returns false if @msr_info->index is not an LBR MSR; true if the access
 * was handled (including the "dummy" case where reads return 0 because the
 * LBR event couldn't be created or has been reclaimed by the host).
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	/* Lazily create the guest LBR event on first access. */
	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irq to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from the msr, and this avoids the
	 * host LBR value to be leaked to the guest. If LBR has been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrq(index, msr_info->data);
		else
			wrmsrq(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	/* Event no longer active => host reclaimed LBR; drop the in-use bit. */
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}
|
|
|
|
|
2020-05-29 15:43:44 +08:00
|
|
|
/*
 * Emulate a guest RDMSR of a PMU MSR.  Returns 0 with msr_info->data filled
 * in on success, or 1 if the MSR is not a recognized PMU MSR (caller injects
 * #GP or otherwise rejects the access).
 */
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		break;
	case MSR_IA32_PEBS_ENABLE:
		msr_info->data = pmu->pebs_enable;
		break;
	case MSR_IA32_DS_AREA:
		msr_info->data = pmu->ds_area;
		break;
	case MSR_PEBS_DATA_CFG:
		msr_info->data = pmu->pebs_data_cfg;
		break;
	default:
		/*
		 * Counter reads are masked to the counter's architectural
		 * width; GP counters match via either the legacy PERFCTR or
		 * full-width PMC alias.
		 */
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			break;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			break;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			break;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
			break;
		}
		/* Not a PMU MSR. */
		return 1;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|
|
|
{
|
|
|
|
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
|
|
|
|
struct kvm_pmc *pmc;
|
|
|
|
u32 msr = msr_info->index;
|
|
|
|
u64 data = msr_info->data;
|
2022-09-22 13:40:38 -07:00
|
|
|
u64 reserved_bits, diff;
|
2015-06-19 15:45:05 +02:00
|
|
|
|
|
|
|
switch (msr) {
|
|
|
|
case MSR_CORE_PERF_FIXED_CTR_CTRL:
|
2024-04-30 08:52:38 +08:00
|
|
|
if (data & pmu->fixed_ctr_ctrl_rsvd)
|
2023-01-26 17:08:03 -08:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (pmu->fixed_ctr_ctrl != data)
|
2015-06-19 15:45:05 +02:00
|
|
|
reprogram_fixed_counters(pmu, data);
|
|
|
|
break;
|
2022-04-11 18:19:36 +08:00
|
|
|
case MSR_IA32_PEBS_ENABLE:
|
2024-04-30 08:52:38 +08:00
|
|
|
if (data & pmu->pebs_enable_rsvd)
|
2023-01-26 17:08:03 -08:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (pmu->pebs_enable != data) {
|
2022-08-31 16:53:24 +08:00
|
|
|
diff = pmu->pebs_enable ^ data;
|
2022-04-11 18:19:36 +08:00
|
|
|
pmu->pebs_enable = data;
|
2022-08-31 16:53:24 +08:00
|
|
|
reprogram_counters(pmu, diff);
|
2022-04-11 18:19:36 +08:00
|
|
|
}
|
|
|
|
break;
|
2022-04-11 18:19:39 +08:00
|
|
|
case MSR_IA32_DS_AREA:
|
KVM: x86: model canonical checks more precisely
As a result of a recent investigation, it was determined that x86 CPUs
which support 5-level paging, don't always respect CR4.LA57 when doing
canonical checks.
In particular:
1. MSRs which contain a linear address, allow full 57-bitcanonical address
regardless of CR4.LA57 state. For example: MSR_KERNEL_GS_BASE.
2. All hidden segment bases and GDT/IDT bases also behave like MSRs.
This means that full 57-bit canonical address can be loaded to them
regardless of CR4.LA57, both using MSRS (e.g GS_BASE) and instructions
(e.g LGDT).
3. TLB invalidation instructions also allow the user to use full 57-bit
address regardless of the CR4.LA57.
Finally, it must be noted that the CPU doesn't prevent the user from
disabling 5-level paging, even when the full 57-bit canonical address is
present in one of the registers mentioned above (e.g GDT base).
In fact, this can happen without any userspace help, when the CPU enters
SMM mode - some MSRs, for example MSR_KERNEL_GS_BASE are left to contain
a non-canonical address in regard to the new mode.
Since most of the affected MSRs and all segment bases can be read and
written freely by the guest without any KVM intervention, this patch makes
the emulator closely follow hardware behavior, which means that the
emulator doesn't take in the account the guest CPUID support for 5-level
paging, and only takes in the account the host CPU support.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20240906221824.491834-4-mlevitsk@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
2024-09-06 18:18:23 -04:00
|
|
|
if (is_noncanonical_msr_address(data, vcpu))
|
2022-04-11 18:19:39 +08:00
|
|
|
return 1;
|
2023-01-26 17:08:03 -08:00
|
|
|
|
2022-04-11 18:19:39 +08:00
|
|
|
pmu->ds_area = data;
|
2023-01-26 17:08:03 -08:00
|
|
|
break;
|
2022-04-11 18:19:40 +08:00
|
|
|
case MSR_PEBS_DATA_CFG:
|
2024-04-30 08:52:38 +08:00
|
|
|
if (data & pmu->pebs_data_cfg_rsvd)
|
2023-01-26 17:08:03 -08:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
pmu->pebs_data_cfg = data;
|
2022-04-11 18:19:40 +08:00
|
|
|
break;
|
2015-06-19 15:45:05 +02:00
|
|
|
default:
|
2020-05-29 15:43:45 +08:00
|
|
|
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
|
|
|
|
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
|
|
|
|
if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
|
|
|
|
(data & ~pmu->counter_bitmask[KVM_PMC_GP]))
|
|
|
|
return 1;
|
2023-01-26 17:08:03 -08:00
|
|
|
|
2020-05-29 15:43:45 +08:00
|
|
|
if (!msr_info->host_initiated &&
|
|
|
|
!(msr & MSR_PMC_FULL_WIDTH_BIT))
|
2020-01-27 13:22:56 -08:00
|
|
|
data = (s64)(s32)data;
|
KVM: x86/pmu: Truncate counter value to allowed width on write
Performance counters are defined to have width less than 64 bits. The
vPMU code maintains the counters in u64 variables but assumes the value
to fit within the defined width. However, for Intel non-full-width
counters (MSR_IA32_PERFCTRx) the value receieved from the guest is
truncated to 32 bits and then sign-extended to full 64 bits. If a
negative value is set, it's sign-extended to 64 bits, but then in
kvm_pmu_incr_counter() it's incremented, truncated, and compared to the
previous value for overflow detection.
That previous value is not truncated, so it always evaluates bigger than
the truncated new one, and a PMI is injected. If the PMI handler writes
a negative counter value itself, the vCPU never quits the PMI loop.
Turns out that Linux PMI handler actually does write the counter with
the value just read with RDPMC, so when no full-width support is exposed
via MSR_IA32_PERF_CAPABILITIES, and the guest initializes the counter to
a negative value, it locks up.
This has been observed in the field, for example, when the guest configures
atop to use perfevents and runs two instances of it simultaneously.
To address the problem, maintain the invariant that the counter value
always fits in the defined bit width, by truncating the received value
in the respective set_msr methods. For better readability, factor the
out into a helper function, pmc_write_counter(), shared by vmx and svm
parts.
Fixes: 9cd803d496e7 ("KVM: x86: Update vPMCs when retiring instructions")
Cc: stable@vger.kernel.org
Signed-off-by: Roman Kagan <rkagan@amazon.de>
Link: https://lore.kernel.org/all/20230504120042.785651-1-rkagan@amazon.de
Tested-by: Like Xu <likexu@tencent.com>
[sean: tweak changelog, s/set/write in the helper]
Signed-off-by: Sean Christopherson <seanjc@google.com>
2023-05-04 14:00:42 +02:00
|
|
|
pmc_write_counter(pmc, data);
|
2023-01-26 17:08:03 -08:00
|
|
|
break;
|
2019-05-20 17:34:30 +02:00
|
|
|
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
|
KVM: x86/pmu: Truncate counter value to allowed width on write
Performance counters are defined to have width less than 64 bits. The
vPMU code maintains the counters in u64 variables but assumes the value
to fit within the defined width. However, for Intel non-full-width
counters (MSR_IA32_PERFCTRx) the value received from the guest is
truncated to 32 bits and then sign-extended to full 64 bits. If a
negative value is set, it's sign-extended to 64 bits, but then in
kvm_pmu_incr_counter() it's incremented, truncated, and compared to the
previous value for overflow detection.
That previous value is not truncated, so it always evaluates bigger than
the truncated new one, and a PMI is injected. If the PMI handler writes
a negative counter value itself, the vCPU never quits the PMI loop.
Turns out that Linux PMI handler actually does write the counter with
the value just read with RDPMC, so when no full-width support is exposed
via MSR_IA32_PERF_CAPABILITIES, and the guest initializes the counter to
a negative value, it locks up.
This has been observed in the field, for example, when the guest configures
atop to use perfevents and runs two instances of it simultaneously.
To address the problem, maintain the invariant that the counter value
always fits in the defined bit width, by truncating the received value
in the respective set_msr methods. For better readability, factor the
out into a helper function, pmc_write_counter(), shared by vmx and svm
parts.
Fixes: 9cd803d496e7 ("KVM: x86: Update vPMCs when retiring instructions")
Cc: stable@vger.kernel.org
Signed-off-by: Roman Kagan <rkagan@amazon.de>
Link: https://lore.kernel.org/all/20230504120042.785651-1-rkagan@amazon.de
Tested-by: Like Xu <likexu@tencent.com>
[sean: tweak changelog, s/set/write in the helper]
Signed-off-by: Sean Christopherson <seanjc@google.com>
2023-05-04 14:00:42 +02:00
|
|
|
pmc_write_counter(pmc, data);
|
2023-01-26 17:08:03 -08:00
|
|
|
break;
|
2015-06-19 15:45:05 +02:00
|
|
|
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
|
2022-03-09 16:42:57 +08:00
|
|
|
reserved_bits = pmu->reserved_bits;
|
|
|
|
if ((pmc->idx == 2) &&
|
|
|
|
(pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
|
|
|
|
reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
|
2023-01-26 17:08:03 -08:00
|
|
|
if (data & reserved_bits)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (data != pmc->eventsel) {
|
2022-05-18 21:25:06 +08:00
|
|
|
pmc->eventsel = data;
|
2023-03-10 19:33:49 +08:00
|
|
|
kvm_pmu_request_counter_reprogram(pmc);
|
2015-06-19 15:45:05 +02:00
|
|
|
}
|
2023-01-26 17:08:03 -08:00
|
|
|
break;
|
|
|
|
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Not a known PMU MSR. */
|
|
|
|
return 1;
|
2015-06-19 15:45:05 +02:00
|
|
|
}
|
|
|
|
|
2023-01-26 17:08:03 -08:00
|
|
|
return 0;
|
2015-06-19 15:45:05 +02:00
|
|
|
}
|
|
|
|
|
2024-01-09 15:02:23 -08:00
|
|
|
/*
 * Map fixed counter events to architectural general purpose event encodings.
 * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
 * and so KVM instead programs the architectural event to effectively request
 * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
 * instead program the encoding into a general purpose counter, e.g. if a
 * different perf_event is already utilizing the requested counter, but the end
 * result is the same (ignoring the fact that using a general purpose counter
 * will likely exacerbate counter contention).
 *
 * Forcibly inlined to allow asserting on @index at build time, and there should
 * never be more than one user.
 */
static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
{
	/* Architectural perf event for each fixed counter, indexed by PMC. */
	const enum perf_hw_id fixed_pmc_perf_ids[] = {
		[0] = PERF_COUNT_HW_INSTRUCTIONS,
		[1] = PERF_COUNT_HW_CPU_CYCLES,
		[2] = PERF_COUNT_HW_REF_CPU_CYCLES,
	};
	u64 eventsel;

	/*
	 * Compile-time assertions: the table must cover every possible fixed
	 * counter, and @index must be provably in range (possible because the
	 * function is __always_inline'd with constant arguments).
	 */
	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUTNERS);
	BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS);

	/*
	 * Yell if perf reports support for a fixed counter but perf doesn't
	 * have a known encoding for the associated general purpose event.
	 */
	eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]);
	WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed);
	return eventsel;
}
|
|
|
|
|
2024-06-07 17:08:19 -07:00
|
|
|
/*
 * Allow the guest to set @bits in the fixed counter control MSR for every
 * fixed counter exposed to the guest, by clearing the corresponding
 * reserved-bit mask entries (a set bit in fixed_ctr_ctrl_rsvd marks the bit
 * as reserved).
 */
static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
}
|
|
|
|
|
2015-06-19 15:45:05 +02:00
|
|
|
/*
 * (Re)configure the vCPU's PMU from guest CPUID leaf 0xA (and leaf 0x7 for
 * HLE/RTM), clamping everything the guest advertises against host PMU
 * capabilities (kvm_pmu_cap), and derive the reserved-bit masks for the PMU
 * control MSRs.
 */
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	u64 perf_capabilities;
	u64 counter_rsvd;

	if (!lbr_desc)
		return;

	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

	/*
	 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
	 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
	 * should never be reached while KVM is passing through MSRs.
	 */
	if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
		return;

	/* No CPUID.0xA, or a zero version_id, means no architectural PMU. */
	entry = kvm_find_cpuid_entry(vcpu, 0xa);
	if (!entry)
		return;

	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	/* Clamp GP counter count, width and event mask to host capability. */
	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 kvm_pmu_cap.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width,
				    kvm_pmu_cap.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length,
				      kvm_pmu_cap.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	/* Fixed counters exist only for architectural PMU version >= 2. */
	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
						    kvm_pmu_cap.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
						  kvm_pmu_cap.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
						 INTEL_FIXED_0_USER |
						 INTEL_FIXED_0_ENABLE_PMI);

	/* Bits beyond the implemented GP + fixed counters are reserved. */
	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
	pmu->global_ctrl_rsvd = counter_rsvd;

	/*
	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
	 * share reserved bit definitions.  The kernel just happens to use
	 * OVF_CTRL for the names.
	 */
	pmu->global_status_rsvd = pmu->global_ctrl_rsvd
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_status_rsvd &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	/*
	 * Un-reserve HSW_IN_TX and allow HSW_IN_TX{,_CHECKPOINTED} in the raw
	 * event mask when CPUID.0x7 advertises HLE/RTM and the host supports
	 * the feature.
	 */
	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
		pmu->reserved_bits ^= HSW_IN_TX;
		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
	}

	/* Mark the implemented GP and fixed PMC indices as valid. */
	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	/*
	 * NOTE(review): intel_pmu_lbr_is_compatible() presumably vets the
	 * guest's LBR model against the host's — confirm in its definition.
	 */
	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
	if (intel_pmu_lbr_is_compatible(vcpu) &&
	    (perf_capabilities & PMU_CAP_LBR_FMT))
		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);

	/* PEBS reserved-bit masks depend on the Baseline capability. */
	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
			pmu->pebs_enable_rsvd = counter_rsvd;
			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
			intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
		} else {
			/* Without Baseline, only GP counters may do PEBS. */
			pmu->pebs_enable_rsvd =
				~((1ull << pmu->nr_arch_gp_counters) - 1);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * One-time initialization of the vCPU's PMC arrays and LBR bookkeeping.
 * Every possible counter is initialized here; the counts actually exposed to
 * the guest are established later by intel_pmu_refresh().
 */
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc)
		return;

	for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUTNERS; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		/* Fixed PMC indices live above the GP range. */
		pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
		pmu->fixed_counters[i].current_config = 0;
		pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
	}

	/* Start with no LBR records, no backing event and no passthrough. */
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}
|
|
|
|
|
|
|
|
/* vPMU reset hook: drop the perf_event backing the guest's LBRs, if any. */
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	intel_pmu_release_guest_lbr_event(vcpu);
}
|
|
|
|
|
2021-02-01 13:10:36 +08:00
|
|
|
/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and
 * the KVM emulates to clear the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * Guest needs to re-enable LBR to resume branches recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmx_guest_debugctl_read();

	/* Clear DEBUGCTL.LBR only when the guest opted into freeze-on-PMI. */
	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmx_guest_debugctl_write(vcpu, data);
	}
}
|
|
|
|
|
|
|
|
static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u8 version = vcpu_to_pmu(vcpu)->version;
|
|
|
|
|
|
|
|
if (!intel_pmu_lbr_is_enabled(vcpu))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (version > 1 && version < 4)
|
|
|
|
intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
|
|
|
|
}
|
|
|
|
|
2021-02-01 13:10:34 +08:00
|
|
|
/*
 * Toggle interception (@set == true intercepts, false passes through) of all
 * LBR-related MSRs: every FROM/TO (and INFO, when present) pair in the LBR
 * stack, plus the SELECT and TOS MSRs.
 */
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	int idx;

	for (idx = 0; idx < records->nr; idx++) {
		vmx_set_intercept_for_msr(vcpu, records->from + idx, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, records->to + idx, MSR_TYPE_RW, set);
		/* INFO MSRs exist only on models that report a base address. */
		if (records->info)
			vmx_set_intercept_for_msr(vcpu, records->info + idx, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}
|
|
|
|
|
|
|
|
static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2021-02-01 13:10:35 +08:00
|
|
|
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
|
|
|
|
|
|
|
|
if (!lbr_desc->msr_passthrough)
|
|
|
|
return;
|
|
|
|
|
2021-02-01 13:10:34 +08:00
|
|
|
vmx_update_intercept_for_lbr_msrs(vcpu, true);
|
2021-02-01 13:10:35 +08:00
|
|
|
lbr_desc->msr_passthrough = false;
|
2021-02-01 13:10:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2021-02-01 13:10:35 +08:00
|
|
|
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
|
|
|
|
|
|
|
|
if (lbr_desc->msr_passthrough)
|
|
|
|
return;
|
|
|
|
|
2021-02-01 13:10:34 +08:00
|
|
|
vmx_update_intercept_for_lbr_msrs(vcpu, false);
|
2021-02-01 13:10:35 +08:00
|
|
|
lbr_desc->msr_passthrough = true;
|
2021-02-01 13:10:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via ipi calls (more details in perf_install_in_context).
 *
 * Before entering the non-root mode (with irq disabled here), double
 * confirm that the pmu features enabled to the guest are not reclaimed
 * by higher priority host events. Otherwise, disallow vcpu's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (WARN_ON_ONCE(!lbr_desc))
		return;

	if (!lbr_desc->event) {
		/* No backing perf_event: intercept the LBR MSRs... */
		vmx_disable_lbr_msrs_passthrough(vcpu);
		/*
		 * ...and warn if the guest still believes LBRs are live, i.e.
		 * DEBUGCTL.LBR is set or the virtual LBR PMC is in use.
		 */
		if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		/* The backing event is not active — the host reclaimed the LBRs. */
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
}
|
|
|
|
|
2021-02-01 13:10:37 +08:00
|
|
|
/*
 * vPMU cleanup hook: release the guest LBR perf_event unless the guest still
 * has LBR enabled in its DEBUGCTL.
 */
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}
|
|
|
|
|
2022-04-11 18:19:43 +08:00
|
|
|
/*
 * Build pmu->host_cross_mapped_mask: for every globally enabled, in-use PMC
 * backed by a perf_event, record the host hardware counter index when it
 * differs from the guest's PMC index.
 */
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc = NULL;
	int bit, hw_idx;

	kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
		if (!pmc_speculative_in_use(pmc) ||
		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
			continue;

		/*
		 * A negative index indicates the event isn't mapped to a
		 * physical counter in the host, e.g. due to contention.
		 */
		hw_idx = pmc->perf_event->hw.idx;
		if (hw_idx != pmc->idx && hw_idx > -1)
			pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
	}
}
|
|
|
|
|
2022-03-29 23:50:53 +00:00
|
|
|
/* Intel (VMX) implementation of KVM's vendor PMU callbacks. */
struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
	.MIN_NR_GP_COUNTERS = 1,
};
|