KVM: arm64: Use a cpucap to determine if system supports FEAT_PMUv3

KVM is about to learn some new tricks to virtualize PMUv3 on IMPDEF
hardware. As part of that, we now need to differentiate host support
from guest support for PMUv3.

Add a cpucap to determine if an architectural PMUv3 is present to guard
host usage of PMUv3 controls.
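
For illustration only, the guard this provides looks roughly like the sketch below; the wrapper function is hypothetical and merely mirrors the pattern used in the hunks that follow (check the cap, then touch PMUv3 registers):

/* Hypothetical helper, sketched to show the intended guard pattern. */
static void host_clear_pmu_selector(void)
{
	/* Skip PMUv3 controls entirely on non-PMUv3 (e.g. IMPDEF) hardware. */
	if (!system_supports_pmuv3())
		return;

	write_sysreg(0, pmselr_el0);
}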

Tested-by: Janne Grunau <j@jannau.net>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250305202641.428114-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Author: Oliver Upton <oliver.upton@linux.dev>
Date:   2025-03-05 12:26:32 -08:00
Commit: 6f34024d18 (parent ed335722b4)

7 changed files, 46 insertions(+), 8 deletions(-)

arch/arm64/include/asm/cpucaps.h

@@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
 		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
 		 */
 		return true;
+	case ARM64_HAS_PMUV3:
+		return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
 	}

 	return true;

arch/arm64/include/asm/cpufeature.h

@@ -866,6 +866,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
 	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
 }

+static inline bool system_supports_pmuv3(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);

arch/arm64/kernel/cpufeature.c

@@ -1898,6 +1898,28 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
 }
 #endif

+#ifdef CONFIG_HW_PERF_EVENTS
+static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	unsigned int pmuver;
+
+	/*
+	 * PMUVer follows the standard ID scheme for an unsigned field with the
+	 * exception of 0xF (IMP_DEF) which is treated specially and implies
+	 * FEAT_PMUv3 is not implemented.
+	 *
+	 * See DDI0487L.a D24.1.3.2 for more details.
+	 */
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return false;
+
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+}
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))
@@ -2998,6 +3020,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP)
 	},
+#endif
+#ifdef CONFIG_HW_PERF_EVENTS
+	{
+		.desc = "PMUv3",
+		.capability = ARM64_HAS_PMUV3,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_pmuv3,
+	},
 #endif
 	{},
 };
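
To make the PMUVer handling in has_pmuv3() above concrete, here is a small, self-contained sketch (plain C, with the field encodings written out as assumed constants rather than the kernel's ID_AA64DFR0_EL1_* definitions) showing how example field values are classified:

#include <stdbool.h>
#include <stdio.h>

/* Assumed ID_AA64DFR0_EL1.PMUVer encodings, for illustration only. */
#define PMUVER_NI	0x0	/* No PMU implemented */
#define PMUVER_IMP	0x1	/* FEAT_PMUv3 implemented */
#define PMUVER_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED PMU, not FEAT_PMUv3 */

/* Mirrors the has_pmuv3() decision on an already-extracted PMUVer value. */
static bool pmuver_implies_pmuv3(unsigned int pmuver)
{
	/* 0xF is not "newer than IMP" in the ID scheme; it means IMPDEF. */
	if (pmuver == PMUVER_IMP_DEF)
		return false;

	return pmuver >= PMUVER_IMP;
}

int main(void)
{
	printf("PMUVer 0x0 -> %d\n", pmuver_implies_pmuv3(PMUVER_NI));	    /* 0 */
	printf("PMUVer 0x1 -> %d\n", pmuver_implies_pmuv3(PMUVER_IMP));	    /* 1 */
	printf("PMUVer 0xf -> %d\n", pmuver_implies_pmuv3(PMUVER_IMP_DEF)); /* 0 */
	return 0;
}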

arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -244,7 +244,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
 	 * EL1 instead of being trapped to EL2.
 	 */
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;

 		write_sysreg(0, pmselr_el0);
@@ -281,7 +281,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

 	write_sysreg(0, hstr_el2);
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;

 		hctxt = host_data_ptr(host_ctxt);

arch/arm64/kvm/pmu.c

@@ -41,7 +41,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

-	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
+	if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
 		return;

 	if (!attr->exclude_host)
@@ -57,7 +57,7 @@ void kvm_clr_pmu_events(u64 clr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

-	if (!kvm_arm_support_pmu_v3())
+	if (!system_supports_pmuv3())
 		return;

 	pmu->events_host &= ~clr;
@@ -133,7 +133,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;

-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;

 	preempt_disable();
@@ -154,7 +154,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;

-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;

 	pmu = kvm_get_pmu_events();
@@ -180,7 +180,7 @@ bool kvm_set_pmuserenr(u64 val)
 	struct kvm_cpu_context *hctxt;
 	struct kvm_vcpu *vcpu;

-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return false;

 	vcpu = kvm_get_running_vcpu();

arch/arm64/tools/cpucaps

@@ -45,6 +45,7 @@ HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
 HAS_PAN
+HAS_PMUV3
 HAS_S1PIE
 HAS_S1POE
 HAS_RAS_EXTN

include/kvm/arm_pmu.h

@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
  */
 #define kvm_pmu_update_vcpu_events(vcpu)				\
 	do {								\
-		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
+		if (!has_vhe() && system_supports_pmuv3())		\
 			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)