KVM: arm64: Add feature checking helpers
In order to make it easier to check whether a particular feature is exposed to a guest, add a new set of helpers, with kvm_has_feat() being the most useful.

Let's start making use of them in the PMU code (courtesy of Oliver). Follow-up changes will introduce additional use patterns.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240214131827.2856277-3-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
parent aeddd5b214
commit c62d7a23b9
4 changed files with 52 additions and 20 deletions
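
As a quick illustration of the call pattern the new helpers enable (a minimal sketch only; the FEAT_LO check mirrors the trap_loregion() conversion in the diff below):

	/* Is FEAT_LO advertised to the guest via its sanitised ID_AA64MMFR1_EL1? */
	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
		; /* LORegion accesses can be handled rather than UNDEF'd */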
arch/arm64/include/asm/kvm_host.h

@@ -1233,4 +1233,48 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+#define __expand_field_sign_unsigned(id, fld, val)			\
+	((u64)SYS_FIELD_VALUE(id, fld, val))
+
+#define __expand_field_sign_signed(id, fld, val)			\
+	({								\
+		u64 __val = SYS_FIELD_VALUE(id, fld, val);		\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
+	})
+
+#define expand_field_sign(id, fld, val)					\
+	(id##_##fld##_SIGNED ?						\
+	 __expand_field_sign_signed(id, fld, val) :			\
+	 __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld)				\
+	({								\
+		u64 __val = IDREG((kvm), SYS_##id);			\
+		FIELD_GET(id##_##fld##_MASK, __val);			\
+	})
+
+#define get_idreg_field_signed(kvm, id, fld)				\
+	({								\
+		u64 __val = get_idreg_field_unsigned(kvm, id, fld);	\
+		sign_extend64(__val, id##_##fld##_WIDTH - 1);		\
+	})
+
+#define get_idreg_field_enum(kvm, id, fld)				\
+	get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld)					\
+	(id##_##fld##_SIGNED ?						\
+	 get_idreg_field_signed(kvm, id, fld) :				\
+	 get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit)				\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val)				\
+	(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+
+#define kvm_has_feat_range(kvm, id, fld, min, max)			\
+	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
+	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
 #endif /* __ARM64_KVM_HOST_H__ */
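For an unsigned field such as ID_AA64PFR0_EL1.EL2, where id##_##fld##_SIGNED evaluates to false, kvm_has_feat() reduces to roughly the following comparison against the VM's sanitised ID register (an illustrative hand-expansion, not literal preprocessor output):

	/* kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP), roughly expanded */
	FIELD_GET(ID_AA64PFR0_EL1_EL2_MASK, IDREG(kvm, SYS_ID_AA64PFR0_EL1)) >=
		(u64)SYS_FIELD_VALUE(ID_AA64PFR0_EL1, EL2, IMP)

Signed fields take the equivalent path through the *_signed helpers, with both sides run through sign_extend64() so that an all-ones field value compares as negative rather than as the highest feature level.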
arch/arm64/kvm/pmu-emul.c

@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
 {
 	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
 		   kvm_pmu_event_mask(kvm);
-	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
 
-	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
 		mask |= ARMV8_PMU_INCLUDE_EL2;
 
-	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
 		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
 			ARMV8_PMU_EXCLUDE_NS_EL1 |
 			ARMV8_PMU_EXCLUDE_EL3;

@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
  */
 static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 {
+	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
 	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
-		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
+		kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
 }
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)

@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		return;
 
 	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
-	if (!kvm_pmu_is_3p5(vcpu))
+	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
arch/arm64/kvm/sys_regs.c

@@ -505,10 +505,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
 			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
 	u32 sr = reg_to_encoding(r);
 
-	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
+	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
 		kvm_inject_undefined(vcpu);
 		return false;
 	}

@@ -2748,8 +2747,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
 		return ignore_write(vcpu, p);
 	} else {
 		u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
-		u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
-		u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
 
 		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
include/kvm/arm_pmu.h

@@ -90,16 +90,6 @@ void kvm_vcpu_pmu_resync_el0(void);
 		vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({						\
-	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);		\
-	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);	\
-									\
-	pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;				\
-})
-
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 int kvm_arm_set_default_pmu(struct kvm *kvm);

@@ -168,7 +158,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 }
 
 #define kvm_vcpu_has_pmu(vcpu)	({ false; })
-#define kvm_pmu_is_3p5(vcpu)	({ false; })
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
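The enum and range flavours are not exercised by this patch; a hypothetical caller might look like the following (the PMUVer window is purely illustrative):

	/* Hypothetical: does the guest see a PMU between v3.1 and v3.5? */
	if (kvm_has_feat_range(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1, V3P5))
		; /* PMU version within the requested window */

kvm_has_feat_enum(), by contrast, demands an exact match on the unsigned field value rather than a "at least this level" comparison.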