mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
KVM/arm64 changes for 6.13, part #3
- Always check page state in hyp_ack_unshare()
- Align set_id_regs selftest with the fact that ASIDBITS field is RO
- Various vPMU fixes for bugs that only affect nested virt

-----BEGIN PGP SIGNATURE-----

iI0EABYIADUWIQSNXHjWXuzMZutrKNKivnWIJHzdFgUCZ3Cfbxccb2xpdmVyLnVw
dG9uQGxpbnV4LmRldgAKCRCivnWIJHzdFuc4AQDcDPzJAmtA2xem+JOTPwTOl4Fk
TKDfwes3zZ/42/BA8wEA2X4vKcSwQTujdCpRrRiQvegn3pcT4V9oo9TB0kzjoQU=
=KEa8
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-6.13-3' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 changes for 6.13, part #3

- Always check page state in hyp_ack_unshare()
- Align set_id_regs selftest with the fact that ASIDBITS field is RO
- Various vPMU fixes for bugs that only affect nested virt
commit 5c99a684c9

5 changed files with 63 additions and 70 deletions
arch/arm64/kvm/hyp/nvhe/mem_protect.c

@@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
 		return -EBUSY;
 
-	if (__hyp_ack_skip_pgtable_check(tx))
-		return 0;
-
 	return __hyp_check_page_state_range(addr, size,
 					    PKVM_PAGE_SHARED_BORROWED);
 }
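For context, hyp_ack_unshare() after this hunk looks roughly like the sketch below, reassembled from the diff; the size initialisation at the top of the function sits outside the hunk and is assumed here.

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;	/* assumed; not shown in the hunk */

	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
		return -EBUSY;

	/* The page state is now validated unconditionally, with no skip path. */
	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}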
arch/arm64/kvm/pmu-emul.c

@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {

@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-
-	if (!kvm_vcpu_has_pmu(vcpu))
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
+	}
 
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
-		return;
-
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
-	}
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
 {
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
+}
+
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+{
 	int i;
 

@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*

@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
-		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+		/*
+		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+		 * to the 'guest' range of counters and never the 'hyp' range.
+		 */
+		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+				     ~kvm_pmu_hyp_counter_mask(vcpu) &
+				     ~BIT(ARMV8_PMU_CYCLE_IDX);
+
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)

@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
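Putting the pmu-emul.c hunks above together, the consolidated helper ends up looking roughly like the sketch below; it is reassembled from the diff, and the !kvm_vcpu_has_pmu() guard between the two hunks is assumed unchanged.

void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)	/* assumed unchanged context */
		return;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!(val & BIT(i)))
			continue;

		/* Reconcile each touched counter with its current enable state. */
		if (kvm_pmu_counter_is_enabled(pmc))
			kvm_pmc_enable_perf_event(pmc);
		else
			kvm_pmc_disable_perf_event(pmc);
	}

	kvm_vcpu_pmu_restore_guest(vcpu);
}

Callers pass the set of counters whose state may have changed (PMCNTEN writes, PMCR_EL0.E flips via KVM_REQ_RELOAD_PMU, or MDCR_EL2.HPME changes), and each affected counter's perf event is created, enabled, or disabled to match.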
arch/arm64/kvm/sys_regs.c

@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
-		if (r->Op2 & 0x1) {
+		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-			kvm_pmu_enable_counter_mask(vcpu, val);
-			kvm_vcpu_pmu_restore_guest(vcpu);
-		} else {
+		else
 			/* accessing PMCNTENCLR_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-			kvm_pmu_disable_counter_mask(vcpu, val);
-		}
+
+		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	}

@@ -2450,6 +2448,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
 	return __el2_visibility(vcpu, rd, s1pie_visibility);
 }
 
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+			struct sys_reg_params *p,
+			const struct sys_reg_desc *r)
+{
+	u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+	if (!access_rw(vcpu, p, r))
+		return false;
+
+	/*
+	 * Request a reload of the PMU to enable/disable the counters affected
+	 * by HPME.
+	 */
+	if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+	return true;
+}
+
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2

@@ -2983,7 +3001,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
 	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
-	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+	EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
 	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
 	EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
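For readability, the write side of access_pmcnten() after this change, reassembled from the hunk above, looks roughly like this:

	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;

		kvm_pmu_reprogram_counter_mask(vcpu, val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

Both the set and clear paths now funnel the touched counters into kvm_pmu_reprogram_counter_mask(), which also takes over the kvm_vcpu_pmu_restore_guest() call that the set path used to make directly.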
include/kvm/arm_pmu.h

@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);

@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
tools/testing/selftests/kvm/aarch64/set_id_regs.c

@@ -152,7 +152,6 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
-	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
 	REG_FTR_END,
 };