Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
KVM: x86/pmu: Pass only "struct kvm_pmc *pmc" to reprogram_counter()
Passing the reference "struct kvm_pmc *pmc" when creating pmc->perf_event
is sufficient. This change helps to simplify the calling convention by
replacing reprogram_{gp, fixed}_counter() with reprogram_counter() seamlessly.

No functional change intended.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-5-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 89cb454ea9
commit a40239b4cf

3 changed files with 24 additions and 27 deletions
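Before walking the hunks, a condensed view of the interface change (an illustrative sketch assembled from the diff below, not extra kernel code): the old entry point took a PMU pointer plus a counter index and had to look up the pmc itself, while the new one takes the pmc the caller already holds.

/* Old prototype: the callee resolved the pmc from (pmu, pmc_idx). */
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

/*
 * New prototype: the caller passes the pmc directly; reprogram_counter()
 * dispatches to reprogram_gp_counter() or reprogram_fixed_counter()
 * based on pmc_is_gp(pmc).
 */
void reprogram_counter(struct kvm_pmc *pmc);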
arch/x86/kvm/pmu.c

@@ -355,18 +355,13 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
-void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
+void reprogram_counter(struct kvm_pmc *pmc)
 {
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, pmc_idx);
-
-	if (!pmc)
-		return;
-
 	if (pmc_is_gp(pmc))
 		reprogram_gp_counter(pmc, pmc->eventsel);
 	else {
-		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
-		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
+		int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
+		u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
 
 		reprogram_fixed_counter(pmc, ctrl, idx);
 	}
@@ -385,8 +380,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 			clear_bit(bit, pmu->reprogram_pmi);
 			continue;
 		}
-
-		reprogram_counter(pmu, bit);
+		reprogram_counter(pmc);
 	}
 
 	/*
@@ -559,13 +553,12 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 
 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 {
-	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	u64 prev_count;
 
 	prev_count = pmc->counter;
 	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
 
-	reprogram_counter(pmu, pmc->idx);
+	reprogram_counter(pmc);
 	if (pmc->counter < prev_count)
 		__kvm_perf_overflow(pmc, false);
 }
arch/x86/kvm/pmu.h

@@ -175,7 +175,7 @@ static inline void kvm_init_pmu_capability(void)
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
-void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
+void reprogram_counter(struct kvm_pmc *pmc);
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
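Because the new prototype no longer takes a raw counter index, a call site that starts from an index must resolve the pmc itself and skip counters that do not exist. A minimal sketch of that pattern, assuming a pmu and a bit index are in scope (the generic code uses the kvm_x86_pmu_pmc_idx_to_pmc static call for this, as in the lines removed above; the Intel-specific code below calls intel_pmc_idx_to_pmc() directly):

	/*
	 * Resolve the pmc for this counter index; NULL means the counter
	 * is not implemented, so there is nothing to reprogram.
	 */
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

	if (pmc)
		reprogram_counter(pmc);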
arch/x86/kvm/vmx/pmu_intel.c

@@ -56,16 +56,32 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 	pmu->fixed_ctr_ctrl = data;
 }
 
+static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
+{
+	if (pmc_idx < INTEL_PMC_IDX_FIXED) {
+		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
+				  MSR_P6_EVNTSEL0);
+	} else {
+		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+
+		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
+	}
+}
+
 /* function is called when global control register has been updated. */
 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 {
 	int bit;
 	u64 diff = pmu->global_ctrl ^ data;
+	struct kvm_pmc *pmc;
 
 	pmu->global_ctrl = data;
 
-	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-		reprogram_counter(pmu, bit);
+	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
+		pmc = intel_pmc_idx_to_pmc(pmu, bit);
+		if (pmc)
+			reprogram_counter(pmc);
+	}
 }
 
 static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
@@ -104,18 +120,6 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
 }
 
-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	if (pmc_idx < INTEL_PMC_IDX_FIXED)
-		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
-				  MSR_P6_EVNTSEL0);
-	else {
-		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
-
-		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
-	}
-}
-
 static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);