KVM: x86/pmu: Pass only "struct kvm_pmc *pmc" to reprogram_counter()
author    Like Xu <likexu@tencent.com>  Wed, 18 May 2022 13:25:05 +0000 (21:25 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>  Wed, 8 Jun 2022 08:48:48 +0000 (04:48 -0400)
Passing the reference "struct kvm_pmc *pmc" when creating
pmc->perf_event is sufficient: the pmc already carries its own index
and, via pmc_to_pmu(), a pointer back to its PMU. This simplifies the
calling convention and allows reprogram_{gp, fixed}_counter() to be
seamlessly replaced with reprogram_counter().

No functional change intended.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-5-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
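
For context, the calling-convention change at a glance; a minimal
before/after sketch distilled from the hunks below:

	/* Before: callers passed the PMU plus a raw counter index. */
	reprogram_counter(pmu, pmc->idx);

	/* After: the pmc alone suffices; the function recovers the PMU
	 * via pmc_to_pmu(pmc) and the counter index via pmc->idx.
	 */
	reprogram_counter(pmc);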
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/vmx/pmu_intel.c

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index ee6b2895faed16fad2c6dab2774ff559ee90a786..817352d83ff4c860e78846880c7562f99e879f1e 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -355,18 +355,13 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 }
 EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
-void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
+void reprogram_counter(struct kvm_pmc *pmc)
 {
-       struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, pmc_idx);
-
-       if (!pmc)
-               return;
-
        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
-               int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
-               u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
+               int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
+               u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
 
                reprogram_fixed_counter(pmc, ctrl, idx);
        }
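
With this hunk applied, the whole helper reads as follows
(reconstructed from the added and context lines above; only the
function's closing brace lies outside the hunk):

	void reprogram_counter(struct kvm_pmc *pmc)
	{
		if (pmc_is_gp(pmc))
			reprogram_gp_counter(pmc, pmc->eventsel);
		else {
			int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
			u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);

			reprogram_fixed_counter(pmc, ctrl, idx);
		}
	}

Note that the NULL check guarding the kvm_x86_pmu_pmc_idx_to_pmc
lookup is gone; a caller that still resolves counters by raw index
must now perform it itself, as global_ctrl_changed() does in the
pmu_intel.c hunk below.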
@@ -385,8 +380,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
                        clear_bit(bit, pmu->reprogram_pmi);
                        continue;
                }
-
-               reprogram_counter(pmu, bit);
+               reprogram_counter(pmc);
        }
 
        /*
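
For reference, a sketch of how the surrounding loop in
kvm_pmu_handle_event() plausibly reads after this hunk. The lines
resolving pmc sit outside the hunk's context, so they are assumed
here, modeled on the lookup removed from reprogram_counter() above:

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		/* Assumed: pmc resolved via the static call, as in the
		 * old reprogram_counter() body.
		 */
		struct kvm_pmc *pmc =
			static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		/* Drop stale bits for slots with no backing counter. */
		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}
		reprogram_counter(pmc);
	}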
@@ -559,13 +553,12 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 
 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 {
-       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 prev_count;
 
        prev_count = pmc->counter;
        pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
 
-       reprogram_counter(pmu, pmc->idx);
+       reprogram_counter(pmc);
        if (pmc->counter < prev_count)
                __kvm_perf_overflow(pmc, false);
 }
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 1398297ae6dc7d7d6205f33c61b22fc788d828e0..1dd6be22cdb2d2c3cfa7af2bbc2a16caa377f4a8 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -175,7 +175,7 @@ static inline void kvm_init_pmu_capability(void)
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
-void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
+void reprogram_counter(struct kvm_pmc *pmc);
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 8eca1321af7e3c7c8e972197fb301e9dfff5939e..719ae6c62a5a2497f949dad5239a9967f4fbcc31 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -56,16 +56,32 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
        pmu->fixed_ctr_ctrl = data;
 }
 
+static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
+{
+       if (pmc_idx < INTEL_PMC_IDX_FIXED) {
+               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
+                                 MSR_P6_EVNTSEL0);
+       } else {
+               u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+
+               return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
+       }
+}
+
 /* function is called when global control register has been updated. */
 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 {
        int bit;
        u64 diff = pmu->global_ctrl ^ data;
+       struct kvm_pmc *pmc;
 
        pmu->global_ctrl = data;
 
-       for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-               reprogram_counter(pmu, bit);
+       for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
+               pmc = intel_pmc_idx_to_pmc(pmu, bit);
+               if (pmc)
+                       reprogram_counter(pmc);
+       }
 }
 
 static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
@@ -104,18 +120,6 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
 }
 
-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-       if (pmc_idx < INTEL_PMC_IDX_FIXED)
-               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
-                                 MSR_P6_EVNTSEL0);
-       else {
-               u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
-
-               return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
-       }
-}
-
 static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);