KVM: x86/pmu: Use only the uniform interface reprogram_counter()
author    Paolo Bonzini <pbonzini@redhat.com>
          Wed, 25 May 2022 09:28:56 +0000 (05:28 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 8 Jun 2022 08:48:55 +0000 (04:48 -0400)
Since reprogram_counter() and reprogram_{gp, fixed}_counter() currently
take the same parameter "struct kvm_pmc *pmc", the callers can be
simplified by uniformly using the exported interface reprogram_counter().
This in turn makes reprogram_{gp, fixed}_counter() static and eliminates
their EXPORT_SYMBOL_GPL exports.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-8-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
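
For context, reprogram_counter() stays exported from arch/x86/kvm/pmu.c
and dispatches to the two now-static helpers. Its body is not part of
this diff; a minimal sketch of the dispatch, assuming the pmc_is_gp()
helper from pmu.h:

    /*
     * Sketch only: mirrors the uniform entry point in arch/x86/kvm/pmu.c
     * as of this series.  pmc_is_gp() (from pmu.h) reports whether the
     * counter is a general-purpose or a fixed PMC, so callers no longer
     * need to pick the type-specific helper themselves.
     */
    void reprogram_counter(struct kvm_pmc *pmc)
    {
            if (pmc_is_gp(pmc))
                    reprogram_gp_counter(pmc);
            else
                    reprogram_fixed_counter(pmc);
    }
    EXPORT_SYMBOL_GPL(reprogram_counter);

With the helpers static, the compiler is also free to inline them into
reprogram_counter().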
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 4c354298e51603689d2d665f70d49c6746ef28fe..d2a0581d9d4d6be5678ca36e573fc3b5d2072962 100644
@@ -283,7 +283,7 @@ out:
        return allow_event;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc)
+static void reprogram_gp_counter(struct kvm_pmc *pmc)
 {
        u64 config;
        u32 type = PERF_TYPE_RAW;
@@ -325,9 +325,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc)
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                              eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
-EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
-void reprogram_fixed_counter(struct kvm_pmc *pmc)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
@@ -355,7 +354,6 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc)
                              !(en_field & 0x1), /* exclude kernel */
                              pmi);
 }
-EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
 void reprogram_counter(struct kvm_pmc *pmc)
 {
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index fe31bbd1f9060c802195ead22e7faea31a96d4d9..60faf27678d9c810b53f58317e3ac1be473c399d 100644
@@ -173,8 +173,6 @@ static inline void kvm_init_pmu_capability(void)
                                             KVM_PMC_MAX_FIXED);
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc);
-void reprogram_fixed_counter(struct kvm_pmc *pmc);
 void reprogram_counter(struct kvm_pmc *pmc);
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index a1fbb72d6fbb660941c7ce369d1bd3b71304c382..79346def7c961b3ebbf3617df046bbd0267f5edb 100644
@@ -288,7 +288,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
-                       reprogram_gp_counter(pmc);
+                       reprogram_counter(pmc);
                }
                return 0;
        }
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 13d54c5fd12ba4a68effb02db42799bcdf5881ff..0dc270e6717cca6cc20dea1c0fa0684b0ceab659 100644
@@ -52,7 +52,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-               reprogram_fixed_counter(pmc);
+               reprogram_counter(pmc);
        }
 }
 
@@ -493,7 +493,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
                        if (!(data & reserved_bits)) {
                                pmc->eventsel = data;
-                               reprogram_gp_counter(pmc);
+                               reprogram_counter(pmc);
                                return 0;
                        }
                } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))