git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86/pmu: Drop "u64 eventsel" for reprogram_gp_counter()
author: Like Xu <likexu@tencent.com>
Wed, 18 May 2022 13:25:06 +0000 (21:25 +0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Jun 2022 08:48:50 +0000 (04:48 -0400)
Because inside reprogram_gp_counter() it is bound to assign the requested
eventsel to pmc->eventsel, this assignment step can be moved forward, thus
simplifying the passing of parameters to "struct kvm_pmc *pmc" only.

No functional change intended.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-6-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index 817352d83ff4c860e78846880c7562f99e879f1e..b001471fbf829d7467a6c9e31b11f8a634039bb5 100644 (file)
@@ -283,17 +283,16 @@ out:
        return allow_event;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+void reprogram_gp_counter(struct kvm_pmc *pmc)
 {
        u64 config;
        u32 type = PERF_TYPE_RAW;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       u64 eventsel = pmc->eventsel;
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");
 
-       pmc->eventsel = eventsel;
-
        pmc_pause_counter(pmc);
 
        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
@@ -358,7 +357,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 void reprogram_counter(struct kvm_pmc *pmc)
 {
        if (pmc_is_gp(pmc))
-               reprogram_gp_counter(pmc, pmc->eventsel);
+               reprogram_gp_counter(pmc);
        else {
                int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
                u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
index 1dd6be22cdb2d2c3cfa7af2bbc2a16caa377f4a8..b9a76dd982420b1baf18d1a6aca7ce5236095adf 100644 (file)
@@ -173,7 +173,7 @@ static inline void kvm_init_pmu_capability(void)
                                             KVM_PMC_MAX_FIXED);
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_gp_counter(struct kvm_pmc *pmc);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmc *pmc);
 
index 0e5784371ac030ec649684b893bf440566efbda5..a1fbb72d6fbb660941c7ce369d1bd3b71304c382 100644 (file)
@@ -286,8 +286,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                data &= ~pmu->reserved_bits;
-               if (data != pmc->eventsel)
-                       reprogram_gp_counter(pmc, data);
+               if (data != pmc->eventsel) {
+                       pmc->eventsel = data;
+                       reprogram_gp_counter(pmc);
+               }
                return 0;
        }
 
index 719ae6c62a5a2497f949dad5239a9967f4fbcc31..61e14a5a247df51704e13823719b111cd8d6ac41 100644 (file)
@@ -492,7 +492,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                            (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
                                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
                        if (!(data & reserved_bits)) {
-                               reprogram_gp_counter(pmc, data);
+                               pmc->eventsel = data;
+                               reprogram_gp_counter(pmc);
                                return 0;
                        }
                } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))