git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86/pmu: Disable guest PEBS temporarily in two rare situations
author     Like Xu <like.xu@linux.intel.com>
           Mon, 11 Apr 2022 10:19:43 +0000 (18:19 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 8 Jun 2022 08:48:14 +0000 (04:48 -0400)
Guest PEBS will be disabled when users profile KVM and its user space
through the same PEBS facility, OR when host perf does not schedule the
guest PEBS counters in a one-to-one mapping manner (neither of these is
a typical scenario).

The PEBS records in the guest DS buffer remain accurate, and the two
restrictions above are checked before each vm-entry only if guest PEBS
is deemed to be enabled.
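
For illustration only, the two checks can be modeled outside the kernel
in plain C. Everything below (struct switch_msr_model, adjust_guest_pebs,
the sample mask values) is a hypothetical stand-in for the
perf_guest_switch_msr handling in the diff, not a kernel API:

#include <stdint.h>
#include <stdio.h>

struct switch_msr_model {
        uint64_t host;   /* value loaded on vm-exit  */
        uint64_t guest;  /* value loaded on vm-entry */
};

static void adjust_guest_pebs(struct switch_msr_model *pebs_enable,
                              struct switch_msr_model *global_ctrl,
                              uint64_t cross_mapped_mask)
{
        if (pebs_enable->host) {
                /* Host perf is using PEBS itself: guest PEBS is switched off. */
                pebs_enable->guest = 0;
        } else {
                /* Drop guest PEBS bits for counters that are not mapped 1:1. */
                pebs_enable->guest &= ~cross_mapped_mask;
                /* Keep GLOBAL_CTRL consistent with the surviving PEBS bits. */
                global_ctrl->guest |= pebs_enable->guest;
        }
}

int main(void)
{
        /* Guest wants PEBS on counters 0 and 1; counter 1 is cross-mapped. */
        struct switch_msr_model pebs = { .host = 0, .guest = 0x3 };
        struct switch_msr_model gctl = { .host = 0, .guest = 0x1 };

        adjust_guest_pebs(&pebs, &gctl, 0x2);
        printf("guest PEBS_ENABLE=%#llx GLOBAL_CTRL=%#llx\n",
               (unsigned long long)pebs.guest, (unsigned long long)gctl.guest);
        return 0;
}

With counter 1 cross-mapped and no host PEBS user, the model keeps only
counter 0 enabled for guest PEBS (PEBS_ENABLE=0x1) and mirrors that bit
into GLOBAL_CTRL, matching the behavior of the intel_guest_get_msrs()
hunk below.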

Suggested-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-Id: <20220411101946.20262-15-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/events/intel/core.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

index 8f6189f2fbf3d2b16ee590c6e283a67ac19640a0..39832a5e7d759d069f7ee05af516cad969e47f63 100644 (file)
@@ -4048,8 +4048,15 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
                .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
        };
 
-       /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
-       arr[0].guest |= arr[*nr].guest;
+       if (arr[pebs_enable].host) {
+               /* Disable guest PEBS if host PEBS is enabled. */
+               arr[pebs_enable].guest = 0;
+       } else {
+               /* Disable guest PEBS for cross-mapped PEBS counters. */
+               arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
+               /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
+               arr[global_ctrl].guest |= arr[pebs_enable].guest;
+       }
 
        return arr;
 }
index d99c130d0a13e6747053492052263d575030f527..032278f0ee6dc87d8666c1544cf062471ce1a800 100644 (file)
@@ -527,6 +527,15 @@ struct kvm_pmu {
        u64 pebs_data_cfg;
        u64 pebs_data_cfg_mask;
 
+       /*
+        * If a guest counter is cross-mapped to host counter with different
+        * index, its PEBS capability will be temporarily disabled.
+        *
+        * The user should make sure that this mask is updated
+        * after disabling interrupts and before perf_guest_get_msrs();
+        */
+       u64 host_cross_mapped_mask;
+
        /*
         * The gate to release perf_events not marked in
         * pmc_in_use only once in a vcpu time slice.
index 02cad8e08ed0d194fa02711c5bf527377eb89f18..cc3d2a768320dfe7d14f3f63c885a86c8fe149e3 100644 (file)
@@ -786,6 +786,26 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
                intel_pmu_release_guest_lbr_event(vcpu);
 }
 
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
+{
+       struct kvm_pmc *pmc = NULL;
+       int bit;
+
+       for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
+                        X86_PMC_IDX_MAX) {
+               pmc = intel_pmc_idx_to_pmc(pmu, bit);
+
+               if (!pmc || !pmc_speculative_in_use(pmc) ||
+                   !intel_pmc_is_enabled(pmc))
+                       continue;
+
+               if (pmc->perf_event && pmc->idx != pmc->perf_event->hw.idx) {
+                       pmu->host_cross_mapped_mask |=
+                               BIT_ULL(pmc->perf_event->hw.idx);
+               }
+       }
+}
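
As a hedged user-space sketch of what intel_pmu_cross_mapped_check()
computes (struct pmc_model and cross_mapped_mask() below are simplified
stand-ins, not the kernel's kvm_pmc/kvm_pmu types): a guest counter
whose backing perf event was scheduled on a different host index
contributes that host index to host_cross_mapped_mask.

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct pmc_model {
        int guest_idx;  /* index the guest programmed             */
        int host_idx;   /* index host perf actually assigned      */
        int in_use;     /* counter enabled and backed by an event */
};

static uint64_t cross_mapped_mask(const struct pmc_model *pmcs, int n)
{
        uint64_t mask = 0;

        for (int i = 0; i < n; i++) {
                if (pmcs[i].in_use && pmcs[i].guest_idx != pmcs[i].host_idx)
                        mask |= 1ULL << pmcs[i].host_idx;
        }
        return mask;
}

int main(void)
{
        /* Guest counter 0 was scheduled on host counter 2; counter 1 is 1:1. */
        struct pmc_model pmcs[NUM_COUNTERS] = {
                { .guest_idx = 0, .host_idx = 2, .in_use = 1 },
                { .guest_idx = 1, .host_idx = 1, .in_use = 1 },
        };

        printf("host_cross_mapped_mask = %#llx\n",
               (unsigned long long)cross_mapped_mask(pmcs, NUM_COUNTERS));
        return 0;
}

Here guest counter 0 landed on host counter 2, so the mask comes out as
0x4; counter 1 is mapped one-to-one and contributes nothing.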
+
 struct kvm_pmu_ops intel_pmu_ops __initdata = {
        .pmc_perf_hw_id = intel_pmc_perf_hw_id,
        .pmc_is_enabled = intel_pmc_is_enabled,
index 070b02162db646aef39522fcc11bae055513195a..d5ec0635ccd4884995e852fad9e4c5d8a5dac879 100644 (file)
@@ -6796,6 +6796,10 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
        struct perf_guest_switch_msr *msrs;
        struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
 
+       pmu->host_cross_mapped_mask = 0;
+       if (pmu->pebs_enable & pmu->global_ctrl)
+               intel_pmu_cross_mapped_check(pmu);
+
        /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
        msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
        if (!msrs)
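
The hunk above clears the mask and recomputes it only when at least one
counter is both PEBS-enabled and globally enabled, before
perf_guest_get_msrs() consumes it, which satisfies the ordering comment
added to struct kvm_pmu. A minimal standalone model of that gate, with
made-up values in place of the real vcpu state:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pebs_enable = 0x3;   /* guest IA32_PEBS_ENABLE image */
        uint64_t global_ctrl = 0x1;   /* guest GLOBAL_CTRL image      */
        uint64_t host_cross_mapped_mask;

        /* Start every vm-entry with a clean mask. */
        host_cross_mapped_mask = 0;

        /* Scan only if some counter is both PEBS-enabled and globally
         * enabled; in the kernel this is the intel_pmu_cross_mapped_check()
         * call. Here we simply pretend one counter was cross-mapped to
         * host index 2. */
        if (pebs_enable & global_ctrl)
                host_cross_mapped_mask |= 1ULL << 2;

        printf("host_cross_mapped_mask = %#llx\n",
               (unsigned long long)host_cross_mapped_mask);
        return 0;
}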
index d7baedda79e54193cd9ab232a5caaf03a31628df..2d6d7870a974bc6fd766a4321e29b17295151a96 100644 (file)
@@ -94,6 +94,7 @@ union vmx_exit_reason {
 #define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
 #define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)
 
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);