KVM: x86/pmu: Add PEBS_DATA_CFG MSR emulation to support adaptive PEBS
author     Like Xu <likexu@tencent.com>
           Mon, 11 Apr 2022 10:19:40 +0000 (18:19 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 8 Jun 2022 08:48:06 +0000 (04:48 -0400)
If IA32_PERF_CAPABILITIES.PEBS_BASELINE [bit 14] is set, adaptive PEBS
is supported, and with it the PEBS_DATA_CFG MSR and the adaptive record
enable bits (IA32_PERFEVTSELx.Adaptive_Record and IA32_FIXED_CTR_CTRL.
FCx_Adaptive_Record).
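
For reference, a minimal sketch of where those enable bits live. The two
constants mirror the ICL_EVENTSEL_ADAPTIVE and ICL_FIXED_0_ADAPTIVE
definitions in arch/x86/include/asm/perf_event.h; the fixed_adaptive_bit()
helper is purely illustrative:

    #define ICL_EVENTSEL_ADAPTIVE   (1ULL << 34)  /* IA32_PERFEVTSELx.Adaptive_Record */
    #define ICL_FIXED_0_ADAPTIVE    (1ULL << 32)  /* FC0_Adaptive_Record */

    /*
     * Illustrative only: FCx_Adaptive_Record for fixed counter i occupies
     * bit 32 + 4 * i of IA32_FIXED_CTR_CTRL (cf. the fixed_ctr_ctrl_mask
     * hunk in pmu_intel.c below, which clears exactly these bits).
     */
    static inline u64 fixed_adaptive_bit(int i)
    {
            return ICL_FIXED_0_ADAPTIVE << (4 * i);
    }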

Adaptive PEBS gives software the capability to configure PEBS records
to capture only the data of interest, keeping the record size compact.
An overflow of PMCx results in the generation of an adaptive PEBS record
with state information based on the selections specified in
MSR_PEBS_DATA_CFG. By default, the record contains only the Basic group.
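
The group-selection bits and the LBR-entry count are the only
guest-writable fields. The layout below mirrors the PEBS_DATACFG_*
definitions in arch/x86/include/asm/perf_event.h:

    #define PEBS_DATACFG_MEMINFO    (1ULL << 0)   /* Memory Info group */
    #define PEBS_DATACFG_GP         (1ULL << 1)   /* GPRs group */
    #define PEBS_DATACFG_XMMS       (1ULL << 2)   /* XMM registers group */
    #define PEBS_DATACFG_LBRS       (1ULL << 3)   /* LBR entries group */
    #define PEBS_DATACFG_LBR_SHIFT  24            /* bits 31:24 = LBR entry count */

Bits 3:0 plus bits 31:24 form the 0xff00000f pattern whose complement
becomes pebs_data_cfg_mask in the pmu_intel.c hunk below.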

When guest adaptive PEBS is enabled, the guest PEBS_DATA_CFG MSR is
likewise added to the perf_guest_switch_msr() list and switched during
the VMX transitions, just like the CORE_PERF_GLOBAL_CTRL MSR.
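
For context, the consumer of that list is atomic_switch_perf_msrs() in
arch/x86/kvm/vmx/vmx.c (not part of this diff; sketched here from the
surrounding kernel code, so treat the details as approximate). On every
VM-entry it programs the VMX atomic MSR-switch lists for each entry
whose host and guest values differ:

    static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
    {
            int i, nr_msrs;
            struct perf_guest_switch_msr *msrs;
            struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);

            msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
            if (!msrs)
                    return;

            for (i = 0; i < nr_msrs; i++)
                    if (msrs[i].host == msrs[i].guest)
                            clear_atomic_switch_msr(vmx, msrs[i].msr);
                    else
                            add_atomic_switch_msr(vmx, msrs[i].msr,
                                                  msrs[i].guest, msrs[i].host,
                                                  false);
    }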

According to the Intel SDM, software is recommended to use PEBS Baseline
when the following is true: IA32_PERF_CAPABILITIES.PEBS_BASELINE[14]
&& IA32_PERF_CAPABILITIES.PEBS_FMT[11:8] ≥ 4.
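
Expressed as code, with the PERF_CAP_* masks from
arch/x86/include/asm/msr-index.h (the helper name is hypothetical;
intel_is_valid_msr() in the diff below performs the equivalent test
for MSR_PEBS_DATA_CFG):

    #define PERF_CAP_PEBS_FORMAT    0xf00         /* PEBS_FMT, bits 11:8 */
    #define PERF_CAP_PEBS_BASELINE  (1ULL << 14)  /* PEBS_BASELINE, bit 14 */

    static bool pebs_baseline_recommended(u64 perf_capabilities)
    {
            u8 pebs_fmt = (perf_capabilities & PERF_CAP_PEBS_FORMAT) >> 8;

            return (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
                   pebs_fmt >= 4;
    }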

Co-developed-by: Luwei Kang <luwei.kang@intel.com>
Signed-off-by: Luwei Kang <luwei.kang@intel.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220411101946.20262-12-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/events/intel/core.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e5e624cae95731eb39aaa6e7f743b8a056ec482d..8f6189f2fbf3d2b16ee590c6e283a67ac19640a0 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4033,6 +4033,14 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
                .guest = kvm_pmu->ds_area,
        };
 
+       if (x86_pmu.intel_cap.pebs_baseline) {
+               arr[(*nr)++] = (struct perf_guest_switch_msr){
+                       .msr = MSR_PEBS_DATA_CFG,
+                       .host = cpuc->pebs_data_cfg,
+                       .guest = kvm_pmu->pebs_data_cfg,
+               };
+       }
+
        pebs_enable = (*nr)++;
        arr[pebs_enable] = (struct perf_guest_switch_msr){
                .msr = MSR_IA32_PEBS_ENABLE,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dc5f68a313b04ac199f4136018020317a98a2ad2..d99c130d0a13e6747053492052263d575030f527 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -524,6 +524,8 @@ struct kvm_pmu {
        u64 ds_area;
        u64 pebs_enable;
        u64 pebs_enable_mask;
+       u64 pebs_data_cfg;
+       u64 pebs_data_cfg_mask;
 
        /*
         * The gate to release perf_events not marked in
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 36ba29b664bf49df6eecd7260fe92d0c425fd115..69eb5372c922cb90345531b70019dae553225941 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -205,6 +205,7 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       u64 perf_capabilities = vcpu->arch.perf_capabilities;
        int ret;
 
        switch (msr) {
@@ -215,11 +216,15 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
                ret = pmu->version > 1;
                break;
        case MSR_IA32_PEBS_ENABLE:
-               ret = vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT;
+               ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
                break;
        case MSR_IA32_DS_AREA:
                ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
                break;
+       case MSR_PEBS_DATA_CFG:
+               ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+                       ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
+               break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -373,6 +378,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_DS_AREA:
                msr_info->data = pmu->ds_area;
                return 0;
+       case MSR_PEBS_DATA_CFG:
+               msr_info->data = pmu->pebs_data_cfg;
+               return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -446,6 +454,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                pmu->ds_area = data;
                return 0;
+       case MSR_PEBS_DATA_CFG:
+               if (pmu->pebs_data_cfg == data)
+                       return 0;
+               if (!(data & pmu->pebs_data_cfg_mask)) {
+                       pmu->pebs_data_cfg = data;
+                       return 0;
+               }
+               break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -515,6 +531,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
        pmu->fixed_ctr_ctrl_mask = ~0ull;
        pmu->pebs_enable_mask = ~0ull;
+       pmu->pebs_data_cfg_mask = ~0ull;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry || !vcpu->kvm->arch.enable_pmu)
@@ -595,6 +612,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                                pmu->fixed_ctr_ctrl_mask &=
                                        ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
                        }
+                       pmu->pebs_data_cfg_mask = ~0xff00000full;
                } else {
                        pmu->pebs_enable_mask =
                                ~((1ull << pmu->nr_arch_gp_counters) - 1);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ead86072612d737290303f8afb22ed6d06920bb4..2d9456b4874ba77dc5f4a11505677d860cf09601 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1448,7 +1448,7 @@ static const u32 msrs_to_save_all[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
-       MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA,
+       MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
 
        MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
        MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,