]> git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: VMX: Use vcpu_get_perf_capabilities() to get guest-visible value
author Sean Christopherson <seanjc@google.com>
Sat, 11 Jun 2022 00:57:53 +0000 (00:57 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 20 Jun 2022 15:49:54 +0000 (11:49 -0400)
Use vcpu_get_perf_capabilities() when querying MSR_IA32_PERF_CAPABILITIES
from the guest's perspective, e.g. to update the vPMU and to determine
which MSRs exist.  If userspace ignores MSR_IA32_PERF_CAPABILITIES but
clears X86_FEATURE_PDCM, the guest should see '0'.

Fixes: e04a03139ab6 ("KVM: x86/pmu: Add PEBS_DATA_CFG MSR emulation to support adaptive PEBS")
Fixes: 56a759afcfa5 ("KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220611005755.753273-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/pmu_intel.c

index b1aae60cf06195a8aa3631a6fea670e243b2c3e7..53ccba896e779163cd48b63250b8643e18f59500 100644 (file)
@@ -199,7 +199,7 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-       u64 perf_capabilities = vcpu->arch.perf_capabilities;
+       u64 perf_capabilities;
        int ret;
 
        switch (msr) {
@@ -210,12 +210,13 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
                ret = pmu->version > 1;
                break;
        case MSR_IA32_PEBS_ENABLE:
-               ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
+               ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
                break;
        case MSR_IA32_DS_AREA:
                ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
                break;
        case MSR_PEBS_DATA_CFG:
+               perf_capabilities = vcpu_get_perf_capabilities(vcpu);
                ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
                        ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
                break;
@@ -515,6 +516,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;
+       u64 perf_capabilities;
        u64 counter_mask;
        int i;
 
@@ -599,8 +601,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        if (lbr_desc->records.nr)
                bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
-       if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
-               if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
+       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+       if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
+               if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_mask = counter_mask;
                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
                        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {