git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86: Add dedicated helper to get CPUID entry with significant index
authorSean Christopherson <seanjc@google.com>
Tue, 12 Jul 2022 00:06:45 +0000 (02:06 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 14 Jul 2022 15:38:32 +0000 (11:38 -0400)
Add a second CPUID helper, kvm_find_cpuid_entry_index(), to handle KVM
queries for CPUID leaves whose index _may_ be significant, and drop the
index param from the existing kvm_find_cpuid_entry().  Add a WARN in the
inner helper, cpuid_entry2_find(), to detect attempts to retrieve a CPUID
entry whose index is significant without explicitly providing an index.

Using an explicit magic number and letting callers omit the index avoids
confusion by eliminating the myriad cases where KVM specifies '0' as a
dummy value.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/sgx.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index d47222ab8e6eafd9da50c53e403108666241d122..75dcf7a72605f335d0f104b26986d17cc5cd81ec 100644 (file)
@@ -67,9 +67,17 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
 #define F feature_bit
 #define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
 
+/*
+ * Magic value used by KVM when querying userspace-provided CPUID entries and
+ * doesn't care about the CPUID index because the index of the function in
+ * question is not significant.  Note, this magic value must have at least one
+ * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
+ * to avoid false positives when processing guest CPUID input.
+ */
+#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
 
 static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
-       struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
+       struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
 {
        struct kvm_cpuid_entry2 *e;
        int i;
@@ -77,9 +85,31 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        for (i = 0; i < nent; i++) {
                e = &entries[i];
 
-               if (e->function == function &&
-                   (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
+               if (e->function != function)
+                       continue;
+
+               /*
+                * If the index isn't significant, use the first entry with a
+                * matching function.  It's userspace's responsibility to not
+                * provide "duplicate" entries in all cases.
+                */
+               if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
+                       return e;
+
+
+               /*
+                * Similarly, use the first matching entry if KVM is doing a
+                * lookup (as opposed to emulating CPUID) for a function that's
+                * architecturally defined as not having a significant index.
+                */
+               if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
+                       /*
+                        * Direct lookups from KVM should not diverge from what
+                        * KVM defines internally (the architectural behavior).
+                        */
+                       WARN_ON_ONCE(cpuid_function_is_indexed(function));
                        return e;
+               }
        }
 
        return NULL;
@@ -96,7 +126,8 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
         * The existing code assumes virtual address is 48-bit or 57-bit in the
         * canonical address checks; exit if it is ever changed.
         */
-       best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
+       best = cpuid_entry2_find(entries, nent, 0x80000008,
+                                KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
                int vaddr_bits = (best->eax & 0xff00) >> 8;
 
@@ -151,7 +182,7 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
        vcpu->arch.kvm_cpuid_base = 0;
 
        for_each_possible_hypervisor_cpuid_base(function) {
-               entry = kvm_find_cpuid_entry(vcpu, function, 0);
+               entry = kvm_find_cpuid_entry(vcpu, function);
 
                if (entry) {
                        u32 signature[3];
@@ -177,7 +208,8 @@ static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *v
        if (!base)
                return NULL;
 
-       return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES, 0);
+       return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
+                                KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 }
 
 static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
@@ -219,7 +251,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
        struct kvm_cpuid_entry2 *best;
        u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
 
-       best = cpuid_entry2_find(entries, nent, 1, 0);
+       best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
                /* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
@@ -250,7 +282,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
 
        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
-               best = cpuid_entry2_find(entries, nent, 0x1, 0);
+               best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
                if (best)
                        cpuid_entry_change(best, X86_FEATURE_MWAIT,
                                           vcpu->arch.ia32_misc_enable_msr &
@@ -285,7 +317,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        struct kvm_cpuid_entry2 *best;
        u64 guest_supported_xcr0;
 
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       best = kvm_find_cpuid_entry(vcpu, 1);
        if (best && apic) {
                if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
@@ -325,10 +357,10 @@ int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0x80000000);
        if (!best || best->eax < 0x80000008)
                goto not_found;
-       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008);
        if (best)
                return best->eax & 0xff;
 not_found:
@@ -1302,12 +1334,20 @@ out_free:
        return r;
 }
 
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index)
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
+                                                   u32 function, u32 index)
 {
        return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
                                 function, index);
 }
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
+
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function)
+{
+       return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
+                                function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+}
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 /*
@@ -1344,7 +1384,7 @@ get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
        struct kvm_cpuid_entry2 *basic, *class;
        u32 function = *fn_ptr;
 
-       basic = kvm_find_cpuid_entry(vcpu, 0, 0);
+       basic = kvm_find_cpuid_entry(vcpu, 0);
        if (!basic)
                return NULL;
 
@@ -1353,11 +1393,11 @@ get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
                return NULL;
 
        if (function >= 0x40000000 && function <= 0x4fffffff)
-               class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
+               class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
        else if (function >= 0xc0000000)
-               class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
+               class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
        else
-               class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+               class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);
 
        if (class && function <= class->eax)
                return NULL;
@@ -1375,7 +1415,7 @@ get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
         * the effective CPUID entry is the max basic leaf.  Note, the index of
         * the original requested leaf is observed!
         */
-       return kvm_find_cpuid_entry(vcpu, basic->eax, index);
+       return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
 }
 
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
@@ -1385,7 +1425,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
        struct kvm_cpuid_entry2 *entry;
        bool exact, used_max_basic = false;
 
-       entry = kvm_find_cpuid_entry(vcpu, function, index);
+       entry = kvm_find_cpuid_entry_index(vcpu, function, index);
        exact = !!entry;
 
        if (!entry && !exact_only) {
@@ -1414,7 +1454,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
                 * exists. EDX can be copied from any existing index.
                 */
                if (function == 0xb || function == 0x1f) {
-                       entry = kvm_find_cpuid_entry(vcpu, function, 1);
+                       entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
                        if (entry) {
                                *ecx = index & 0xff;
                                *edx = entry->edx;
index ac72aabba981b325392ebd39148f06061d87dd41..b1658c0de847cd38c0560c9bbb947a1a267c2c7e 100644 (file)
@@ -13,8 +13,10 @@ void kvm_set_cpu_caps(void);
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
 void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
+                                                   u32 function, u32 index);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
-                                             u32 function, u32 index);
+                                             u32 function);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
@@ -76,7 +78,7 @@ static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
        struct kvm_cpuid_entry2 *entry;
 
-       entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+       entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;
 
@@ -109,7 +111,7 @@ static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0);
        return best &&
               (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
                is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
@@ -119,7 +121,7 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0);
        return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
 }
 
@@ -127,7 +129,7 @@ static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;
 
@@ -138,7 +140,7 @@ static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;
 
@@ -154,7 +156,7 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
-       best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+       best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;
 
index e2e95a6fccfde0f60f78bafb78cef40d48c23904..ed804447589c9353765413ae41e0dc4252fb6f78 100644 (file)
@@ -1992,7 +1992,7 @@ void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
        struct kvm_cpuid_entry2 *entry;
        struct kvm_vcpu_hv *hv_vcpu;
 
-       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
+       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
                vcpu->arch.hyperv_enabled = true;
        } else {
@@ -2005,7 +2005,7 @@ void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 
        hv_vcpu = to_hv_vcpu(vcpu);
 
-       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
+       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
        if (entry) {
                hv_vcpu->cpuid_cache.features_eax = entry->eax;
                hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
@@ -2016,7 +2016,7 @@ void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
                hv_vcpu->cpuid_cache.features_edx = 0;
        }
 
-       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
+       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
        if (entry) {
                hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
                hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
@@ -2025,7 +2025,7 @@ void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
                hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
        }
 
-       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
+       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
        if (entry)
                hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
        else
index 1d8eb3333f46337f68660207e322466c3d78be47..84414eafcd0dd425be19ff77ae70956124ec0613 100644 (file)
@@ -4194,7 +4194,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
        /* For sev guests, the memory encryption bit is not reserved in CR3.  */
        if (sev_guest(vcpu->kvm)) {
-               best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
+               best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
                if (best)
                        vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
        }
index 53ccba896e779163cd48b63250b8643e18f59500..4bc098fbec31189f99a27f123e28a479e33e080e 100644 (file)
@@ -531,7 +531,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->pebs_enable_mask = ~0ull;
        pmu->pebs_data_cfg_mask = ~0ull;
 
-       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       entry = kvm_find_cpuid_entry(vcpu, 0xa);
        if (!entry || !vcpu->kvm->arch.enable_pmu)
                return;
        eax.full = entry->eax;
@@ -577,7 +577,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->global_ovf_ctrl_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
-       entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+       entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
index 7ae8aa73724c0a37e720993b9a89c886b2816fab..aba8cebdc587fc271d11689ee9440b423557aecc 100644 (file)
@@ -148,8 +148,8 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
        u8 max_size_log2;
        int trapnr, ret;
 
-       sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0);
-       sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1);
+       sgx_12_0 = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
+       sgx_12_1 = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
        if (!sgx_12_0 || !sgx_12_1) {
                kvm_prepare_emulation_failure_exit(vcpu);
                return 0;
@@ -431,7 +431,7 @@ static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
        if (!vcpu->kvm->arch.sgx_provisioning_allowed)
                return true;
 
-       guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0);
+       guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
        if (!guest_cpuid)
                return true;
 
@@ -439,7 +439,7 @@ static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
        if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx)
                return true;
 
-       guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1);
+       guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
        if (!guest_cpuid)
                return true;
 
index 3842e121070ac4cb11d0062381311204df0a9e72..e6ab2c2c4d3bd5484d877c1e177c653e3b653d51 100644 (file)
@@ -7430,7 +7430,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
                vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);     \
 } while (0)
 
-       entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+       entry = kvm_find_cpuid_entry(vcpu, 0x1);
        cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
        cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
        cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
@@ -7446,7 +7446,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
        cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
 
-       entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+       entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
        cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
        cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
        cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
@@ -7481,7 +7481,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
        int i;
 
        for (i = 0; i < PT_CPUID_LEAVES; i++) {
-               best = kvm_find_cpuid_entry(vcpu, 0x14, i);
+               best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
                if (!best)
                        return;
                vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
index 0729e434c51b65e9278e721ef45bedb8698d8684..f389691d8c04a35cb666dbd17b4801d5bd4502cb 100644 (file)
@@ -11735,7 +11735,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
         * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
         * on RESET.  But, go through the motions in case that's ever remedied.
         */
-       cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0);
+       cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
        kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
 
        static_call(kvm_x86_vcpu_reset)(vcpu, init_event);