git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value
author Sean Christopherson <seanjc@google.com>
Tue, 7 Jun 2022 21:35:54 +0000 (21:35 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 28 Jul 2022 17:22:28 +0000 (13:22 -0400)
Restrict the nVMX MSRs based on KVM's config, not based on the guest's
current config.  Using the guest's config to audit the new config
prevents userspace from restoring the original config (KVM's config) if
at any point in the past the guest's config was restricted in any way.

Fixes: 651c35318378 ("KVM: nVMX: support restore of VMX capability MSRs")
Cc: stable@vger.kernel.org
Cc: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220607213604.3346000-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c

index 1e760994d2073cd1126cdfc67b25e3762ed41b13..c1c85fd75d421839611594334ca9371230eb575c 100644 (file)
@@ -1224,7 +1224,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
                BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
                /* reserved */
                BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
-       u64 vmx_basic = vmx->nested.msrs.basic;
+       u64 vmx_basic = vmcs_config.nested.basic;
 
        if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
                return -EINVAL;
@@ -1247,36 +1247,42 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
        return 0;
 }
 
-static int
-vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
+                               u32 **low, u32 **high)
 {
-       u64 supported;
-       u32 *lowp, *highp;
-
        switch (msr_index) {
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
-               lowp = &vmx->nested.msrs.pinbased_ctls_low;
-               highp = &vmx->nested.msrs.pinbased_ctls_high;
+               *low = &msrs->pinbased_ctls_low;
+               *high = &msrs->pinbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
-               lowp = &vmx->nested.msrs.procbased_ctls_low;
-               highp = &vmx->nested.msrs.procbased_ctls_high;
+               *low = &msrs->procbased_ctls_low;
+               *high = &msrs->procbased_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-               lowp = &vmx->nested.msrs.exit_ctls_low;
-               highp = &vmx->nested.msrs.exit_ctls_high;
+               *low = &msrs->exit_ctls_low;
+               *high = &msrs->exit_ctls_high;
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-               lowp = &vmx->nested.msrs.entry_ctls_low;
-               highp = &vmx->nested.msrs.entry_ctls_high;
+               *low = &msrs->entry_ctls_low;
+               *high = &msrs->entry_ctls_high;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
-               lowp = &vmx->nested.msrs.secondary_ctls_low;
-               highp = &vmx->nested.msrs.secondary_ctls_high;
+               *low = &msrs->secondary_ctls_low;
+               *high = &msrs->secondary_ctls_high;
                break;
        default:
                BUG();
        }
+}
+
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+       u32 *lowp, *highp;
+       u64 supported;
+
+       vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
 
        supported = vmx_control_msr(*lowp, *highp);
 
@@ -1288,6 +1294,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
        if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
                return -EINVAL;
 
+       vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
        *lowp = data;
        *highp = data >> 32;
        return 0;
@@ -1301,10 +1308,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
                BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
                /* reserved */
                GENMASK_ULL(13, 9) | BIT_ULL(31);
-       u64 vmx_misc;
-
-       vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
-                                  vmx->nested.msrs.misc_high);
+       u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
+                                      vmcs_config.nested.misc_high);
 
        if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
                return -EINVAL;
@@ -1332,10 +1337,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
 
 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
 {
-       u64 vmx_ept_vpid_cap;
-
-       vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
-                                          vmx->nested.msrs.vpid_caps);
+       u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
+                                              vmcs_config.nested.vpid_caps);
 
        /* Every bit is either reserved or a feature bit. */
        if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
@@ -1346,20 +1349,21 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
        return 0;
 }
 
-static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
 {
-       u64 *msr;
-
        switch (msr_index) {
        case MSR_IA32_VMX_CR0_FIXED0:
-               msr = &vmx->nested.msrs.cr0_fixed0;
-               break;
+               return &msrs->cr0_fixed0;
        case MSR_IA32_VMX_CR4_FIXED0:
-               msr = &vmx->nested.msrs.cr4_fixed0;
-               break;
+               return &msrs->cr4_fixed0;
        default:
                BUG();
        }
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+       const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
 
        /*
         * 1 bits (which indicates bits which "must-be-1" during VMX operation)
@@ -1368,7 +1372,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
        if (!is_bitwise_subset(data, *msr, -1ULL))
                return -EINVAL;
 
-       *msr = data;
+       *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
        return 0;
 }
 
@@ -1429,7 +1433,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                vmx->nested.msrs.vmcs_enum = data;
                return 0;
        case MSR_IA32_VMX_VMFUNC:
-               if (data & ~vmx->nested.msrs.vmfunc_controls)
+               if (data & ~vmcs_config.nested.vmfunc_controls)
                        return -EINVAL;
                vmx->nested.msrs.vmfunc_controls = data;
                return 0;