KVM: x86: nSVM: implement nested LBR virtualization
author     Maxim Levitsky <mlevitsk@redhat.com>
           Tue, 22 Mar 2022 17:40:46 +0000 (19:40 +0200)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Sat, 2 Apr 2022 09:41:23 +0000 (05:41 -0400)
This was tested with a kvm-unit-test that was developed
for this purpose.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220322174050.241850-3-mlevitsk@redhat.com>
[Copy all of DEBUGCTL except for reserved bits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
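
Note: the hunks below rely on svm_copy_lbrs(), which is not part of this diff (it is introduced by an earlier patch in the series). As a rough orientation, the copy amounts to moving one VMCB's LBR-related save-area state into another; the following is a self-contained userspace model, with a simplified stand-in struct and an assumed field set, not the kernel helper itself.

/* Illustrative model only -- not the kernel's svm_copy_lbrs().
 * The five fields are assumed to be the LBR-related save-area state
 * that has to follow the guest across nested transitions.
 */
#include <stdint.h>
#include <stdio.h>

struct lbr_state {
        uint64_t dbgctl;          /* MSR_IA32_DEBUGCTLMSR image */
        uint64_t br_from;         /* last branch "from" IP      */
        uint64_t br_to;           /* last branch "to" IP        */
        uint64_t last_excp_from;  /* last exception "from" IP   */
        uint64_t last_excp_to;    /* last exception "to" IP     */
};

/* Copy one VMCB's LBR view into another, as done on nested entry/exit. */
static void copy_lbrs(struct lbr_state *to, const struct lbr_state *from)
{
        *to = *from;
        /* The real helper also marks the destination VMCB's LBR state
         * dirty so the CPU reloads it on the next VMRUN. */
}

int main(void)
{
        struct lbr_state vmcb01 = { .dbgctl = 0x1, .br_from = 0xffffffff81000000ULL };
        struct lbr_state vmcb02 = { 0 };

        copy_lbrs(&vmcb02, &vmcb01);
        printf("vmcb02 dbgctl=%#llx br_from=%#llx\n",
               (unsigned long long)vmcb02.dbgctl,
               (unsigned long long)vmcb02.br_from);
        return 0;
}
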

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 98647f5dec93fe48feba35b13a83a5771cc65434..f1332d802ec81bb51f4382f5470fc7091219746b 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -588,8 +588,18 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
                vmcb_mark_dirty(vmcb02, VMCB_DR);
        }
 
-       if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK))
+       if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+               /*
+                * Reserved bits of DEBUGCTL are ignored.  Be consistent with
+                * svm_set_msr's definition of reserved bits.
+                */
+               svm_copy_lbrs(vmcb02, vmcb12);
+               vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
+               svm_update_lbrv(&svm->vcpu);
+
+       } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb02, vmcb01);
+       }
 }
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
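
The hunk above, together with Paolo's note in the commit message, drops reserved DEBUGCTL bits when L2's LBR state is loaded into vmcb02, so that nested entry is no stricter and no looser than svm_set_msr(). A minimal userspace sketch of that sanitization, using the DEBUGCTL_RESERVED_BITS definition this patch moves to svm.h (everything above the low 6 bits is treated as reserved):

/* Sketch of the DEBUGCTL sanitization applied to vmcb02 above. */
#include <stdint.h>
#include <stdio.h>

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static uint64_t sanitize_dbgctl(uint64_t guest_dbgctl)
{
        return guest_dbgctl & ~DEBUGCTL_RESERVED_BITS;
}

int main(void)
{
        /* Bit 0 (LBR) is kept, the reserved high bits are silently dropped. */
        printf("%#llx\n", (unsigned long long)sanitize_dbgctl(0xdead0000000001ULL));
        return 0;
}
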
@@ -651,6 +661,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 
        vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
                                              LBR_CTL_ENABLE_MASK;
+       if (svm->lbrv_enabled)
+               vmcb02->control.virt_ext  |=
+                       (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
        if (!nested_vmcb_needs_vls_intercept(svm))
                vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@@ -919,7 +932,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 
        svm_switch_vmcb(svm, &svm->vmcb01);
 
-       if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
+       if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+               svm_copy_lbrs(vmcb12, vmcb02);
+               svm_update_lbrv(vcpu);
+       } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb01, vmcb02);
                svm_update_lbrv(vcpu);
        }
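
Putting the two nested.c save-state hunks together: on nested VMRUN, the LBR state that vmcb02 starts from depends on whether L1 enabled LBR virtualization for L2, and on nested #VMEXIT the accumulated state flows back along the same path. A compact model of that routing decision follows; the enum and parameter names are illustrative, and LBR_CTL_ENABLE_MASK is assumed to be bit 0 of virt_ext as in the kernel's SVM headers.

/* Model of whose LBR state vmcb02 carries while L2 runs, and where it is
 * copied back on nested #VMEXIT, based on the two conditions used above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LBR_CTL_ENABLE_MASK (1ULL << 0)   /* LBR virtualization enable */

enum lbr_route { LBR_KEEP_NONE, LBR_USE_L2_STATE, LBR_USE_L1_STATE };

static enum lbr_route route_lbrs(bool lbrv_enabled,        /* LBRV exposed to L1  */
                                 uint64_t nested_virt_ext, /* vmcb12 control      */
                                 uint64_t vmcb01_virt_ext) /* L1's own LBR setting */
{
        if (lbrv_enabled && (nested_virt_ext & LBR_CTL_ENABLE_MASK))
                return LBR_USE_L2_STATE;   /* vmcb12 <-> vmcb02 */
        if (vmcb01_virt_ext & LBR_CTL_ENABLE_MASK)
                return LBR_USE_L1_STATE;   /* vmcb01 <-> vmcb02 */
        return LBR_KEEP_NONE;
}

int main(void)
{
        printf("%d\n", route_lbrs(true, LBR_CTL_ENABLE_MASK, 0));                    /* L2 state */
        printf("%d\n", route_lbrs(false, LBR_CTL_ENABLE_MASK, LBR_CTL_ENABLE_MASK)); /* L1 state */
        return 0;
}
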
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 699a367af107d051e811efd3d21ba76d781e83a3..a6282be4e419a5a0bc4b1399d27872ae638b5f4f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -62,8 +62,6 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
-#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
-
 static bool erratum_383_found __read_mostly;
 
 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
@@ -878,6 +876,10 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
        bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
                                      LBR_CTL_ENABLE_MASK);
 
+       if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
+               if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
+                       enable_lbrv = true;
+
        if (enable_lbrv == current_enable_lbrv)
                return;
 
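
The svm_update_lbrv() change above forces host-side LBR tracking on whenever L2 runs with L1's LBR virtualization enabled, even if the currently loaded DEBUGCTL would not ask for it. A minimal model of the resulting decision; the starting point (the LBR bit of the active VMCB's DEBUGCTL) is an assumption about the code outside this hunk, while the override is exactly the added condition.

/* Sketch of the enable/disable decision in svm_update_lbrv() after this patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEBUGCTLMSR_LBR      (1ULL << 0)
#define LBR_CTL_ENABLE_MASK  (1ULL << 0)

static bool want_lbrv(uint64_t current_dbgctl,  /* active VMCB save.dbgctl */
                      bool is_guest_mode,       /* running L2?             */
                      bool lbrv_enabled,        /* LBRV exposed to L1      */
                      uint64_t nested_virt_ext) /* vmcb12 control.virt_ext */
{
        bool enable = current_dbgctl & DEBUGCTLMSR_LBR;

        /* New in this patch: L1 asked for LBR virtualization for L2. */
        if (is_guest_mode && lbrv_enabled &&
            (nested_virt_ext & LBR_CTL_ENABLE_MASK))
                enable = true;

        return enable;
}

int main(void)
{
        /* L2 running, nested LBRV enabled, DEBUGCTL.LBR clear -> still on. */
        printf("%d\n", want_lbrv(0, true, true, LBR_CTL_ENABLE_MASK));
        return 0;
}
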
@@ -4012,6 +4014,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                             guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
        svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
+       svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
 
        svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
@@ -4765,6 +4768,8 @@ static __init void svm_set_cpu_caps(void)
 
                if (vls)
                        kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
+               if (lbrv)
+                       kvm_cpu_cap_set(X86_FEATURE_LBRV);
 
                /* Nested VM can receive #VMEXIT instead of triggering #GP */
                kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
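
With lbrv reported through svm_set_cpu_caps(), a nested-capable L1 can discover the feature the usual way: X86_FEATURE_LBRV corresponds to CPUID Fn8000_000A, EDX bit 1 (LbrVirt). A small guest-side probe, shown here as a sketch using the compiler-provided <cpuid.h> on x86-64:

/* L1-guest probe for the LBR-virtualization SVM feature advertised above. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid_max(0x80000000, NULL) < 0x8000000a) {
                puts("SVM feature leaf not present");
                return 1;
        }
        __cpuid(0x8000000a, eax, ebx, ecx, edx);
        printf("LbrVirt: %s\n", (edx & (1u << 1)) ? "yes" : "no");
        return 0;
}
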
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 47d4f389bf9107acc981986861bbf0f1d62de2d3..b687393e86ad942edb862bfa966ba7e881b9c405 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -236,6 +236,7 @@ struct vcpu_svm {
        bool nrips_enabled                : 1;
        bool tsc_scaling_enabled          : 1;
        bool v_vmload_vmsave_enabled      : 1;
+       bool lbrv_enabled                 : 1;
 
        u32 ldr_reg;
        u32 dfr_reg;
@@ -486,6 +487,8 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
 /* svm.c */
 #define MSR_INVALID                            0xffffffffU
 
+#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+
 extern bool dump_invalid_vmcb;
 
 u32 svm_msrpm_offset(u32 msr);