]> git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86/mmu: Let guest use GBPAGES if supported in hardware and TDP is on
authorSean Christopherson <seanjc@google.com>
Tue, 22 Jun 2021 17:57:39 +0000 (10:57 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 24 Jun 2021 22:00:48 +0000 (18:00 -0400)
Let the guest use 1g hugepages if TDP is enabled and the host supports
GBPAGES.  KVM can't actively prevent the guest from using 1g pages in this
case since they can't be disabled in the hardware page walker.  While
injecting a page fault if a bogus 1g page is encountered during a
software page walk is perfectly reasonable since KVM is simply honoring
userspace's vCPU model, doing so arguably doesn't provide any meaningful
value, and at worst will be horribly confusing as the guest will see
inconsistent behavior and seemingly spurious page faults.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-55-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 690f560341a2ad80993b61e6d7b2e50bc03a8bf9..00732757cc609c44088b5813b9b0288928d39619 100644 (file)
@@ -4174,13 +4174,28 @@ __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
        }
 }
 
+static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
+{
+       /*
+        * If TDP is enabled, let the guest use GBPAGES if they're supported in
+        * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
+        * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
+        * walk for performance and complexity reasons.  Not to mention KVM
+        * _can't_ solve the problem because GVA->GPA walks aren't visible to
+        * KVM once a TDP translation is installed.  Mimic hardware behavior so
+        * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
+        */
+       return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
+                            guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
+}
+
 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu *context)
 {
        __reset_rsvds_bits_mask(&context->guest_rsvd_check,
                                vcpu->arch.reserved_gpa_bits,
                                context->root_level, is_efer_nx(context),
-                               guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
+                               guest_can_use_gbpages(vcpu),
                                is_cr4_pse(context),
                                guest_cpuid_is_amd_or_hygon(vcpu));
 }
@@ -4259,8 +4274,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
        shadow_zero_check = &context->shadow_zero_check;
        __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
                                context->shadow_root_level, uses_nx,
-                               guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
-                               is_pse, is_amd);
+                               guest_can_use_gbpages(vcpu), is_pse, is_amd);
 
        if (!shadow_me_mask)
                return;