git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86/mmu: Zap collapsible SPTEs in shadow MMU at all possible levels
authorDavid Matlack <dmatlack@google.com>
Wed, 22 Jun 2022 19:27:06 +0000 (15:27 -0400)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 24 Jun 2022 08:51:59 +0000 (04:51 -0400)
Currently KVM only zaps collapsible 4KiB SPTEs in the shadow MMU. This
is fine for now since KVM never creates intermediate huge pages during
dirty logging. In other words, KVM always replaces 1GiB pages directly
with 4KiB pages, so there is no reason to look for collapsible 2MiB
pages.

However, this will stop being true once the shadow MMU participates in
eager page splitting. During eager page splitting, each 1GiB page is first
split into 2MiB pages and then those are split into 4KiB pages. The
intermediate 2MiB pages may be left behind if an error condition causes
eager page splitting to bail early.

No functional change intended.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-20-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 13a059ad5dc7eb1edeaf73cdc2e4272dc0bbd08b..7fdbb9060e9dd53a523d929a27e6b69b32ffe16a 100644 (file)
@@ -6154,18 +6154,24 @@ restart:
        return need_tlb_flush;
 }
 
+static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
+                                          const struct kvm_memory_slot *slot)
+{
+       /*
+        * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
+        * pages that are already mapped at the maximum hugepage level.
+        */
+       if (slot_handle_level(kvm, slot, kvm_mmu_zap_collapsible_spte,
+                             PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
+               kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+}
+
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *slot)
 {
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
-               /*
-                * Zap only 4k SPTEs since the legacy MMU only supports dirty
-                * logging at a 4k granularity and never creates collapsible
-                * 2m SPTEs during dirty logging.
-                */
-               if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
-                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               kvm_rmap_zap_collapsible_sptes(kvm, slot);
                write_unlock(&kvm->mmu_lock);
        }