Revert "KVM: x86/mmu: Zap only the relevant pages when removing a memslot"
author     Paolo Bonzini <pbonzini@redhat.com>
           Thu, 15 Aug 2019 07:43:32 +0000 (09:43 +0200)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 21 Aug 2019 08:28:41 +0000 (10:28 +0200)
This reverts commit 2955cb206c319211ab4e59f7f197fbf280e0cbbe.
Alex Williamson reported device assignment regressions with this
patch applied.  Even though the bug is probably elsewhere and still
latent, reverting it is needed to fix the regression.

Fixes: 2955cb206c31 ("KVM: x86/mmu: Zap only the relevant pages when removing a memslot", 2019-02-05)
Reported-by: Alex Williamson <alex.williamson@redhat.com>
Cc: stable@vger.kernel.org
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579936c813ee7d34fc99d3b4608344f..218b277bfda3fdd2b1dd6a7569fc360855f3cc53 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
 {
-       struct kvm_mmu_page *sp;
-       LIST_HEAD(invalid_list);
-       unsigned long i;
-       bool flush;
-       gfn_t gfn;
-
-       spin_lock(&kvm->mmu_lock);
-
-       if (list_empty(&kvm->arch.active_mmu_pages))
-               goto out_unlock;
-
-       flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-       for (i = 0; i < slot->npages; i++) {
-               gfn = slot->base_gfn + i;
-
-               for_each_valid_sp(kvm, sp, gfn) {
-                       if (sp->gfn != gfn)
-                               continue;
-
-                       kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-               }
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-                       flush = false;
-                       cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
-       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-       spin_unlock(&kvm->mmu_lock);
+       kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
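
For context, the behavioral difference this revert makes can be modeled
outside the kernel.  The toy program below is a minimal sketch, not
kernel code: shadow_page, zap_all() and zap_slot_range() are
hypothetical stand-ins for kvm_mmu_page, kvm_mmu_zap_all() and the
per-memslot loop removed above.  It shows what the reverted patch was
after (freeing only pages whose gfn falls inside the removed slot) and
what the revert trades for safety (every shadow page is dropped and
must be rebuilt on the next fault).

#include <stdio.h>

/* Toy model of the two zap strategies; names are hypothetical,
 * not the kernel's.  A "shadow page" is reduced to the gfn it maps. */
struct shadow_page {
	unsigned long gfn;
	int zapped;
};

/* The revert's behavior: drop every shadow page unconditionally. */
static void zap_all(struct shadow_page *pages, int n)
{
	for (int i = 0; i < n; i++)
		pages[i].zapped = 1;
}

/* The reverted patch's behavior: drop only pages whose gfn lies
 * in the removed memslot [base_gfn, base_gfn + npages). */
static void zap_slot_range(struct shadow_page *pages, int n,
			   unsigned long base_gfn, unsigned long npages)
{
	for (int i = 0; i < n; i++)
		if (pages[i].gfn >= base_gfn &&
		    pages[i].gfn < base_gfn + npages)
			pages[i].zapped = 1;
}

int main(void)
{
	struct shadow_page pages[] = {
		{ .gfn = 0x100 }, { .gfn = 0x200 },
		{ .gfn = 0x201 }, { .gfn = 0x300 },
	};
	int n = sizeof(pages) / sizeof(pages[0]);

	/* Remove a slot covering gfns 0x200-0x2ff: only two pages go. */
	zap_slot_range(pages, n, 0x200, 0x100);
	for (int i = 0; i < n; i++)
		printf("gfn %#lx after zap_slot_range: %s\n", pages[i].gfn,
		       pages[i].zapped ? "zapped" : "kept");

	/* The revert's behavior: everything goes, slot-related or not. */
	zap_all(pages, n);
	for (int i = 0; i < n; i++)
		printf("gfn %#lx after zap_all: %s\n", pages[i].gfn,
		       pages[i].zapped ? "zapped" : "kept");
	return 0;
}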