git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: Use prepare/commit hooks to handle generic memslot metadata updates
author Sean Christopherson <seanjc@google.com>
Mon, 6 Dec 2021 19:54:19 +0000 (20:54 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Dec 2021 09:24:26 +0000 (04:24 -0500)
Handle the generic memslot metadata, a.k.a. dirty bitmap, updates at the
same time that arch handles its own metadata updates, i.e. at memslot
prepare and commit.  This will simplify converting @new to a dynamically
allocated object, and more closely aligns common KVM with architecture
code.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <2ddd5446e3706fe3c1e52e3df279f04c458be830.1638817640.git.maciej.szmigiero@oracle.com>

virt/kvm/kvm_main.c

index 373079a03710af99e0af9a42d5b152059c81ea22..ec5567e8442b97ccf3cee93d9a8c9c5b7bac7a65 100644 (file)
@@ -1549,6 +1549,69 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to,
                to->memslots[i].arch = from->memslots[i].arch;
 }
 
+static int kvm_prepare_memory_region(struct kvm *kvm,
+                                    const struct kvm_memory_slot *old,
+                                    struct kvm_memory_slot *new,
+                                    enum kvm_mr_change change)
+{
+       int r;
+
+       /*
+        * If dirty logging is disabled, nullify the bitmap; the old bitmap
+        * will be freed on "commit".  If logging is enabled in both old and
+        * new, reuse the existing bitmap.  If logging is enabled only in the
+        * new and KVM isn't using a ring buffer, allocate and initialize a
+        * new bitmap.
+        */
+       if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+               new->dirty_bitmap = NULL;
+       else if (old->dirty_bitmap)
+               new->dirty_bitmap = old->dirty_bitmap;
+       else if (!kvm->dirty_ring_size) {
+               r = kvm_alloc_dirty_bitmap(new);
+               if (r)
+                       return r;
+
+               if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+                       bitmap_set(new->dirty_bitmap, 0, new->npages);
+       }
+
+       r = kvm_arch_prepare_memory_region(kvm, old, new, change);
+
+       /* Free the bitmap on failure if it was allocated above. */
+       if (r && new->dirty_bitmap && !old->dirty_bitmap)
+               kvm_destroy_dirty_bitmap(new);
+
+       return r;
+}
+
+static void kvm_commit_memory_region(struct kvm *kvm,
+                                    struct kvm_memory_slot *old,
+                                    const struct kvm_memory_slot *new,
+                                    enum kvm_mr_change change)
+{
+       /*
+        * Update the total number of memslot pages before calling the arch
+        * hook so that architectures can consume the result directly.
+        */
+       if (change == KVM_MR_DELETE)
+               kvm->nr_memslot_pages -= old->npages;
+       else if (change == KVM_MR_CREATE)
+               kvm->nr_memslot_pages += new->npages;
+
+       kvm_arch_commit_memory_region(kvm, old, new, change);
+
+       /*
+        * Free the old memslot's metadata.  On DELETE, free the whole thing,
+        * otherwise free the dirty bitmap as needed (the below effectively
+        * checks both the flags and whether a ring buffer is being used).
+        */
+       if (change == KVM_MR_DELETE)
+               kvm_free_memslot(kvm, old);
+       else if (old->dirty_bitmap && !new->dirty_bitmap)
+               kvm_destroy_dirty_bitmap(old);
+}
+
 static int kvm_set_memslot(struct kvm *kvm,
                           struct kvm_memory_slot *new,
                           enum kvm_mr_change change)
@@ -1635,27 +1698,14 @@ static int kvm_set_memslot(struct kvm *kvm,
                old.as_id = new->as_id;
        }
 
-       r = kvm_arch_prepare_memory_region(kvm, &old, new, change);
+       r = kvm_prepare_memory_region(kvm, &old, new, change);
        if (r)
                goto out_slots;
 
        update_memslots(slots, new, change);
        slots = install_new_memslots(kvm, new->as_id, slots);
 
-       /*
-        * Update the total number of memslot pages before calling the arch
-        * hook so that architectures can consume the result directly.
-        */
-       if (change == KVM_MR_DELETE)
-               kvm->nr_memslot_pages -= old.npages;
-       else if (change == KVM_MR_CREATE)
-               kvm->nr_memslot_pages += new->npages;
-
-       kvm_arch_commit_memory_region(kvm, &old, new, change);
-
-       /* Free the old memslot's metadata.  Note, this is the full copy!!! */
-       if (change == KVM_MR_DELETE)
-               kvm_free_memslot(kvm, &old);
+       kvm_commit_memory_region(kvm, &old, new, change);
 
        kvfree(slots);
        return 0;
@@ -1751,7 +1801,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
        if (!old.npages) {
                change = KVM_MR_CREATE;
-               new.dirty_bitmap = NULL;
 
                /*
                 * To simplify KVM internals, the total number of pages across
@@ -1771,9 +1820,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        change = KVM_MR_FLAGS_ONLY;
                else /* Nothing to change. */
                        return 0;
-
-               /* Copy dirty_bitmap from the current memslot. */
-               new.dirty_bitmap = old.dirty_bitmap;
        }
 
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1787,30 +1833,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                }
        }
 
-       /* Allocate/free page dirty bitmap as needed */
-       if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
-               new.dirty_bitmap = NULL;
-       else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
-               r = kvm_alloc_dirty_bitmap(&new);
-               if (r)
-                       return r;
-
-               if (kvm_dirty_log_manual_protect_and_init_set(kvm))
-                       bitmap_set(new.dirty_bitmap, 0, new.npages);
-       }
-
-       r = kvm_set_memslot(kvm, &new, change);
-       if (r)
-               goto out_bitmap;
-
-       if (old.dirty_bitmap && !new.dirty_bitmap)
-               kvm_destroy_dirty_bitmap(&old);
-       return 0;
-
-out_bitmap:
-       if (new.dirty_bitmap && !old.dirty_bitmap)
-               kvm_destroy_dirty_bitmap(&new);
-       return r;
+       return kvm_set_memslot(kvm, &new, change);
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);