KVM: x86/mmu: Pass memory caches to allocate SPs separately
author    David Matlack <dmatlack@google.com>
          Wed, 22 Jun 2022 19:26:57 +0000 (15:26 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 24 Jun 2022 08:51:56 +0000 (04:51 -0400)
Refactor kvm_mmu_alloc_shadow_page() to take, as a parameter, the caches
from which it allocates the various pieces of memory for shadow pages,
rather than deriving them from the vcpu pointer. This will be useful in a
future commit where shadow pages are allocated during VM ioctls for eager
page splitting, and thus will use a different set of caches.

Preemptively pull the caches out all the way to
kvm_mmu_get_shadow_page() since eager page splitting will not be calling
kvm_mmu_alloc_shadow_page() directly.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-11-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
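
For illustration only, a minimal sketch (not part of this patch) of how a
non-vCPU caller such as an eager-page-splitting ioctl path could feed its
own caches through the new interface. Only struct shadow_page_caches and
__kvm_mmu_get_shadow_page() come from this commit; the split_* cache names
and the wrapper function are hypothetical, and at this point in the series
the helper still takes a vcpu.

/*
 * Hypothetical caller, for illustration: the split_* caches are
 * placeholder names, not existing fields.  They would be owned by the
 * ioctl path and topped up (e.g. via kvm_mmu_topup_memory_cache())
 * before the MMU lock is taken.
 */
static struct kvm_mmu_memory_cache split_page_header_cache;
static struct kvm_mmu_memory_cache split_shadow_page_cache;
static struct kvm_mmu_memory_cache split_gfn_array_cache;

static struct kvm_mmu_page *split_get_shadow_page(struct kvm_vcpu *vcpu,
						  gfn_t gfn,
						  union kvm_mmu_page_role role)
{
	struct shadow_page_caches caches = {
		.page_header_cache = &split_page_header_cache,
		.shadow_page_cache = &split_shadow_page_cache,
		.gfn_array_cache   = &split_gfn_array_cache,
	};

	/* Same lookup/allocation path as the vCPU fault handler. */
	return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
}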
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2602c3642f230ba5475aab6d0248f81f0580df2b..fab417e7bf6ce4edf43eeef8029582d6a44f47de 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2049,17 +2049,25 @@ out:
        return sp;
 }
 
+/* Caches used when allocating a new shadow page. */
+struct shadow_page_caches {
+       struct kvm_mmu_memory_cache *page_header_cache;
+       struct kvm_mmu_memory_cache *shadow_page_cache;
+       struct kvm_mmu_memory_cache *gfn_array_cache;
+};
+
 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
+                                                     struct shadow_page_caches *caches,
                                                      gfn_t gfn,
                                                      struct hlist_head *sp_list,
                                                      union kvm_mmu_page_role role)
 {
        struct kvm_mmu_page *sp;
 
-       sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-       sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+       sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
+       sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
        if (!role.direct)
-               sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+               sp->gfns = kvm_mmu_memory_cache_alloc(caches->gfn_array_cache);
 
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
@@ -2081,9 +2089,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
        return sp;
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
-                                                   gfn_t gfn,
-                                                   union kvm_mmu_page_role role)
+static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+                                                     struct shadow_page_caches *caches,
+                                                     gfn_t gfn,
+                                                     union kvm_mmu_page_role role)
 {
        struct hlist_head *sp_list;
        struct kvm_mmu_page *sp;
@@ -2094,13 +2103,26 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
        sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
        if (!sp) {
                created = true;
-               sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
+               sp = kvm_mmu_alloc_shadow_page(vcpu, caches, gfn, sp_list, role);
        }
 
        trace_kvm_mmu_get_page(sp, created);
        return sp;
 }
 
+static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+                                                   gfn_t gfn,
+                                                   union kvm_mmu_page_role role)
+{
+       struct shadow_page_caches caches = {
+               .page_header_cache = &vcpu->arch.mmu_page_header_cache,
+               .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
+               .gfn_array_cache = &vcpu->arch.mmu_gfn_array_cache,
+       };
+
+       return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
+}
+
 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access)
 {
        struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);