mm/vmalloc: introduce alloc_pages_bulk_array_mempolicy to accelerate memory allocation
author    Chen Wandun <chenwandun@huawei.com>
          Fri, 5 Nov 2021 20:39:53 +0000 (13:39 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 6 Nov 2021 20:30:37 +0000 (13:30 -0700)
Commit ec503e6df89e ("mm/vmalloc: fix numa spreading for large hash
tables") can cause significant performance regressions in some
situations, as Andrew mentioned in [1].  The main situation is vmalloc:
by default vmalloc allocates pages with NUMA_NO_NODE, which results in
pages being allocated one at a time.

In order to solve this, __alloc_pages_bulk and mempolicy should be
considered at the same time.

1) If a node is specified in the memory allocation request, allocate all
   pages with __alloc_pages_bulk.

2) If memory is allocated with an interleave policy, calculate how many
   pages should be allocated on each node, and use __alloc_pages_bulk to
   allocate pages on each node (see the sketch below).
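
For illustration only (this sketch is not part of the commit): a minimal
user-space model of the split arithmetic in case 2, assuming 10 pages
spread over a hypothetical set of 3 nodes; the first "delta" nodes each
receive one extra page.

    /*
     * Illustration of how nr_pages is split across the interleave nodes.
     * Node numbering here is hypothetical; the kernel walks the policy's
     * nodemask via interleave_nodes() instead.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_pages = 10;
            int nodes = 3;          /* e.g. nodes_weight(pol->nodes) */
            unsigned long per_node = nr_pages / nodes;
            int delta = nr_pages - nodes * per_node;
            int i;

            for (i = 0; i < nodes; i++)
                    printf("node %d: %lu pages\n", i,
                           per_node + (i < delta ? 1 : 0));
            /* prints 4, 3 and 3: the first "delta" nodes get one more page */
            return 0;
    }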

[1]: https://lore.kernel.org/lkml/CALvZod4G3SzP3kWxQYn0fj+VgG-G3yWXz=gz17+3N57ru1iajw@mail.gmail.com/t/#m750c8e3231206134293b089feaa090590afa0f60

[akpm@linux-foundation.org: coding style fixes]
[akpm@linux-foundation.org: make two functions static]
[akpm@linux-foundation.org: fix CONFIG_NUMA=n build]

Link: https://lkml.kernel.org/r/20211021080744.874701-3-chenwandun@huawei.com
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/gfp.h
mm/mempolicy.c
mm/vmalloc.c

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fbd4abc33f24215f72be3fb5825504a743d45cee..c1b262725dca9abdb7bc13d7269da08ee2881b0b 100644
@@ -535,6 +535,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                                struct list_head *page_list,
                                struct page **page_array);
 
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+                               unsigned long nr_pages,
+                               struct page **page_array);
+
 /* Bulk allocate order-0 pages */
 static inline unsigned long
 alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
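
As a hedged illustration of how a caller might use the new helper (not
taken from the commit): the function name and signature come from the
hunk above, while my_fill_pages and its fallback loop are assumptions
modelled loosely on the vmalloc caller further below.

    /*
     * Sketch only: bulk-fill a page array while honouring the caller's
     * mempolicy, then fall back to single-page allocation for whatever
     * the bulk path could not provide.  my_fill_pages is hypothetical.
     */
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static unsigned long my_fill_pages(gfp_t gfp, struct page **pages,
                                       unsigned long nr_pages)
    {
            unsigned long nr = 0;

            /*
             * Mirror the vmalloc caller below: the mempolicy-aware bulk
             * path is only built for CONFIG_NUMA, and it may return
             * fewer pages than requested.
             */
            if (IS_ENABLED(CONFIG_NUMA))
                    nr = alloc_pages_bulk_array_mempolicy(gfp, nr_pages,
                                                          pages);

            while (nr < nr_pages) {
                    struct page *page = alloc_page(gfp);

                    if (!page)
                            break;
                    pages[nr++] = page;
            }
            return nr;
    }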
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d12e0608fced235dc9137d0628437046299c7cfc..ce722333d9f64303f947310d7f62ed34e955eddc 100644
@@ -2196,6 +2196,88 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
 }
 EXPORT_SYMBOL(alloc_pages);
 
+static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
+               struct mempolicy *pol, unsigned long nr_pages,
+               struct page **page_array)
+{
+       int nodes;
+       unsigned long nr_pages_per_node;
+       int delta;
+       int i;
+       unsigned long nr_allocated;
+       unsigned long total_allocated = 0;
+
+       nodes = nodes_weight(pol->nodes);
+       nr_pages_per_node = nr_pages / nodes;
+       delta = nr_pages - nodes * nr_pages_per_node;
+
+       for (i = 0; i < nodes; i++) {
+               if (delta) {
+                       nr_allocated = __alloc_pages_bulk(gfp,
+                                       interleave_nodes(pol), NULL,
+                                       nr_pages_per_node + 1, NULL,
+                                       page_array);
+                       delta--;
+               } else {
+                       nr_allocated = __alloc_pages_bulk(gfp,
+                                       interleave_nodes(pol), NULL,
+                                       nr_pages_per_node, NULL, page_array);
+               }
+
+               page_array += nr_allocated;
+               total_allocated += nr_allocated;
+       }
+
+       return total_allocated;
+}
+
+static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
+               struct mempolicy *pol, unsigned long nr_pages,
+               struct page **page_array)
+{
+       gfp_t preferred_gfp;
+       unsigned long nr_allocated = 0;
+
+       preferred_gfp = gfp | __GFP_NOWARN;
+       preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
+
+       nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
+                                          nr_pages, NULL, page_array);
+
+       if (nr_allocated < nr_pages)
+               nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
+                               nr_pages - nr_allocated, NULL,
+                               page_array + nr_allocated);
+       return nr_allocated;
+}
+
+/* Bulk page allocation and the mempolicy should be considered at the
+ * same time in some situations, such as vmalloc.
+ *
+ * It can accelerate memory allocation, especially for interleaved
+ * memory allocation.
+ */
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+               unsigned long nr_pages, struct page **page_array)
+{
+       struct mempolicy *pol = &default_policy;
+
+       if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+               pol = get_task_policy(current);
+
+       if (pol->mode == MPOL_INTERLEAVE)
+               return alloc_pages_bulk_array_interleave(gfp, pol,
+                                                        nr_pages, page_array);
+
+       if (pol->mode == MPOL_PREFERRED_MANY)
+               return alloc_pages_bulk_array_preferred_many(gfp,
+                               numa_node_id(), pol, nr_pages, page_array);
+
+       return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
+                                 policy_nodemask(gfp, pol), nr_pages, NULL,
+                                 page_array);
+}
+
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
        struct mempolicy *pol = mpol_dup(vma_policy(src));
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c56720136c45dddeef00730ec9306422720fce14..d2a00ad4e1dd155eb474c797e060f6b85f3d37d4 100644
@@ -2843,7 +2843,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * to fails, fallback to a single page allocator that is
         * more permissive.
         */
-       if (!order && nid != NUMA_NO_NODE) {
+       if (!order) {
                while (nr_allocated < nr_pages) {
                        unsigned int nr, nr_pages_request;
 
@@ -2855,8 +2855,20 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                         */
                        nr_pages_request = min(100U, nr_pages - nr_allocated);
 
-                       nr = alloc_pages_bulk_array_node(gfp, nid,
-                               nr_pages_request, pages + nr_allocated);
+                       /* Memory allocation must honour the mempolicy; we must
+                        * not blindly use the nearest node when nid == NUMA_NO_NODE,
+                        * otherwise memory may be allocated on only one node while
+                        * the mempolicy wants it interleaved across nodes.
+                        */
+                       if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
+                               nr = alloc_pages_bulk_array_mempolicy(gfp,
+                                                       nr_pages_request,
+                                                       pages + nr_allocated);
+
+                       else
+                               nr = alloc_pages_bulk_array_node(gfp, nid,
+                                                       nr_pages_request,
+                                                       pages + nr_allocated);
 
                        nr_allocated += nr;
                        cond_resched();
@@ -2868,7 +2880,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else if (order)
+       } else
                /*
                 * Compound pages required for remap_vmalloc_page if
                 * high-order pages.
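
As a final hedged illustration (not part of the commit) of the effect of
the vmalloc change: with an interleave mempolicy set for the calling
task, the pages backing a large vmalloc area should now be spread across
nodes rather than clustered on one.  check_vmalloc_spread and the chosen
size are hypothetical; vmalloc_to_page() and page_to_nid() are existing
kernel helpers.

    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Report which NUMA node each backing page of a vmalloc area uses. */
    static void check_vmalloc_spread(unsigned long nr_pages)
    {
            void *area = vmalloc(nr_pages * PAGE_SIZE);
            unsigned long i;

            if (!area)
                    return;

            for (i = 0; i < nr_pages; i++) {
                    struct page *page = vmalloc_to_page(area + i * PAGE_SIZE);

                    pr_info("page %5lu on node %d\n", i, page_to_nid(page));
            }
            vfree(area);
    }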