git.baikalelectronics.ru Git - kernel.git/commitdiff
mm/slab: use kmalloc_node() for off slab freelist_idx_t array allocation
author    Hyeonggon Yoo <42.hyeyoo@gmail.com>
          Sat, 15 Oct 2022 04:34:29 +0000 (13:34 +0900)
committer Vlastimil Babka <vbabka@suse.cz>
          Sat, 15 Oct 2022 19:42:05 +0000 (21:42 +0200)
After commit 710bdf2b63c5 ("mm/slab: kmalloc: pass requests larger than
order-1 page to page allocator"), SLAB passes large (> PAGE_SIZE * 2)
requests to the buddy allocator, just as SLUB does.
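
For illustration, here is a small userspace model of that dispatch. It
is a sketch under an assumed 4 KiB page size; the MODEL_* names are
hypothetical stand-ins, not kernel symbols (the real cutoff is
KMALLOC_MAX_CACHE_SIZE, and the real rounding is
PAGE_SIZE << get_order(size)):

  /* Hypothetical userspace model, not kernel code: requests above the
   * order-1 cap skip the kmalloc caches and take power-of-two page
   * orders from the buddy allocator. */
  #include <stdio.h>
  #include <stddef.h>

  #define MODEL_PAGE_SIZE   4096UL                  /* assumed 4 KiB pages */
  #define MODEL_KMALLOC_MAX (2 * MODEL_PAGE_SIZE)   /* order-1 cap */

  /* mirrors get_order(): smallest page order covering size */
  static unsigned int model_get_order(size_t size)
  {
          unsigned int order = 0;

          while ((MODEL_PAGE_SIZE << order) < size)
                  order++;
          return order;
  }

  int main(void)
  {
          size_t sizes[] = { 4096, 8192, 8193, 16384, 40000 };

          for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                  size_t sz = sizes[i];

                  if (sz > MODEL_KMALLOC_MAX)
                          printf("%6zu -> buddy, order %u (%lu bytes)\n",
                                 sz, model_get_order(sz),
                                 MODEL_PAGE_SIZE << model_get_order(sz));
                  else
                          printf("%6zu -> kmalloc cache\n", sz);
          }
          return 0;
  }

In this model, sizes of 8192 and below stay in kmalloc caches, while
8193 already takes an order-2 (16384-byte) buddy allocation.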

SLAB has been using kmalloc caches to allocate the freelist_idx_t array
for off-slab caches. But after the commit, freelist_size can be bigger
than KMALLOC_MAX_CACHE_SIZE.
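
As a rough worked example with hypothetical numbers (4 KiB pages, so
KMALLOC_MAX_CACHE_SIZE is 2 * PAGE_SIZE = 8192, and a two-byte
freelist_idx_t, which can be one or two bytes depending on
configuration): a slab geometry with num = 5000 objects needs
freelist_size = 5000 * 2 = 10000 bytes, and there is no kmalloc cache
that large for kmalloc_slab() to return.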

Instead of keeping a pointer to a kmalloc cache, use kmalloc_node() and
only check whether the kmalloc cache is off-slab during
calculate_slab_order(). If freelist_size > KMALLOC_MAX_CACHE_SIZE, no
looping condition can occur, because the freelist_idx_t array is
allocated directly from the buddy allocator.
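
In that case the benefit check uses freelist_cache_size =
PAGE_SIZE << get_order(freelist_size), i.e. the number of bytes the
page allocator actually hands out (16384 for the hypothetical
10000-byte array above). The free side stays symmetric: after the
commit above, kfree() also frees page-allocator-backed ("large
kmalloc") objects, which is why slab_destroy() in the diff below can
switch from kmem_cache_free() to plain kfree().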

Link: https://lore.kernel.org/all/20221014205818.GA1428667@roeck-us.net/
Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Fixes: 710bdf2b63c5 ("mm/slab: kmalloc: pass requests larger than order-1 page to page allocator")
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/slab_def.h
mm/slab.c

index e24c9aff6fed0cdc8abefabc97f0acbf1bf1f01c..f0ffad6a336531e0383cc2162b481241ef1ad440 100644 (file)
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -33,7 +33,6 @@ struct kmem_cache {
 
        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
-       struct kmem_cache *freelist_cache;
        unsigned int freelist_size;
 
        /* constructor func */
index a5486ff8362a13099c79eff6a8f77a394db361a3..d1f6e2c64c2ec4a9f328dd4b60e3412726c4a496 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
         * although actual page can be freed in rcu context
         */
        if (OFF_SLAB(cachep))
-               kmem_cache_free(cachep->freelist_cache, freelist);
+               kfree(freelist);
 }
 
 /*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                if (flags & CFLGS_OFF_SLAB) {
                        struct kmem_cache *freelist_cache;
                        size_t freelist_size;
+                       size_t freelist_cache_size;
 
                        freelist_size = num * sizeof(freelist_idx_t);
-                       freelist_cache = kmalloc_slab(freelist_size, 0u);
-                       if (!freelist_cache)
-                               continue;
-
-                       /*
-                        * Needed to avoid possible looping condition
-                        * in cache_grow_begin()
-                        */
-                       if (OFF_SLAB(freelist_cache))
-                               continue;
+                       if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
+                               freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
+                       } else {
+                               freelist_cache = kmalloc_slab(freelist_size, 0u);
+                               if (!freelist_cache)
+                                       continue;
+                               freelist_cache_size = freelist_cache->size;
+
+                               /*
+                                * Needed to avoid possible looping condition
+                                * in cache_grow_begin()
+                                */
+                               if (OFF_SLAB(freelist_cache))
+                                       continue;
+                       }
 
                        /* check if off slab has enough benefit */
-                       if (freelist_cache->size > cachep->size / 2)
+                       if (freelist_cache_size > cachep->size / 2)
                                continue;
                }
 
@@ -2061,11 +2067,6 @@ done:
                cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
 
-       if (OFF_SLAB(cachep)) {
-               cachep->freelist_cache =
-                       kmalloc_slab(cachep->freelist_size, 0u);
-       }
-
        err = setup_cpu_cache(cachep, gfp);
        if (err) {
                __kmem_cache_release(cachep);
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                freelist = NULL;
        else if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
-               freelist = kmem_cache_alloc_node(cachep->freelist_cache,
+               freelist = kmalloc_node(cachep->freelist_size,
                                              local_flags, nodeid);
        } else {
                /* We will use last bytes at the slab for freelist */
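
To make the patched calculate_slab_order() sizing above concrete, here
is a hypothetical userspace model. It is a sketch, not kernel code:
kmalloc cache sizes are approximated as powers of two (the real 96- and
192-byte caches are ignored) and the OFF_SLAB() recursion check is left
out.

  #include <stdio.h>
  #include <stdbool.h>
  #include <stddef.h>

  #define MODEL_PAGE_SIZE   4096UL
  #define MODEL_KMALLOC_MAX (2 * MODEL_PAGE_SIZE)

  /* PAGE_SIZE << get_order(size): what the buddy allocator hands out */
  static size_t buddy_roundup(size_t size)
  {
          size_t bytes = MODEL_PAGE_SIZE;

          while (bytes < size)
                  bytes <<= 1;
          return bytes;
  }

  /* crude stand-in for kmalloc_slab()->size (power-of-two caches only) */
  static size_t kmalloc_cache_roundup(size_t size)
  {
          size_t bytes = 8;

          while (bytes < size)
                  bytes <<= 1;
          return bytes;
  }

  /* true if the freelist array may live off-slab for this geometry */
  static bool off_slab_has_benefit(size_t num, size_t idx_size,
                                   size_t obj_size)
  {
          size_t freelist_size = num * idx_size;
          size_t freelist_cache_size;

          if (freelist_size > MODEL_KMALLOC_MAX)
                  freelist_cache_size = buddy_roundup(freelist_size);
          else
                  freelist_cache_size = kmalloc_cache_roundup(freelist_size);

          /* mirrors: if (freelist_cache_size > cachep->size / 2) continue; */
          return freelist_cache_size <= obj_size / 2;
  }

  int main(void)
  {
          /* 64 two-byte indices round up to 128 bytes of kmalloc cache:
           * worthwhile next to 512-byte objects, not 200-byte ones */
          printf("%d\n", off_slab_has_benefit(64, 2, 512));  /* 1 */
          printf("%d\n", off_slab_has_benefit(64, 2, 200));  /* 0 */
          return 0;
  }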