zsmalloc: decouple class actions from zspage works
author     Minchan Kim <minchan@kernel.org>
           Sat, 22 Jan 2022 06:13:57 +0000 (22:13 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 22 Jan 2022 06:33:37 +0000 (08:33 +0200)
This patch moves the class stat updates (OBJ_USED) out of obj_malloc()/obj_free(),
since they are not related to zspage operations; the callers in zs_malloc()/zs_free()
now adjust them under class->lock.  This is a preparation for introducing a new
lock scheme in the next patch.

Link: https://lkml.kernel.org/r/20211115185909.3949505-4-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
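
For illustration, the shape the patch moves toward can be sketched in plain
userspace C: the per-object helper touches only the zspage, while the caller
takes class->lock and does the class-level accounting itself.  The structs and
names below are simplified stand-ins for the zsmalloc ones, not the actual
kernel definitions.

    /* Hypothetical userspace model of the decoupled accounting; not kernel code. */
    #include <pthread.h>
    #include <stdio.h>

    struct zspage {
            int inuse;              /* objects allocated from this zspage */
            int freeobj;            /* next free object index */
    };

    struct size_class {
            pthread_mutex_t lock;   /* models class->lock */
            long obj_used;          /* models the OBJ_USED class stat */
    };

    struct zs_pool {
            struct size_class class;
            struct zspage zspage;
    };

    /* zspage-only work: no class statistics are touched here (as after the patch) */
    static unsigned long obj_malloc(struct zs_pool *pool, struct zspage *zspage)
    {
            (void)pool;             /* the kernel uses pool to look up the size_class */
            zspage->inuse++;
            return (unsigned long)zspage->freeobj++;
    }

    /* the caller holds class->lock and updates the class stats itself */
    static unsigned long zs_malloc_like(struct zs_pool *pool)
    {
            unsigned long obj;

            pthread_mutex_lock(&pool->class.lock);
            obj = obj_malloc(pool, &pool->zspage);
            pool->class.obj_used++; /* was class_stat_inc(class, OBJ_USED, 1) inside obj_malloc */
            pthread_mutex_unlock(&pool->class.lock);

            return obj;
    }

    int main(void)
    {
            struct zs_pool pool = {
                    .class = { .lock = PTHREAD_MUTEX_INITIALIZER, .obj_used = 0 },
            };

            zs_malloc_like(&pool);
            printf("OBJ_USED: %ld, zspage inuse: %d\n",
                   pool.class.obj_used, pool.zspage.inuse);
            return 0;
    }

In the actual patch the same split is applied on the free side: class_stat_dec(class,
OBJ_USED, 1) moves out of obj_free() and into zs_free(), under the same class->lock.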
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e219593cb9de909c861502c862fb216b2f2b7d18..c45807f170e84fb434322a0e7721fe62958d4584 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1360,17 +1360,19 @@ size_t zs_huge_class_size(struct zs_pool *pool)
 }
 EXPORT_SYMBOL_GPL(zs_huge_class_size);
 
-static unsigned long obj_malloc(struct size_class *class,
+static unsigned long obj_malloc(struct zs_pool *pool,
                                struct zspage *zspage, unsigned long handle)
 {
        int i, nr_page, offset;
        unsigned long obj;
        struct link_free *link;
+       struct size_class *class;
 
        struct page *m_page;
        unsigned long m_offset;
        void *vaddr;
 
+       class = pool->size_class[zspage->class];
        handle |= OBJ_ALLOCATED_TAG;
        obj = get_freeobj(zspage);
 
@@ -1394,7 +1396,6 @@ static unsigned long obj_malloc(struct size_class *class,
 
        kunmap_atomic(vaddr);
        mod_zspage_inuse(zspage, 1);
-       class_stat_inc(class, OBJ_USED, 1);
 
        obj = location_to_obj(m_page, obj);
 
@@ -1433,10 +1434,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        spin_lock(&class->lock);
        zspage = find_get_zspage(class);
        if (likely(zspage)) {
-               obj = obj_malloc(class, zspage, handle);
+               obj = obj_malloc(pool, zspage, handle);
                /* Now move the zspage to another fullness group, if required */
                fix_fullness_group(class, zspage);
                record_obj(handle, obj);
+               class_stat_inc(class, OBJ_USED, 1);
                spin_unlock(&class->lock);
 
                return handle;
@@ -1451,7 +1453,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        }
 
        spin_lock(&class->lock);
-       obj = obj_malloc(class, zspage, handle);
+       obj = obj_malloc(pool, zspage, handle);
        newfg = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, newfg);
        set_zspage_mapping(zspage, class->index, newfg);
@@ -1459,6 +1461,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        atomic_long_add(class->pages_per_zspage,
                                &pool->pages_allocated);
        class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
+       class_stat_inc(class, OBJ_USED, 1);
 
        /* We completely set up zspage so mark them as movable */
        SetZsPageMovable(pool, zspage);
@@ -1468,7 +1471,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-static void obj_free(struct size_class *class, unsigned long obj)
+static void obj_free(int class_size, unsigned long obj)
 {
        struct link_free *link;
        struct zspage *zspage;
@@ -1478,7 +1481,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
        void *vaddr;
 
        obj_to_location(obj, &f_page, &f_objidx);
-       f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+       f_offset = (class_size * f_objidx) & ~PAGE_MASK;
        zspage = get_zspage(f_page);
 
        vaddr = kmap_atomic(f_page);
@@ -1489,7 +1492,6 @@ static void obj_free(struct size_class *class, unsigned long obj)
        kunmap_atomic(vaddr);
        set_freeobj(zspage, f_objidx);
        mod_zspage_inuse(zspage, -1);
-       class_stat_dec(class, OBJ_USED, 1);
 }
 
 void zs_free(struct zs_pool *pool, unsigned long handle)
@@ -1513,7 +1515,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
        class = zspage_class(pool, zspage);
 
        spin_lock(&class->lock);
-       obj_free(class, obj);
+       obj_free(class->size, obj);
+       class_stat_dec(class, OBJ_USED, 1);
        fullness = fix_fullness_group(class, zspage);
        if (fullness != ZS_EMPTY) {
                migrate_read_unlock(zspage);
@@ -1671,7 +1674,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                }
 
                used_obj = handle_to_obj(handle);
-               free_obj = obj_malloc(class, get_zspage(d_page), handle);
+               free_obj = obj_malloc(pool, get_zspage(d_page), handle);
                zs_object_copy(class, free_obj, used_obj);
                obj_idx++;
                /*
@@ -1683,7 +1686,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                free_obj |= BIT(HANDLE_PIN_BIT);
                record_obj(handle, free_obj);
                unpin_tag(handle);
-               obj_free(class, used_obj);
+               obj_free(class->size, used_obj);
        }
 
        /* Remember last position in this iteration */