git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: memcg/percpu: account extra objcg space to memory cgroups
author Qi Zheng <zhengqi.arch@bytedance.com>
Fri, 14 Jan 2022 22:09:12 +0000 (14:09 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Jan 2022 14:30:31 +0000 (16:30 +0200)
Similar to slab memory allocator, for each accounted percpu object there
is an extra space which is used to store obj_cgroup membership.  Charge
it too.

[akpm@linux-foundation.org: fix layout]

Link: https://lkml.kernel.org/r/20211126040606.97836-1-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/percpu-internal.h
mm/percpu.c

index 639662c20c821be42a50a66f0a2ed1df013c93b3..411d1593ef238013e00b4ff38f3a815603171edb 100644 (file)
@@ -113,6 +113,24 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
        return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
 }
 
+#ifdef CONFIG_MEMCG_KMEM
+/**
+ * pcpu_obj_full_size - helper to calculate size of each accounted object
+ * @size: size of area to allocate in bytes
+ *
+ * For each accounted object there is an extra space which is used to store
+ * obj_cgroup membership. Charge it too.
+ *
+ * NOTE(review): the integer division below assumes @size is already a
+ * multiple of PCPU_MIN_ALLOC_SIZE (percpu requests are aligned before the
+ * memcg hooks run) — confirm at call sites, otherwise the tracking-pointer
+ * overhead is under-counted.
+ *
+ * Return: number of bytes to charge — the object replicated across all
+ * possible CPUs, plus one obj_cgroup pointer per PCPU_MIN_ALLOC_SIZE unit
+ * of @size for the membership bookkeeping.
+ */
+static inline size_t pcpu_obj_full_size(size_t size)
+{
+       size_t extra_size;
+
+       extra_size = size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
+
+       return size * num_possible_cpus() + extra_size;
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 #ifdef CONFIG_PERCPU_STATS
 
 #include <linux/spinlock.h>
index f5b2c2ea5a548adf4704ee7aacfdade4be3c1c13..4199a0604c32da1c95b0a2b70a53b1955379e559 100644 (file)
@@ -1635,7 +1635,7 @@ static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
        if (!objcg)
                return true;
 
-       if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
+       if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
                obj_cgroup_put(objcg);
                return false;
        }
@@ -1656,10 +1656,10 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 
                rcu_read_lock();
                mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-                               size * num_possible_cpus());
+                               pcpu_obj_full_size(size));
                rcu_read_unlock();
        } else {
-               obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+               obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
                obj_cgroup_put(objcg);
        }
 }
@@ -1676,11 +1676,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
                return;
        chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
 
-       obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+       obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
 
        rcu_read_lock();
        mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-                       -(size * num_possible_cpus()));
+                       -pcpu_obj_full_size(size));
        rcu_read_unlock();
 
        obj_cgroup_put(objcg);