mm/memcg: unify swap and memsw page counters
author    Waiman Long <longman@redhat.com>
          Tue, 13 Oct 2020 23:52:56 +0000 (16:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 14 Oct 2020 01:38:30 +0000 (18:38 -0700)
The swap page counter is v2 only while memsw is v1 only.  As the v1 and v2
controllers cannot be active at the same time, there is no point in keeping
both swap and memsw page counters in mem_cgroup.  The previous patch made
sure that the memsw page counter is updated and accessed only in v1 code
paths.  So it is now safe to alias the v1 memsw page counter to the v2 swap
page counter.  This saves 14 longs in the size of mem_cgroup, a saving of
112 bytes on 64-bit archs.

While at it, also document which page counters are used in v1 and/or v2.
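For illustration only, a minimal standalone sketch of the aliasing follows.
It uses a cut-down page_counter_sketch stand-in rather than the kernel's
struct page_counter (whose real layout lives in include/linux/page_counter.h),
so the printed size is not the kernel's; it only shows why the union removes
one counter's worth of storage.

/*
 * Sketch, not kernel code: because a given memcg hierarchy is either v1
 * or v2, the v1-only memsw counter and the v2-only swap counter never
 * hold live state at the same time, so a union lets them share storage.
 */
#include <stdio.h>

struct page_counter_sketch {
	unsigned long usage;
	unsigned long min, low, high, max;
	/* ... the real struct page_counter has more fields ... */
};

struct mem_cgroup_sketch {
	struct page_counter_sketch memory;	/* Both v1 & v2 */

	union {
		struct page_counter_sketch swap;	/* v2 only */
		struct page_counter_sketch memsw;	/* v1 only */
	};

	struct page_counter_sketch kmem;	/* v1 only */
	struct page_counter_sketch tcpmem;	/* v1 only */
};

int main(void)
{
	/*
	 * With the union, swap and memsw occupy the same bytes, so the
	 * struct shrinks by sizeof(struct page_counter); for the real
	 * struct that is 14 longs, i.e. 14 * 8 = 112 bytes on 64-bit.
	 */
	printf("sketch size: %zu bytes\n", sizeof(struct mem_cgroup_sketch));
	return 0;
}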

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Chris Down <chris@chrisdown.name>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Yafang Shao <laoar.shao@gmail.com>
Link: https://lkml.kernel.org/r/20200914024452.19167-4-longman@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c

index d0b036123c6abd25f5a05d7c647c3c4cd083cbb4..6ef4a552e09d3ebfc0e1f0a0ca51d6efafdce480 100644 (file)
@@ -215,13 +215,16 @@ struct mem_cgroup {
        struct mem_cgroup_id id;
 
        /* Accounted resources */
-       struct page_counter memory;
-       struct page_counter swap;
+       struct page_counter memory;             /* Both v1 & v2 */
+
+       union {
+               struct page_counter swap;       /* v2 only */
+               struct page_counter memsw;      /* v1 only */
+       };
 
        /* Legacy consumer-oriented counters */
-       struct page_counter memsw;
-       struct page_counter kmem;
-       struct page_counter tcpmem;
+       struct page_counter kmem;               /* v1 only */
+       struct page_counter tcpmem;             /* v1 only */
 
        /* Range enforcement for interrupt charges */
        struct work_struct high_work;
index 962f8d649b83fb9b5c1e302307ad1bf7be6496e7..a0bfc92804b7f05b44452cd3b657fcfee724675b 100644 (file)
@@ -5295,13 +5295,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
                memcg->use_hierarchy = true;
                page_counter_init(&memcg->memory, &parent->memory);
                page_counter_init(&memcg->swap, &parent->swap);
-               page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
                page_counter_init(&memcg->tcpmem, &parent->tcpmem);
        } else {
                page_counter_init(&memcg->memory, NULL);
                page_counter_init(&memcg->swap, NULL);
-               page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
                page_counter_init(&memcg->tcpmem, NULL);
                /*
@@ -5430,7 +5428,6 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 
        page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
        page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
-       page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
        page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
        page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
        page_counter_set_min(&memcg->memory, 0);