mm: fix global NR_SLAB_.*CLAIMABLE counter reads
author     Johannes Weiner <hannes@cmpxchg.org>
           Thu, 10 Aug 2017 22:23:31 +0000 (15:23 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 10 Aug 2017 22:54:06 +0000 (15:54 -0700)
As Tetsuo points out:
 "Commit 385386cff4c6 ("mm: vmstat: move slab statistics from zone to
  node counters") broke "Slab:" field of /proc/meminfo . It shows nearly
  0kB"

In addition to /proc/meminfo, this problem also affects the slab
counters in OOM/allocation-failure info dumps, can cause early -ENOMEM
from overcommit protection, and can miscalculate image size
requirements during suspend-to-disk.

This is because the patch in question switched the slab counters from
the zone level to the node level, but forgot to update the global
accessor functions to read the aggregate node data instead of the
aggregate zone data.

Use global_node_page_state() to access the global slab counters.
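
For context, the two accessors read from different per-item counter
arrays. A simplified sketch of the helpers in include/linux/vmstat.h
around this series (the real ones clamp negative values only under
CONFIG_SMP; per-cpu drift handling is elided):

  /* Zone-level counters, indexed by enum zone_stat_item */
  static inline unsigned long global_page_state(enum zone_stat_item item)
  {
          long x = atomic_long_read(&vm_zone_stat[item]);

          return x < 0 ? 0 : x;
  }

  /* Node-level counters, indexed by enum node_stat_item */
  static inline unsigned long global_node_page_state(enum node_stat_item item)
  {
          long x = atomic_long_read(&vm_node_stat[item]);

          return x < 0 ? 0 : x;
  }

Because C silently converts between enum types, passing the now
node-level NR_SLAB_RECLAIMABLE to the zone-level accessor still
compiles; it simply indexes vm_zone_stat at whatever slot happens to
share that numeric value and returns an unrelated (here, near-zero)
counter, which is why the breakage showed up at runtime rather than at
build time.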

Fixes: 385386cff4c6 ("mm: vmstat: move slab statistics from zone to node counters")
Link: http://lkml.kernel.org/r/20170801134256.5400-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Stefan Agner <stefan@agner.ch>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/meminfo.c
kernel/power/snapshot.c
mm/page_alloc.c
mm/util.c

index 8a428498d6b21f08c8c26ef184ff9f4332b5cdd0..509a61668d902b84f6756e2ed1bcb22a6d7020a5 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_FILE_MAPPED));
        show_val_kb(m, "Shmem:          ", i.sharedram);
        show_val_kb(m, "Slab:           ",
-                   global_page_state(NR_SLAB_RECLAIMABLE) +
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 
        show_val_kb(m, "SReclaimable:   ",
-                   global_page_state(NR_SLAB_RECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE));
        show_val_kb(m, "SUnreclaim:     ",
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
index 222317721c5a09291c6b78fc839e722b2196b177..0972a8e09d082d99c7f197cbe6bd4fdb6475ba33 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
        unsigned long size;
 
-       size = global_page_state(NR_SLAB_RECLAIMABLE)
+       size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
index fc32aa81f3593537cc2b11d5f63b5c5f517097a4..626a430e32d1d1880eca953753ab2166a0a48d44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
-       available += global_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+                        wmark_low);
 
        if (available < 0)
                available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_FILE_DIRTY),
                global_node_page_state(NR_WRITEBACK),
                global_node_page_state(NR_UNSTABLE_NFS),
-               global_page_state(NR_SLAB_RECLAIMABLE),
-               global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_node_page_state(NR_SLAB_RECLAIMABLE),
+               global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
index 7b07ec852e01fa931b2b302e8df5cff9f17f62d6..9ecddf568fe30e5cf1fba6db8eda3b7abe96d379 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += global_page_state(NR_SLAB_RECLAIMABLE);
+               free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
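
As a quick sanity check after applying the series, a small userspace
program (hypothetical, not part of the patch) can confirm that "Slab:"
in /proc/meminfo once again tracks the sum of "SReclaimable:" and
"SUnreclaim:" instead of reading near zero:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Return the value in kB for a given /proc/meminfo key, 0 if absent. */
  static unsigned long meminfo_kb(const char *key)
  {
          char line[256];
          unsigned long val = 0;
          FILE *f = fopen("/proc/meminfo", "r");

          if (!f)
                  return 0;
          while (fgets(line, sizeof(line), f)) {
                  if (!strncmp(line, key, strlen(key))) {
                          sscanf(line + strlen(key), " %lu", &val);
                          break;
                  }
          }
          fclose(f);
          return val;
  }

  int main(void)
  {
          unsigned long slab = meminfo_kb("Slab:");
          unsigned long sum = meminfo_kb("SReclaimable:") +
                              meminfo_kb("SUnreclaim:");

          printf("Slab: %lu kB, SReclaimable + SUnreclaim: %lu kB\n",
                 slab, sum);
          /* The fields are sampled separately, so allow minor drift. */
          return (slab == 0 && sum > 0) ? EXIT_FAILURE : EXIT_SUCCESS;
  }

On an affected kernel the broken reads show up as Slab near 0 kB while
the per-node counters still carry the real values; with the fix, all
three fields are derived from the same node-level data.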