mm: oom: show unreclaimable slab info when unreclaimable slabs > user memory
author     Yang Shi <yang.s@alibaba-inc.com>
           Thu, 16 Nov 2017 01:32:07 +0000 (17:32 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 16 Nov 2017 02:21:01 +0000 (18:21 -0800)
The kernel may panic when an oom happens without a killable process.
Sometimes this is caused by huge unreclaimable slabs used by the kernel.

Although kdump could help debug such a problem, it is not available on
all architectures and it might malfunction sometimes.  And, since the
kernel has already panicked, it is worth capturing such information in
dmesg to aid troubleshooting.

Print out unreclaimable slab info (used size and total size) for caches
whose actual memory usage is not zero (num_objs * size != 0) when the
amount of unreclaimable slabs is greater than total user memory (LRU
pages).
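
For illustration only (not part of the patch), the trigger condition can
be approximated from userspace by summing the same LRU counters that the
new is_dump_unreclaim_slabs() helper (added below) reads via
global_node_page_state(); /proc/vmstat exposes them all as page counts.
A minimal sketch:

  /*
   * Userspace approximation of is_dump_unreclaim_slabs()
   * (illustration only, not part of the patch).  All /proc/vmstat
   * counters used here are page counts.
   */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      const char *lru_keys[] = {
          "nr_active_anon", "nr_inactive_anon",
          "nr_active_file", "nr_inactive_file",
          "nr_isolated_anon", "nr_isolated_file",
          "nr_unevictable",
      };
      char key[64];
      unsigned long val, nr_lru = 0, nr_slab_unrecl = 0;
      size_t i;
      FILE *f = fopen("/proc/vmstat", "r");

      if (!f) {
          perror("/proc/vmstat");
          return 1;
      }
      while (fscanf(f, "%63s %lu", key, &val) == 2) {
          /* sum the LRU counters, remember the unreclaimable slab count */
          for (i = 0; i < sizeof(lru_keys) / sizeof(lru_keys[0]); i++)
              if (!strcmp(key, lru_keys[i]))
                  nr_lru += val;
          if (!strcmp(key, "nr_slab_unreclaimable"))
              nr_slab_unrecl = val;
      }
      fclose(f);

      printf("lru=%lu pages, unreclaimable slab=%lu pages: %s\n",
             nr_lru, nr_slab_unrecl,
             nr_slab_unrecl > nr_lru ? "would dump" : "would not dump");
      return 0;
  }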

The output looks like:

  Unreclaimable slab info:
  Name                      Used          Total
  rpc_buffers               31KB         31KB
  rpc_tasks                  7KB          7KB
  ebitmap_node            1964KB       1964KB
  avtab_node              5024KB       5024KB
  xfs_buf                 1402KB       1402KB
  xfs_ili                  134KB        134KB
  xfs_efi_item             115KB        115KB
  xfs_efd_item             115KB        115KB
  xfs_buf_item             134KB        134KB
  xfs_log_item_desc        342KB        342KB
  xfs_trans               1412KB       1412KB
  xfs_ifork                212KB        212KB
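
Used is active_objs * size / 1024 and Total is num_objs * size / 1024, as
computed in dump_unreclaimable_slab() below.  As a rough userspace
analogue of that arithmetic (again an illustration, not part of the
patch), the same numbers can be derived from /proc/slabinfo:

  /*
   * Rough userspace analogue of the dump's arithmetic, applied to
   * /proc/slabinfo (illustration only, not part of the patch; reading
   * /proc/slabinfo typically requires root).  /proc/slabinfo does not
   * expose SLAB_RECLAIM_ACCOUNT, so every cache with a nonzero
   * footprint is printed, not just the unreclaimable ones.
   */
  #include <stdio.h>

  int main(void)
  {
      char line[512], name[64];
      unsigned long active_objs, num_objs, objsize;
      FILE *f = fopen("/proc/slabinfo", "r");

      if (!f) {
          perror("/proc/slabinfo");
          return 1;
      }
      printf("%-24s %10s %10s\n", "Name", "Used", "Total");
      while (fgets(line, sizeof(line), f)) {
          /* data lines: name active_objs num_objs objsize ... */
          if (sscanf(line, "%63s %lu %lu %lu",
                     name, &active_objs, &num_objs, &objsize) != 4)
              continue;       /* skip the two header lines */
          if (num_objs * objsize == 0)
              continue;       /* skip empty caches, as the patch does */
          printf("%-24s %8luKB %8luKB\n", name,
                 active_objs * objsize / 1024,
                 num_objs * objsize / 1024);
      }
      fclose(f);
      return 0;
  }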

[yang.s@alibaba-inc.com: v11]
Link: http://lkml.kernel.org/r/1507656303-103845-4-git-send-email-yang.s@alibaba-inc.com
Link: http://lkml.kernel.org/r/1507152550-46205-4-git-send-email-yang.s@alibaba-inc.com
Signed-off-by: Yang Shi <yang.s@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/oom_kill.c
mm/slab.h
mm/slab_common.c

index dee0f75c301337af62156d2ae46d5c5391cc6127..3023919970f7468ae89ebba14f59b8aa0cd53eaa 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,6 +44,7 @@
 
 #include <asm/tlb.h>
 #include "internal.h"
+#include "slab.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
@@ -161,6 +162,25 @@ static bool oom_unkillable_task(struct task_struct *p,
        return false;
 }
 
+/*
+ * Print out unreclaimable slab info when the amount of unreclaimable slabs
+ * is greater than all user memory (LRU pages).
+ */
+static bool is_dump_unreclaim_slabs(void)
+{
+       unsigned long nr_lru;
+
+       nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
+                global_node_page_state(NR_INACTIVE_ANON) +
+                global_node_page_state(NR_ACTIVE_FILE) +
+                global_node_page_state(NR_INACTIVE_FILE) +
+                global_node_page_state(NR_ISOLATED_ANON) +
+                global_node_page_state(NR_ISOLATED_FILE) +
+                global_node_page_state(NR_UNEVICTABLE);
+
+       return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
+}
+
 /**
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
@@ -420,10 +440,13 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
 
        cpuset_print_current_mems_allowed();
        dump_stack();
-       if (oc->memcg)
+       if (is_memcg_oom(oc))
                mem_cgroup_print_oom_info(oc->memcg, p);
-       else
+       else {
                show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
+               if (is_dump_unreclaim_slabs())
+                       dump_unreclaimable_slab();
+       }
        if (sysctl_oom_dump_tasks)
                dump_tasks(oc->memcg, oc->nodemask);
 }
index 86d7c7d860f92c3a46d505a4b327c76e34f5415a..45c586cefc11353d5a73e8f0f477d5c4ce6e708b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -506,6 +506,14 @@ void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
 void memcg_slab_stop(struct seq_file *m, void *p);
 int memcg_slab_show(struct seq_file *m, void *p);
 
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
+void dump_unreclaimable_slab(void);
+#else
+static inline void dump_unreclaimable_slab(void)
+{
+}
+#endif
+
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
index 9357353bcb6400ebcbb6c736f25da3fbe1bf8611..8f7f9f75d7eaf43e29941173b3befe9536fc7e46 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1280,6 +1280,40 @@ static int slab_show(struct seq_file *m, void *p)
        return 0;
 }
 
+void dump_unreclaimable_slab(void)
+{
+       struct kmem_cache *s, *s2;
+       struct slabinfo sinfo;
+
+       /*
+        * Acquiring slab_mutex here is risky since we don't want to sleep
+        * in the oom path.  But without holding the mutex we would risk a
+        * crash while walking the cache list.
+        * Use mutex_trylock to protect the traversal, and dump nothing if
+        * the mutex cannot be acquired.
+        */
+       if (!mutex_trylock(&slab_mutex)) {
+               pr_warn("excessive unreclaimable slab but cannot dump stats\n");
+               return;
+       }
+
+       pr_info("Unreclaimable slab info:\n");
+       pr_info("Name                      Used          Total\n");
+
+       list_for_each_entry_safe(s, s2, &slab_caches, list) {
+               if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
+                       continue;
+
+               get_slabinfo(s, &sinfo);
+
+               if (sinfo.num_objs > 0)
+                       pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
+                               (sinfo.active_objs * s->size) / 1024,
+                               (sinfo.num_objs * s->size) / 1024);
+       }
+       mutex_unlock(&slab_mutex);
+}
+
 #if defined(CONFIG_MEMCG)
 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
 {