mm/list_lru.c: fix list_lru_count_node() to be race free
author		Sahitya Tummala <stummala@codeaurora.org>
		Mon, 10 Jul 2017 22:49:57 +0000 (15:49 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 10 Jul 2017 23:32:33 +0000 (16:32 -0700)
list_lru_count_node() iterates over all memcgs to get the total number of
entries on the node, but it can race with memcg_drain_all_list_lrus(),
which migrates the entries from a dead cgroup to another.  As a result,
list_lru_count_node() can return an incorrect number of entries.
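To make the window concrete, here is a simplified sketch (not the verbatim
kernel code: per_memcg_count() and drain_node() are stand-ins for
__list_lru_count_one() and the memcg_drain_*() internals, reduced to the
interleaving that matters):

	/*
	 * Reader: each per-memcg read happens under nlru->lock, but the
	 * lock is dropped between iterations, so the sum as a whole is
	 * not atomic.
	 */
	unsigned long count_node(struct list_lru *lru, int nid)
	{
		long count = 0;
		int idx;

		count += per_memcg_count(lru, nid, -1);	/* root memcg */
		for_each_memcg_cache_index(idx)
			count += per_memcg_count(lru, nid, idx);
		return count;
	}

	/*
	 * Drain: a dead memcg's entries are spliced onto the destination
	 * list under nlru->lock.  If this runs after the reader has
	 * counted the source list but before it reaches the destination,
	 * the spliced entries are counted twice; in the opposite order
	 * they are missed entirely.
	 */
	static void drain_node(struct list_lru_node *nlru,
			       struct list_lru_one *src,
			       struct list_lru_one *dst)
	{
		spin_lock(&nlru->lock);
		list_splice_init(&src->list, &dst->list);
		dst->nr_items += src->nr_items;
		src->nr_items = 0;
		spin_unlock(&nlru->lock);
	}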

Fix this by keeping track of the number of entries per node and simply
returning it from list_lru_count_node().
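The shape of the fix, distilled from the hunks below (a sketch, not the
full patch): every mutation already runs under nlru->lock, so a per-node
total can be maintained there for free.

	spin_lock(&nlru->lock);
	list_add_tail(item, &l->list);
	l->nr_items++;		/* per-memcg count, as before */
	nlru->nr_items++;	/* new: per-node total across all memcgs */
	spin_unlock(&nlru->lock);

	/* Reader: one word-sized load replaces the multi-step sum, so a
	 * concurrent drain can no longer be observed halfway through. */
	return lru->node[nid].nr_items;

Note that the reader takes no lock at all: a drain only moves entries
between per-memcg lists on the same node, so the per-node total is
unaffected by it, and a plain read of a long is adequate for a count that
is inherently approximate.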

Link: http://lkml.kernel.org/r/1498707555-30525-1-git-send-email-stummala@codeaurora.org
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Alexander Polakov <apolyakov@beget.ru>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/list_lru.h
mm/list_lru.c

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f2a9a291112eb7bbdc3eb28e9fba8ad4a4..fa7fd03cb5f964c4be4b74f435c2fdb81ba58936 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg   *memcg_lrus;
 #endif
+       long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e31edd3b0609014a3adcf4fb4e200a0c0e..7a40fa2be858acbc79cc79d887c1af575a3d2026 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
+               nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
+               nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-       long count = 0;
-       int memcg_idx;
+       struct list_lru_node *nlru;
 
-       count += __list_lru_count_one(lru, nid, -1);
-       if (list_lru_memcg_aware(lru)) {
-               for_each_memcg_cache_index(memcg_idx)
-                       count += __list_lru_count_one(lru, nid, memcg_idx);
-       }
-       return count;
+       nlru = &lru->node[nid];
+       return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -226,6 +223,7 @@ restart:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        isolated++;
+                       nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to