mbcache: don't reclaim used entries
author    Jan Kara <jack@suse.cz>
          Tue, 12 Jul 2022 10:54:20 +0000 (12:54 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 18 Jan 2023 10:41:55 +0000 (11:41 +0100)
[ Upstream commit 4225cbb33459e80f0a5ce0a1eb2f3acd3eaee19f ]

Do not let the shrinker reclaim entries that are currently in use by
somebody. Firstly, such entries are likely still useful. Secondly, we
will need to keep in-use entries alive to protect a pending increment
of the xattr block refcount.

CC: stable@vger.kernel.org
Fixes: 4e4b07fdbb82 ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220712105436.32204-1-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Stable-dep-of: a44e84a9b776 ("ext4: fix deadlock due to mbcache entry corruption")
Signed-off-by: Sasha Levin <sashal@kernel.org>
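
For context on the new "atomic_read(&entry->e_refcnt) > 2" test in the
diff below: at this point in mbcache's history, an entry that sits on
both the hash chain and the LRU list is pinned by exactly two internal
references, one per list, so any count above 2 can only come from an
outside user. A minimal sketch of that convention; the helper name
mb_cache_entry_in_use() is hypothetical and not part of fs/mbcache.c:

    #include <linux/atomic.h>
    #include <linux/mbcache.h>

    /*
     * Hypothetical helper illustrating the refcount convention: one
     * reference is held by the hash chain and one by the LRU list, so
     * e_refcnt > 2 can only come from an external user, e.g. ext4
     * holding the entry while it bumps an xattr block's refcount.
     */
    static inline bool mb_cache_entry_in_use(struct mb_cache_entry *entry)
    {
            return atomic_read(&entry->e_refcnt) > 2;
    }
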
fs/mbcache.c

index 97c54d3a2227693df2050109fc10a9fa7f5921ac..cfc28129fb6f1bde5230f7742c0ec21db26643df 100644
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
-               if (entry->e_referenced) {
+               if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
                        entry->e_referenced = 0;
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
                spin_unlock(&cache->c_list_lock);
                head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);
+               /* Now a reliable check if the entry didn't get used... */
+               if (atomic_read(&entry->e_refcnt) > 2) {
+                       hlist_bl_unlock(head);
+                       spin_lock(&cache->c_list_lock);
+                       list_add_tail(&entry->e_list, &cache->c_list);
+                       cache->c_entry_count++;
+                       continue;
+               }
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
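
The recheck added in the second hunk is "reliable" where the test in
the LRU scan above is only advisory: once c_list_lock is dropped,
another CPU can still look the entry up through the hash table and
take a reference, but only while holding hlist_bl_lock(head).
Re-reading e_refcnt under that same lock therefore cannot race with a
new user appearing, and an entry that did get used is put back on the
LRU (with c_entry_count restored) instead of being torn down. For
reference, a simplified sketch of mb_cache_entry_get() as it stands
around this commit (details approximate; mb_cache_entry_head() is the
existing helper, visible in the hunk above, that maps a key to its
hash chain):

    #include <linux/list_bl.h>
    #include <linux/mbcache.h>

    /* New references are handed out only under hlist_bl_lock(head). */
    struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache,
                                              u32 key, u64 value)
    {
            struct hlist_bl_node *node;
            struct hlist_bl_head *head;
            struct mb_cache_entry *entry;

            head = mb_cache_entry_head(cache, key);
            hlist_bl_lock(head);
            hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                    if (entry->e_key == key && entry->e_value == value) {
                            atomic_inc(&entry->e_refcnt);
                            goto out;
                    }
            }
            entry = NULL;
    out:
            hlist_bl_unlock(head);
            return entry;
    }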