mm, slub: fix mismatch between reconstructed freelist depth and cnt
author Miaohe Lin <linmiaohe@huawei.com>
Mon, 18 Oct 2021 22:15:55 +0000 (15:15 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 27 Oct 2021 07:54:28 +0000 (09:54 +0200)
commit 899447f669da76cc3605665e1a95ee877bc464cc upstream.

If an object's reuse is delayed, it will be excluded from the
reconstructed freelist.  But we forgot to adjust cnt accordingly, so
there will be a mismatch between the reconstructed freelist depth and
cnt.  This will lead to free_debug_processing() complaining about the
freelist count or an incorrect slub inuse count.

Link: https://lkml.kernel.org/r/20210916123920.48704-3-linmiaohe@huawei.com
Fixes: 1066fbd3ea85 ("kasan, slub: fix handling of kasan_slab_free hook")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Bharata B Rao <bharata@linux.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/slub.c

index ca7143fe25b56d683d14d48aa3968753da5904cb..f5f830d8b2926dab336fb126dd82957b9e9fbdf4 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1455,7 +1455,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-                                          void **head, void **tail)
+                                          void **head, void **tail,
+                                          int *cnt)
 {
 
        void *object;
@@ -1490,6 +1491,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                        *head = object;
                        if (!*tail)
                                *tail = object;
+               } else {
+                       /*
+                        * Adjust the reconstructed freelist depth
+                        * accordingly if object's reuse is delayed.
+                        */
+                       --(*cnt);
                }
        } while (object != old_tail);
 
@@ -3049,7 +3056,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects, whose reuse must be delayed.
         */
-       if (slab_free_freelist_hook(s, &head, &tail))
+       if (slab_free_freelist_hook(s, &head, &tail, &cnt))
                do_slab_free(s, page, head, tail, cnt, addr);
 }