mm: kmsan: call KMSAN hooks from SLUB code
author    Alexander Potapenko <glider@google.com>
          Thu, 15 Sep 2022 15:03:49 +0000 (17:03 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 3 Oct 2022 21:03:20 +0000 (14:03 -0700)
In order to report uninitialized memory coming from heap allocations, KMSAN
has to poison them unless they're created with __GFP_ZERO.

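For illustration, here is a minimal sketch (hypothetical driver-style code,
not part of this patch) of what this enables: reading a never-written field
of a kmalloc()ed object is reported, while a __GFP_ZERO allocation is not.

	#include <linux/printk.h>
	#include <linux/slab.h>

	struct foo {
		int written;
		int never_written;
	};

	static void foo_example(void)
	{
		/* Poisoned by kmsan_slab_alloc(): no __GFP_ZERO, no ctor. */
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
		/* __GFP_ZERO path: the object is unpoisoned instead. */
		struct foo *z = kzalloc(sizeof(*z), GFP_KERNEL);

		if (!f || !z)
			goto out;
		f->written = 1;
		if (f->never_written)	/* KMSAN report: uninit-value */
			pr_info("impossible\n");
		if (z->never_written)	/* no report: zero-initialized */
			pr_info("still zero\n");
	out:
		kfree(f);		/* kmsan_slab_free() re-poisons the object */
		kfree(z);
	}
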
Conveniently, the KMSAN hooks are needed in the same places where
init_on_alloc/init_on_free initialization is performed.

In addition, we apply __no_kmsan_checks to get_freepointer_safe() to
suppress reports when accessing freelist pointers that reside in freed
objects.

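The annotation behaves roughly as in this hypothetical helper (a sketch, not
taken from this patch): reads inside the annotated function are not reported,
and its return value is treated as initialized by callers.

	__no_kmsan_checks
	static inline unsigned long peek_maybe_uninit(const void *p)
	{
		/*
		 * Not reported even if *p is poisoned; the result is
		 * considered initialized when used by the caller.
		 */
		return *(const unsigned long *)p;
	}
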
Link: https://lkml.kernel.org/r/20220915150417.722975-16-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/kmsan.h
mm/kmsan/hooks.c
mm/slab.h
mm/slub.c

index b36bf3db835ee46eb9002eb41912bbb8821c686f..5c4e0079054e664dd4b55ebd447024621f8f0e1b 100644 (file)
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 
 struct page;
+struct kmem_cache;
 
 #ifdef CONFIG_KMSAN
 
@@ -48,6 +49,44 @@ void kmsan_free_page(struct page *page, unsigned int order);
  */
 void kmsan_copy_page_meta(struct page *dst, struct page *src);
 
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s:      slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags:  GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s:      slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr:   object pointer.
+ * @size:  object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
 /**
  * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
  * @start:     start of vmapped range.
@@ -114,6 +153,24 @@ static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
 {
 }
 
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+                                   gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+                                      gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
 static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
                                                  unsigned long end,
                                                  pgprot_t prot,
index 040111bb9f6a3f4ee883e61ceb5b9ad3e948aa1b..000703c563a4d2ec3f9f31eb51317d3c50b64eaf 100644 (file)
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
  * skipping effects of functions like memset() inside instrumented code.
  */
 
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
+{
+       if (unlikely(object == NULL))
+               return;
+       if (!kmsan_enabled || kmsan_in_runtime())
+               return;
+       /*
+        * There's a ctor or this is an RCU cache - do nothing. The memory
+        * status hasn't changed since last use.
+        */
+       if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
+               return;
+
+       kmsan_enter_runtime();
+       if (flags & __GFP_ZERO)
+               kmsan_internal_unpoison_memory(object, s->object_size,
+                                              KMSAN_POISON_CHECK);
+       else
+               kmsan_internal_poison_memory(object, s->object_size, flags,
+                                            KMSAN_POISON_CHECK);
+       kmsan_leave_runtime();
+}
+
+void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+       if (!kmsan_enabled || kmsan_in_runtime())
+               return;
+
+       /* RCU slabs could be legally used after free within the RCU period */
+       if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
+               return;
+       /*
+        * If there's a constructor, freed memory must remain in the same state
+        * until the next allocation. We cannot save its state to detect
+        * use-after-free bugs, instead we just keep it unpoisoned.
+        */
+       if (s->ctor)
+               return;
+       kmsan_enter_runtime();
+       kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
+                                    KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+       kmsan_leave_runtime();
+}
+
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+       if (unlikely(ptr == NULL))
+               return;
+       if (!kmsan_enabled || kmsan_in_runtime())
+               return;
+       kmsan_enter_runtime();
+       if (flags & __GFP_ZERO)
+               kmsan_internal_unpoison_memory((void *)ptr, size,
+                                              /*checked*/ true);
+       else
+               kmsan_internal_poison_memory((void *)ptr, size, flags,
+                                            KMSAN_POISON_CHECK);
+       kmsan_leave_runtime();
+}
+
+void kmsan_kfree_large(const void *ptr)
+{
+       struct page *page;
+
+       if (!kmsan_enabled || kmsan_in_runtime())
+               return;
+       kmsan_enter_runtime();
+       page = virt_to_head_page((void *)ptr);
+       KMSAN_WARN_ON(ptr != page_address(page));
+       kmsan_internal_poison_memory((void *)ptr,
+                                    PAGE_SIZE << compound_order(page),
+                                    GFP_KERNEL,
+                                    KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+       kmsan_leave_runtime();
+}
+
 static unsigned long vmalloc_shadow(unsigned long addr)
 {
        return (unsigned long)kmsan_get_metadata((void *)addr,
index 4ec82bec15ecd3b4f50e67bdde6a954bc50781df..9d0afd2985df7e3e9b40812655967f7c181515e6 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -729,6 +729,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
                        memset(p[i], 0, s->object_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
+               kmsan_slab_alloc(s, p[i], flags);
        }
 
        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
index 6953c3367bc2003f2f06a31e4c5bd0c0055e058e..ce8310e131b34d5f291eb461e1cf68c40e126ea7 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kasan.h>
+#include <linux/kmsan.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -359,6 +360,17 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
        prefetchw(object + s->offset);
 }
 
+/*
+ * When running under KMSAN, get_freepointer_safe() may return an uninitialized
+ * pointer value in the case the current thread loses the race for the next
+ * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
+ * slab_alloc_node() will fail, so the uninitialized value won't be used, but
+ * KMSAN will still check all arguments of cmpxchg because of imperfect
+ * handling of inline assembly.
+ * To work around this problem, we apply __no_kmsan_checks to ensure that
+ * get_freepointer_safe() returns initialized memory.
+ */
+__no_kmsan_checks
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
        unsigned long freepointer_addr;
@@ -1709,6 +1721,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
        ptr = kasan_kmalloc_large(ptr, size, flags);
        /* As ptr might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ptr, size, 1, flags);
+       kmsan_kmalloc_large(ptr, size, flags);
        return ptr;
 }
 
@@ -1716,12 +1729,14 @@ static __always_inline void kfree_hook(void *x)
 {
        kmemleak_free(x);
        kasan_kfree_large(x);
+       kmsan_kfree_large(x);
 }
 
 static __always_inline bool slab_free_hook(struct kmem_cache *s,
                                                void *x, bool init)
 {
        kmemleak_free_recursive(x, s->flags);
+       kmsan_slab_free(s, x);
 
        debug_check_no_locks_freed(x, s->object_size);
 
@@ -5941,6 +5956,7 @@ static char *create_unique_id(struct kmem_cache *s)
        p += sprintf(p, "%07u", s->size);
 
        BUG_ON(p > name + ID_STR_LENGTH - 1);
+       kmsan_unpoison_memory(name, p - name);
        return name;
 }
 
@@ -6042,6 +6058,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
        al->name = name;
        al->next = alias_list;
        alias_list = al;
+       kmsan_unpoison_memory(al, sizeof(*al));
        return 0;
 }