From eeb3160c2419e0f1045537acac7b19cba64112f4 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Tue, 22 Dec 2020 12:03:13 -0800
Subject: [PATCH] kasan, mm: rename kasan_poison_kfree

Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better
reflects what this annotation does. Also add a comment that explains the
PageSlab() check.

No functional changes.

Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810
Signed-off-by: Andrey Konovalov
Reviewed-by: Marco Elver
Tested-by: Vincenzo Frascino
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Branislav Rankov
Cc: Catalin Marinas
Cc: Dmitry Vyukov
Cc: Evgenii Stepanov
Cc: Kevin Brodsky
Cc: Vasily Gorbik
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/kasan.h | 16 ++++++++--------
 mm/kasan/common.c     | 40 +++++++++++++++++++++++-----------------
 mm/mempool.c          |  2 +-
 3 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9176849c49349..6f0c5d9aa43f2 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -176,6 +176,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_slab_free_mempool(ptr, ip);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 				       void *object, gfp_t flags);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -216,13 +223,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip);
-static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	if (kasan_enabled())
-		__kasan_poison_kfree(ptr, ip);
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip);
 static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
 {
@@ -261,6 +261,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 {
 	return false;
 }
+static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags)
 {
@@ -280,7 +281,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ae0130cf9de3d..d0f8d7a955cd9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
+		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
+	}
+}
+
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 					flags, true);
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr)))
diff --git a/mm/mempool.c b/mm/mempool.c
index 583a9865b1818..624ed51b060f0 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
-- 
2.39.5
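
Addendum (illustrative, not part of the patch): the new comment in
__kasan_slab_free_mempool() notes that kmalloc-backed mempool elements can be
!PageSlab() when kmalloc falls back onto page_alloc. A minimal module sketch
of that scenario follows. The module, function, and pool names and the
element size are assumptions chosen for illustration; the KASAN calls named
in the comments happen inside mm/mempool.c, not in this code.

/*
 * Sketch: a kmalloc-backed mempool whose elements exceed the largest
 * kmalloc cache, so kmalloc serves them from the page allocator and
 * the buffers are !PageSlab().
 */
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

static mempool_t *big_pool;

static int __init big_pool_init(void)
{
	void *elem;

	/*
	 * Filling the reserve at creation already runs
	 * kasan_poison_element() -> kasan_slab_free_mempool() for each
	 * reserved element; since the elements are page-backed, the
	 * poison_range() branch is taken, not ____kasan_slab_free().
	 */
	big_pool = mempool_create_kmalloc_pool(2, 2 * KMALLOC_MAX_CACHE_SIZE);
	if (!big_pool)
		return -ENOMEM;

	elem = mempool_alloc(big_pool, GFP_KERNEL);
	if (elem) {
		/*
		 * mempool_free() re-poisons the element the same way, but
		 * only if it lands back in the reserve (curr_nr < min_nr);
		 * otherwise it is simply kfreed.
		 */
		mempool_free(elem, big_pool);
	}

	return 0;
}

static void __exit big_pool_exit(void)
{
	mempool_destroy(big_pool);
}

module_init(big_pool_init);
module_exit(big_pool_exit);
MODULE_LICENSE("GPL");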