kfence: always use static branches to guard kfence_alloc()
author Marco Elver <elver@google.com>
Fri, 5 Nov 2021 20:45:46 +0000 (13:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Nov 2021 20:30:43 +0000 (13:30 -0700)
Regardless of KFENCE mode (CONFIG_KFENCE_STATIC_KEYS: either using
static keys to gate allocations, or using a simple dynamic branch),
always use a static branch to avoid the dynamic branch in kfence_alloc()
if KFENCE was disabled at boot.

For CONFIG_KFENCE_STATIC_KEYS=n, this now avoids the dynamic branch if
KFENCE was disabled at boot.

To simplify, also unifies the location where kfence_allocation_gate is
read-checked to just be inline in kfence_alloc().

Link: https://lkml.kernel.org/r/20211019102524.2807208-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
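
As background for the change below: a static branch (jump label) is patched
in the instruction stream at runtime, so the disabled case costs a no-op
rather than a load and compare of a global. A minimal sketch of the pattern,
with made-up names (my_feature_key, do_feature_work) that do not appear in
this patch:

    #include <linux/static_key.h>

    /* Hypothetical slow path, only reached once the key is enabled. */
    static void do_feature_work(void) { }

    /* Key starts disabled; every branch site compiles to a no-op. */
    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    static inline void hot_path(void)
    {
            if (static_branch_unlikely(&my_feature_key))
                    do_feature_work();
    }

    static void enable_feature(void)
    {
            /* Run once, e.g. at init: patches all branch sites live. */
            static_branch_enable(&my_feature_key);
    }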
include/linux/kfence.h
mm/kfence/core.c

index 3fe6dd8a18c19607ad1116f3977929d7c86d856e..4b5e3679a72c78caf5a6aa2c185658328403fa2f 100644 (file)
@@ -14,6 +14,9 @@
 
 #ifdef CONFIG_KFENCE
 
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
 /*
  * We allocate an even number of pages, as it simplifies calculations to map
  * address to metadata indices; effectively, the very first page serves as an
@@ -22,13 +25,8 @@
 #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
 extern char *__kfence_pool;
 
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
-#else
-#include <linux/atomic.h>
 extern atomic_t kfence_allocation_gate;
-#endif
 
 /**
  * is_kfence_address() - check if an address belongs to KFENCE pool
@@ -116,13 +114,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
  */
 static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-       if (static_branch_unlikely(&kfence_allocation_key))
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+       if (!static_branch_unlikely(&kfence_allocation_key))
+               return NULL;
 #else
-       if (unlikely(!atomic_read(&kfence_allocation_gate)))
+       if (!static_branch_likely(&kfence_allocation_key))
+               return NULL;
 #endif
-               return __kfence_alloc(s, size, flags);
-       return NULL;
+       if (likely(atomic_read(&kfence_allocation_gate)))
+               return NULL;
+       return __kfence_alloc(s, size, flags);
 }
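
For readability, kfence_alloc() after this hunk, reconstructed from the
added lines above (the comments are editorial, not in the source):

    static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
    {
    #if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
            /* Key is off most of the time; toggled around each sample. */
            if (!static_branch_unlikely(&kfence_allocation_key))
                    return NULL;
    #else
            /* Key is enabled once in kfence_init() and stays on. */
            if (!static_branch_likely(&kfence_allocation_key))
                    return NULL;
    #endif
            /* Gate is non-zero (closed) except just after the timer resets it. */
            if (likely(atomic_read(&kfence_allocation_gate)))
                    return NULL;
            return __kfence_alloc(s, size, flags);
    }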
 
 /**
index 802905b1c89b52cf75ab8977863b53a667812771..09945784df9e656171b0dc40bb023f7ced26fad3 100644 (file)
@@ -104,10 +104,11 @@ struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
 
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-/* The static key to set up a KFENCE allocation. */
+/*
+ * The static key to set up a KFENCE allocation; or if static keys are not used
+ * to gate allocations, to avoid a load and compare if KFENCE is disabled.
+ */
 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
-#endif
 
 /* Gates the allocation, ensuring only one succeeds in a given period. */
 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
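
The gate starts closed (ATOMIC_INIT(1)) and is opened once per sample
interval by the kfence_timer work queued from kfence_init() below. A rough
sketch of that protocol, paraphrasing the existing toggle logic in this file
(sample_tick is a stand-in name; details such as waiting on the gate before
disabling the key again are elided):

    static void sample_tick(void)
    {
            /* Open the gate: the next kfence_alloc() caller claims it. */
            atomic_set(&kfence_allocation_gate, 0);
    #ifdef CONFIG_KFENCE_STATIC_KEYS
            /* Also unsuppress the fast path until one allocation passes,
             * after which the key is disabled again. */
            static_branch_enable(&kfence_allocation_key);
    #endif
            /* ... re-arm the delayed work for the next sample interval ... */
    }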
@@ -774,6 +775,8 @@ void __init kfence_init(void)
                return;
        }
 
+       if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
+               static_branch_enable(&kfence_allocation_key);
        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
@@ -866,12 +869,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
                return NULL;
        }
 
-       /*
-        * allocation_gate only needs to become non-zero, so it doesn't make
-        * sense to continue writing to it and pay the associated contention
-        * cost, in case we have a large number of concurrent allocations.
-        */
-       if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
+       if (atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;
 #ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
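
With the read check now inlined in kfence_alloc() (see the kfence.h hunk
above), the removed read-before-increment is no longer needed here: callers
only reach __kfence_alloc() after observing an open gate, so contended writes
to the gate stay rare. The effective post-patch sequence, as a simplified
composite of both files (gated_alloc() and guarded_alloc() are illustrative
stand-ins, not real symbols):

    static void *guarded_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

    static void *gated_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
    {
            /* Inlined in kfence_alloc(): cheap read, almost always bails. */
            if (likely(atomic_read(&kfence_allocation_gate)))
                    return NULL;
            /* In __kfence_alloc(): only the first increment per period wins. */
            if (atomic_inc_return(&kfence_allocation_gate) > 1)
                    return NULL;
            /* One caller per sample interval proceeds to a guarded object. */
            return guarded_alloc(s, size, flags);
    }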