Rebase locking/kcsan to locking/urgent
author	Thomas Gleixner <tglx@linutronix.de>
Thu, 11 Jun 2020 18:02:46 +0000 (20:02 +0200)
committer	Thomas Gleixner <tglx@linutronix.de>
Thu, 11 Jun 2020 18:02:46 +0000 (20:02 +0200)
Merge the state of the locking/kcsan branch from before the read/write_once()
and atomics modifications were merged.

Squash the fallout of the rebase on top of the read/write_once() and atomic
fallback work into the merge. The history of the original branch is
preserved in tag locking-kcsan-2020-06-02.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
25 files changed:
MAINTAINERS
Makefile
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/bitops.h
arch/x86/kernel/Makefile
arch/x86/kernel/e820.c
drivers/firmware/efi/libstub/Makefile
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/sched.h
include/linux/uaccess.h
init/init_task.c
init/main.c
kernel/Makefile
kernel/trace/Makefile
lib/Kconfig.debug
lib/Makefile
lib/usercopy.c
mm/Makefile
scripts/Makefile.lib
scripts/checkpatch.pl
tools/objtool/check.c

diff --cc MAINTAINERS
Simple merge
diff --cc Makefile
Simple merge
diff --cc arch/x86/Kconfig
Simple merge
diff --cc arch/x86/boot/Makefile
Simple merge
diff --cc arch/x86/entry/vdso/Makefile
Simple merge
diff --cc arch/x86/include/asm/bitops.h
Simple merge
diff --cc arch/x86/kernel/Makefile
Simple merge
diff --cc arch/x86/kernel/e820.c
Simple merge
diff --cc drivers/firmware/efi/libstub/Makefile
index cce4a7436052502769d1da47ae724431709cfc71,dd31237fba2e941b1ee194b9a69ea399aff10278..75daaf20374eec6539e960c0ed3367568994ebc1
@@@ -30,14 -30,12 +30,16 @@@ KBUILD_CFLAGS                      := $(cflags-y) -Os -DDI
                                   -D__NO_FORTIFY \
                                   $(call cc-option,-ffreestanding) \
                                   $(call cc-option,-fno-stack-protector) \
 +                                 $(call cc-option,-fno-addrsig) \
                                   -D__DISABLE_EXPORTS
  
 +# remove SCS flags from all objects in this directory
 +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
 +
  GCOV_PROFILE                  := n
+ # Sanitizer runtimes are unavailable and cannot be linked here.
  KASAN_SANITIZE                        := n
+ KCSAN_SANITIZE                        := n
  UBSAN_SANITIZE                        := n
  OBJECT_FILES_NON_STANDARD     := y
  
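Why KCSAN_SANITIZE := n is needed here: with KCSAN enabled the compiler
rewrites plain memory accesses into calls to runtime hooks, and the
freestanding EFI stub has nothing to link those hooks against. A minimal C
sketch of that dependency (illustrative only, not part of this commit;
instrumented_load() and shared are invented names, while __tsan_read4() is a
real KCSAN runtime entry point):

	/* Roughly what -fsanitize=thread makes of a plain 4-byte load.
	 * The hook below is provided by kernel/kcsan/, which cannot be
	 * linked into the EFI stub -- hence KCSAN_SANITIZE := n above.
	 */
	void __tsan_read4(void *ptr);	/* KCSAN runtime entry point */

	static int shared;		/* hypothetical shared variable */

	int instrumented_load(void)	/* hypothetical function */
	{
		__tsan_read4(&shared);	/* call inserted by the compiler */
		return shared;
	}
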
diff --cc include/linux/compiler-clang.h
Simple merge
diff --cc include/linux/compiler-gcc.h
Simple merge
diff --cc include/linux/compiler.h
index 33d3a2e5abab201ee7216237996ce1d092067b8a,cce2c92567b568062e70fda72bdf4f577203a68d..f09ebbf16562bb0c40d5f33d8fb30f23ad2ffce7
@@@ -230,63 -177,28 +230,93 @@@ void ftrace_likely_update(struct ftrace
  # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
  #endif
  
 -#include <uapi/linux/types.h>
 +/*
 + * Prevent the compiler from merging or refetching reads or writes. The
 + * compiler is also forbidden from reordering successive instances of
 + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 + * particular ordering. One way to make the compiler aware of ordering is to
 + * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 + * statements.
 + *
 + * These two macros will also work on aggregate data types like structs or
 + * unions.
 + *
 + * Their two major use cases are: (1) Mediating communication between
 + * process-level code and irq/NMI handlers, all running on the same CPU,
 + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 + * mutilate accesses that either do not require ordering or that interact
 + * with an explicit memory barrier or atomic instruction that provides the
 + * required ordering.
 + */
 +#include <asm/barrier.h>
 +#include <linux/kasan-checks.h>
+ #include <linux/kcsan-checks.h>
 -#define __READ_ONCE_SIZE                                              \
++/**
++ * data_race - mark an expression as containing intentional data races
++ *
++ * This data_race() macro is useful for situations in which data races
++ * should be forgiven.  One example is diagnostic code that accesses
++ * shared variables but is not a part of the core synchronization design.
++ *
++ * This macro *does not* affect normal code generation, but is a hint
++ * to tooling that data races here are to be ignored.
++ */
++#define data_race(expr)                                                       \
+ ({                                                                    \
 -      switch (size) {                                                 \
 -      case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
 -      case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
 -      case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
 -      case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
 -      default:                                                        \
 -              barrier();                                              \
 -              __builtin_memcpy((void *)res, (const void *)p, size);   \
 -              barrier();                                              \
 -      }                                                               \
++      __kcsan_disable_current();                                      \
++      ({                                                              \
++              __unqual_scalar_typeof(({ expr; })) __v = ({ expr; });  \
++              __kcsan_enable_current();                               \
++              __v;                                                    \
++      });                                                             \
+ })
  
-       __unqual_scalar_typeof(x) __x = __READ_ONCE(x);                 \
 +/*
 + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 + * atomicity or dependency ordering guarantees. Note that this may result
 + * in tears!
 + */
 +#define __READ_ONCE(x)        (*(const volatile __unqual_scalar_typeof(x) *)&(x))
 +
 +#define __READ_ONCE_SCALAR(x)                                         \
 +({                                                                    \
- #define __WRITE_ONCE(x, val)                          \
- do {                                                  \
-       *(volatile typeof(x) *)&(x) = (val);            \
++      typeof(x) *__xp = &(x);                                         \
++      __unqual_scalar_typeof(x) __x = data_race(__READ_ONCE(*__xp));  \
++      kcsan_check_atomic_read(__xp, sizeof(*__xp));                   \
 +      smp_read_barrier_depends();                                     \
 +      (typeof(x))__x;                                                 \
 +})
 +
 +#define READ_ONCE(x)                                                  \
 +({                                                                    \
 +      compiletime_assert_rwonce_type(x);                              \
 +      __READ_ONCE_SCALAR(x);                                          \
 +})
 +
- #define WRITE_ONCE(x, val)                            \
- do {                                                  \
-       compiletime_assert_rwonce_type(x);              \
-       __WRITE_ONCE(x, val);                           \
++#define __WRITE_ONCE(x, val)                                          \
++do {                                                                  \
++      *(volatile typeof(x) *)&(x) = (val);                            \
++} while (0)
++
++#define __WRITE_ONCE_SCALAR(x, val)                                   \
++do {                                                                  \
++      typeof(x) *__xp = &(x);                                         \
++      kcsan_check_atomic_write(__xp, sizeof(*__xp));                  \
++      data_race(({ __WRITE_ONCE(*__xp, val); 0; }));                  \
 +} while (0)
 +
++#define WRITE_ONCE(x, val)                                            \
++do {                                                                  \
++      compiletime_assert_rwonce_type(x);                              \
++      __WRITE_ONCE_SCALAR(x, val);                                    \
 +} while (0)
 +
  #ifdef CONFIG_KASAN
  /*
 - * We can't declare function 'inline' because __no_sanitize_address confilcts
 + * We can't declare function 'inline' because __no_sanitize_address conflicts
   * with inlining. Attempt to inline it may cause a build failure.
 - *    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 + *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
   * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
   */
  # define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
  #else
  # define __no_kasan_or_inline __always_inline
  #endif
  
- static __no_kasan_or_inline
+ #define __no_kcsan __no_sanitize_thread
+ #ifdef __SANITIZE_THREAD__
+ /*
+  * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
+  * compilation units where instrumentation is disabled. The attribute 'noinline'
+  * is required for older compilers, where implicit inlining of very small
+  * functions renders __no_sanitize_thread ineffective.
+  */
+ # define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
+ # define __no_sanitize_or_inline __no_kcsan_or_inline
+ #else
+ # define __no_kcsan_or_inline __always_inline
+ #endif
+ #ifndef __no_sanitize_or_inline
+ #define __no_sanitize_or_inline __always_inline
+ #endif
 -static __no_kcsan_or_inline
 -void __read_once_size(const volatile void *p, void *res, int size)
 -{
 -      kcsan_check_atomic_read(p, size);
 -      __READ_ONCE_SIZE;
 -}
 -
+ static __no_sanitize_or_inline
 -void __read_once_size_nocheck(const volatile void *p, void *res, int size)
 +unsigned long __read_once_word_nocheck(const void *addr)
  {
 -      __READ_ONCE_SIZE;
 -}
 -
 -static __no_kcsan_or_inline
 -void __write_once_size(volatile void *p, void *res, int size)
 -{
 -      kcsan_check_atomic_write(p, size);
 -
 -      switch (size) {
 -      case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
 -      case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
 -      case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
 -      case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
 -      default:
 -              barrier();
 -              __builtin_memcpy((void *)p, (const void *)res, size);
 -              barrier();
 -      }
 +      return __READ_ONCE(*(unsigned long *)addr);
  }
  
  /*
 - * Prevent the compiler from merging or refetching reads or writes. The
 - * compiler is also forbidden from reordering successive instances of
 - * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 - * particular ordering. One way to make the compiler aware of ordering is to
 - * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 - * statements.
 - *
 - * These two macros will also work on aggregate data types like structs or
 - * unions. If the size of the accessed data type exceeds the word size of
 - * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 - * fall back to memcpy(). There's at least two memcpy()s: one for the
 - * __builtin_memcpy() and then one for the macro doing the copy of variable
 - * - '__u' allocated on the stack.
 - *
 - * Their two major use cases are: (1) Mediating communication between
 - * process-level code and irq/NMI handlers, all running on the same CPU,
 - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 - * mutilate accesses that either do not require ordering or that interact
 - * with an explicit memory barrier or atomic instruction that provides the
 - * required ordering.
 + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
-  * word from memory atomically but without telling KASAN. This is usually
-  * used by unwinding code when walking the stack of a running process.
++ * word from memory atomically but without telling KASAN/KCSAN. This is
++ * usually used by unwinding code when walking the stack of a running process.
   */
 -#include <asm/barrier.h>
 -#include <linux/kasan-checks.h>
 -
 -#define __READ_ONCE(x, check)                                         \
 +#define READ_ONCE_NOCHECK(x)                                          \
  ({                                                                    \
 -      union { typeof(x) __val; char __c[1]; } __u;                    \
 -      if (check)                                                      \
 -              __read_once_size(&(x), __u.__c, sizeof(x));             \
 -      else                                                            \
 -              __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
 -      smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
 -      __u.__val;                                                      \
 +      unsigned long __x;                                              \
 +      compiletime_assert(sizeof(x) == sizeof(__x),                    \
 +              "Unsupported access size for READ_ONCE_NOCHECK().");    \
 +      __x = __read_once_word_nocheck(&(x));                           \
 +      smp_read_barrier_depends();                                     \
 +      (typeof(x))__x;                                                 \
  })
 -#define READ_ONCE(x) __READ_ONCE(x, 1)
 -
 -/*
 - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 - * to hide memory access from KASAN.
 - */
 -#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
  
  static __no_kasan_or_inline
  unsigned long read_word_at_a_time(const void *addr)
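The net effect of the compiler.h changes above: READ_ONCE()/WRITE_ONCE() are
now built from __READ_ONCE()/__WRITE_ONCE() plus KCSAN's
kcsan_check_atomic_*() hooks, and data_race() brackets an expression with
__kcsan_disable_current()/__kcsan_enable_current() so intentional races are
forgiven. A minimal usage sketch, assuming hypothetical variables done and
nr_events (example code, not from this commit):

	#include <linux/compiler.h>	/* READ_ONCE, WRITE_ONCE, data_race */
	#include <linux/printk.h>

	static int done;
	static unsigned long nr_events;

	void irq_side(void)			/* runs in IRQ context */
	{
		nr_events++;			/* sole writer; readers may race */
		WRITE_ONCE(done, 1);		/* marked write, checked as atomic */
	}

	void process_side(void)			/* process context, same CPU */
	{
		while (!READ_ONCE(done))	/* marked read, never fused or refetched */
			cpu_relax();		/* from <asm/processor.h> */
		/* Diagnostic snapshot: the race with nr_events++ is
		 * intentional, so data_race() keeps KCSAN from reporting it. */
		pr_info("events: %lu\n", data_race(nr_events));
	}
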
diff --cc include/linux/sched.h
Simple merge
diff --cc include/linux/uaccess.h
Simple merge
diff --cc init/init_task.c
Simple merge
diff --cc init/main.c
Simple merge
diff --cc kernel/Makefile
index c332eb9d4841add15ea0220033f6b915cb8db263,5d935b63f812abb9295b9cc5031de2643d2411ba..ce8716a04d0e91242a74c6daaa0f1de55a5c4f78
@@@ -103,7 -107,7 +107,8 @@@ obj-$(CONFIG_TRACEPOINTS) += trace
  obj-$(CONFIG_IRQ_WORK) += irq_work.o
  obj-$(CONFIG_CPU_PM) += cpu_pm.o
  obj-$(CONFIG_BPF) += bpf/
+ obj-$(CONFIG_KCSAN) += kcsan/
 +obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
  
  obj-$(CONFIG_PERF_EVENTS) += events/
  
diff --cc kernel/trace/Makefile
Simple merge
diff --cc lib/Kconfig.debug
Simple merge
diff --cc lib/Makefile
Simple merge
diff --cc lib/usercopy.c
Simple merge
diff --cc mm/Makefile
Simple merge
diff --cc scripts/Makefile.lib
Simple merge
diff --cc scripts/checkpatch.pl
Simple merge
diff --cc tools/objtool/check.c
index 63d65a7029005e66484c021b199933c96f630a18,a22272c819f30b536c24ef04aac4c6c595b44a03..5fbb90a80d2399b0cfcfbbbdddfbd688b494db69
@@@ -505,9 -477,30 +505,31 @@@ static const char *uaccess_safe_builtin
        "__asan_report_store4_noabort",
        "__asan_report_store8_noabort",
        "__asan_report_store16_noabort",
+       /* KCSAN */
+       "__kcsan_check_access",
+       "kcsan_found_watchpoint",
+       "kcsan_setup_watchpoint",
+       "kcsan_check_scoped_accesses",
+       "kcsan_disable_current",
+       "kcsan_enable_current_nowarn",
+       /* KCSAN/TSAN */
+       "__tsan_func_entry",
+       "__tsan_func_exit",
+       "__tsan_read_range",
+       "__tsan_write_range",
+       "__tsan_read1",
+       "__tsan_read2",
+       "__tsan_read4",
+       "__tsan_read8",
+       "__tsan_read16",
+       "__tsan_write1",
+       "__tsan_write2",
+       "__tsan_write4",
+       "__tsan_write8",
+       "__tsan_write16",
        /* KCOV */
        "write_comp_data",
 +      "check_kcov_mode",
        "__sanitizer_cov_trace_pc",
        "__sanitizer_cov_trace_const_cmp1",
        "__sanitizer_cov_trace_const_cmp2",