arm64: make atomic helpers __always_inline
author		Arnd Bergmann <arnd@arndb.de>
		Fri, 8 Jan 2021 09:19:56 +0000 (10:19 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Wed, 13 Jan 2021 15:09:06 +0000 (15:09 +0000)
With UBSAN enabled and building with clang, there are occasional
warnings like:

WARNING: modpost: vmlinux.o(.text+0xc533ec): Section mismatch in reference from the function arch_atomic64_or() to the variable .init.data:numa_nodes_parsed
The function arch_atomic64_or() references
the variable __initdata numa_nodes_parsed.
This is often because arch_atomic64_or lacks a __initdata
annotation or the annotation of numa_nodes_parsed is wrong.

These warnings occur when a helper that operates on an __initdata
variable ends up not being inlined as intended. Mark the helpers as
__always_inline, along with the corresponding asm-generic wrappers.
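
To illustrate the failure mode, here is a hypothetical, self-contained
sketch with simplified names; in the real report the reference reaches
numa_nodes_parsed roughly via node_set() -> set_bit() ->
atomic_long_or() -> arch_atomic64_or():

/* Stand-ins for the kernel's section and inlining annotations. */
#define __initdata      __attribute__((__section__(".init.data")))
#define __init          __attribute__((__section__(".init.text")))
#define __always_inline inline __attribute__((__always_inline__))

static unsigned long nodes_parsed __initdata;

/*
 * "static inline" is only a hint: once UBSAN instrumentation bloats the
 * body, the compiler may emit an out-of-line copy in .text that still
 * references the .init.data variable -- the mismatch modpost reports.
 * Marking the helper __always_inline instead forces its body into the
 * __init caller, where a reference to .init.data is legitimate.
 */
static inline void mark_node_parsed(int nid)
{
	__atomic_fetch_or(&nodes_parsed, 1UL << nid, __ATOMIC_RELAXED);
}

void __init numa_parse_one(int nid)
{
	mark_node_parsed(nid);
}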

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210108092024.4034860-1-arnd@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/atomic.h
include/asm-generic/bitops/atomic.h

index 015ddffaf6caa3213813fcdcbb5601b1a3ec95d8..b56a4b2bc24864081198bc104ab6f26d6855201c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -17,7 +17,7 @@
 #include <asm/lse.h>
 
 #define ATOMIC_OP(op)                                                  \
-static inline void arch_##op(int i, atomic_t *v)                       \
+static __always_inline void arch_##op(int i, atomic_t *v)              \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)                                      \
-static inline int arch_##op##name(int i, atomic_t *v)                  \
+static __always_inline int arch_##op##name(int i, atomic_t *v)         \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)                                                        \
-static inline void arch_##op(long i, atomic64_t *v)                    \
+static __always_inline void arch_##op(long i, atomic64_t *v)           \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)                                    \
-static inline long arch_##op##name(long i, atomic64_t *v)              \
+static __always_inline long arch_##op##name(long i, atomic64_t *v)     \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
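
For reference, each instantiation of these macros now expands to a
forcibly inlined wrapper; ATOMIC_OP(atomic_add), for example, produces
(expansion shown for illustration):

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	/* __lse_ll_sc_body() dispatches to the LSE or LL/SC
	 * implementation depending on whether the CPU's LSE atomics
	 * are usable. */
	__lse_ll_sc_body(atomic_add, i, v);
}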
index dd90c9792909d1db39c2a8a52cc78216a034f1ba..0e7316a86240b8c309d6b49ba0d8c1663d2159e5 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
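
The word/mask split these helpers rely on comes from
include/linux/bits.h; a small standalone sketch of the arithmetic,
using userspace stand-ins for the kernel macros:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))   /* 64 on arm64 */
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

int main(void)
{
	/* set_bit(70, map) atomically ORs bit 6 into word 1 of the
	 * bitmap: prints "word 1, mask 0x40". */
	printf("word %zu, mask %#lx\n",
	       (size_t)BIT_WORD(70), (unsigned long)BIT_MASK(70));
	return 0;
}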