ARC: atomics: Add compiler barrier to atomic operations...
author     Pavel Kozlov <pavel.kozlov@synopsys.com>
           Tue, 15 Aug 2023 15:11:36 +0000 (19:11 +0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 19 Sep 2023 10:28:04 +0000 (12:28 +0200)
commit 42f51fb24fd39cc547c086ab3d8a314cc603a91c upstream.

... to avoid unwanted gcc optimizations

SMP kernels fail to boot with commit 596ff4a09b89
("cpumask: re-introduce constant-sized cpumask optimizations").

|
| percpu: BUG: failure at mm/percpu.c:2981/pcpu_build_alloc_info()!
|

The write performed by the SCOND instruction in the atomic inline asm
is not made visible to the compiler: the counter is passed only as a
register operand and the asm has no memory clobber. As a result, gcc
mis-optimizes the nested loop that walks the cpumask in
pcpu_build_alloc_info().
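
To make the failure mode concrete, here is a minimal, hypothetical
user-space sketch (illustrative names and a bare ARC store rather than
the real LLOCK/SCOND loop; it only assembles with an ARC toolchain and
is not the kernel code). Without a "memory" clobber, gcc may assume the
asm leaves memory untouched and fold the second load of *p into the
first:

/* Hypothetical illustration: a store done inside inline asm through a
 * pointer that is only passed as a register operand. */
static inline void asm_store(int *p, int v)
{
	__asm__ __volatile__(
	"	st	%[v], [%[p]]	\n"
	:
	: [p] "r"(p), [v] "r"(v)
	: /* no "memory": the write to *p stays invisible to gcc */);
}

int stale_read(int *p)
{
	int before = *p;	/* gcc may keep *p cached in a register...   */
	asm_store(p, 42);	/* ...because nothing says memory changed    */
	int after = *p;		/* so this load can be folded into 'before' */

	return after - before;	/* may be "optimized" down to 0 */
}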

Fix this by adding a compiler barrier (a "memory" clobber in the inline asm).

Apparently the atomic ops used to get a memory clobber implicitly via
the surrounding smp_mb(). However, commit b64be6836993c431e
("ARC: atomics: implement relaxed variants") removed the smp_mb() for
the relaxed variants, but failed to add the explicit compiler barrier.
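
For context, a rough sketch of where the clobber used to come from
(assumed shape for illustration, not the exact kernel wrappers;
atomic_t and arch_atomic_add_return_relaxed() are the kernel's, and on
ARCv2 smp_mb() expands to something like "dmb 3" with a "memory"
clobber):

/* Assumed shape, for illustration only. */
#define sketch_smp_mb()	__asm__ __volatile__("dmb 3" : : : "memory")

/* Fully-ordered op: bracketed by smp_mb(), which is also a compiler
 * barrier, so gcc could not cache memory accesses across the atomic. */
static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
	int ret;

	sketch_smp_mb();	/* HW barrier + compiler barrier */
	ret = arch_atomic_add_return_relaxed(i, v);
	sketch_smp_mb();	/* HW barrier + compiler barrier */

	return ret;
}

/* The bare _relaxed variant has no such bracketing, so after commit
 * b64be6836993 it had neither an smp_mb() nor a compiler barrier around
 * its SCOND store; it needs its own "memory" clobber, which is what
 * this patch adds. */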

Link: https://github.com/foss-for-synopsys-dwc-arc-processors/linux/issues/135
Cc: <stable@vger.kernel.org> # v6.3+
Fixes: b64be6836993c43 ("ARC: atomics: implement relaxed variants")
Signed-off-by: Pavel Kozlov <pavel.kozlov@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
[vgupta: tweaked the changelog and added Fixes tag]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arc/include/asm/atomic-llsc.h
arch/arc/include/asm/atomic64-arcv2.h

diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d0e2efb720a91b8a7b3d0eb7d815a..5258cb81a16b4fbf4048d87b99c40d11b276894d 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)                       \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+       : "cc", "memory");                                              \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, asm_op)                           \
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)       \
        : [val] "=&r"   (val)                                           \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+       : "cc", "memory");                                              \
                                                                        \
        return val;                                                     \
 }
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)        \
          [orig] "=&r" (orig)                                           \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+       : "cc", "memory");                                              \
                                                                        \
        return orig;                                                    \
 }
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index c5a8010fdc97ddb0145dc02729aa37f74140a3cd..9089f34baac3b439f75c1eb5670f8c15c86530f6 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v)           \
        "       bnz     1b              \n"                             \
        : "=&r"(val)                                                    \
        : "r"(&v->counter), "ir"(a)                                     \
-       : "cc");                                                        \
+       : "cc", "memory");                                              \
 }                                                                      \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)                               \
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)   \
        "       bnz     1b              \n"                             \
        : [val] "=&r"(val)                                              \
        : "r"(&v->counter), "ir"(a)                                     \
-       : "cc");        /* memory clobber comes from smp_mb() */        \
+       : "cc", "memory");                                              \
                                                                        \
        return val;                                                     \
 }
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)    \
        "       bnz     1b              \n"                             \
        : "=&r"(orig), "=&r"(val)                                       \
        : "r"(&v->counter), "ir"(a)                                     \
-       : "cc");        /* memory clobber comes from smp_mb() */        \
+       : "cc", "memory");                                              \
                                                                        \
        return orig;                                                    \
 }
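
For reference, with this change applied the ATOMIC_OP macro from the
first hunk (arch/arc/include/asm/atomic-llsc.h) reads roughly as
follows; the LLOCK/SCOND retry loop is the unchanged context that the
hunk elides, and only the clobber list is new:

#define ATOMIC_OP(op, asm_op)						\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc", "memory");	/* "memory" clobber added by this patch */ \
}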