arm64: atomics: lse: define ANDs in terms of ANDNOTs
author     Mark Rutland <mark.rutland@arm.com>
           Fri, 10 Dec 2021 15:14:08 +0000 (15:14 +0000)
committer  Catalin Marinas <catalin.marinas@arm.com>
           Tue, 14 Dec 2021 13:00:23 +0000 (13:00 +0000)
The FEAT_LSE atomic instructions include atomic bit-clear instructions
(`ldclr*` and `stclr*`) which can be used to directly implement ANDNOT
operations. Each AND op is implemented as a copy of the corresponding
ANDNOT op with a leading `mvn` instruction to apply a bitwise NOT to the
`i` argument.

As the compiler has no visibility of the `mvn`, this leads to less than
optimal code generation when generating `i` into a register. For
example, __lse_atomic_fetch_and(0xf, v) can be compiled to:

    mov     w1, #0xf
    mvn     w1, w1
    ldclral w1, w1, [x2]

This patch improves this by replacing the `mvn` with a NOT in C before the
inline assembly block, e.g.

    i = ~i;

This allows the compiler to generate `i` into a register more optimally,
e.g.

    mov     w1, #0xfffffff0
    ldclral w1, w1, [x2]
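
The same effect can be reproduced outside the kernel with a minimal,
hypothetical sketch (not part of this patch; it assumes an AArch64 toolchain
invoked with -march=armv8-a+lse so the assembler accepts `stclr`):

| /* Hypothetical standalone illustration, not kernel code. */
| static inline void clr_bits_asm_mvn(int i, int *p)
| {
| 	/* The NOT is hidden inside the asm, so the compiler emits mov + mvn. */
| 	asm volatile(
| 	"	mvn	%w[i], %w[i]\n"
| 	"	stclr	%w[i], %[p]"
| 	: [i] "+&r" (i), [p] "+Q" (*p)
| 	: "r" (p));
| }
|
| static inline void clr_bits_c_not(int i, int *p)
| {
| 	i = ~i;	/* The NOT is visible to the compiler and folds into the constant. */
| 	asm volatile(
| 	"	stclr	%w[i], %[p]"
| 	: [i] "+r" (i), [p] "+Q" (*p)
| 	: "r" (p));
| }

Comparing the code generated for clr_bits_asm_mvn(0xf, p) and
clr_bits_c_not(0xf, p) shows the mov/mvn pair collapsing into a single mov of
the inverted constant, as above.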

With this change the assembly for each AND op is identical to the
corresponding ANDNOT op (including barriers and clobbers), so I've
removed the inline assembly and rewritten each AND op in terms of the
corresponding ANDNOT op, e.g.

| static inline void __lse_atomic_and(int i, atomic_t *v)
| {
|  return __lse_atomic_andnot(~i, v);
| }
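
For reference, the ANDNOT ops being delegated to are the thin `stclr`/`ldclr*`
wrappers defined earlier in the same header; the non-value-returning one looks
roughly like the sketch below (shown here for context only, not part of this
diff, so the exact constraints may differ):

| static inline void __lse_atomic_andnot(int i, atomic_t *v)
| {
| 	/* Atomically clear in v->counter every bit that is set in i. */
| 	asm volatile(
| 	__LSE_PREAMBLE
| 	"	stclr	%w[i], %[v]"
| 	: [i] "+r" (i), [v] "+Q" (v->counter)
| 	: "r" (v));
| }

With the `~i` applied in C, __lse_atomic_and() therefore ends up issuing the
same single `stclr` as __lse_atomic_andnot(), with the inversion folded into
the constant generation.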

This is intended as an optimization and cleanup.
There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-4-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/atomic_lse.h

index 7454febb6d7784e8cdfe9c39699d1a0242300551..d707eafb76779cbf30b6ee415b2a2561af51ab3f 100644
@@ -102,26 +102,13 @@ ATOMIC_OP_ADD_SUB_RETURN(        , al, "memory")
 
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
-       asm volatile(
-       __LSE_PREAMBLE
-       "       mvn     %w[i], %w[i]\n"
-       "       stclr   %w[i], %[v]"
-       : [i] "+&r" (i), [v] "+Q" (v->counter)
-       : "r" (v));
+       return __lse_atomic_andnot(~i, v);
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)                           \
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)     \
 {                                                                      \
-       asm volatile(                                                   \
-       __LSE_PREAMBLE                                                  \
-       "       mvn     %w[i], %w[i]\n"                                 \
-       "       ldclr" #mb "    %w[i], %w[i], %[v]"                     \
-       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
-       : "r" (v)                                                       \
-       : cl);                                                          \
-                                                                       \
-       return i;                                                       \
+       return __lse_atomic_fetch_andnot##name(~i, v);                  \
 }
 
 ATOMIC_FETCH_OP_AND(_relaxed,   )
@@ -223,26 +210,13 @@ ATOMIC64_OP_ADD_SUB_RETURN(        , al, "memory")
 
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
-       asm volatile(
-       __LSE_PREAMBLE
-       "       mvn     %[i], %[i]\n"
-       "       stclr   %[i], %[v]"
-       : [i] "+&r" (i), [v] "+Q" (v->counter)
-       : "r" (v));
+       return __lse_atomic64_andnot(~i, v);
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)        \
 {                                                                      \
-       asm volatile(                                                   \
-       __LSE_PREAMBLE                                                  \
-       "       mvn     %[i], %[i]\n"                                   \
-       "       ldclr" #mb "    %[i], %[i], %[v]"                       \
-       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
-       : "r" (v)                                                       \
-       : cl);                                                          \
-                                                                       \
-       return i;                                                       \
+       return __lse_atomic64_fetch_andnot##name(~i, v);                \
 }
 
 ATOMIC64_FETCH_OP_AND(_relaxed,   )