arm64: atomics: lse: define RETURN ops in terms of FETCH ops
author	Mark Rutland <mark.rutland@arm.com>
Fri, 10 Dec 2021 15:14:10 +0000 (15:14 +0000)
committer	Catalin Marinas <catalin.marinas@arm.com>
Tue, 14 Dec 2021 13:00:33 +0000 (13:00 +0000)
The FEAT_LSE atomic instructions include LD* instructions which return
the original value of a memory location and can be used to directly
implement FETCH operations. Each RETURN op is implemented as a copy of
the corresponding FETCH op with a trailing instruction to generate the
new value of the memory location. We only directly implement
*_fetch_add*(), for which we have a trailing `add` instruction.
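
For reference, the fully-ordered FETCH op boils down to a single LDADDAL
which hands back the old value. A simplified sketch, modelled on the
operand pattern of the asm removed in the diff below (not the exact
kernel macro):

| static inline int __lse_atomic_fetch_add(int i, atomic_t *v)
| {
|       int old;
|
|       asm volatile(
|       __LSE_PREAMBLE
|       "       ldaddal %w[i], %w[old], %[v]"   /* old = *v; *v += i */
|       : [v] "+Q" (v->counter), [old] "=r" (old)
|       : [i] "r" (i)
|       : "memory");
|
|       return old;
| }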

As the compiler has no visibility of the `add`, this leads to less than
optimal code generation when consuming the result.

For example, the compiler cannot constant-fold the addition into later
operations, and currently GCC 11.1.0 will compile:

       return __lse_atomic_sub_return(1, v) == 0;

As:

mov     w1, #0xffffffff
ldaddal w1, w2, [x0]
add     w1, w1, w2
cmp     w1, #0x0
cset    w0, eq  // eq = none
ret

This patch improves this by replacing the `add` with C addition after
the inline assembly block, e.g.

ret += i;

This allows the compiler to manipulate `i`, which in turn permits it to
merge the `add` and `cmp` for the above, e.g.

mov     w1, #0xffffffff
ldaddal w1, w1, [x0]
cmp     w1, #0x1
cset    w0, eq  // eq = none
ret

With this change, the inline assembly for each RETURN op is identical to
that of the corresponding FETCH op (including barriers and clobbers), so
I've removed the inline assembly and rewritten each RETURN op in terms of
the corresponding FETCH op, e.g.

| static inline int __lse_atomic_add_return(int i, atomic_t *v)
| {
|       return __lse_atomic_fetch_add(i, v) + i;
| }
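
For instance, the _acquire instance of the updated
ATOMIC_OP_ADD_SUB_RETURN() macro in the diff below expands
(illustratively) to:

| static inline int __lse_atomic_add_return_acquire(int i, atomic_t *v)
| {
|       return __lse_atomic_fetch_add_acquire(i, v) + i;
| }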

The new construction does not adversely affect the common case, and
before and after this patch GCC 11.1.0 can compile:

__lse_atomic_add_return(i, v)

As:

ldaddal w0, w2, [x1]
add     w0, w0, w2

... while having the freedom to do better elsewhere.

This is intended as an optimization and cleanup.
There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index e4c5c4c34ce60c3134852f6845fed74693eee971..d955ade5df7c0b401eec0d9bb3883be4e63f2f9f 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -75,31 +75,21 @@ ATOMIC_FETCH_OP_SUB(        )
 
 #undef ATOMIC_FETCH_OP_SUB
 
-#define ATOMIC_OP_ADD_SUB_RETURN(name, mb, cl...)                      \
+#define ATOMIC_OP_ADD_SUB_RETURN(name)                                 \
 static inline int __lse_atomic_add_return##name(int i, atomic_t *v)    \
 {                                                                      \
-       u32 tmp;                                                        \
-                                                                       \
-       asm volatile(                                                   \
-       __LSE_PREAMBLE                                                  \
-       "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
-       "       add     %w[i], %w[i], %w[tmp]"                          \
-       : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
-       : "r" (v)                                                       \
-       : cl);                                                          \
-                                                                       \
-       return i;                                                       \
+       return __lse_atomic_fetch_add##name(i, v) + i;                  \
 }                                                                      \
                                                                        \
 static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)    \
 {                                                                      \
-       return __lse_atomic_add_return##name(-i, v);                    \
+       return __lse_atomic_fetch_sub(i, v) - i;                        \
 }
 
-ATOMIC_OP_ADD_SUB_RETURN(_relaxed,   )
-ATOMIC_OP_ADD_SUB_RETURN(_acquire,  a, "memory")
-ATOMIC_OP_ADD_SUB_RETURN(_release,  l, "memory")
-ATOMIC_OP_ADD_SUB_RETURN(        , al, "memory")
+ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC_OP_ADD_SUB_RETURN(_release)
+ATOMIC_OP_ADD_SUB_RETURN(        )
 
 #undef ATOMIC_OP_ADD_SUB_RETURN
 
@@ -186,31 +176,21 @@ ATOMIC64_FETCH_OP_SUB(        )
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-#define ATOMIC64_OP_ADD_SUB_RETURN(name, mb, cl...)                    \
+#define ATOMIC64_OP_ADD_SUB_RETURN(name)                               \
 static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 {                                                                      \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile(                                                   \
-       __LSE_PREAMBLE                                                  \
-       "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
-       "       add     %[i], %[i], %x[tmp]"                            \
-       : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
-       : "r" (v)                                                       \
-       : cl);                                                          \
-                                                                       \
-       return i;                                                       \
+       return __lse_atomic64_fetch_add##name(i, v) + i;                \
 }                                                                      \
                                                                        \
 static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
 {                                                                      \
-       return __lse_atomic64_add_return##name(-i, v);                  \
+       return __lse_atomic64_fetch_sub##name(i, v) - i;                \
 }
 
-ATOMIC64_OP_ADD_SUB_RETURN(_relaxed,   )
-ATOMIC64_OP_ADD_SUB_RETURN(_acquire,  a, "memory")
-ATOMIC64_OP_ADD_SUB_RETURN(_release,  l, "memory")
-ATOMIC64_OP_ADD_SUB_RETURN(        , al, "memory")
+ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC64_OP_ADD_SUB_RETURN(_release)
+ATOMIC64_OP_ADD_SUB_RETURN(        )
 
 #undef ATOMIC64_OP_ADD_SUB_RETURN