arm64: atomics: format whitespace consistently
author     Mark Rutland <mark.rutland@arm.com>
           Fri, 10 Dec 2021 15:14:06 +0000 (15:14 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 18 Jan 2023 10:42:05 +0000 (11:42 +0100)
[ Upstream commit ad94e473785ce8fedd120f067762a2d017385dbc ]

The code for the atomic ops is formatted inconsistently, and while this
is not a functional problem, it is rather distracting when working on
them.

Some ops have consistent indentation, e.g.

| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                           \
| static inline int __lse_atomic_add_return##name(int i, atomic_t *v)     \
| {                                                                       \
|         u32 tmp;                                                        \
|                                                                         \
|         asm volatile(                                                   \
|         __LSE_PREAMBLE                                                  \
|         "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
|         "       add     %w[i], %w[i], %w[tmp]"                          \
|         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
|         : "r" (v)                                                       \
|         : cl);                                                          \
|                                                                         \
|         return i;                                                       \
| }

While others have negative indentation for some lines, and/or have
misaligned trailing backslashes, e.g.

| static inline void __lse_atomic_##op(int i, atomic_t *v)                        \
| {                                                                       \
|         asm volatile(                                                   \
|         __LSE_PREAMBLE                                                  \
| "       " #asm_op "     %w[i], %[v]\n"                                  \
|         : [i] "+r" (i), [v] "+Q" (v->counter)                           \
|         : "r" (v));                                                     \
| }

This patch makes the indentation consistent and also aligns the trailing
backslashes. This makes the code easier to read for those (like myself)
who are easily distracted by these inconsistencies.
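
For comparison, here is a sketch of that second example as it reads once
this patch is applied (mirroring the __lse_atomic_##op hunk in
atomic_lse.h below):

| static inline void __lse_atomic_##op(int i, atomic_t *v)               \
| {                                                                       \
|         asm volatile(                                                   \
|         __LSE_PREAMBLE                                                  \
|         "       " #asm_op "     %w[i], %[v]\n"                          \
|         : [i] "+r" (i), [v] "+Q" (v->counter)                           \
|         : "r" (v));                                                     \
| }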

This is intended as a cleanup; there should be no functional change as a
result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Stable-dep-of: 031af50045ea ("arm64: cmpxchg_double*: hazard against entire exchange variable")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 7b012148bfd6c1ccbd6f47a437ca1323d92428ed..f5743c911303ad22b2f661c46ad7690c3105a5fb 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v)                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %w0, %2\n"                                              \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stxr    %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b\n")                                             \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ldxr    %w0, %2\n"                                      \
+       "       " #asm_op "     %w0, %w0, %w3\n"                        \
+       "       stxr    %w1, %w0, %2\n"                                 \
+       "       cbnz    %w1, 1b\n")                                     \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i));                             \
 }
@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)                      \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %2\n"                                        \
-"1:    ld" #acq "xr    %w0, %2\n"                                      \
-"      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      st" #rel "xr    %w1, %w0, %2\n"                                 \
-"      cbnz    %w1, 1b\n"                                              \
-"      " #mb )                                                         \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ld" #acq "xr    %w0, %2\n"                              \
+       "       " #asm_op "     %w0, %w0, %w3\n"                        \
+       "       st" #rel "xr    %w1, %w0, %2\n"                         \
+       "       cbnz    %w1, 1b\n"                                      \
+       "       " #mb )                                                 \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)                 \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %3\n"                                        \
-"1:    ld" #acq "xr    %w0, %3\n"                                      \
-"      " #asm_op "     %w1, %w0, %w4\n"                                \
-"      st" #rel "xr    %w2, %w1, %3\n"                                 \
-"      cbnz    %w2, 1b\n"                                              \
-"      " #mb )                                                         \
+       "       prfm    pstl1strm, %3\n"                                \
+       "1:     ld" #acq "xr    %w0, %3\n"                              \
+       "       " #asm_op "     %w1, %w0, %w4\n"                        \
+       "       st" #rel "xr    %w2, %w1, %3\n"                         \
+       "       cbnz    %w2, 1b\n"                                      \
+       "       " #mb )                                                 \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)                               \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %0, %2\n"                                               \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      stxr    %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b")                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ldxr    %0, %2\n"                                       \
+       "       " #asm_op "     %0, %0, %3\n"                           \
+       "       stxr    %w1, %0, %2\n"                                  \
+       "       cbnz    %w1, 1b")                                       \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i));                             \
 }
@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)                \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %2\n"                                        \
-"1:    ld" #acq "xr    %0, %2\n"                                       \
-"      " #asm_op "     %0, %0, %3\n"                                   \
-"      st" #rel "xr    %w1, %0, %2\n"                                  \
-"      cbnz    %w1, 1b\n"                                              \
-"      " #mb )                                                         \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ld" #acq "xr    %0, %2\n"                               \
+       "       " #asm_op "     %0, %0, %3\n"                           \
+       "       st" #rel "xr    %w1, %0, %2\n"                          \
+       "       cbnz    %w1, 1b\n"                                      \
+       "       " #mb )                                                 \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)                \
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
 static inline long                                                     \
-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)                \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)                        \
 {                                                                      \
        s64 result, val;                                                \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
        __LL_SC_FALLBACK(                                               \
-"      prfm    pstl1strm, %3\n"                                        \
-"1:    ld" #acq "xr    %0, %3\n"                                       \
-"      " #asm_op "     %1, %0, %4\n"                                   \
-"      st" #rel "xr    %w2, %1, %3\n"                                  \
-"      cbnz    %w2, 1b\n"                                              \
-"      " #mb )                                                         \
+       "       prfm    pstl1strm, %3\n"                                \
+       "1:     ld" #acq "xr    %0, %3\n"                               \
+       "       " #asm_op "     %1, %0, %4\n"                           \
+       "       st" #rel "xr    %w2, %1, %3\n"                          \
+       "       cbnz    %w2, 1b\n"                                      \
+       "       " #mb )                                                 \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 
        asm volatile("// atomic64_dec_if_positive\n"
        __LL_SC_FALLBACK(
-"      prfm    pstl1strm, %2\n"
-"1:    ldxr    %0, %2\n"
-"      subs    %0, %0, #1\n"
-"      b.lt    2f\n"
-"      stlxr   %w1, %0, %2\n"
-"      cbnz    %w1, 1b\n"
-"      dmb     ish\n"
-"2:")
+       "       prfm    pstl1strm, %2\n"
+       "1:     ldxr    %0, %2\n"
+       "       subs    %0, %0, #1\n"
+       "       b.lt    2f\n"
+       "       stlxr   %w1, %0, %2\n"
+       "       cbnz    %w1, 1b\n"
+       "       dmb     ish\n"
+       "2:")
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index da3280f639cd7e8b9058c0125e6d0095622c7acd..ab661375835e398397a5fd666c46382df714320d 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op)                                          \
-static inline void __lse_atomic_##op(int i, atomic_t *v)                       \
+static inline void __lse_atomic_##op(int i, atomic_t *v)               \
 {                                                                      \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
-"      " #asm_op "     %w[i], %[v]\n"                                  \
+       "       " #asm_op "     %w[i], %[v]\n"                          \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
 }
@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)   \
 {                                                                      \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
-"      " #asm_op #mb " %w[i], %w[i], %[v]"                             \
+       "       " #asm_op #mb " %w[i], %w[i], %[v]"                     \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
        : cl);                                                          \
@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
        "       add     %w[i], %w[i], %w[tmp]"                          \
        : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)       \
        : "r" (v)                                                       \
-       : cl);                                                  \
+       : cl);                                                          \
                                                                        \
        return i;                                                       \
 }
@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)                \
 {                                                                      \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
-"      " #asm_op "     %[i], %[v]\n"                                   \
+       "       " #asm_op "     %[i], %[v]\n"                           \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
 }
@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {                                                                      \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
-"      " #asm_op #mb " %[i], %[i], %[v]"                               \
+       "       " #asm_op #mb " %[i], %[i], %[v]"                       \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
        : cl);                                                          \
@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)       \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
 {                                                                      \
        unsigned long tmp;                                              \
                                                                        \