MIPS: cmpxchg: Pull xchg() asm into a macro
author    Paul Burton <paul.burton@imgtec.com>
          Sat, 10 Jun 2017 00:26:34 +0000 (17:26 -0700)
committer Ralf Baechle <ralf@linux-mips.org>
          Thu, 29 Jun 2017 00:42:24 +0000 (02:42 +0200)
Use a macro to generate the 32- & 64-bit variants of the backing code for
xchg(), much as is already done for cmpxchg(). This removes the
duplication previously found in __xchg_u32() & __xchg_u64().

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16349/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
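
The pattern in outline, as a simplified standalone sketch (the macro and
function names here are illustrative, not the kernel's; the real
__xchg_asm body is the LL/SC loop shown in the diff below). The plain-C
macro body stands in for the inline asm so the sketch compiles anywhere
GNU C does:

	/* Hypothetical sketch of the consolidation: one macro
	 * parameterized on the load-linked/store-conditional mnemonics,
	 * plus thin per-width wrappers. In the real code the ld/st
	 * arguments are pasted into the asm template; here they are
	 * simply unused. */
	#define gen_xchg(ld, st, m, val)				\
	({								\
		__typeof__(*(m)) __old = *(m);	/* real body: ld   */	\
		*(m) = (val);		/* real body: st + retry loop */ \
		__old;							\
	})

	static inline unsigned int my_xchg_u32(volatile unsigned int *m,
					       unsigned int val)
	{
		return gen_xchg("ll", "sc", m, val);
	}

	static inline unsigned long long
	my_xchg_u64(volatile unsigned long long *m, unsigned long long val)
	{
		return gen_xchg("lld", "scd", m, val);
	}

One macro body thus serves both widths; only the mnemonic strings
("ll"/"sc" vs. "lld"/"scd") differ between the two wrappers.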
arch/mips/include/asm/cmpxchg.h

index 0582c01d229d83af45cbd257a620582a1636a9d6..a19359649b60bf432d325011d8f2dcc770ec4094 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
 # define __scbeqz "beqz"
 #endif
 
+#define __xchg_asm(ld, st, m, val)                                     \
+({                                                                     \
+       __typeof(*(m)) __ret;                                           \
+                                                                       \
+       if (kernel_uses_llsc) {                                         \
+               __asm__ __volatile__(                                   \
+               "       .set    push                            \n"     \
+               "       .set    noat                            \n"     \
+               "       .set    " MIPS_ISA_ARCH_LEVEL "         \n"     \
+               "1:     " ld "  %0, %2          # __xchg_asm    \n"     \
+               "       .set    mips0                           \n"     \
+               "       move    $1, %z3                         \n"     \
+               "       .set    " MIPS_ISA_ARCH_LEVEL "         \n"     \
+               "       " st "  $1, %1                          \n"     \
+               "\t" __scbeqz " $1, 1b                          \n"     \
+               "       .set    pop                             \n"     \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)                  \
+               : "memory");                                            \
+       } else {                                                        \
+               unsigned long __flags;                                  \
+                                                                       \
+               raw_local_irq_save(__flags);                            \
+               __ret = *m;                                             \
+               *m = val;                                               \
+               raw_local_irq_restore(__flags);                         \
+       }                                                               \
+                                                                       \
+       __ret;                                                          \
+})
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
        __u32 retval;
 
        smp_mb__before_llsc();
-
-       if (kernel_uses_llsc) {
-               unsigned long dummy;
-
-               __asm__ __volatile__(
-               "       .set    " MIPS_ISA_ARCH_LEVEL "                 \n"
-               "1:     ll      %0, %3                  # xchg_u32      \n"
-               "       .set    mips0                                   \n"
-               "       move    %2, %z4                                 \n"
-               "       .set    " MIPS_ISA_ARCH_LEVEL "                 \n"
-               "       sc      %2, %1                                  \n"
-               "\t" __scbeqz " %2, 1b                                  \n"
-               "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-               : "memory");
-       } else {
-               unsigned long flags;
-
-               raw_local_irq_save(flags);
-               retval = *m;
-               *m = val;
-               raw_local_irq_restore(flags);   /* implies memory barrier  */
-       }
-
+       retval = __xchg_asm("ll", "sc", m, val);
        smp_llsc_mb();
 
        return retval;
@@ -65,29 +72,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
        __u64 retval;
 
        smp_mb__before_llsc();
-
-       if (kernel_uses_llsc) {
-               unsigned long dummy;
-
-               __asm__ __volatile__(
-               "       .set    " MIPS_ISA_ARCH_LEVEL "                 \n"
-               "1:     lld     %0, %3                  # xchg_u64      \n"
-               "       move    %2, %z4                                 \n"
-               "       scd     %2, %1                                  \n"
-               "\t" __scbeqz " %2, 1b                                  \n"
-               "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-               : "memory");
-       } else {
-               unsigned long flags;
-
-               raw_local_irq_save(flags);
-               retval = *m;
-               *m = val;
-               raw_local_irq_restore(flags);   /* implies memory barrier  */
-       }
-
+       retval = __xchg_asm("lld", "scd", m, val);
        smp_llsc_mb();
 
        return retval;
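
As a usage note: xchg() atomically stores a new value and returns the old
one. A minimal user-space analogue (my sketch, not kernel code) using the
GCC __atomic builtins, which on MIPS compile down to an ll/sc retry loop
much like the one __xchg_asm emits:

	#include <stdio.h>

	static unsigned int word;

	int main(void)
	{
		/* Atomically swap in 1 and fetch the previous contents.
		 * __ATOMIC_SEQ_CST requests full-barrier semantics,
		 * roughly corresponding to the smp_mb__before_llsc()/
		 * smp_llsc_mb() pairing around __xchg_asm above. */
		unsigned int old = __atomic_exchange_n(&word, 1,
						       __ATOMIC_SEQ_CST);
		printf("old = %u\n", old);	/* prints "old = 0" */
		return 0;
	}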