parisc: Implement __smp_store_release and __smp_load_acquire barriers
author    John David Anglin <dave.anglin@bell.net>
          Thu, 30 Jul 2020 12:59:12 +0000 (08:59 -0400)
committer Helge Deller <deller@gmx.de>
          Wed, 12 Aug 2020 13:13:42 +0000 (15:13 +0200)
This patch implements the __smp_store_release and __smp_load_acquire barriers
using ordered stores and loads.  This avoids the sync instruction present in
the generic implementation.
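
For context, here is a minimal sketch (illustrative only, not part of this
patch) of how these barriers are typically consumed through the generic
smp_store_release() and smp_load_acquire() wrappers; the 'data' and 'ready'
variables below are hypothetical:

    static int data;
    static int ready;

    /* Producer: the release store ensures the write to 'data' is
     * visible before 'ready' can be observed as 1. */
    void producer(void)
    {
            data = 42;
            smp_store_release(&ready, 1);
    }

    /* Consumer: the acquire load ensures 'data' is read only after
     * 'ready' has been observed as 1, so it sees the value 42. */
    int consumer(void)
    {
            if (smp_load_acquire(&ready))
                    return data;
            return -1;
    }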

Cc: <stable@vger.kernel.org> # 4.14+
Signed-off-by: Dave Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
arch/parisc/include/asm/barrier.h

index dbaaca84f27f342ef1c1b8c743e66fa4d6a6f8eb..640d46edf32e71cd9a117f2e4d0ea1611de1698c 100644
 #define __smp_rmb()    mb()
 #define __smp_wmb()    mb()
 
+#define __smp_store_release(p, v)                                      \
+do {                                                                   \
+       typeof(p) __p = (p);                                            \
+        union { typeof(*p) __val; char __c[1]; } __u =                 \
+                { .__val = (__force typeof(*p)) (v) };                 \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("stb,ma %0,0(%1)"                          \
+                               : : "r"(*(__u8 *)__u.__c), "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("sth,ma %0,0(%1)"                          \
+                               : : "r"(*(__u16 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("stw,ma %0,0(%1)"                          \
+                               : : "r"(*(__u32 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("std,ma %0,0(%1)"                  \
+                               : : "r"(*(__u64 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+} while (0)
+
+#define __smp_load_acquire(p)                                          \
+({                                                                     \
+       union { typeof(*p) __val; char __c[1]; } __u;                   \
+       typeof(p) __p = (p);                                            \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("ldb,ma 0(%1),%0"                          \
+                               : "=r"(*(__u8 *)__u.__c) : "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("ldh,ma 0(%1),%0"                          \
+                               : "=r"(*(__u16 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("ldw,ma 0(%1),%0"                          \
+                               : "=r"(*(__u32 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("ldd,ma 0(%1),%0"                  \
+                               : "=r"(*(__u64 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+       __u.__val;                                                      \
+})
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */