]> git.baikalelectronics.ru Git - kernel.git/commitdiff
MIPS: cmpxchg: Implement 1 byte & 2 byte xchg()
authorPaul Burton <paul.burton@imgtec.com>
Sat, 10 Jun 2017 00:26:39 +0000 (17:26 -0700)
committerRalf Baechle <ralf@linux-mips.org>
Thu, 29 Jun 2017 00:42:25 +0000 (02:42 +0200)
Implement 1 & 2 byte xchg() using read-modify-write atop a 4 byte
cmpxchg(). This allows us to support these atomic operations despite the
MIPS ISA only providing for 4 & 8 byte atomic operations.

This is required in order to support queued spinlocks (qspinlock) in a
later patch, since these make use of a 2 byte xchg() in their slow path.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16354/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/cmpxchg.h
arch/mips/kernel/Makefile
arch/mips/kernel/cmpxchg.c [new file with mode: 0644]

index 516cb66f066b2f06828bf6fd45964b8d23e1ebf4..a633bf8456899c59237bb40949a5e5051c8049b9 100644 (file)
@@ -70,9 +70,16 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
        __ret;                                                          \
 })
 
+extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+                                 unsigned int size);
+
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
        switch (size) {
+       case 1:
+       case 2:
+               return __xchg_small(ptr, x, size);
+
        case 4:
                return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);
 
@@ -91,8 +98,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 ({                                                                     \
        __typeof__(*(ptr)) __res;                                       \
                                                                        \
-       BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);                            \
-                                                                       \
        smp_mb__before_llsc();                                          \
                                                                        \
        __res = (__typeof__(*(ptr)))                                    \
index f0edd7e8a0b78646a1bac69830f8a44a7606b2d7..46c0581256f1d7d226b7b49f2968966d4d39ed49 100644 (file)
@@ -4,7 +4,7 @@
 
 extra-y                := head.o vmlinux.lds
 
-obj-y          += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+obj-y          += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
                   process.o prom.o ptrace.o reset.o setup.o signal.o \
                   syscall.o time.o topology.o traps.o unaligned.o watch.o \
                   vdso.o cacheinfo.o
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
new file mode 100644 (file)
index 0000000..5acfbf9
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+/*
+ * __xchg_small() - exchange a 1 or 2 byte value atomically.
+ * @ptr: pointer to the naturally aligned 1 or 2 byte value to exchange
+ * @size: size of the value in bytes; callers pass 1 or 2 (dispatched from
+ *        the size switch in __xchg())
+ *
+ * The MIPS ISA only provides LL/SC on 4 (and 8) byte quantities, so a
+ * sub-word xchg() is emulated here with a masked read-modify-write of the
+ * aligned 32-bit word containing the value, retried via 4 byte cmpxchg()
+ * until no concurrent modification intervenes.
+ *
+ * Return: the old 1 or 2 byte value previously stored at @ptr.
+ */
+unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
+{
+       u32 old32, new32, load32, mask;
+       volatile u32 *ptr32;
+       unsigned int shift;
+
+       /* Check that ptr is naturally aligned */
+       WARN_ON((unsigned long)ptr & (size - 1));
+
+       /* Mask value to the correct size. */
+       mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+       val &= mask;
+
+       /*
+        * Calculate a shift & mask that correspond to the value we wish to
+        * exchange within the naturally aligned 4 byte integer that includes
+        * it.
+        */
+       shift = (unsigned long)ptr & 0x3;
+       if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+               shift ^= sizeof(u32) - size;
+       shift *= BITS_PER_BYTE;
+       mask <<= shift;
+
+       /*
+        * Calculate a pointer to the naturally aligned 4 byte integer that
+        * includes our byte of interest, and load its value.
+        */
+       ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+       load32 = *ptr32;
+
+       /*
+        * Retry until cmpxchg() returns the value we expected to find,
+        * ie. until no other CPU has modified the word in between our
+        * load & the attempted store.
+        */
+       do {
+               old32 = load32;
+               new32 = (load32 & ~mask) | (val << shift);
+               load32 = cmpxchg(ptr32, old32, new32);
+       } while (load32 != old32);
+
+       /* Extract & return the old sub-word value. */
+       return (load32 & mask) >> shift;
+}