wait_on_bit: add an acquire memory barrier
author	Mikulas Patocka <mpatocka@redhat.com>
	Fri, 26 Aug 2022 13:17:08 +0000 (09:17 -0400)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 26 Aug 2022 16:30:25 +0000 (09:30 -0700)
There are several places in the kernel where wait_on_bit is not followed
by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read).

On architectures with weak memory ordering, memory accesses that follow
wait_on_bit may be reordered before it and thus return invalid data.

Fix this class of bugs by introducing a new function "test_bit_acquire"
that works like test_bit, but has acquire memory ordering semantics.
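
As a minimal, hypothetical sketch of the pattern being fixed (the structure
name and bit number below are made up for illustration and are not taken from
dm-bufio): a reader waits for an "I/O in flight" bit to clear and then
dereferences the buffer.  Without acquire semantics inside wait_on_bit, the
load of buf->data could be satisfied before the bit was observed clear on a
weakly ordered CPU.

	#include <linux/wait_bit.h>
	#include <linux/sched.h>

	#define MY_B_READING	0	/* hypothetical "I/O in flight" bit */

	struct my_buffer {
		unsigned long state;	/* MY_B_READING set while the read is in flight */
		void *data;		/* filled in by the I/O completion path */
	};

	static void *my_read(struct my_buffer *buf)
	{
		/* With this patch, wait_on_bit() checks the bit with
		 * test_bit_acquire(), so the load of buf->data below cannot
		 * be reordered before the bit is observed clear. */
		wait_on_bit(&buf->state, MY_B_READING, TASK_UNINTERRUPTIBLE);
		return buf->data;
	}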

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Documentation/atomic_bitops.txt
arch/x86/include/asm/bitops.h
include/asm-generic/bitops/generic-non-atomic.h
include/asm-generic/bitops/instrumented-non-atomic.h
include/asm-generic/bitops/non-atomic.h
include/asm-generic/bitops/non-instrumented-non-atomic.h
include/linux/bitops.h
include/linux/buffer_head.h
include/linux/wait_bit.h
kernel/sched/wait_bit.c

index d8b101c97031b0870d6431505fbd12f9b7d82351..edea4656c5c05f3da3f6ef46a8e8faf2b2a3f1a1 100644 (file)
@@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:
 
  - RMW operations that have a return value are fully ordered.
 
- - RMW operations that are conditional are unordered on FAILURE,
-   otherwise the above rules apply. In the case of test_and_set_bit_lock(),
-   if the bit in memory is unchanged by the operation then it is deemed to have
-   failed.
+ - RMW operations that are conditional are fully ordered.
 
-Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
-clear_bit_unlock() which has RELEASE semantics.
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
+clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has
+ACQUIRE semantics.
 
 Since a platform only has a single means of achieving atomic operations
 the same barriers as for atomic_t are used, see atomic_t.txt.
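
As a hypothetical illustration of the acquire/release pairing described above
(none of these names come from the patch): a producer publishes data and then
clears a "busy" bit with clear_bit_unlock(), and a consumer checks the bit
with test_bit_acquire() before touching the data.

	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define DATA_BUSY	0	/* hypothetical bit: set while the data is being prepared */

	static unsigned long flags = BIT(DATA_BUSY);	/* starts out busy */
	static int shared_data;

	static void producer(void)
	{
		shared_data = 42;			/* store before the RELEASE */
		clear_bit_unlock(DATA_BUSY, &flags);	/* RELEASE: orders the store above */
	}

	static int consumer(void)
	{
		if (test_bit_acquire(DATA_BUSY, &flags))	/* ACQUIRE on the load of flags */
			return -EBUSY;				/* still busy */
		return shared_data;				/* guaranteed to observe 42 */
	}

On a CPU with weak memory ordering, a plain test_bit() in consumer() would not
guarantee that the read of shared_data happens after the bit is observed
clear; that is exactly the gap test_bit_acquire() closes.
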
index 973c6bd17f98e31c65b38693c48f54f701ac32a9..0fe9de58af313153b962af9bfde0d77cae44d26e 100644 (file)
@@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+       bool oldbit;
+
+       asm volatile("testb %2,%1"
+                    CC_SET(nz)
+                    : CC_OUT(nz) (oldbit)
+                    : "m" (((unsigned char *)addr)[nr >> 3]),
+                      "i" (1 << (nr & 7))
+                    :"memory");
+
+       return oldbit;
+}
+
 static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
        bool oldbit;
@@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
                                          variable_test_bit(nr, addr);
 }
 
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+                                         variable_test_bit(nr, addr);
+}
+
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
index 3d5ebd24652b9ff2412c45fd829a5fe8b9308e0e..564a8c675d85898a601e889ab471880ef70c8068 100644 (file)
@@ -4,6 +4,7 @@
 #define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
 
 #include <linux/bits.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
@@ -127,6 +128,18 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
 /*
  * const_*() definitions provide good compile-time optimizations when
  * the passed arguments can be resolved at compile time.
@@ -137,6 +150,7 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 #define const___test_and_set_bit       generic___test_and_set_bit
 #define const___test_and_clear_bit     generic___test_and_clear_bit
 #define const___test_and_change_bit    generic___test_and_change_bit
+#define const_test_bit_acquire         generic_test_bit_acquire
 
 /**
  * const_test_bit - Determine whether a bit is set
index 988a3bbfba34ec9a1b4904520a5cadc469842dbd..2b238b161a6206e6702a34523ee4043d7dd65d96 100644 (file)
@@ -142,4 +142,16 @@ _test_bit(unsigned long nr, const volatile unsigned long *addr)
        return arch_test_bit(nr, addr);
 }
 
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+       instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+       return arch_test_bit_acquire(nr, addr);
+}
+
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
index 5c37ced343aedc0669347f64d042ffc2c161e5a9..71f8d54a5195e919bb51e3311df3150b087fbacd 100644 (file)
@@ -13,6 +13,7 @@
 #define arch___test_and_change_bit generic___test_and_change_bit
 
 #define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
 
 #include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
index bdb9b1ffaee90a9444db25575b93a5a0d50e05f9..0ddc78dfc358bece4d4f6a40036e0e11e73684b5 100644 (file)
@@ -12,5 +12,6 @@
 #define ___test_and_change_bit arch___test_and_change_bit
 
 #define _test_bit              arch_test_bit
+#define _test_bit_acquire      arch_test_bit_acquire
 
 #endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
index cf9bf65039f22b14b8f810cadc0301397c2303e2..3b89c64bcfd8f029593f9fa2da7e6366467868f6 100644 (file)
@@ -59,6 +59,7 @@ extern unsigned long __sw_hweight64(__u64 w);
 #define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
 #define __test_and_change_bit(nr, addr)        bitop(___test_and_change_bit, nr, addr)
 #define test_bit(nr, addr)             bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr)     bitop(_test_bit_acquire, nr, addr)
 
 /*
  * Include this here because some architectures need generic_ffs/fls in
index def8b8d30ccc12563418509ad3368099fa7c404d..089c9ade43259e76420e60dd9dff8c46792ffa82 100644 (file)
@@ -156,7 +156,7 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
         * make it consistent with folio_test_uptodate
         * pairs with smp_mb__before_atomic in set_buffer_uptodate
         */
-       return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+       return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
 #define bh_offset(bh)          ((unsigned long)(bh)->b_data & ~PAGE_MASK)
index 7dec36aecbd9fe239ba1b94f81c729e6665d6bd4..7725b7579b7819d3b1f9267fc42e26bf44ff13c0 100644 (file)
@@ -71,7 +71,7 @@ static inline int
 wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
@@ -96,7 +96,7 @@ static inline int
 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
                    unsigned long timeout)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit_timeout(word, bit,
                                               bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
                   unsigned mode)
 {
        might_sleep();
-       if (!test_bit(bit, word))
+       if (!test_bit_acquire(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
 }
index d4788f810b55511d6c88ca626ffddf4b6c7864a3..0b1cd985dc2749a9db3aadf3dc13bd179a7a450f 100644 (file)
@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
                prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
                if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
                        ret = (*action)(&wbq_entry->key, mode);
-       } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+       } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
 
        finish_wait(wq_head, &wbq_entry->wq_entry);