git.baikalelectronics.ru Git - kernel.git/commitdiff
Revert "sbitmap: fix batched wait_cnt accounting"
author: Jens Axboe <axboe@kernel.dk>
Sun, 4 Sep 2022 12:39:25 +0000 (06:39 -0600)
committer: Jens Axboe <axboe@kernel.dk>
Sun, 4 Sep 2022 12:39:25 +0000 (06:39 -0600)
This reverts commit 16ede66973c84f890c03584f79158dd5b2d725f5.

This is causing issues with CPU stalls on my test box, revert it for
now until we understand what is going on. It looks like infinite
looping off sbitmap_queue_wake_up(), but hard to tell with a lot of
CPUs hitting this issue and the console scrolling infinitely.

Link: https://lore.kernel.org/linux-block/e742813b-ce5c-0d58-205b-1626f639b1bd@kernel.dk/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-tag.c
include/linux/sbitmap.h
lib/sbitmap.c

index 9eb968e14d31f83286c31c3321c0599a0c714881..8e3b36d1cb574ed7dbab5da45720a3e0a3eacdde 100644 (file)
@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 * other allocations on previous queue won't be starved.
                 */
                if (bt != bt_prev)
-                       sbitmap_queue_wake_up(bt_prev, 1);
+                       sbitmap_queue_wake_up(bt_prev);
 
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);
index 4d2d5205ab5860b03e1b8d61a6e4a7d4d55712a0..8f5a86e210b90c43c5246eaa121b2048d0351066 100644 (file)
@@ -575,9 +575,8 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
- * @nr: Number of bits cleared.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
index 2fedf07a9db5c8ec34c4612814c83b80256ae705..a39b1a877366a4fd39597c1e2d10f25108334485 100644 (file)
@@ -599,38 +599,34 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
        return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int nr)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 {
        struct sbq_wait_state *ws;
-       int wake_batch, wait_cnt, cur;
+       unsigned int wake_batch;
+       int wait_cnt;
 
        ws = sbq_wake_ptr(sbq);
-       if (!ws || !nr)
+       if (!ws)
                return false;
 
-       wake_batch = READ_ONCE(sbq->wake_batch);
-       cur = atomic_read(&ws->wait_cnt);
-       do {
-               if (cur <= 0)
-                       return true;
-               wait_cnt = cur - nr;
-       } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
-
+       wait_cnt = atomic_dec_return(&ws->wait_cnt);
        /*
         * For concurrent callers of this, callers should call this function
         * again to wakeup a new batch on a different 'ws'.
         */
-       if (!waitqueue_active(&ws->wait))
+       if (wait_cnt < 0 || !waitqueue_active(&ws->wait))
                return true;
 
        if (wait_cnt > 0)
                return false;
 
+       wake_batch = READ_ONCE(sbq->wake_batch);
+
        /*
         * Wake up first in case that concurrent callers decrease wait_cnt
         * while waitqueue is empty.
         */
-       wake_up_nr(&ws->wait, max(wake_batch, nr));
+       wake_up_nr(&ws->wait, wake_batch);
 
        /*
         * Pairs with the memory barrier in sbitmap_queue_resize() to
@@ -655,11 +651,12 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq, int nr)
        return false;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 {
-       while (__sbq_wake_up(sbq, nr))
+       while (__sbq_wake_up(sbq))
                ;
 }
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
 static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
 {
@@ -696,7 +693,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
                atomic_long_andnot(mask, (atomic_long_t *) addr);
 
        smp_mb__after_atomic();
-       sbitmap_queue_wake_up(sbq, nr_tags);
+       sbitmap_queue_wake_up(sbq);
        sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
                                        tags[nr_tags - 1] - offset);
 }
@@ -724,7 +721,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
         * waiter. See the comment on waitqueue_active().
         */
        smp_mb__after_atomic();
-       sbitmap_queue_wake_up(sbq, 1);
+       sbitmap_queue_wake_up(sbq);
        sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);