git.baikalelectronics.ru Git - kernel.git/commitdiff
sbitmap: fix batched wait_cnt accounting
author: Keith Busch <kbusch@kernel.org>
Fri, 9 Sep 2022 18:40:22 +0000 (11:40 -0700)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 12 Sep 2022 06:10:34 +0000 (00:10 -0600)
Batched completions can clear multiple bits, but we're only decrementing
the wait_cnt by one each time. This can cause waiters to never be woken,
stalling IO. Use the batched count instead.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=215679
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20220909184022.1709476-1-kbusch@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-tag.c
include/linux/sbitmap.h
lib/sbitmap.c

index 8e3b36d1cb574ed7dbab5da45720a3e0a3eacdde..9eb968e14d31f83286c31c3321c0599a0c714881 100644 (file)
@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 * other allocations on previous queue won't be starved.
                 */
                if (bt != bt_prev)
-                       sbitmap_queue_wake_up(bt_prev);
+                       sbitmap_queue_wake_up(bt_prev, 1);
 
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);
index 8f5a86e210b90c43c5246eaa121b2048d0351066..4d2d5205ab5860b03e1b8d61a6e4a7d4d55712a0 100644 (file)
@@ -575,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
index cbfd2e677d871f37d4adffe8df36d8b9cd01bf38..624fa7f118d1723f07bc595c6075f9f35fc9192e 100644 (file)
@@ -599,24 +599,31 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
        return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
 {
        struct sbq_wait_state *ws;
        unsigned int wake_batch;
-       int wait_cnt;
+       int wait_cnt, cur, sub;
        bool ret;
 
+       if (*nr <= 0)
+               return false;
+
        ws = sbq_wake_ptr(sbq);
        if (!ws)
                return false;
 
-       wait_cnt = atomic_dec_return(&ws->wait_cnt);
-       /*
-        * For concurrent callers of this, callers should call this function
-        * again to wakeup a new batch on a different 'ws'.
-        */
-       if (wait_cnt < 0)
-               return true;
+       cur = atomic_read(&ws->wait_cnt);
+       do {
+               /*
+                * For concurrent callers of this, callers should call this
+                * function again to wakeup a new batch on a different 'ws'.
+                */
+               if (cur == 0)
+                       return true;
+               sub = min(*nr, cur);
+               wait_cnt = cur - sub;
+       } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
 
        /*
         * If we decremented queue without waiters, retry to avoid lost
@@ -625,6 +632,8 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
        if (wait_cnt > 0)
                return !waitqueue_active(&ws->wait);
 
+       *nr -= sub;
+
        /*
         * When wait_cnt == 0, we have to be particularly careful as we are
         * responsible to reset wait_cnt regardless whether we've actually
@@ -660,12 +669,12 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
        sbq_index_atomic_inc(&sbq->wake_index);
        atomic_set(&ws->wait_cnt, wake_batch);
 
-       return ret;
+       return ret || *nr;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-       while (__sbq_wake_up(sbq))
+       while (__sbq_wake_up(sbq, &nr))
                ;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
@@ -705,7 +714,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
                atomic_long_andnot(mask, (atomic_long_t *) addr);
 
        smp_mb__after_atomic();
-       sbitmap_queue_wake_up(sbq);
+       sbitmap_queue_wake_up(sbq, nr_tags);
        sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
                                        tags[nr_tags - 1] - offset);
 }
@@ -733,7 +742,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
         * waiter. See the comment on waitqueue_active().
         */
        smp_mb__after_atomic();
-       sbitmap_queue_wake_up(sbq);
+       sbitmap_queue_wake_up(sbq, 1);
        sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);