git.baikalelectronics.ru Git - kernel.git/commitdiff
mq-deadline: Fix request accounting
author Bart Van Assche <bvanassche@acm.org>
Tue, 24 Aug 2021 17:05:20 +0000 (10:05 -0700)
committer Jens Axboe <axboe@kernel.dk>
Tue, 24 Aug 2021 22:18:01 +0000 (16:18 -0600)
The block layer may call the I/O scheduler .finish_request() callback
without having called the .insert_requests() callback. Make sure that the
mq-deadline I/O statistics are correct if the block layer inserts an I/O
request that bypasses the I/O scheduler. This patch prevents lower-priority
I/O from being delayed longer than necessary for mixed I/O priority
workloads.

Cc: Niklas Cassel <Niklas.Cassel@wdc.com>
Cc: Damien Le Moal <damien.lemoal@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Reported-by: Niklas Cassel <Niklas.Cassel@wdc.com>
Fixes: 08a9ad8bf607 ("block/mq-deadline: Add cgroup support")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20210824170520.1659173-1-bvanassche@acm.org
Reviewed-by: Niklas Cassel <niklas.cassel@wdc.com>
Tested-by: Niklas Cassel <niklas.cassel@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/mq-deadline.c

index a09761cbdf12e58eb1357d00fe3c69a079810585..18dc8efe9652dcf3a7f5039e780ffc8e5e890267 100644 (file)
@@ -711,6 +711,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, inserted, prio);
+       rq->elv.priv[0] = (void *)(uintptr_t)1;
 
        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
@@ -759,12 +760,10 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
        spin_unlock(&dd->lock);
 }
 
-/*
- * Nothing to do here. This is defined only to ensure that .finish_request
- * method is called upon request completion.
- */
+/* Callback from inside blk_mq_rq_ctx_init(). */
 static void dd_prepare_request(struct request *rq)
 {
+       rq->elv.priv[0] = NULL;
 }
 
 /*
@@ -791,7 +790,14 @@ static void dd_finish_request(struct request *rq)
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
-       dd_count(dd, completed, prio);
+       /*
+        * The block layer core may call dd_finish_request() without having
+        * called dd_insert_requests(). Hence only update statistics for
+        * requests for which dd_insert_requests() has been called. See also
+        * blk_mq_request_bypass_insert().
+        */
+       if (rq->elv.priv[0])
+               dd_count(dd, completed, prio);
 
        if (blk_queue_is_zoned(q)) {
                unsigned long flags;