From: Bart Van Assche
Date: Tue, 24 Aug 2021 17:05:20 +0000 (-0700)
Subject: mq-deadline: Fix request accounting
X-Git-Url: https://git.baikalelectronics.ru/sdk/?a=commitdiff_plain;h=b6d2b054e8baaee53fd2d4854c63cbf0f2c6262a;p=kernel.git

mq-deadline: Fix request accounting

The block layer may call the I/O scheduler .finish_request() callback
without having called the .insert_requests() callback. Make sure that the
mq-deadline I/O statistics are correct if the block layer inserts an I/O
request that bypasses the I/O scheduler. This patch prevents lower
priority I/O from being delayed longer than necessary for mixed I/O
priority workloads.

Cc: Niklas Cassel
Cc: Damien Le Moal
Cc: Hannes Reinecke
Reported-by: Niklas Cassel
Fixes: 08a9ad8bf607 ("block/mq-deadline: Add cgroup support")
Signed-off-by: Bart Van Assche
Link: https://lore.kernel.org/r/20210824170520.1659173-1-bvanassche@acm.org
Reviewed-by: Niklas Cassel
Tested-by: Niklas Cassel
Signed-off-by: Jens Axboe
---

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index a09761cbdf12e..18dc8efe9652d 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -711,6 +711,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	prio = ioprio_class_to_prio[ioprio_class];
 	dd_count(dd, inserted, prio);
+	rq->elv.priv[0] = (void *)(uintptr_t)1;
 
 	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
 		blk_mq_free_requests(&free);
@@ -759,12 +760,10 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 	spin_unlock(&dd->lock);
 }
 
-/*
- * Nothing to do here. This is defined only to ensure that .finish_request
- * method is called upon request completion.
- */
+/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
 {
+	rq->elv.priv[0] = NULL;
 }
 
 /*
@@ -791,7 +790,14 @@ static void dd_finish_request(struct request *rq)
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
-	dd_count(dd, completed, prio);
+	/*
+	 * The block layer core may call dd_finish_request() without having
+	 * called dd_insert_requests(). Hence only update statistics for
+	 * requests for which dd_insert_requests() has been called. See also
+	 * blk_mq_request_bypass_insert().
+	 */
+	if (rq->elv.priv[0])
+		dd_count(dd, completed, prio);
 
 	if (blk_queue_is_zoned(q)) {
 		unsigned long flags;
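
For illustration only, here is a minimal standalone C sketch (not kernel
code, not part of the patch) of the accounting pattern the fix relies on:
a request is unmarked in the prepare callback, marked in the insert
callback, and its completion is only counted when the mark is present, so
requests that bypass the scheduler do not skew the completed counter. The
names below (fake_request, prepare, insert, finish) are hypothetical and
exist only for this sketch.

#include <stdint.h>
#include <stdio.h>

struct fake_request {
	void *sched_priv;		/* stands in for rq->elv.priv[0] */
};

static unsigned int inserted, completed;

static void prepare(struct fake_request *rq)
{
	rq->sched_priv = NULL;		/* every request starts unmarked */
}

static void insert(struct fake_request *rq)
{
	inserted++;
	rq->sched_priv = (void *)(uintptr_t)1;	/* mark: scheduler saw this request */
}

static void finish(struct fake_request *rq)
{
	if (rq->sched_priv)		/* only count requests that went through insert() */
		completed++;
}

int main(void)
{
	struct fake_request a, b;

	prepare(&a);
	insert(&a);
	finish(&a);			/* normal path: inserted and completed */

	prepare(&b);
	finish(&b);			/* bypass path: completion not counted */

	printf("inserted=%u completed=%u\n", inserted, completed);
	return 0;
}

Running the sketch prints "inserted=1 completed=1": the bypassed request
leaves both counters untouched, which is the invariant the patch restores
for the per-priority statistics.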