blk-mq: remove the request_queue argument to blk_insert_cloned_request
author     Christoph Hellwig <hch@lst.de>
           Tue, 15 Feb 2022 10:05:38 +0000 (11:05 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Thu, 17 Feb 2022 02:39:10 +0000 (19:39 -0700)
The request must be submitted to the queue it was allocated for, so
remove the extra request_queue argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Link: https://lore.kernel.org/r/20220215100540.3892965-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
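
As a quick illustration of the interface change (a sketch, not part of the commit itself): the helper now derives the queue from rq->q, so a stacking driver's call site simply drops the first argument. The submit_clone() wrapper below is hypothetical.

    /* New prototype after this commit (include/linux/blk-mq.h): */
    blk_status_t blk_insert_cloned_request(struct request *rq);

    /*
     * Hypothetical stacking-driver call site: the clone must have been
     * allocated against the queue it is submitted to, i.e. clone->q.
     */
    static blk_status_t submit_clone(struct request *clone)
    {
            /* before: blk_insert_cloned_request(clone->q, clone); */
            return blk_insert_cloned_request(clone);
    }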
block/blk-mq.c
drivers/md/dm-rq.c
include/linux/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index fc132933397fbd19078aad4d9743c90cfe9eaf87..886836a54064c02d7dfbe79613237a83c4e5e995 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2843,11 +2843,11 @@ void blk_mq_submit_bio(struct bio *bio)
 #ifdef CONFIG_BLK_MQ_STACKING
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
- * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request *rq)
 {
+       struct request_queue *q = rq->q;
        unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
        blk_status_t ret;
 
@@ -2881,8 +2881,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
                return BLK_STS_IOERR;
        }
 
-       if (rq->q->disk &&
-           should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
+       if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
                return BLK_STS_IOERR;
 
        if (blk_crypto_insert_cloned_request(rq))
@@ -2895,7 +2894,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       blk_mq_run_dispatch_ops(rq->q,
+       blk_mq_run_dispatch_ops(q,
                        ret = blk_mq_request_issue_directly(rq, true));
        if (ret)
                blk_account_io_done(rq, ktime_get_ns());
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 579ab6183d4d811c5c6697b60ecb21a44ade2a52..2fcc9b7f391b33c6c265ceba4d75fa75ed7b33db 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -311,7 +311,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ
                clone->rq_flags |= RQF_IO_STAT;
 
        clone->start_time_ns = ktime_get_ns();
-       r = blk_insert_cloned_request(clone->q, clone);
+       r = blk_insert_cloned_request(clone);
        if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d319ffa59354a1d943dbb70a92eed1619162ee00..3a41d50b85d3a63078e64e96d867f28d4d24cf5d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -952,8 +952,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                struct bio_set *bs, gfp_t gfp_mask,
                int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
 void blk_rq_unprep_clone(struct request *rq);
-blk_status_t blk_insert_cloned_request(struct request_queue *q,
-               struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
 
 struct rq_map_data {
        struct page **pages;