block: move io_context creation into where it's needed
author     Jens Axboe <axboe@kernel.dk>
           Sat, 13 Nov 2021 18:18:32 +0000 (11:18 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 29 Nov 2021 13:38:44 +0000 (06:38 -0700)
The only user of the io_context for IO is BFQ, yet we put the checking
and logic for it into the normal IO path.

Put the creation into blk_mq_sched_assign_ioc(), and have BFQ use that
helper.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bfq-iosched.c
block/blk-core.c
block/blk-mq-sched.c
block/blk-mq.c

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fec18118dc309c98d64e730293cc1b2f342d4f0b..1ce1a99a7160f3b0411e863ebcecffed92f86732 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6573,6 +6573,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  */
 static void bfq_prepare_request(struct request *rq)
 {
+       blk_mq_sched_assign_ioc(rq);
+
        /*
         * Regardless of whether we have an icq attached, we have to
         * clear the scheduler pointers, as they might point to
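For reference, this is roughly how bfq_prepare_request() reads once the hunk
above is applied. The hunk only shows the top of the function; the remaining
lines are reconstructed from the surrounding bfq-iosched.c of this period and
are a sketch, not part of the diff:

static void bfq_prepare_request(struct request *rq)
{
	blk_mq_sched_assign_ioc(rq);

	/*
	 * Regardless of whether we have an icq attached, we have to
	 * clear the scheduler pointers, as they might point to
	 * previously allocated bic/bfqq structs.
	 */
	rq->elv.priv[0] = rq->elv.priv[1] = NULL;	/* reconstructed, below the hunk context */
}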
diff --git a/block/blk-core.c b/block/blk-core.c
index 35a04d8c180a2bc276b5b8d02fdc45027393e78c..2053d1b0e90ea22907ed0c0d7456135d9ca75343 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -750,15 +750,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
                break;
        }
 
-       /*
-        * Various block parts want %current->io_context, so allocate it up
-        * front rather than dealing with lots of pain to allocate it only
-        * where needed. This may fail and the block layer knows how to live
-        * with it.
-        */
-       if (unlikely(!current->io_context))
-               create_task_io_context(current, GFP_ATOMIC, q->node);
-
        if (blk_throtl_bio(bio))
                return false;
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ba21449439cc48ded269a2a40130a06089fb1a3d..b942b38000e534552bfe1d44e6763fa5451fbd55 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -24,6 +24,10 @@ void blk_mq_sched_assign_ioc(struct request *rq)
        struct io_context *ioc;
        struct io_cq *icq;
 
+       /* create task io_context, if we don't have one already */
+       if (unlikely(!current->io_context))
+               create_task_io_context(current, GFP_ATOMIC, q->node);
+
        /*
         * May not have an IO context if it's a passthrough request
         */
@@ -43,6 +47,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
        get_io_context(icq->ioc);
        rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
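Putting the two hunks above together, the helper now both creates the task
io_context and resolves the icq in one place. A sketch of the full function
after this change; the middle section (the ioc/icq lookup) is not shown in the
diff and is reconstructed from the blk-mq-sched.c of this period, so treat it
as an approximation rather than the literal file contents:

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/* create task io_context, if we don't have one already */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	/* reconstructed: find the icq for this queue, creating it if needed */
	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);

The new EXPORT_SYMBOL_GPL is what allows BFQ, which can be built as a module,
to call the helper from its prepare_request hook; the hot submit path no
longer touches current->io_context for schedulers that do not need it.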
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7cd408408a373d04e976348f70161127fd5d58f0..d6e7634e5e1f3aa35a4344d6d94fa78be6de4b84 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -406,9 +406,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
                if (!op_is_flush(data->cmd_flags) &&
                    e->type->ops.prepare_request) {
-                       if (e->type->icq_cache)
-                               blk_mq_sched_assign_ioc(rq);
-
                        e->type->ops.prepare_request(rq);
                        rq->rq_flags |= RQF_ELVPRIV;
                }