--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ ... @@
  */
 static void bfq_prepare_request(struct request *rq)
 {
+	blk_mq_sched_assign_ioc(rq);
+
 	/*
 	 * Regardless of whether we have an icq attached, we have to
 	 * clear the scheduler pointers, as they might point to
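For orientation, this is roughly what bfq_prepare_request() ends up looking like once the hunk above is applied. The pointer-clearing statement is the pre-existing body of the function, reconstructed here from the surrounding context rather than taken from this patch:

	static void bfq_prepare_request(struct request *rq)
	{
		/* Look up (or create) the io_context/icq before bfq touches rq. */
		blk_mq_sched_assign_ioc(rq);

		/*
		 * Regardless of whether we have an icq attached, we have to
		 * clear the scheduler pointers, as they might point to
		 * previously allocated bic/bfqq structs.
		 */
		rq->elv.priv[0] = rq->elv.priv[1] = NULL;
	}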
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ ... @@
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio))
 		return false;
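The hunk above takes io_context allocation out of the bio submission checks (submit_bio_checks() in this era of blk-core.c, judging by the break/blk_throtl_bio() context). Previously every submitted bio paid for the unlikely() branch and, on a task's first I/O, a GFP_ATOMIC allocation, even on queues whose scheduler never looks at ->io_context. The idiom itself is unchanged and simply moves into the scheduler path below; a minimal sketch of it, using a hypothetical helper name:

	/*
	 * Lazily create current's io_context. The allocation may fail; all
	 * consumers already tolerate ->io_context being NULL, so the error
	 * is deliberately ignored.
	 */
	static void ensure_task_io_context(struct request_queue *q)
	{
		if (unlikely(!current->io_context))
			create_task_io_context(current, GFP_ATOMIC, q->node);
	}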
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ ... @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	struct io_context *ioc;
 	struct io_cq *icq;
 
+	/* create task io_context, if we don't have one already */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
 	/*
 	 * May not have an IO context if it's a passthrough request
 	 */
@@ ... @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
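Taken together, the two blk-mq-sched.c hunks make blk_mq_sched_assign_ioc() self-contained (it now creates the io_context it needs) and export it so that bfq, which can be built as a module, can call it directly. The middle of the function falls between the hunks; a sketch of the whole function after the patch, with that elided part reconstructed from the kernel sources of this period:

	void blk_mq_sched_assign_ioc(struct request *rq)
	{
		struct request_queue *q = rq->q;
		struct io_context *ioc;
		struct io_cq *icq;

		/* create task io_context, if we don't have one already */
		if (unlikely(!current->io_context))
			create_task_io_context(current, GFP_ATOMIC, q->node);

		/*
		 * May not have an IO context if it's a passthrough request
		 */
		ioc = current->io_context;
		if (!ioc)
			return;

		/* Look up the per-(task, queue) icq under the queue lock. */
		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(&q->queue_lock);

		/* No icq for this queue yet: create one; failure is tolerated. */
		if (!icq) {
			icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
			if (!icq)
				return;
		}

		/* Hold a reference on the io_context for the request's lifetime. */
		get_io_context(icq->ioc);
		rq->elv.icq = icq;
	}
	EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);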
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ ... @@
 	if (!op_is_flush(data->cmd_flags) &&
 	    e->type->ops.prepare_request) {
-		if (e->type->icq_cache)
-			blk_mq_sched_assign_ioc(rq);
-
 		e->type->ops.prepare_request(rq);
 		rq->rq_flags |= RQF_ELVPRIV;
 	}
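With bfq doing the assignment itself, the generic request setup (blk_mq_rq_ctx_init() in blk-mq.c, where this hunk applies) no longer special-cases schedulers that declare an icq_cache; bfq is the only elevator that sets one, so the check was pure overhead for every other scheduler. The elevator branch reduces to:

	if (!op_is_flush(data->cmd_flags) &&
	    e->type->ops.prepare_request) {
		/* The scheduler, not blk-mq, now sets up rq->elv.icq. */
		e->type->ops.prepare_request(rq);
		rq->rq_flags |= RQF_ELVPRIV;
	}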