blk-mq-sched: fix crash in switch error path
author     Omar Sandoval <osandov@fb.com>
           Fri, 7 Apr 2017 14:52:27 +0000 (08:52 -0600)
committer  Jens Axboe <axboe@fb.com>
           Fri, 7 Apr 2017 14:56:48 +0000 (08:56 -0600)
In elevator_switch(), if blk_mq_init_sched() fails, we attempt to fall
back to the original scheduler. However, at this point, we've already
torn down the original scheduler's tags, so this causes a crash. Doing
the fallback like the legacy elevator path is much harder for mq, so fix
it by just falling back to none, instead.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
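
For readers tracing the error path, here is a minimal, self-contained user-space
sketch of the situation the message above describes. None of this is kernel code:
struct toy_sched, toy_sched_init(), toy_sched_exit() and toy_switch() are invented
stand-ins for the elevator and tag state that blk_mq_init_sched() and the switch
path manage. It only illustrates why, once the old scheduler's tags have been torn
down, retrying the old scheduler would touch freed state, and why falling back to
"none" is the safe choice.

/*
 * Toy user-space model (NOT kernel code) of the error path described above.
 * "tags" stands in for the per-hctx scheduler tags that the old code had
 * already torn down before trying to restore the previous scheduler; the
 * fixed behaviour is to fall back to "none" instead.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_sched {
        const char *name;
        int *tags;                      /* stand-in for scheduler tags */
};

static int toy_sched_init(struct toy_sched *s, const char *name, int fail)
{
        if (fail)
                return -1;              /* models blk_mq_init_sched() failing */
        s->tags = malloc(sizeof(*s->tags));
        if (!s->tags)
                return -1;
        s->name = name;
        return 0;
}

static void toy_sched_exit(struct toy_sched *s)
{
        free(s->tags);
        s->tags = NULL;
        s->name = "none";               /* leave a consistent "none" state */
}

/*
 * Fixed switch: tear down the old scheduler first, and if the new one fails
 * to initialize, simply stay on "none" rather than touching freed state.
 */
static int toy_switch(struct toy_sched *s, const char *new_name, int fail)
{
        toy_sched_exit(s);              /* old tags are gone from here on */
        return toy_sched_init(s, new_name, fail);
}

int main(void)
{
        struct toy_sched s = { .name = "none", .tags = NULL };

        toy_sched_init(&s, "mq-deadline", 0);
        if (toy_switch(&s, "some-new-sched", /* force failure */ 1))
                printf("switch failed, falling back to: %s\n", s.name);

        toy_sched_exit(&s);
        return 0;
}

In the patch itself the same idea appears as elevator_switch_mq() below: when
blk_mq_init_sched() fails, its error path now tears down the scheduler tags and
leaves q->elevator NULL ("none") instead of re-initializing the old scheduler.
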
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/blk-sysfs.c
block/elevator.c
include/linux/elevator.h

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0bb13bb51daaeabbe42a3687b2d96cfb1784f5c1..e8c2ed654ef075d853e5338922637a4fa5257cc5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -451,7 +451,7 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
        return ret;
 }
 
-void blk_mq_sched_teardown(struct request_queue *q)
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
 {
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_hw_ctx *hctx;
@@ -513,10 +513,19 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        return 0;
 
 err:
-       blk_mq_sched_teardown(q);
+       blk_mq_sched_tags_teardown(q);
+       q->elevator = NULL;
        return ret;
 }
 
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
+{
+       if (e->type->ops.mq.exit_sched)
+               e->type->ops.mq.exit_sched(e);
+       blk_mq_sched_tags_teardown(q);
+       q->elevator = NULL;
+}
+
 int blk_mq_sched_init(struct request_queue *q)
 {
        int ret;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 19db25e0c95a0e9c884d9806c1faee0fc90cfc8f..e704956e0862848a4da26fde9dc9ddcaa66d4759 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -33,7 +33,7 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                        struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
-void blk_mq_sched_teardown(struct request_queue *q);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 
 int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                           unsigned int hctx_idx);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 72e744cd638caab0c0ef3ff43d1d7f7fc310c6f8..cfb7c97b14ec6bc8846308f80110a596e503d090 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2240,8 +2240,6 @@ void blk_mq_release(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       blk_mq_sched_teardown(q);
-
        /* hctx kobj stays in hctx */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3ebbcc662f0f70b7605f5019c60b7..37f0b3ad635ea63c2b176fff051646d600dd453d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        if (q->elevator) {
                ioc_clear_queue(q);
-               elevator_exit(q->elevator);
+               elevator_exit(q, q->elevator);
        }
 
        blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index f236ef1d2be9922dc17450b69a4328dc82c12cbd..dbeecf7be719eaada4457ed3c7ac799fd0bbacec 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -252,11 +252,11 @@ int elevator_init(struct request_queue *q, char *name)
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
-               e->type->ops.mq.exit_sched(e);
+               blk_mq_exit_sched(q, e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);
@@ -941,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
+static int elevator_switch_mq(struct request_queue *q,
+                             struct elevator_type *new_e)
+{
+       int ret;
+
+       blk_mq_freeze_queue(q);
+       blk_mq_quiesce_queue(q);
+
+       if (q->elevator) {
+               if (q->elevator->registered)
+                       elv_unregister_queue(q);
+               ioc_clear_queue(q);
+               elevator_exit(q, q->elevator);
+       }
+
+       ret = blk_mq_init_sched(q, new_e);
+       if (ret)
+               goto out;
+
+       if (new_e) {
+               ret = elv_register_queue(q);
+               if (ret) {
+                       elevator_exit(q, q->elevator);
+                       goto out;
+               }
+       }
+
+       if (new_e)
+               blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+       else
+               blk_add_trace_msg(q, "elv switch: none");
+
+out:
+       blk_mq_unfreeze_queue(q);
+       blk_mq_start_stopped_hw_queues(q, true);
+       return ret;
+
+}
+
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we
@@ -953,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        bool old_registered = false;
        int err;
 
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               blk_mq_quiesce_queue(q);
-       }
+       if (q->mq_ops)
+               return elevator_switch_mq(q, new_e);
 
        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -968,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        if (old) {
                old_registered = old->registered;
 
-               if (old->uses_mq)
-                       blk_mq_sched_teardown(q);
-
-               if (!q->mq_ops)
-                       blk_queue_bypass_start(q);
+               blk_queue_bypass_start(q);
 
                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
@@ -982,53 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        }
 
        /* allocate, init and register new elevator */
-       if (q->mq_ops)
-               err = blk_mq_init_sched(q, new_e);
-       else
-               err = new_e->ops.sq.elevator_init_fn(q, new_e);
+       err = new_e->ops.sq.elevator_init_fn(q, new_e);
        if (err)
                goto fail_init;
 
-       if (new_e) {
-               err = elv_register_queue(q);
-               if (err)
-                       goto fail_register;
-       }
+       err = elv_register_queue(q);
+       if (err)
+               goto fail_register;
 
        /* done, kill the old one and finish */
        if (old) {
-               elevator_exit(old);
-               if (!q->mq_ops)
-                       blk_queue_bypass_end(q);
+               elevator_exit(q, old);
+               blk_queue_bypass_end(q);
        }
 
-       if (q->mq_ops) {
-               blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
-       }
-
-       if (new_e)
-               blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-       else
-               blk_add_trace_msg(q, "elv switch: none");
+       blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
        return 0;
 
 fail_register:
-       if (q->mq_ops)
-               blk_mq_sched_teardown(q);
-       elevator_exit(q->elevator);
+       elevator_exit(q, q->elevator);
 fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
-               if (!q->mq_ops)
-                       blk_queue_bypass_end(q);
-       }
-       if (q->mq_ops) {
-               blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
+               blk_queue_bypass_end(q);
        }
 
        return err;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088f45c189162e560095363ac0e6d2a..22d39e8d4de16be2b69762cf24c92799dfb445d6 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,