git.baikalelectronics.ru Git - kernel.git/commitdiff
block: don't use blocking queue entered for recursive bio submits
author Jens Axboe <axboe@kernel.dk>
Sat, 2 Jun 2018 20:04:07 +0000 (14:04 -0600)
committer Jens Axboe <axboe@kernel.dk>
Sun, 3 Jun 2018 02:35:00 +0000 (20:35 -0600)
If we end up splitting a bio and the queue goes away between
the initial submission and the later split submission, then we
can block forever in blk_queue_enter() waiting for the reference
to drop to zero. This will never happen, since we already hold
a reference.

Mark a split bio as already having entered the queue, so we can
just use the live non-blocking queue enter variant.

Thanks to Tetsuo Handa for the analysis.

Reported-by: syzbot+c4f9cebf9d651f6e54de@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-merge.c
include/linux/blk_types.h

index cd573a33a6f361185ecb1747d1655835a0b2e72e..3f56be15f17e9409f524883b87ec34f93e184ad6 100644 (file)
@@ -2377,7 +2377,9 @@ blk_qc_t generic_make_request(struct bio *bio)
 
        if (bio->bi_opf & REQ_NOWAIT)
                flags = BLK_MQ_REQ_NOWAIT;
-       if (blk_queue_enter(q, flags) < 0) {
+       if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+               blk_queue_enter_live(q);
+       else if (blk_queue_enter(q, flags) < 0) {
                if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
                        bio_wouldblock_error(bio);
                else
index d70ab08820e5eb594aeb3ec0ca809c586bf5721c..aaec38cc37b86489cdfed9ff8f4202f72516ede0 100644 (file)
@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;
 
+               /*
+                * Since we're recursing into make_request here, ensure
+                * that we mark this bio as already having entered the queue.
+                * If not, and the queue is going away, we can get stuck
+                * forever on waiting for the queue reference to drop. But
+                * that will never happen, as we're already holding a
+                * reference to it.
+                */
+               bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+
                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
index 4cb970cdcd11e55789fb29e5274432ee98521804..3c4f390aea4bc27029f8bae42ae5e4bfede1bd46 100644 (file)
@@ -229,6 +229,8 @@ struct bio {
                                 * throttling rules. Don't do it again. */
 #define BIO_TRACE_COMPLETION 10        /* bio_endio() should trace the final completion
                                 * of this bio. */
+#define BIO_QUEUE_ENTERED 11   /* can use blk_queue_enter_live() */
+
 /* See BVEC_POOL_OFFSET below before adding new flags */
 
 /*