blk-mq: fix filesystem I/O request allocation
author		Ming Lei <ming.lei@redhat.com>
		Fri, 12 Nov 2021 12:47:15 +0000 (20:47 +0800)
committer	Jens Axboe <axboe@kernel.dk>
		Fri, 12 Nov 2021 16:31:13 +0000 (09:31 -0700)
submit_bio_checks() may update bio->bi_opf, so when allocating a new
request we have to initialize blk_mq_alloc_data.cmd_flags with
bio->bi_opf only after submit_bio_checks() has returned.
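A condensed sketch of the required ordering (illustrative only: in the
patch below the flow is split across blk_mq_get_request() and
blk_mq_get_new_requests(), and the queue_exit label is not part of it):

        if (unlikely(bio_queue_enter(bio)))
                return NULL;
        if (unlikely(!submit_bio_checks(bio)))  /* may rewrite bio->bi_opf */
                goto queue_exit;

        data.cmd_flags = bio->bi_opf;   /* snapshot taken only after the checks */
        rq = __blk_mq_alloc_requests(&data);
        if (rq)
                return rq;
queue_exit:
        blk_queue_exit(q);
        return NULL;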

When using a cached request, fall back to allocating a new request if
the cached rq isn't compatible with the incoming bio; otherwise update
rq->cmd_flags with the incoming bio->bi_opf.
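A condensed sketch of the cached-request path (mirroring
blk_mq_can_use_cached_rq() in the diff below, with the bio merge and
rq_qos steps elided):

        rq = rq_list_peek(&plug->cached_rq);
        if (rq && rq->q == q) {
                /*
                 * The cached rq was set up for an earlier bio, so it may sit
                 * on the wrong hctx type or differ in flush handling from
                 * what the incoming bio needs.
                 */
                if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type ||
                    op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
                        goto fallback;          /* allocate a fresh request */

                rq->cmd_flags = bio->bi_opf;    /* adopt the incoming flags */
                plug->cached_rq = rq_list_next(rq);
        }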

Fixes: 5e94f02278ff8b3f ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
block/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f511db395c7fc33c72c005c106fce2a140f6d036..3ab34c4f20daf700e2f54fe0e6b4f79d3ef39cf6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2521,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        };
        struct request *rq;
 
-       if (unlikely(bio_queue_enter(bio)))
-               return NULL;
-       if (unlikely(!submit_bio_checks(bio)))
-               goto put_exit;
        if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
-               goto put_exit;
+               return NULL;
 
        rq_qos_throttle(q, bio);
 
@@ -2543,19 +2539,32 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
-put_exit:
-       blk_queue_exit(q);
+
        return NULL;
 }
 
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+               struct bio *bio)
+{
+       if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+               return false;
+
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+               return false;
+
+       return true;
+}
+
 static inline struct request *blk_mq_get_request(struct request_queue *q,
                                                 struct blk_plug *plug,
                                                 struct bio *bio,
                                                 unsigned int nsegs,
                                                 bool *same_queue_rq)
 {
+       struct request *rq;
+       bool checked = false;
+
        if (plug) {
-               struct request *rq;
 
                rq = rq_list_peek(&plug->cached_rq);
                if (rq && rq->q == q) {
@@ -2564,6 +2573,10 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
                        if (blk_mq_attempt_bio_merge(q, bio, nsegs,
                                                same_queue_rq))
                                return NULL;
+                       checked = true;
+                       if (!blk_mq_can_use_cached_rq(rq, bio))
+                               goto fallback;
+                       rq->cmd_flags = bio->bi_opf;
                        plug->cached_rq = rq_list_next(rq);
                        INIT_LIST_HEAD(&rq->queuelist);
                        rq_qos_throttle(q, bio);
@@ -2571,7 +2584,15 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
                }
        }
 
-       return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
+       if (unlikely(bio_queue_enter(bio)))
+               return NULL;
+       if (!checked && !submit_bio_checks(bio))
+               return NULL;
+       rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+       if (!rq)
+               blk_queue_exit(q);
+       return rq;
 }
 
 /**
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 39370bbdf3b6ce200ba7ff4020c3abf7f78bb6a6..8acfa650f575156ce27f7b79860df4c75a158e3d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
        return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-                                                    unsigned int flags,
-                                                    struct blk_mq_ctx *ctx)
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
 {
        enum hctx_type type = HCTX_TYPE_DEFAULT;
 
@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                type = HCTX_TYPE_POLL;
        else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
-       
-       return ctx->hctxs[type];
+       return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+                                                    unsigned int flags,
+                                                    struct blk_mq_ctx *ctx)
+{
+       return ctx->hctxs[blk_mq_get_hctx_type(flags)];
 }
 
 /*