nvme: fix handling single range discard request
Author:     Ming Lei <ming.lei@redhat.com>
AuthorDate: Fri, 3 Mar 2023 23:13:45 +0000 (07:13 +0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 22 Mar 2023 12:33:47 +0000 (13:33 +0100)
[ Upstream commit e3330553770d67f16059c5a8064ba80bf482293b ]

While investigating a customer report of a warning in nvme_setup_discard,
we observed that the controller (nvme/tcp) actually exposes
queue_max_discard_segments(req->q) == 1.

The current code cannot handle this situation, since discard bios are
merged by contiguity just like normal read/write requests: a single-segment
request can still carry several bios, so the per-bio loop builds more
ranges than the queue advertises and trips the warning.

Fix the issue by building the range directly from the request's
sector/nr_sectors.
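
For illustration, a minimal userspace sketch of the arithmetic the new
single-range path performs; "sector", "nr_sectors" and "lba_shift" stand in
for blk_rq_pos(req), blk_rq_sectors(req) and ns->lba_shift, and the values
used (4KiB LBA format, lba_shift = 12) are assumed example inputs, not taken
from the report:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: mirrors the single-range computation in the patch.
 * "sector" and "nr_sectors" stand in for blk_rq_pos(req) and
 * blk_rq_sectors(req), both in 512-byte units; "lba_shift" is the
 * namespace block size shift (12 is an assumed 4KiB-format example).
 */
int main(void)
{
	unsigned int lba_shift = 12;	/* assumed: 4KiB LBA format */
	uint64_t sector = 2048;		/* request start, 512-byte sectors */
	uint32_t nr_sectors = 8192;	/* request length, 512-byte sectors */

	/* nvme_sect_to_lba(): 512-byte sectors -> namespace LBAs */
	uint64_t slba = sector >> (lba_shift - 9);
	uint32_t nlb = nr_sectors >> (lba_shift - 9);

	printf("slba=%llu nlb=%u\n", (unsigned long long)slba, nlb);
	return 0;
}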

Fixes: 8673d5f203c4 ("nvme: support ranged discard requests")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2031fd960549c2a2a86932484c1681cb155cae97..a95e48b51da6682a96dd4423a5f63132bdb4275c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -779,16 +779,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                range = page_address(ns->ctrl->discard_page);
        }
 
-       __rq_for_each_bio(bio, req) {
-               u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-               u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
-               if (n < segments) {
-                       range[n].cattr = cpu_to_le32(0);
-                       range[n].nlb = cpu_to_le32(nlb);
-                       range[n].slba = cpu_to_le64(slba);
+       if (queue_max_discard_segments(req->q) == 1) {
+               u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+               u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+               range[0].cattr = cpu_to_le32(0);
+               range[0].nlb = cpu_to_le32(nlb);
+               range[0].slba = cpu_to_le64(slba);
+               n = 1;
+       } else {
+               __rq_for_each_bio(bio, req) {
+                       u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+                       u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+                       if (n < segments) {
+                               range[n].cattr = cpu_to_le32(0);
+                               range[n].nlb = cpu_to_le32(nlb);
+                               range[n].slba = cpu_to_le64(slba);
+                       }
+                       n++;
                }
-               n++;
        }
 
        if (WARN_ON_ONCE(n != segments)) {
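
Note on the unit conversion between the two branches: bio->bi_iter.bi_size
is a byte count, while blk_rq_sectors() returns 512-byte sectors, so both
shifts yield the same number of LBAs:

    nr_bytes >> lba_shift == (nr_sectors << 9) >> lba_shift
                          == nr_sectors >> (lba_shift - 9)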