block: refactor discard bio size limiting
author Christoph Hellwig <hch@lst.de>
Fri, 15 Apr 2022 04:52:53 +0000 (06:52 +0200)
committer Jens Axboe <axboe@kernel.dk>
Mon, 18 Apr 2022 01:49:59 +0000 (19:49 -0600)
Move all the logic to limit the discard bio size into a common helper
so that it is better documented.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Coly Li <colyli@suse.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-23-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-lib.c
block/blk.h
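
A rough, userspace-only sketch of the arithmetic that the new bio_discard_limit()
helper (added below in block/blk-lib.c) centralizes. The discard_limit() name, the
1 MiB granularity and the example sectors are made up for illustration and are not
part of this commit: an unaligned start sector is rounded up to the next granularity
boundary, while an aligned start is capped at the largest granularity-aligned size
that still fits in UINT_MAX.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* power-of-two round up/down, mirroring the kernel's round_up()/round_down() */
#define round_up(x, y)   ((((x) - 1) | ((uint64_t)(y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

/* hypothetical stand-in for bio_discard_limit(); granularity is in bytes */
static uint64_t discard_limit(uint64_t sector, unsigned int granularity)
{
	uint64_t aligned = round_up(sector, granularity >> SECTOR_SHIFT);

	/* unaligned start: only reach up to the next granularity boundary */
	if (aligned != sector)
		return aligned - sector;

	/* aligned start: largest granularity-aligned size below UINT_MAX */
	return round_down(UINT_MAX, granularity) >> SECTOR_SHIFT;
}

int main(void)
{
	/* 1 MiB granularity = 2048 sectors of 512 bytes */
	printf("%llu\n", (unsigned long long)discard_limit(1000, 1 << 20)); /* 1048 */
	printf("%llu\n", (unsigned long long)discard_limit(2048, 1 << 20)); /* 8386560 */
	return 0;
}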

index 237d60d8b585799916fcab8415e020796ed01cce..2ae32a722851c44adb2a5dd2f826bb85d7c8420f 100644 (file)
 
 #include "blk.h"
 
+static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
+{
+       unsigned int discard_granularity =
+               bdev_get_queue(bdev)->limits.discard_granularity;
+       sector_t granularity_aligned_sector;
+
+       if (bdev_is_partition(bdev))
+               sector += bdev->bd_start_sect;
+
+       granularity_aligned_sector =
+               round_up(sector, discard_granularity >> SECTOR_SHIFT);
+
+       /*
+        * Make sure subsequent bios start aligned to the discard granularity if
+        * it needs to be split.
+        */
+       if (granularity_aligned_sector != sector)
+               return granularity_aligned_sector - sector;
+
+       /*
+        * Align the bio size to the discard granularity to make splitting the bio
+        * at discard granularity boundaries easier in the driver if needed.
+        */
+       return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
+}
+
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
@@ -17,7 +43,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
-       sector_t bs_mask, part_offset = 0;
+       sector_t bs_mask;
 
        if (bdev_read_only(bdev))
                return -EPERM;
@@ -48,36 +74,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if (!nr_sects)
                return -EINVAL;
 
-       /* In case the discard request is in a partition */
-       if (bdev_is_partition(bdev))
-               part_offset = bdev->bd_start_sect;
-
        while (nr_sects) {
-               sector_t granularity_aligned_lba, req_sects;
-               sector_t sector_mapped = sector + part_offset;
-
-               granularity_aligned_lba = round_up(sector_mapped,
-                               q->limits.discard_granularity >> SECTOR_SHIFT);
-
-               /*
-                * Check whether the discard bio starts at a discard_granularity
-                * aligned LBA,
-                * - If no: set (granularity_aligned_lba - sector_mapped) to
-                *   bi_size of the first split bio, then the second bio will
-                *   start at a discard_granularity aligned LBA on the device.
-                * - If yes: use bio_aligned_discard_max_sectors() as the max
-                *   possible bi_size of the first split bio. Then when this bio
-                *   is split in device drive, the split ones are very probably
-                *   to be aligned to discard_granularity of the device's queue.
-                */
-               if (granularity_aligned_lba == sector_mapped)
-                       req_sects = min_t(sector_t, nr_sects,
-                                         bio_aligned_discard_max_sectors(q));
-               else
-                       req_sects = min_t(sector_t, nr_sects,
-                                         granularity_aligned_lba - sector_mapped);
-
-               WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
+               sector_t req_sects =
+                       min(nr_sects, bio_discard_limit(bdev, sector));
 
                bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
                bio->bi_iter.bi_sector = sector;
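
With the helper in place, the issue loop above reduces to taking
min(nr_sects, bio_discard_limit(bdev, sector)) for each bio. The toy sketch
below is not kernel code: the discard_limit() stand-in, the granularity of 8
sectors, the 32-sector aligned cap and the example range are all invented. It
only shows the shape of the rewritten loop, where a shorter first bio aligns
every following bio to the granularity.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for bio_discard_limit(), using a made-up
 * granularity of 8 sectors and a made-up 32-sector aligned cap */
static uint64_t discard_limit(uint64_t sector)
{
	const uint64_t gran = 8;
	uint64_t aligned = (sector + gran - 1) / gran * gran;

	if (aligned != sector)
		return aligned - sector;	/* reach the next boundary */
	return 32;				/* aligned: use the full cap */
}

int main(void)
{
	uint64_t sector = 5, nr_sects = 70;

	/* same shape as the rewritten while (nr_sects) loop */
	while (nr_sects) {
		uint64_t limit = discard_limit(sector);
		uint64_t req_sects = nr_sects < limit ? nr_sects : limit;

		printf("bio: sector %llu, %llu sectors\n",
		       (unsigned long long)sector,
		       (unsigned long long)req_sects);
		sector += req_sects;
		nr_sects -= req_sects;
	}
	return 0;
}

For the made-up range above this prints bios at sectors 5 (3 sectors), 8 (32),
40 (32) and 72 (3): only the first bio is unaligned, and every later one starts
on a granularity boundary.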
index 4ea5167dc3392016dadbc26d81b1f1c6c0dc7de0..434017701403fb699668ec4db2a6a467b2b6ae9f 100644 (file)
@@ -346,20 +346,6 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
        return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
 }
 
-/*
- * The max bio size which is aligned to q->limits.discard_granularity. This
- * is a hint to split large discard bio in generic block layer, then if device
- * driver needs to split the discard bio into smaller ones, their bi_size can
- * be very probably and easily aligned to discard_granularity of the device's
- * queue.
- */
-static inline unsigned int bio_aligned_discard_max_sectors(
-                                       struct request_queue *q)
-{
-       return round_down(UINT_MAX, q->limits.discard_granularity) >>
-                       SECTOR_SHIFT;
-}
-
 /*
  * Internal io_context interface
  */