]> git.baikalelectronics.ru Git - kernel.git/commitdiff
block: decouple REQ_OP_SECURE_ERASE from REQ_OP_DISCARD
authorChristoph Hellwig <hch@lst.de>
Fri, 15 Apr 2022 04:52:57 +0000 (06:52 +0200)
committerJens Axboe <axboe@kernel.dk>
Mon, 18 Apr 2022 01:49:59 +0000 (19:49 -0600)
Secure erase is a very different operation from discard in that it is
a data integrity operation rather than a hint.  Fully split the limits and helper
infrastructure to make the separation more clear.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs2]
Acked-by: Jaegeuk Kim <jaegeuk@kernel.org> [f2fs]
Acked-by: Coly Li <colyli@suse.de> [bcache]
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Acked-by: Chao Yu <chao@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-27-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
33 files changed:
block/blk-core.c
block/blk-lib.c
block/blk-mq-debugfs.c
block/blk-settings.c
block/fops.c
block/ioctl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/rnbd/rnbd-srv-dev.h
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/md/bcache/alloc.c
drivers/md/dm-table.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/mmc/core/queue.c
drivers/nvme/target/io-cmd-bdev.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
fs/btrfs/extent-tree.c
fs/ext4/mballoc.c
fs/f2fs/file.c
fs/f2fs/segment.c
fs/jbd2/journal.c
fs/nilfs2/sufile.c
fs/nilfs2/the_nilfs.c
fs/ntfs3/super.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_log_cil.c
include/linux/blkdev.h
mm/swapfile.c

index b5c3a8049134c82fbaaa917b4b86d14ff2fcf322..ee18b6a699bdfaf661c09e43d756ca1bf063bcdf 100644 (file)
@@ -824,7 +824,7 @@ void submit_bio_noacct(struct bio *bio)
                        goto not_supported;
                break;
        case REQ_OP_SECURE_ERASE:
-               if (!blk_queue_secure_erase(q))
+               if (!bdev_max_secure_erase_sectors(bdev))
                        goto not_supported;
                break;
        case REQ_OP_ZONE_APPEND:
index 43aa4d7fe859f15e025477f6c1173da5eea41340..09b7e1200c0f40fb0d539ba889db9b8aa553b668 100644 (file)
@@ -36,26 +36,15 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 }
 
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, int flags,
-               struct bio **biop)
+               sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
 {
-       struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
-       unsigned int op;
        sector_t bs_mask;
 
        if (bdev_read_only(bdev))
                return -EPERM;
-
-       if (flags & BLKDEV_DISCARD_SECURE) {
-               if (!blk_queue_secure_erase(q))
-                       return -EOPNOTSUPP;
-               op = REQ_OP_SECURE_ERASE;
-       } else {
-               if (!bdev_max_discard_sectors(bdev))
-                       return -EOPNOTSUPP;
-               op = REQ_OP_DISCARD;
-       }
+       if (!bdev_max_discard_sectors(bdev))
+               return -EOPNOTSUPP;
 
        /* In case the discard granularity isn't set by buggy device driver */
        if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
@@ -77,7 +66,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t req_sects =
                        min(nr_sects, bio_discard_limit(bdev, sector));
 
-               bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
+               bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = req_sects << 9;
                sector += req_sects;
@@ -103,21 +92,19 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
  * @sector:    start sector
  * @nr_sects:  number of sectors to discard
  * @gfp_mask:  memory allocation flags (for bio_alloc)
- * @flags:     BLKDEV_DISCARD_* flags to control behaviour
  *
  * Description:
  *    Issue a discard request for the sectors in question.
  */
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+               sector_t nr_sects, gfp_t gfp_mask)
 {
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;
 
        blk_start_plug(&plug);
-       ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
-                       &bio);
+       ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
@@ -314,3 +301,42 @@ retry:
        return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
+
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp)
+{
+       sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+       unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
+       struct bio *bio = NULL;
+       struct blk_plug plug;
+       int ret = 0;
+
+       if (max_sectors == 0)
+               return -EOPNOTSUPP;
+       if ((sector | nr_sects) & bs_mask)      /* must be logical-block aligned */
+               return -EINVAL;
+       if (bdev_read_only(bdev))
+               return -EPERM;
+
+       blk_start_plug(&plug);
+       for (;;) {
+               unsigned int len = min_t(sector_t, nr_sects, max_sectors);
+
+               bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
+               bio->bi_iter.bi_sector = sector;
+               bio->bi_iter.bi_size = len << SECTOR_SHIFT;     /* bi_size is bytes */
+
+               sector += len;          /* len is in sectors, not bytes */
+               nr_sects -= len;
+               if (!nr_sects) {
+                       ret = submit_bio_wait(bio);
+                       bio_put(bio);
+                       break;
+               }
+               cond_resched();
+       }
+       blk_finish_plug(&plug);
+
+       return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_secure_erase);
index fd111c5001256e4720affac4ecbcdcf23e87b59a..7e4136a60e1cc69d1fddf9fb6f9b8342c5d329f7 100644 (file)
@@ -115,7 +115,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
-       QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
index fd83d674afd0aa05be3c27a57b4842f943e8888a..6ccceb421ed2f7503b96c5e9cf58633400510d3f 100644 (file)
@@ -46,6 +46,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
+       lim->max_secure_erase_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
@@ -176,6 +177,18 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
+/**
+ * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
+ * @q:  the request queue for the device
+ * @max_sectors: maximum number of sectors to secure_erase
+ **/
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+               unsigned int max_sectors)
+{
+       q->limits.max_secure_erase_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
+
 /**
  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
  *                                      write zeroes
@@ -661,7 +674,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
-
+       t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
+                                                  b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
index ba5e7d5ff9a52e03e4bae3f838da82f55e41581c..e3643362c24407a8d18dfc9f1d8fbf98c4f3d0cb 100644 (file)
@@ -677,7 +677,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
-                                            len >> SECTOR_SHIFT, GFP_KERNEL, 0);
+                                            len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
        default:
                error = -EOPNOTSUPP;
index eaee0efc0bea45c1b226c019df05dae55dd2745e..46949f1b0dba50b99ca5924a34b2b891ada2233d 100644 (file)
@@ -83,7 +83,7 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
 #endif
 
 static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
-               unsigned long arg, unsigned long flags)
+               unsigned long arg)
 {
        uint64_t range[2];
        uint64_t start, len;
@@ -114,15 +114,43 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
        err = truncate_bdev_range(bdev, mode, start, start + len - 1);
        if (err)
                goto fail;
-
-       err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
-                                  GFP_KERNEL, flags);
-
+       err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return err;
 }
 
+static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode,
+               void __user *argp)
+{
+       uint64_t start, len;
+       uint64_t range[2];
+       int err;
+
+       if (!(mode & FMODE_WRITE))      /* BLKSECDISCARD needs a writable fd */
+               return -EBADF;
+       if (!bdev_max_secure_erase_sectors(bdev))
+               return -EOPNOTSUPP;
+       if (copy_from_user(range, argp, sizeof(range)))
+               return -EFAULT;
+
+       start = range[0];
+       len = range[1];
+       if ((start & 511) || (len & 511))       /* require 512-byte alignment */
+               return -EINVAL;
+       if (len > bdev_nr_bytes(bdev) || start > bdev_nr_bytes(bdev) - len)
+               return -EINVAL;         /* overflow-safe bounds check */
+
+       filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+       err = truncate_bdev_range(bdev, mode, start, start + len - 1);
+       if (!err)
+               err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
+                                               GFP_KERNEL);
+       filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+       return err;
+}
+
+
 static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
                unsigned long arg)
 {
@@ -450,10 +478,9 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
        case BLKROSET:
                return blkdev_roset(bdev, mode, cmd, arg);
        case BLKDISCARD:
-               return blk_ioctl_discard(bdev, mode, arg, 0);
+               return blk_ioctl_discard(bdev, mode, arg);
        case BLKSECDISCARD:
-               return blk_ioctl_discard(bdev, mode, arg,
-                               BLKDEV_DISCARD_SECURE);
+               return blk_ioctl_secure_erase(bdev, mode, argp);
        case BLKZEROOUT:
                return blk_ioctl_zeroout(bdev, mode, arg);
        case BLKGETDISKSEQ:
index 275c53c7b629e25fc0885cd7570a71dea4724dbd..2957b0b68d60094d092a087bcea4949309e33e25 100644 (file)
@@ -1547,7 +1547,8 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
                start = tmp;
        }
        while (nr_sectors >= max_discard_sectors) {
-               err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0);
+               err |= blkdev_issue_discard(bdev, start, max_discard_sectors,
+                                           GFP_NOIO);
                nr_sectors -= max_discard_sectors;
                start += max_discard_sectors;
        }
@@ -1559,7 +1560,7 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
                nr = nr_sectors;
                nr -= (unsigned int)nr % granularity;
                if (nr) {
-                       err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
+                       err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO);
                        nr_sectors -= nr;
                        start += nr;
                }
index efa99a3884507b160ce2ee89ec3e190ffa4bafc0..d178be175ad995ee110908290d5a62adf52ff05d 100644 (file)
@@ -1365,8 +1365,8 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
        dev->queue->limits.discard_granularity  = dev->discard_granularity;
        dev->queue->limits.discard_alignment    = dev->discard_alignment;
        if (dev->secure_discard)
-               blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
-
+               blk_queue_max_secure_erase_sectors(dev->queue,
+                               dev->max_discard_sectors);
        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
        blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
        blk_queue_max_segments(dev->queue, dev->max_segments);
index 1f7e1c8fd4d9bdaa907427d4037042d0fce5fa78..d080a0de59225815d8968edb8dc1fa2278d9c75b 100644 (file)
@@ -44,7 +44,7 @@ static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
 
 static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
 {
-       return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
+       return bdev_max_secure_erase_sectors(dev->bdev);
 }
 
 static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
index de42458195bc1c8990faf8477168ea3b0d604e20..a97f2bf5b01b96febefd0c5637897fd744eb2918 100644 (file)
@@ -970,7 +970,6 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
        int status = BLKIF_RSP_OKAY;
        struct xen_blkif *blkif = ring->blkif;
        struct block_device *bdev = blkif->vbd.bdev;
-       unsigned long secure;
        struct phys_req preq;
 
        xen_blkif_get(blkif);
@@ -987,13 +986,15 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
        }
        ring->st_ds_req++;
 
-       secure = (blkif->vbd.discard_secure &&
-                (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
-                BLKDEV_DISCARD_SECURE : 0;
+       if (blkif->vbd.discard_secure &&
+           (req->u.discard.flag & BLKIF_DISCARD_SECURE))
+               err = blkdev_issue_secure_erase(bdev,
+                               req->u.discard.sector_number,
+                               req->u.discard.nr_sectors, GFP_KERNEL);
+       else
+               err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+                               req->u.discard.nr_sectors, GFP_KERNEL);
 
-       err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
-                                  req->u.discard.nr_sectors,
-                                  GFP_KERNEL, secure);
 fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug("discard op failed, not supported\n");
index 83cd08041e6b3aa4486fd21b5bf26f62ae994b99..b21bffc9c50bc0c1fcc72bcfedea7ad31ea0a526 100644 (file)
@@ -484,7 +484,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 {
        struct xen_vbd *vbd;
        struct block_device *bdev;
-       struct request_queue *q;
 
        vbd = &blkif->vbd;
        vbd->handle   = handle;
@@ -516,11 +515,9 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
        if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
                vbd->type |= VDISK_REMOVABLE;
 
-       q = bdev_get_queue(bdev);
        if (bdev_write_cache(bdev))
                vbd->flush_support = true;
-
-       if (q && blk_queue_secure_erase(q))
+       if (bdev_max_secure_erase_sectors(bdev))
                vbd->discard_secure = true;
 
        vbd->feature_gnt_persistent = feature_persistent;
index e13cb4d48f1ea236b8a04658096b29e3076d32fc..0f3f5238f7bce76066d310d29df3379ab0dded44 100644 (file)
@@ -949,7 +949,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
                                                 info->physical_sector_size;
                rq->limits.discard_alignment = info->discard_alignment;
                if (info->feature_secdiscard)
-                       blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
+                       blk_queue_max_secure_erase_sectors(rq,
+                                                          get_capacity(gd));
        }
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1606,7 +1607,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                blk_queue_max_discard_sectors(rq, 0);
-                               blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+                               blk_queue_max_secure_erase_sectors(rq, 0);
                        }
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
index 097577ae3c47177a6ec0706e106aa14e5a0b66e2..ce13c272c3872366eedd8a7d94894a81ea78d98c 100644 (file)
@@ -336,7 +336,7 @@ static int bch_allocator_thread(void *arg)
                                mutex_unlock(&ca->set->bucket_lock);
                                blkdev_issue_discard(ca->bdev,
                                        bucket_to_sector(ca->set, bucket),
-                                       ca->sb.bucket_size, GFP_KERNEL, 0);
+                                       ca->sb.bucket_size, GFP_KERNEL);
                                mutex_lock(&ca->set->bucket_lock);
                        }
 
index 0dff6907fd00db371e54c3cd2e99e814af43ce25..e7d42f6335a2af2c869b88e1a52cd48d04c886c8 100644 (file)
@@ -1920,9 +1920,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
                                           struct dm_dev *dev, sector_t start,
                                           sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return !blk_queue_secure_erase(q);
+       return !bdev_max_secure_erase_sectors(dev->bdev);
 }
 
 static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1975,8 +1973,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                q->limits.discard_misaligned = 0;
        }
 
-       if (dm_table_supports_secure_erase(t))
-               blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+       if (!dm_table_supports_secure_erase(t))
+               q->limits.max_secure_erase_sectors = 0;
 
        if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
index eded4bcc4545f850728b34409270c1aa63d1bc16..84c083f766736f37e6b514e273045a72e3fe8bd8 100644 (file)
@@ -398,8 +398,8 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
        sector_t s = block_to_sectors(tc->pool, data_b);
        sector_t len = block_to_sectors(tc->pool, data_e - data_b);
 
-       return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
-                                     GFP_NOWAIT, 0, &op->bio);
+       return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
+                                     &op->bio);
 }
 
 static void end_discard(struct discard_op *op, int r)
index 19636c2f2cda4cb118e97b7c1eb6c53df04e239d..2587f872c0884d6d5f2eb9b996110c27a698cd72 100644 (file)
@@ -8584,7 +8584,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 {
        struct bio *discard_bio = NULL;
 
-       if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
+       if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
                        &discard_bio) || !discard_bio)
                return;
 
index c3cbf9a574a3978429d39b38e6d8f083567fa0c7..094a4042589eb5cfdb53393a2cf2660d60196295 100644 (file)
@@ -1344,14 +1344,14 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
        if (log->last_checkpoint < end) {
                blkdev_issue_discard(bdev,
                                log->last_checkpoint + log->rdev->data_offset,
-                               end - log->last_checkpoint, GFP_NOIO, 0);
+                               end - log->last_checkpoint, GFP_NOIO);
        } else {
                blkdev_issue_discard(bdev,
                                log->last_checkpoint + log->rdev->data_offset,
                                log->device_size - log->last_checkpoint,
-                               GFP_NOIO, 0);
+                               GFP_NOIO);
                blkdev_issue_discard(bdev, log->rdev->data_offset, end,
-                               GFP_NOIO, 0);
+                               GFP_NOIO);
        }
 }
 
index cac6315010a3d56b3e7e9caace10c47ae6b245b5..a3d4460055716f2bc7db856b5632b85f891be67d 100644 (file)
@@ -189,7 +189,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = SECTOR_SIZE;
        if (mmc_can_secure_erase_trim(card))
-               blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+               blk_queue_max_secure_erase_sectors(q, max_discard);
 }
 
 static unsigned short mmc_get_max_segments(struct mmc_host *host)
index d886c2c59554f69fdc438aa4596e6a26c01584e9..27a72504d31ce1c81493963d85c1342e05c12c33 100644 (file)
@@ -360,7 +360,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
        ret = __blkdev_issue_discard(ns->bdev,
                        nvmet_lba_to_sect(ns, range->slba),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-                       GFP_KERNEL, 0, bio);
+                       GFP_KERNEL, bio);
        if (ret && ret != -EOPNOTSUPP) {
                req->error_slba = le64_to_cpu(range->slba);
                return errno_to_nvme_status(req, ret);
index b6ba582b06775537634436b1ae4229ce6ae0ddf8..e68f1cc8ef98bd56098bdad6680576a4e6167b5b 100644 (file)
@@ -558,7 +558,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
                ret = blkdev_issue_discard(bdev,
                                           target_to_linux_sector(dev, lba),
                                           target_to_linux_sector(dev,  nolb),
-                                          GFP_KERNEL, 0);
+                                          GFP_KERNEL);
                if (ret < 0) {
                        pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
                                ret);
index c4a903b8a47fc0a92ef2b98104ee9e171978bf8c..378c80313a0f27a4aef7ece5f6215bcb30c237e1 100644 (file)
@@ -434,7 +434,7 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev,  nolb),
-                                  GFP_KERNEL, 0);
+                                  GFP_KERNEL);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
index aba616ff953f82e9cd03f7b03f54ec0aa4877bdf..6260784e74b5ae66b7ef6559ac67ba936376d2c6 100644 (file)
@@ -1239,7 +1239,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 
                if (size) {
                        ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
-                                                  GFP_NOFS, 0);
+                                                  GFP_NOFS);
                        if (!ret)
                                *discarded_bytes += size;
                        else if (ret != -EOPNOTSUPP)
@@ -1256,7 +1256,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 
        if (bytes_left) {
                ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
-                                          GFP_NOFS, 0);
+                                          GFP_NOFS);
                if (!ret)
                        *discarded_bytes += bytes_left;
        }
index 6d1820536d88d6f46e40291ed6446984891be987..ea653d19f9ec76a1996770bade735713b5989ecc 100644 (file)
@@ -3629,7 +3629,7 @@ static inline int ext4_issue_discard(struct super_block *sb,
                return __blkdev_issue_discard(sb->s_bdev,
                        (sector_t)discard_block << (sb->s_blocksize_bits - 9),
                        (sector_t)count << (sb->s_blocksize_bits - 9),
-                       GFP_NOFS, 0, biop);
+                       GFP_NOFS, biop);
        } else
                return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }
index 8053d99f3920be04b9a3ec5d779c4e6dc0a3dc7d..35b6c720c2bc155211ea024ee1c05a4175350e1d 100644 (file)
@@ -3685,18 +3685,18 @@ out:
 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
                pgoff_t off, block_t block, block_t len, u32 flags)
 {
-       struct request_queue *q = bdev_get_queue(bdev);
        sector_t sector = SECTOR_FROM_BLOCK(block);
        sector_t nr_sects = SECTOR_FROM_BLOCK(len);
        int ret = 0;
 
-       if (!q)
-               return -ENXIO;
-
-       if (flags & F2FS_TRIM_FILE_DISCARD)
-               ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
-                                               blk_queue_secure_erase(q) ?
-                                               BLKDEV_DISCARD_SECURE : 0);
+       if (flags & F2FS_TRIM_FILE_DISCARD) {
+               if (bdev_max_secure_erase_sectors(bdev))
+                       ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
+                                       GFP_NOFS);
+               else
+                       ret = blkdev_issue_discard(bdev, sector, nr_sects,
+                                       GFP_NOFS);
+       }
 
        if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
                if (IS_ENCRYPTED(inode))
index 71f09adbcba865544dabba788ca8cae7bb247703..e433c61e64b9352955a9395a3fb5cada488c196f 100644 (file)
@@ -1244,7 +1244,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
                err = __blkdev_issue_discard(bdev,
                                        SECTOR_FROM_BLOCK(start),
                                        SECTOR_FROM_BLOCK(len),
-                                       GFP_NOFS, 0, &bio);
+                                       GFP_NOFS, &bio);
 submit:
                if (err) {
                        spin_lock_irqsave(&dc->lock, flags);
index 19d226cd4ff4de879fe91e829dc3d7595c7cfee0..c0cbeeaec2d1aa33d0ff97217ae02e2205c248ba 100644 (file)
@@ -1825,7 +1825,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
                        err = blkdev_issue_discard(journal->j_dev,
                                        byte_start >> SECTOR_SHIFT,
                                        byte_count >> SECTOR_SHIFT,
-                                       GFP_NOFS, 0);
+                                       GFP_NOFS);
                } else if (flags & JBD2_JOURNAL_FLUSH_ZEROOUT) {
                        err = blkdev_issue_zeroout(journal->j_dev,
                                        byte_start >> SECTOR_SHIFT,
index e385cca2004a7bb9b8b38408989a1e07fd68e6e3..77ff8e95421fa86b3b3a0b0afa1b89936717db91 100644 (file)
@@ -1100,7 +1100,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                start * sects_per_block,
                                                nblocks * sects_per_block,
-                                               GFP_NOFS, 0);
+                                               GFP_NOFS);
                                if (ret < 0) {
                                        put_bh(su_bh);
                                        goto out_sem;
@@ -1134,7 +1134,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                        start * sects_per_block,
                                        nblocks * sects_per_block,
-                                       GFP_NOFS, 0);
+                                       GFP_NOFS);
                        if (!ret)
                                ndiscarded += nblocks;
                }
index dd48a8f74d577c76aa9af5844029405ac9b16684..3b4a079c9617c78438e84c5a91817b329b57b9d8 100644 (file)
@@ -672,7 +672,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                   start * sects_per_block,
                                                   nblocks * sects_per_block,
-                                                  GFP_NOFS, 0);
+                                                  GFP_NOFS);
                        if (ret < 0)
                                return ret;
                        nblocks = 0;
@@ -682,7 +682,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                           start * sects_per_block,
                                           nblocks * sects_per_block,
-                                          GFP_NOFS, 0);
+                                          GFP_NOFS);
        return ret;
 }
 
index 5f2e414cfa79bfe5eadcc8f2c9eb8580af9f6739..5781b9e8e3d85b54e44f682d2cc6b49b89f97669 100644 (file)
@@ -1333,7 +1333,7 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
                return 0;
 
        err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
-                                  GFP_NOFS, 0);
+                                  GFP_NOFS);
 
        if (err == -EOPNOTSUPP)
                sbi->flags |= NTFS_FLAGS_NODISCARD;
index e2ada115c23f9ae409949b114a02e88f11f98005..c6fe3f6ebb6b01be44ec4a4d73de9ee669348204 100644 (file)
@@ -114,7 +114,7 @@ xfs_trim_extents(
                }
 
                trace_xfs_discard_extent(mp, agno, fbno, flen);
-               error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0);
+               error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS);
                if (error)
                        goto out_del_cursor;
                *blocks_trimmed += flen;
index ba57323bfdcea38d0c02cc308c43260cd89d5e29..c9f55e4f095710219677be4b1478c160fa90fb57 100644 (file)
@@ -605,7 +605,7 @@ xlog_discard_busy_extents(
                error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                XFS_FSB_TO_BB(mp, busyp->length),
-                               GFP_NOFS, 0, &bio);
+                               GFP_NOFS, &bio);
                if (error && error != -EOPNOTSUPP) {
                        xfs_info(mp,
         "discard failed for extent [0x%llx,%u], error %d",
index f1cf557ea20eff65dd50406e2f1cd53a3422f97c..c9b5925af5a3b7c9e34fdf43aebd41a3fae30864 100644 (file)
@@ -248,6 +248,7 @@ struct queue_limits {
        unsigned int            io_opt;
        unsigned int            max_discard_sectors;
        unsigned int            max_hw_discard_sectors;
+       unsigned int            max_secure_erase_sectors;
        unsigned int            max_write_zeroes_sectors;
        unsigned int            max_zone_append_sectors;
        unsigned int            discard_granularity;
@@ -542,7 +543,6 @@ struct request_queue {
 #define QUEUE_FLAG_IO_STAT     7       /* do disk/partitions IO accounting */
 #define QUEUE_FLAG_NOXMERGES   9       /* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  10      /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE    11      /* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE  12      /* force complete on same CPU */
 #define QUEUE_FLAG_DEAD                13      /* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   14      /* queue is initialized */
@@ -583,8 +583,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_add_random(q)        test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)     \
        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
-       (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 #define blk_queue_pci_p2pdma(q)        \
        test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
@@ -947,6 +945,8 @@ extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_discard_segments(struct request_queue *,
                unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+               unsigned int max_sectors);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
@@ -1087,13 +1087,12 @@ static inline long nr_blockdev_pages(void)
 
 extern void blk_io_schedule(void);
 
-#define BLKDEV_DISCARD_SECURE  (1 << 0)        /* issue a secure erase */
-
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, int flags,
-               struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp);
 
 #define BLKDEV_ZERO_NOUNMAP    (1 << 0)  /* do not free blocks */
 #define BLKDEV_ZERO_NOFALLBACK (1 << 1)  /* don't write explicit zeroes */
@@ -1112,7 +1111,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
-                                   gfp_mask, flags);
+                                   gfp_mask);
 }
 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
@@ -1262,6 +1261,12 @@ static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
        return bdev_get_queue(bdev)->limits.discard_granularity;
 }
 
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
+{
+       return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
+}
+
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
index 5d9cedf9e7b8494a7a1d3b80ba824d442a127a64..a2b31fea0c42e2e623992d4546f543de3538be79 100644 (file)
@@ -179,7 +179,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL, 0);
+                               nr_blocks, GFP_KERNEL);
                if (err)
                        return err;
                cond_resched();
@@ -190,7 +190,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL, 0);
+                               nr_blocks, GFP_KERNEL);
                if (err)
                        break;
 
@@ -254,7 +254,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                start_block <<= PAGE_SHIFT - 9;
                nr_blocks <<= PAGE_SHIFT - 9;
                if (blkdev_issue_discard(si->bdev, start_block,
-                                       nr_blocks, GFP_NOIO, 0))
+                                       nr_blocks, GFP_NOIO))
                        break;
 
                se = next_se(se);