block: pass a block_device to bio_clone_fast
author     Christoph Hellwig <hch@lst.de>
           Wed, 2 Feb 2022 16:01:09 +0000 (17:01 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Fri, 4 Feb 2022 14:43:18 +0000 (07:43 -0700)
Pass a block_device to bio_clone_fast and __bio_clone_fast and give
the functions more suitable names.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Link: https://lore.kernel.org/r/20220202160109.108149-14-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
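
For callers of these helpers, the conversion applied throughout this patch
follows one pattern: the destination block_device moves from a separate
bio_set_dev() call into the clone helper itself.  A minimal sketch of that
pattern (the function names clone_old()/clone_new() and the variable names
are illustrative only; bio_clone_fast(), bio_alloc_clone(), bio_set_dev()
and GFP_NOIO are the interfaces actually touched by this patch):

    #include <linux/bio.h>

    /* Before: clone first, then redirect the clone to the target device. */
    static struct bio *clone_old(struct block_device *dst, struct bio *src,
                                 struct bio_set *bs)
    {
            struct bio *clone = bio_clone_fast(src, GFP_NOIO, bs);

            if (clone)
                    bio_set_dev(clone, dst);
            return clone;
    }

    /* After: the target device is an argument of the renamed helper. */
    static struct bio *clone_new(struct block_device *dst, struct bio *src,
                                 struct bio_set *bs)
    {
            return bio_alloc_clone(dst, src, GFP_NOIO, bs);
    }

Clones into an embedded bio follow the same shape: __bio_clone_fast(bio,
bio_src, gfp) becomes bio_init_clone(bdev, bio, bio_src, gfp), again
dropping the separate bio_set_dev() call (see e.g. the md-multipath hunk
below).
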
20 files changed:
Documentation/block/biodoc.rst
block/bio.c
block/blk-mq.c
block/bounce.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/pktcdvd.c
drivers/md/bcache/request.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md-faulty.c
drivers/md/md-multipath.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
fs/btrfs/extent_io.c
include/linux/bio.h

diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst
index 2098477851a4bf99e8c67fd0d610e4e455a9b925..4fbc367e62f95f67f6d0a00a127b32d4858e6866 100644 (file)
@@ -663,11 +663,6 @@ to i/o submission, if the bio fields are likely to be accessed after the
 i/o is issued (since the bio may otherwise get freed in case i/o completion
 happens in the meantime).
 
-The bio_clone_fast() routine may be used to duplicate a bio, where the clone
-shares the bio_vec_list with the original bio (i.e. both point to the
-same bio_vec_list). This would typically be used for splitting i/o requests
-in lvm or md.
-
 3.2 Generic bio helper Routines
 -------------------------------
 
diff --git a/block/bio.c b/block/bio.c
index 74f66e22ef630e88c32fca320ffb5509c3a56c7a..18d34b33351b84fd1db29df7a6dd274a73eb76c0 100644 (file)
@@ -733,7 +733,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
        bio_set_flag(bio, BIO_CLONED);
        if (bio_flagged(bio_src, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
-       if (bio_flagged(bio_src, BIO_REMAPPED))
+       if (bio->bi_bdev == bio_src->bi_bdev &&
+           bio_flagged(bio_src, BIO_REMAPPED))
                bio_set_flag(bio, BIO_REMAPPED);
        bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
@@ -751,7 +752,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 }
 
 /**
- * bio_clone_fast - clone a bio that shares the original bio's biovec
+ * bio_alloc_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
  * @bio_src: bio to clone from
  * @gfp: allocation priority
  * @bs: bio_set to allocate from
@@ -761,11 +763,12 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
  *
  * The caller must ensure that the return bio is not freed before @bio_src.
  */
-struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+               gfp_t gfp, struct bio_set *bs)
 {
        struct bio *bio;
 
-       bio = bio_alloc_bioset(bio_src->bi_bdev, 0, bio_src->bi_opf, gfp, bs);
+       bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
        if (!bio)
                return NULL;
 
@@ -777,10 +780,11 @@ struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs)
 
        return bio;
 }
-EXPORT_SYMBOL(bio_clone_fast);
+EXPORT_SYMBOL(bio_alloc_clone);
 
 /**
- * __bio_clone_fast - clone a bio that shares the original bio's biovec
+ * bio_init_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
  * @bio: bio to clone into
  * @bio_src: bio to clone from
  * @gfp: allocation priority
@@ -790,17 +794,18 @@ EXPORT_SYMBOL(bio_clone_fast);
  *
  * The caller must ensure that @bio_src is not freed before @bio.
  */
-int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp)
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+               struct bio *bio_src, gfp_t gfp)
 {
        int ret;
 
-       bio_init(bio, bio_src->bi_bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
+       bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
        ret = __bio_clone(bio, bio_src, gfp);
        if (ret)
                bio_uninit(bio);
        return ret;
 }
-EXPORT_SYMBOL(__bio_clone_fast);
+EXPORT_SYMBOL(bio_init_clone);
 
 const char *bio_devname(struct bio *bio, char *buf)
 {
@@ -1572,7 +1577,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
        if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
                return NULL;
 
-       split = bio_clone_fast(bio, gfp, bs);
+       split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
        if (!split)
                return NULL;
 
@@ -1667,9 +1672,9 @@ EXPORT_SYMBOL(bioset_exit);
  *    Note that the bio must be embedded at the END of that structure always,
  *    or things will break badly.
  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
- *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
- *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
- *    dispatch queued requests when the mempool runs out of space.
+ *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
+ *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
+ *    to dispatch queued requests when the mempool runs out of space.
  *
  */
 int bioset_init(struct bio_set *bs,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1adfe4824ef5e29377874b1b337326ab3b3c498b..4b868e792ba4ac0c40cd7e9a897ec5ea76e4bb2d 100644 (file)
@@ -2975,10 +2975,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                bs = &fs_bio_set;
 
        __rq_for_each_bio(bio_src, rq_src) {
-               bio = bio_clone_fast(bio_src, gfp_mask, bs);
+               bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
+                                     bs);
                if (!bio)
                        goto free_and_out;
-               bio->bi_bdev = rq->q->disk->part0;
 
                if (bio_ctr && bio_ctr(bio, bio_src, data))
                        goto free_and_out;
diff --git a/block/bounce.c b/block/bounce.c
index 330ddde25b460d37e64e9f05db6faacc56b1031f..3fd3bc6fd5dbbd26df1368feb0a087aa80a08b10 100644 (file)
@@ -162,8 +162,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
         *    that does not own the bio - reason being drivers don't use it for
         *    iterating over the biovec anymore, so expecting it to be kept up
         *    to date (i.e. for clones that share the parent biovec) is just
-        *    asking for trouble and would force extra work on
-        *    __bio_clone_fast() anyways.
+        *    asking for trouble and would force extra work.
         */
        bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
                               bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8d44e96c4c4efa63db78a3a44bee8f9aaa42d64d..c00ae8619519eb948523a7f4a51f9c6e72b3b399 100644 (file)
@@ -30,8 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
                return NULL;
        memset(req, 0, sizeof(*req));
 
-       req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
-       bio_set_dev(req->private_bio, device->ldev->backing_bdev);
+       req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
+                                          GFP_NOIO, &drbd_io_bio_set);
        req->private_bio->bi_private = req;
        req->private_bio->bi_end_io = drbd_request_endio;
 
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 64563bfdf0da02b1c2eb3b709d59ac3157e599da..a5e04b38006b65865b6c6b3cb47c67ab5e9757de 100644 (file)
@@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
        if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
                drbd_al_begin_io(device, &req->i);
 
-       req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+       req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+                                          req->master_bio, GFP_NOIO,
                                          &drbd_io_bio_set);
-       bio_set_dev(req->private_bio, device->ldev->backing_bdev);
        req->private_bio->bi_private = req;
        req->private_bio->bi_end_io = drbd_request_endio;
        submit_bio_noacct(req->private_bio);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3aa5954429462c592c0df507bee9f9da517d6bf7..be749c686feb735d87690fd812ef0a29e89be2c4 100644 (file)
@@ -2294,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 
 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-       struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
+       struct bio *cloned_bio =
+               bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
        struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
 
        psd->pd = pd;
        psd->bio = bio;
-       bio_set_dev(cloned_bio, pd->bdev);
        cloned_bio->bi_private = psd;
        cloned_bio->bi_end_io = pkt_end_io_read_cloned;
        pd->stats.secs_r += bio_sectors(bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d2cb853bf9173ce210f0bde7fa1d8bd6189b9456..6869e010475a3dcf4b2107d6b3c838853799ec02 100644 (file)
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
 {
        struct bio *bio = &s->bio.bio;
 
-       __bio_clone_fast(bio, orig_bio, GFP_NOIO);
+       bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
        /*
         * bi_end_io can be set separately somewhere else, e.g. the
         * variants in,
@@ -1036,7 +1036,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        closure_bio_submit(s->iop.c, flush, cl);
                }
        } else {
-               s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+               s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+                                            &dc->disk.bio_split);
                /* I/O request sent to backing device */
                bio->bi_end_io = backing_request_endio;
                closure_bio_submit(s->iop.c, bio, cl);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1c37fe028e531e546173b5df9d36feb48e9bec8a..89fdfb49d564e4a591ae8cd6daa32e29d7e47882 100644 (file)
@@ -819,13 +819,13 @@ static void issue_op(struct bio *bio, void *context)
 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
                                      dm_oblock_t oblock, dm_cblock_t cblock)
 {
-       struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
+       struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
+                                                GFP_NOIO, &cache->bs);
 
        BUG_ON(!origin_bio);
 
        bio_chain(origin_bio, bio);
 
-       remap_to_origin(cache, origin_bio);
        if (bio_data_dir(origin_bio) == WRITE)
                clear_discard(cache, oblock_to_dblock(cache, oblock));
        submit_bio(origin_bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f7e4435b7439af000c57b5739468c6321de4b939..a5006cb6ee8add6b252c8feeb3a523573910d0aa 100644 (file)
@@ -1834,17 +1834,16 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        struct bio *clone;
 
        /*
-        * We need the original biovec array in order to decrypt
-        * the whole bio data *afterwards* -- thanks to immutable
-        * biovecs we don't need to worry about the block layer
-        * modifying the biovec array; so leverage bio_clone_fast().
+        * We need the original biovec array in order to decrypt the whole bio
+        * data *afterwards* -- thanks to immutable biovecs we don't need to
+        * worry about the block layer modifying the biovec array; so leverage
+        * bio_alloc_clone().
         */
-       clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
+       clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
        if (!clone)
                return 1;
        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
-       bio_set_dev(clone, cc->dev->bdev);
 
        crypt_inc_pending(io);
 
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 166c4e9d99c979bce13ff3e85690716cc6d2599d..a3f6d3ef38174fce353bbe9e20e80aeedc8ca1bc 100644 (file)
@@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
        if (dev->flags & DMZ_BDEV_DYING)
                return -EIO;
 
-       clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+       clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;
 
-       bio_set_dev(clone, dev->bdev);
        bioctx->dev = dev;
        clone->bi_iter.bi_sector =
                dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 862564a5df74bb9530570bdcea4713e46876e90a..ab9cc91931f996a7ab5d04f169ab4e4af560feac 100644 (file)
@@ -520,7 +520,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
        struct dm_target_io *tio;
        struct bio *clone;
 
-       clone = bio_clone_fast(bio, GFP_NOIO, &md->io_bs);
+       clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
 
        tio = clone_to_tio(clone);
        tio->inside_dm_io = true;
@@ -553,8 +553,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
                /* the dm_target_io embedded in ci->io is available */
                tio = &ci->io->tio;
        } else {
-               struct bio *clone = bio_clone_fast(ci->bio, gfp_mask,
-                                                  &ci->io->md->bs);
+               struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+                                                   gfp_mask, &ci->io->md->bs);
                if (!clone)
                        return NULL;
 
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index c0dc6f2ef4a3db33d2fc038dd22f1ec6af558be2..50ad818978a4333bc225cd3626c3265ded5167d0 100644 (file)
@@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
                }
        }
        if (failit) {
-               struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+               struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
+                                               &mddev->bio_set);
 
-               bio_set_dev(b, conf->rdev->bdev);
                b->bi_private = bio;
                b->bi_end_io = faulty_fail;
                bio = b;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 483a5500f83cd052b041000f4327dcdb3cd0ebd0..97fb948e3e74141ddf02299fb0e848583c32fc82 100644 (file)
@@ -121,10 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
        }
        multipath = conf->multipaths + mp_bh->path;
 
-       __bio_clone_fast(&mp_bh->bio, bio, GFP_NOIO);
+       bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
 
        mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
-       bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
        mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0a89f072dae0d04d936c7ba3b71965249be947cc..f88a9e948f3eb7edab749a94f6f4e59208b3397a 100644 (file)
@@ -8634,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio)
  */
 void md_account_bio(struct mddev *mddev, struct bio **bio)
 {
+       struct block_device *bdev = (*bio)->bi_bdev;
        struct md_io_acct *md_io_acct;
        struct bio *clone;
 
-       if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+       if (!blk_queue_io_stat(bdev->bd_disk->queue))
                return;
 
-       clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+       clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
        md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
        md_io_acct->orig_bio = *bio;
        md_io_acct->start_time = bio_start_io_acct(*bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e7710fb5befb4056d468c953926dd3c1b65e4ca7..c3288d46948ded595223e2c5066338aff67c93af 100644 (file)
@@ -1320,13 +1320,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
                r1_bio->start_time = bio_start_io_acct(bio);
 
-       read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+       read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+                                  &mddev->bio_set);
 
        r1_bio->bios[rdisk] = read_bio;
 
        read_bio->bi_iter.bi_sector = r1_bio->sector +
                mirror->rdev->data_offset;
-       bio_set_dev(read_bio, mirror->rdev->bdev);
        read_bio->bi_end_io = raid1_end_read_request;
        bio_set_op_attrs(read_bio, op, do_sync);
        if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1546,24 +1546,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                        first_clone = 0;
                }
 
-               if (r1_bio->behind_master_bio)
-                       mbio = bio_clone_fast(r1_bio->behind_master_bio,
-                                             GFP_NOIO, &mddev->bio_set);
-               else
-                       mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-
                if (r1_bio->behind_master_bio) {
+                       mbio = bio_alloc_clone(rdev->bdev,
+                                              r1_bio->behind_master_bio,
+                                              GFP_NOIO, &mddev->bio_set);
                        if (test_bit(CollisionCheck, &rdev->flags))
                                wait_for_serialization(rdev, r1_bio);
                        if (test_bit(WriteMostly, &rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
-               } else if (mddev->serialize_policy)
-                       wait_for_serialization(rdev, r1_bio);
+               } else {
+                       mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+                                              &mddev->bio_set);
+
+                       if (mddev->serialize_policy)
+                               wait_for_serialization(rdev, r1_bio);
+               }
 
                r1_bio->bios[i] = mbio;
 
                mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
-               bio_set_dev(mbio, rdev->bdev);
                mbio->bi_end_io = raid1_end_write_request;
                mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
                if (test_bit(FailFast, &rdev->flags) &&
@@ -2416,12 +2417,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                /* Write at 'sector' for 'sectors'*/
 
                if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-                       wbio = bio_clone_fast(r1_bio->behind_master_bio,
-                                             GFP_NOIO,
-                                             &mddev->bio_set);
+                       wbio = bio_alloc_clone(rdev->bdev,
+                                              r1_bio->behind_master_bio,
+                                              GFP_NOIO, &mddev->bio_set);
                } else {
-                       wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
-                                             &mddev->bio_set);
+                       wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+                                              GFP_NOIO, &mddev->bio_set);
                }
 
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2430,7 +2431,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_iter.bi_sector += rdev->data_offset;
-               bio_set_dev(wbio, rdev->bdev);
 
                if (submit_bio_wait(wbio) < 0)
                        /* failure! */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index da07bcbc06d0856a98a728d66c7dd301ad5d277d..5dd2e17e1d0ea1a0cae811d74761adfb4fc666b2 100644 (file)
@@ -1208,14 +1208,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 
        if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
                r10_bio->start_time = bio_start_io_acct(bio);
-       read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+       read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
 
        r10_bio->devs[slot].bio = read_bio;
        r10_bio->devs[slot].rdev = rdev;
 
        read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                choose_data_offset(r10_bio, rdev);
-       bio_set_dev(read_bio, rdev->bdev);
        read_bio->bi_end_io = raid10_end_read_request;
        bio_set_op_attrs(read_bio, op, do_sync);
        if (test_bit(FailFast, &rdev->flags) &&
@@ -1255,7 +1254,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
        } else
                rdev = conf->mirrors[devnum].rdev;
 
-       mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+       mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
        if (replacement)
                r10_bio->devs[n_copy].repl_bio = mbio;
        else
@@ -1263,7 +1262,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 
        mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
                                   choose_data_offset(r10_bio, rdev));
-       bio_set_dev(mbio, rdev->bdev);
        mbio->bi_end_io = raid10_end_write_request;
        bio_set_op_attrs(mbio, op, do_sync | do_fua);
        if (!replacement && test_bit(FailFast,
@@ -1812,7 +1810,8 @@ retry_discard:
                 */
                if (r10_bio->devs[disk].bio) {
                        struct md_rdev *rdev = conf->mirrors[disk].rdev;
-                       mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+                       mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+                                              &mddev->bio_set);
                        mbio->bi_end_io = raid10_end_discard_request;
                        mbio->bi_private = r10_bio;
                        r10_bio->devs[disk].bio = mbio;
@@ -1825,7 +1824,8 @@ retry_discard:
                }
                if (r10_bio->devs[disk].repl_bio) {
                        struct md_rdev *rrdev = conf->mirrors[disk].replacement;
-                       rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+                       rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+                                              &mddev->bio_set);
                        rbio->bi_end_io = raid10_end_discard_request;
                        rbio->bi_private = r10_bio;
                        r10_bio->devs[disk].repl_bio = rbio;
@@ -2892,12 +2892,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
-               wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+               wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+                                      &mddev->bio_set);
                bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
                wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
                wbio->bi_iter.bi_sector = wsector +
                                   choose_data_offset(r10_bio, rdev);
-               bio_set_dev(wbio, rdev->bdev);
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                if (submit_bio_wait(wbio) < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7c119208a21436f1544b70547ca55d7332f1aec3..8891aaba65964e414add3591a1146479fbbd4f22 100644 (file)
@@ -5438,14 +5438,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
                return 0;
        }
 
-       align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+       align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
+                                   &mddev->io_acct_set);
        md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
        raid_bio->bi_next = (void *)rdev;
        if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
                md_io_acct->start_time = bio_start_io_acct(raid_bio);
        md_io_acct->orig_bio = raid_bio;
 
-       bio_set_dev(align_bio, rdev->bdev);
        align_bio->bi_end_io = raid5_align_endio;
        align_bio->bi_private = md_io_acct;
        align_bio->bi_iter.bi_sector = sector;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 421d921a05716e1f3386eda0743c11ddcaf59b49..dee86911a4befd0b8eaebecec587f3302f4feb2b 100644 (file)
@@ -3154,7 +3154,7 @@ struct bio *btrfs_bio_clone(struct bio *bio)
        struct bio *new;
 
        /* Bio allocation backed by a bioset does not fail */
-       new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
+       new = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOFS, &btrfs_bioset);
        bbio = btrfs_bio(new);
        btrfs_bio_init(bbio);
        bbio->iter = bio->bi_iter;
@@ -3169,7 +3169,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
        ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
 
        /* this will never fail when it's backed by a bioset */
-       bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
+       bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
        ASSERT(bio);
 
        bbio = btrfs_bio(bio);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b814361c957b009fa80b69649b7c7389c76b8750..7523aba4ddf7c9bf346fa9d30bceaba03aba73cc 100644 (file)
@@ -413,8 +413,10 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
 extern void bio_put(struct bio *);
 
-int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+               gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+               struct bio *bio_src, gfp_t gfp);
 
 extern struct bio_set fs_bio_set;