/**
* bio_reset - reinitialize a bio
* @bio: bio to reset
+ * @bdev: block device to use the bio for
+ * @opf: operation and flags for bio
*
* Description:
* After calling bio_reset(), @bio will be in the same state as a freshly
* allocated bio returned by bio_alloc_bioset() - the only fields that are
* preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio.
*/
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
atomic_set(&bio->__bi_remaining, 1);
+ bio->bi_bdev = bdev;
+ if (bio->bi_bdev)
+ 	bio_associate_blkg(bio);
+ bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);
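
For illustration only (hypothetical caller, not part of the patch): the new signature folds what previously took three calls into one, and a NULL bdev with a zero opf is allowed when neither is known at reset time.

	/* Before this change: reinitialize, then set device and operation. */
	bio_reset(bio);
	bio_set_dev(bio, bdev);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	/* After this change: one call does all three. */
	bio_reset(bio, bdev, REQ_OP_READ);
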
continue;
bio = pkt->r_bios[f];
- bio_reset(bio);
+ bio_reset(bio, pd->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
BUG();
atomic_inc(&pkt->io_wait);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
{
int f;
- bio_reset(pkt->w_bio);
+ bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset;
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl);
atomic_long_add(sectors, &ca->meta_sectors_written);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
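
A hedged note on this conversion: bio_copy_dev() (removed at the end of this patch) cleared BIO_REMAPPED, copied bi_bdev, and cloned the blkg association. The memset() in bio_reset() already clears all flags and the bdev is now passed in directly, so only the blkg clone is left for the caller, as sketched below with hypothetical dst/src bios.

	/* Illustrative sketch, not part of the patch. */
	bio_reset(dst, src->bi_bdev, REQ_OP_READ);	/* flags cleared, bdev set */
	bio_clone_blkg_association(dst, src);		/* the one step left to do by hand */
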
continue;
/* fixup the bio for reuse, but preserve errno */
status = b->bi_status;
- bio_reset(b);
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rps;
}
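
The md callers here pass a NULL bdev and a zero opf because the bio is merely being recycled; device and operation are filled in later, at submission time. Since bio_reset() zeroes everything up to BIO_RESET_BYTES, which includes bi_private, per-bio state must be saved and restored around the call. A minimal sketch with an illustrative variable name:

	/* Not part of the patch: bi_private sits below BIO_RESET_BYTES
	 * (offsetof(struct bio, bi_max_vecs)), so the memset() wipes it.
	 */
	void *state = bio->bi_private;	/* e.g. the resync pages pointer */

	bio_reset(bio, NULL, 0);	/* bdev/opf not known until submit */
	bio->bi_private = state;
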
r1bio->master_bio = NULL;
* bi_vecs, as the read request might have corrupted these
*/
rp = get_resync_pages(tbio);
- bio_reset(tbio);
+ bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
- bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio);
}
for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio;
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio;
if (bio) {
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
}
}
if (!do_flush)
return;
- bio_reset(&log->flush_bio);
- bio_set_dev(&log->flush_bio, log->rdev->bdev);
+ bio_reset(&log->flush_bio, log->rdev->bdev,
+ REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
- bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
+ bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
BUG();
return;
}
}
}
rdev_dec_pending(rdev, conf->mddev);
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
BUG();
return;
}
if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
return;
#endif
- bio_reset(bio);
+ bio_reset(bio, device->bdev, REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier;
- bio_set_dev(bio, device->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
- bio_set_dev(bio, inode->i_sb->s_bdev);
- bio->bi_opf = REQ_OP_WRITE;
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
num_pages = 0;
}
}
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
- bio_set_dev(bio, inode->i_sb->s_bdev);
- bio->bi_opf = REQ_OP_WRITE;
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
} while (len != 0);
err = 0;
out:
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
void bio_chain(struct bio *, struct bio *);
int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bio_associate_blkg(bio);
}
-static inline void bio_copy_dev(struct bio *dst, struct bio *src)
-{
- bio_clear_flag(dst, BIO_REMAPPED);
- dst->bi_bdev = src->bi_bdev;
- bio_clone_blkg_association(dst, src);
-}
-
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*