}
/* Do io completion on the original bio */
- if (cb->status != BLK_STS_OK)
- cb->orig_bio->bi_status = cb->status;
- bio_endio(cb->orig_bio);
+ btrfs_bio_end_io(btrfs_bio(cb->orig_bio), cb->status);
/* Finally free the cb struct */
kfree(cb->compressed_pages);
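(For reference, the helper used above is introduced in the volumes.h hunk at
the end of this patch:

	static inline void btrfs_bio_end_io(struct btrfs_bio *bbio,
					    blk_status_t status)
	{
		bbio->bio.bi_status = status;
		bbio->end_io(bbio);
	}

The old "!= BLK_STS_OK" guard is dropped because the helper stores the status
unconditionally; cb->status already holds either the first recorded error or
BLK_STS_OK, so the stored value matches the old behaviour as long as nothing
else set an error on the original bio, which appears to be the case here.)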
* before decompressing it into the original bio and freeing the uncompressed
* pages.
*/
-static void end_compressed_bio_read(struct bio *bio)
+static void end_compressed_bio_read(struct btrfs_bio *bbio)
{
- struct compressed_bio *cb = bio->bi_private;
+ struct compressed_bio *cb = bbio->private;
struct inode *inode = cb->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *bi = BTRFS_I(inode);
bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
!test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
- blk_status_t status = bio->bi_status;
- struct btrfs_bio *bbio = btrfs_bio(bio);
+ blk_status_t status = bbio->bio.bi_status;
struct bvec_iter iter;
struct bio_vec bv;
u32 offset;
if (refcount_dec_and_test(&cb->pending_ios))
finish_compressed_bio_read(cb);
btrfs_bio_free_csum(bbio);
- bio_put(bio);
+ bio_put(&bbio->bio);
}
/*
* This also calls the writeback end hooks for the file pages so that metadata
* and checksums can be updated in the file.
*/
-static void end_compressed_bio_write(struct bio *bio)
+static void end_compressed_bio_write(struct btrfs_bio *bbio)
{
- struct compressed_bio *cb = bio->bi_private;
+ struct compressed_bio *cb = bbio->private;
- if (bio->bi_status)
- cb->status = bio->bi_status;
+ if (bbio->bio.bi_status)
+ cb->status = bbio->bio.bi_status;
if (refcount_dec_and_test(&cb->pending_ios)) {
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
- btrfs_record_physical_zoned(cb->inode, cb->start, bio);
+ btrfs_record_physical_zoned(cb->inode, cb->start, &bbio->bio);
queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}
- bio_put(bio);
+ bio_put(&bbio->bio);
}
/*
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
- blk_opf_t opf, bio_end_io_t endio_func,
+ blk_opf_t opf,
+ btrfs_bio_end_io_t endio_func,
u64 *next_stripe_start)
{
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
struct bio *bio;
int ret;
- bio = btrfs_bio_alloc(BIO_MAX_VECS, opf);
+ bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, endio_func, cb);
bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- bio->bi_private = cb;
- bio->bi_end_io = endio_func;
em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
if (IS_ERR(em)) {
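With the handler and its context bound at allocation time, a caller of
alloc_compressed_bio() now looks roughly like this (sketch based on the read
path in this file; treat the exact arguments as illustrative):

	comp_bio = alloc_compressed_bio(cb, cur_disk_byte, REQ_OP_READ,
					end_compressed_bio_read,
					&next_stripe_start);

The bi_private/bi_end_io assignments that used to follow btrfs_bio_alloc()
now happen inside btrfs_bio_init(), so they cannot be forgotten.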
if (!skip_sum) {
ret = btrfs_csum_one_bio(inode, bio, start, true);
if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
break;
}
}
ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
if (ret) {
- comp_bio->bi_status = ret;
- bio_endio(comp_bio);
+ btrfs_bio_end_io(btrfs_bio(comp_bio), ret);
break;
}
kfree(cb);
out:
free_extent_map(em);
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
}
*/
static void run_one_async_done(struct btrfs_work *work)
{
- struct async_submit_bio *async;
- struct inode *inode;
-
- async = container_of(work, struct async_submit_bio, work);
- inode = async->inode;
+ struct async_submit_bio *async =
+ container_of(work, struct async_submit_bio, work);
+ struct inode *inode = async->inode;
+ struct btrfs_bio *bbio = btrfs_bio(async->bio);
/* If an error occurred we just want to clean up the bio and move on */
if (async->status) {
- async->bio->bi_status = async->status;
- bio_endio(async->bio);
+ btrfs_bio_end_io(bbio, async->status);
return;
}
void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t ret;
bio->bi_opf |= REQ_META;
ret = btree_csum_one_bio(bio);
if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_bio_end_io(bbio, ret);
return;
}
btrfs_submit_data_read_bio(inode, bio, mirror_num,
bio_ctrl->compress_type);
- /* The bio is owned by the bi_end_io handler now */
+ /* The bio is owned by the end_io handler now */
bio_ctrl->bio = NULL;
}
if (ret) {
ASSERT(ret < 0);
- bio->bi_status = errno_to_blk_status(ret);
- bio_endio(bio);
- /* The bio is owned by the bi_end_io handler now */
+ btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
+ /* The bio is owned by the end_io handler now */
epd->bio_ctrl.bio = NULL;
} else {
submit_one_bio(&epd->bio_ctrl);
return -EIO;
}
- repair_bio = btrfs_bio_alloc(1, REQ_OP_READ);
+ repair_bio = btrfs_bio_alloc(1, REQ_OP_READ, failed_bbio->end_io,
+ failed_bbio->private);
repair_bbio = btrfs_bio(repair_bio);
repair_bbio->file_offset = start;
- repair_bio->bi_end_io = failed_bio->bi_end_io;
repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
- repair_bio->bi_private = failed_bio->bi_private;
if (failed_bbio->csum) {
const u32 csum_size = fs_info->csum_size;
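The repair bio must complete through exactly the handler the failed bio
carried, or the waiter on the original I/O would never be woken.  Taking the
pair straight from failed_bbio keeps that identity in one step:

	repair_bio = btrfs_bio_alloc(1, REQ_OP_READ, failed_bbio->end_io,
				     failed_bbio->private);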
* Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO.
*/
-static void end_bio_extent_writepage(struct bio *bio)
+static void end_bio_extent_writepage(struct btrfs_bio *bbio)
{
+ struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status);
struct bio_vec *bvec;
u64 start;
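The same mechanical conversion applies to every end I/O handler in this
patch; the shape is always the following (sketch):

	/* before: block-layer typed handler */
	static void some_end_io(struct bio *bio)
	{
		...
	}

	/* after: btrfs typed handler, recovering the plain bio from the
	 * embedded member where the body still needs it */
	static void some_end_io(struct btrfs_bio *bbio)
	{
		struct bio *bio = &bbio->bio;
		...
	}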
* Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO.
*/
-static void end_bio_extent_readpage(struct bio *bio)
+static void end_bio_extent_readpage(struct btrfs_bio *bbio)
{
+ struct bio *bio = &bbio->bio;
struct bio_vec *bvec;
- struct btrfs_bio *bbio = btrfs_bio(bio);
struct extent_io_tree *tree, *failure_tree;
struct processed_extent processed = { 0 };
/*
struct btrfs_bio_ctrl *bio_ctrl,
struct writeback_control *wbc,
blk_opf_t opf,
- bio_end_io_t end_io_func,
+ btrfs_bio_end_io_t end_io_func,
u64 disk_bytenr, u32 offset, u64 file_offset,
enum btrfs_compression_type compress_type)
{
struct bio *bio;
int ret;
- bio = btrfs_bio_alloc(BIO_MAX_VECS, opf);
+ bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, end_io_func, NULL);
/*
* For compressed page range, its disk_bytenr is always @disk_bytenr
* passed in, no matter if we have added any range into previous bio.
bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
bio_ctrl->bio = bio;
bio_ctrl->compress_type = compress_type;
- bio->bi_end_io = end_io_func;
ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
if (ret < 0)
goto error;
return 0;
error:
bio_ctrl->bio = NULL;
- bio->bi_status = errno_to_blk_status(ret);
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
return ret;
}
struct btrfs_bio_ctrl *bio_ctrl,
struct page *page, u64 disk_bytenr,
size_t size, unsigned long pg_offset,
- bio_end_io_t end_io_func,
+ btrfs_bio_end_io_t end_io_func,
enum btrfs_compression_type compress_type,
bool force_bio_submit)
{
* Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
* after all extent buffers in the page have finished their writeback.
*/
-static void end_bio_subpage_eb_writepage(struct bio *bio)
+static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
{
+ struct bio *bio = &bbio->bio;
struct btrfs_fs_info *fs_info;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_put(bio);
}
-static void end_bio_extent_buffer_writepage(struct bio *bio)
+static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio)
{
+ struct bio *bio = &bbio->bio;
struct bio_vec *bvec;
struct extent_buffer *eb;
int done;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
ret = extract_ordered_extent(bi, bio,
page_offset(bio_first_bvec_all(bio)->bv_page));
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
+ return;
+ }
}
/*
return;
ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
+ return;
+ }
}
btrfs_submit_bio(fs_info, bio, mirror_num);
- return;
-out:
- if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
}
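The goto-out pattern forced ret to stay live across the whole function and be
rechecked at the label.  Converting every failure site to the idiom

	if (ret) {
		btrfs_bio_end_io(btrfs_bio(bio), ret);
		return;
	}

keeps error completion next to the error and lets the tail reach
btrfs_submit_bio() unconditionally.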
void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
*/
ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
}
int mirror_num,
enum btrfs_compression_type compress_type)
{
- struct btrfs_dio_private *dip = bio->bi_private;
+ struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
}
-static void btrfs_end_dio_bio(struct bio *bio)
+static void btrfs_end_dio_bio(struct btrfs_bio *bbio)
{
- struct btrfs_dio_private *dip = bio->bi_private;
- struct btrfs_bio *bbio = btrfs_bio(bio);
+ struct btrfs_dio_private *dip = bbio->private;
+ struct bio *bio = &bbio->bio;
blk_status_t err = bio->bi_status;
if (err)
u64 file_offset, int async_submit)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_dio_private *dip = bio->bi_private;
+ struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
blk_status_t ret;
/* Save the original iter for read repair */
*/
ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
}
} else {
* This will never fail as it's passing GFP_NOFS and
* the allocation is backed by btrfs_bioset.
*/
- bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
- bio->bi_private = dip;
- bio->bi_end_io = btrfs_end_dio_bio;
+ bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len,
+ btrfs_end_dio_bio, dip);
btrfs_bio(bio)->file_offset = file_offset;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
struct bio *bio, int mirror_num)
{
- struct btrfs_encoded_read_private *priv = bio->bi_private;
+ struct btrfs_encoded_read_private *priv = btrfs_bio(bio)->private;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
blk_status_t ret;
static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
{
const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK);
- struct btrfs_encoded_read_private *priv = bbio->bio.bi_private;
+ struct btrfs_encoded_read_private *priv = bbio->private;
struct btrfs_inode *inode = priv->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
u32 sectorsize = fs_info->sectorsize;
return BLK_STS_OK;
}
-static void btrfs_encoded_read_endio(struct bio *bio)
+static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
- struct btrfs_encoded_read_private *priv = bio->bi_private;
- struct btrfs_bio *bbio = btrfs_bio(bio);
+ struct btrfs_encoded_read_private *priv = bbio->private;
blk_status_t status;
status = btrfs_encoded_read_verify_csum(bbio);
if (!atomic_dec_return(&priv->pending))
wake_up(&priv->wait);
btrfs_bio_free_csum(bbio);
- bio_put(bio);
+ bio_put(&bbio->bio);
}
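This endio pairs with a waiter in btrfs_encoded_read_regular_fill_pages()
(next hunk).  A hedged sketch of that side, following the surrounding kernel
code (the exact wait primitive is an assumption):

	/* Submitter: priv.pending starts at 1; drop the initial reference
	 * and sleep until every bio has completed. */
	if (atomic_dec_return(&priv.pending))
		io_wait_event(priv.wait, !atomic_read(&priv.pending));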
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, remaining, PAGE_SIZE);
if (!bio) {
- bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ);
+ bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ,
+ btrfs_encoded_read_endio,
+ &priv);
bio->bi_iter.bi_sector =
(disk_bytenr + cur) >> SECTOR_SHIFT;
- bio->bi_end_io = btrfs_encoded_read_endio;
- bio->bi_private = &priv;
}
if (!bytes ||
* Initialize a btrfs_bio structure. This skips the embedded bio itself as it
* is already initialized by the block layer.
*/
-static inline void btrfs_bio_init(struct btrfs_bio *bbio)
+static inline void btrfs_bio_init(struct btrfs_bio *bbio,
+ btrfs_bio_end_io_t end_io, void *private)
{
memset(bbio, 0, offsetof(struct btrfs_bio, bio));
+ bbio->end_io = end_io;
+ bbio->private = private;
}
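The new end_io/private pair sits in the region cleared by that memset(),
ahead of the embedded bio that must remain the last member.  A minimal,
compilable userspace model of the layout trick (illustrative only, not
kernel API):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct bio {
		int bi_status;
	};

	struct btrfs_bio;
	typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);

	struct btrfs_bio {
		btrfs_bio_end_io_t end_io;
		void *private;
		struct bio bio;		/* must remain last */
	};

	static void bbio_init(struct btrfs_bio *bbio,
			      btrfs_bio_end_io_t end_io, void *private)
	{
		/* Zero only the prefix we own, then hook up the handler. */
		memset(bbio, 0, offsetof(struct btrfs_bio, bio));
		bbio->end_io = end_io;
		bbio->private = private;
	}

	static void my_end_io(struct btrfs_bio *bbio)
	{
		printf("status=%d private=%s\n", bbio->bio.bi_status,
		       (const char *)bbio->private);
	}

	int main(void)
	{
		struct btrfs_bio bbio;

		bbio.bio.bi_status = 0;	/* stands in for block layer init */
		bbio_init(&bbio, my_end_io, "ctx");

		bbio.bio.bi_status = 5;	/* what btrfs_bio_end_io() does... */
		bbio.end_io(&bbio);	/* ...followed by this call */
		return 0;
	}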
/*
* Just like the underlying bio_alloc_bioset it will not fail as it is backed by
* a mempool.
*/
-struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf)
+struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ btrfs_bio_end_io_t end_io, void *private)
{
struct bio *bio;
bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
- btrfs_bio_init(btrfs_bio(bio));
+ btrfs_bio_init(btrfs_bio(bio), end_io, private);
return bio;
}
-struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
+struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
+ btrfs_bio_end_io_t end_io, void *private)
{
struct bio *bio;
struct btrfs_bio *bbio;
bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
bbio = btrfs_bio(bio);
- btrfs_bio_init(bbio);
+ btrfs_bio_init(bbio, end_io, private);
bio_trim(bio, offset >> 9, size >> 9);
bbio->iter = bio->bi_iter;
struct btrfs_bio *bbio =
container_of(work, struct btrfs_bio, end_io_work);
- bio_endio(&bbio->bio);
+ bbio->end_io(bbio);
}
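Read completions still bounce through a workqueue so potentially heavy work
runs in process context; only the final indirection changes from bio_endio()
to the typed handler.  The queueing side is visible in the btrfs_end_bio()
hunk below:

	INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
	queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work);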
static void btrfs_raid56_end_io(struct bio *bio)
btrfs_bio_counter_dec(bioc->fs_info);
bbio->mirror_num = bioc->mirror_num;
- bio->bi_end_io = bioc->end_io;
- bio->bi_private = bioc->private;
- bio->bi_end_io(bio);
+ bbio->end_io(bbio);
btrfs_put_bioc(bioc);
}
}
bbio->mirror_num = bioc->mirror_num;
- bio->bi_end_io = bioc->end_io;
- bio->bi_private = bioc->private;
/*
* Only send an error to the higher layers if it is beyond the tolerance
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work);
} else {
- bio_endio(bio);
+ bbio->end_io(bbio);
}
btrfs_put_bioc(bioc);
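Before this patch the completion context took a round trip through the
btrfs_io_context, as the removed lines above and in the btrfs_submit_bio()
hunk below show:

	/* submit side: */
	bioc->private = bio->bi_private;
	bioc->end_io = bio->bi_end_io;

	/* completion side: */
	bio->bi_end_io = bioc->end_io;
	bio->bi_private = bioc->private;
	bio->bi_end_io(bio);

With the handler stored in the btrfs_bio itself that round trip disappears,
which is what allows the end_io and private members to be deleted from
struct btrfs_io_context in the final hunk.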
&map_length, &bioc, mirror_num, 1);
if (ret) {
btrfs_bio_counter_dec(fs_info);
- bio->bi_status = errno_to_blk_status(ret);
- bio_endio(bio);
+ btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
return;
}
total_devs = bioc->num_stripes;
bioc->orig_bio = bio;
- bioc->private = bio->bi_private;
- bioc->end_io = bio->bi_end_io;
if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
*/
#define BTRFS_MAX_BIO_SECTORS (256)
+typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
+
/*
* Additional info to pass along bio.
*
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
struct bvec_iter iter;
+ /* End I/O information supplied to btrfs_bio_alloc */
+ btrfs_bio_end_io_t end_io;
+ void *private;
+
/* For read end I/O handling */
struct work_struct end_io_work;
int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void);
-struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf);
-struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
+struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ btrfs_bio_end_io_t end_io, void *private);
+struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
+ btrfs_bio_end_io_t end_io, void *private);
+
+static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
+{
+ bbio->bio.bi_status = status;
+ bbio->end_io(bbio);
+}
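Canonical usage, matching the submit-path conversions earlier in this patch:
every failure site completes through the btrfs-level handler in one call.

	ret = btrfs_lookup_bio_sums(inode, bio, NULL);
	if (ret) {
		btrfs_bio_end_io(btrfs_bio(bio), ret);
		return;
	}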
static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{
refcount_t refs;
struct btrfs_fs_info *fs_info;
u64 map_type; /* get from map_lookup->type */
- bio_end_io_t *end_io;
struct bio *orig_bio;
- void *private;
atomic_t error;
int max_errors;
int num_stripes;