There is no point in allocating memory for a synchronous flush: initialize the flush bio on the stack in blkdev_issue_flush() and drop the now-unused gfp_mask argument from its prototype and from all callers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
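For callers the conversion is mechanical: the gfp_mask argument is simply dropped, because the flush bio now lives on blkdev_issue_flush()'s own stack and submit_bio_wait() does not return until the bio has completed. A minimal sketch of the caller-side change (my_fsync_flush() is an illustrative helper, not part of this patch):

#include <linux/blkdev.h>

/* Illustrative helper only, not part of this patch. */
static int my_fsync_flush(struct block_device *bdev)
{
	/* Before this patch: blkdev_issue_flush(bdev, GFP_KERNEL);
	 * (callers variously passed GFP_KERNEL, GFP_NOIO or GFP_NOFS) */
	return blkdev_issue_flush(bdev);
}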
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
- * @gfp_mask: memory allocation flags (for bio_alloc)
*
* Description:
* Issue a flush for the block device in question.
*/
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+int blkdev_issue_flush(struct block_device *bdev)
{
- struct bio *bio;
- int ret = 0;
+ struct bio bio;
- bio = bio_alloc(gfp_mask, 0);
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- ret = submit_bio_wait(bio);
- bio_put(bio);
- return ret;
+ bio_init(&bio, NULL, 0);
+ bio_set_dev(&bio, bdev);
+ bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
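The on-stack bio is only valid because submit_bio_wait() blocks until completion, so nothing references &bio after blkdev_issue_flush() returns and no bio_put() is needed. For contrast, a sketch of what an asynchronous flush would still have to do, assuming the bio_alloc(gfp_mask, nr_iovecs) signature of this kernel version; my_issue_flush_async() and my_flush_done() are illustrative only, not kernel API:

/* Sketch: an async flush must heap-allocate its bio so it outlives the
 * submitting function; the completion handler drops the reference. */
static void my_flush_done(struct bio *bio)
{
	bio_put(bio);
}

static void my_issue_flush_async(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio->bi_end_io = my_flush_done;
	submit_bio(bio);
}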
ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
goto err;
}
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(rdev->bdev);
out:
__free_page(page);
return ret;
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
+ if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
* i_mutex and doing so causes performance issues with concurrent
* O_SYNC writers to a block device.
*/
- error = blkdev_issue_flush(bdev, GFP_KERNEL);
+ error = blkdev_issue_flush(bdev);
if (error == -EOPNOTSUPP)
error = 0;
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
const struct file_operations exfat_file_operations = {
* flush before we start writing fast commit blocks.
*/
if (journal->j_fs_dev != journal->j_dev)
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
blk_start_plug(&plug);
if (sbi->s_fc_bytes == 0) {
out:
iput(inode);
if (!ret)
- blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(sb->s_bdev);
return 0;
}
ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
if (needs_barrier) {
- err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!ret)
ret = err;
}
if (ret < 0)
goto err_out;
if (barrier)
- blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
+ blkdev_issue_flush(sb->s_bdev);
skip_zeroout:
ext4_lock_group(sb, group);
needs_barrier = true;
if (needs_barrier) {
int err;
- err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(sb->s_bdev);
if (!ret)
ret = err;
}
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
}
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(inode->i_sb->s_bdev);
inode_unlock(inode);
mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(sb->s_bdev);
return error;
}
* jbd2_cleanup_journal_tail() doesn't get called all that often.
*/
if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
return __jbd2_update_log_tail(journal, first_tid, blocknr);
}
if (commit_transaction->t_need_data_flush &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
/* Done it all: now write the commit record asynchronously. */
if (jbd2_has_feature_async_commit(journal)) {
stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
- blkdev_issue_flush(journal->j_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_dev);
}
if (err)
err = err2;
/* Make sure all replayed data is on permanent storage */
if (journal->j_flags & JBD2_BARRIER) {
- err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
+ err2 = blkdev_issue_flush(journal->j_fs_dev);
if (!err)
err = err2;
}
err = __generic_file_fsync(file, start, end, datasync);
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
EXPORT_SYMBOL(generic_file_fsync);
*/
smp_wmb();
- err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(nilfs->ns_bdev);
if (err != -EIO)
err = 0;
return err;
needs_barrier = true;
err = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!err)
err = ret;
}
barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(inode->i_sb->s_bdev);
inode_unlock(inode);
if (barrier_done < 0)
return barrier_done;
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
+ blkdev_issue_flush(buftarg->bt_bdev);
}
STATIC void
if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
ret = file_write_and_wait_range(file, start, end);
if (!ret)
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (ret)
zonefs_io_error(inode, true);
!list_empty(&plug->cb_list));
}
-int blkdev_issue_flush(struct block_device *, gfp_t);
+int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
return false;
}
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+static inline int blkdev_issue_flush(struct block_device *bdev)
{
return 0;
}