/*
 * This function was originally taken from fs/mpage.c, and customized for
 * f2fs. The major change is that in f2fs, block_size == page_size by
 * default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
-			unsigned nr_pages)
+			unsigned nr_pages, bool is_readahead)

struct mpage_readpage_args {
	struct bio *bio;
	struct page *page;
	unsigned int nr_pages;
+	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
-	gfp_t gfp;
};
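
/*
 * For illustration only (a simplified sketch, not part of this hunk): the
 * two f2fs entry points now state their intent explicitly, so only the
 * read-ahead path gets REQ_RAHEAD treatment. The real callers also handle
 * inline data before falling through to f2fs_mpage_readpages().
 */
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	/* synchronous, caller-visible read: not read-ahead */
	return f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	/* aops->readpages() is only ever called for read-ahead */
	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}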
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct page *page = args->page;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
+	int op_flags;
	unsigned nblocks;
	unsigned relative_block;
+	gfp_t gfp;
+
+	if (args->is_readahead) {
+		op_flags = REQ_RAHEAD;
+		gfp = readahead_gfp_mask(page->mapping);
+	} else {
+		op_flags = 0;
+		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+	}
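
/*
 * REQ_RAHEAD marks the I/O as advisory, so the block layer may
 * deprioritize or fail it under pressure, and readahead_gfp_mask() adds
 * __GFP_NORETRY | __GFP_NOWARN so a failed page allocation just shrinks
 * the read-ahead window; the page is read on demand later instead.
 * Synchronous reads keep GFP_KERNEL semantics because a task is actually
 * waiting on the result.
 */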
	if (page_has_buffers(page))
		goto confused;
	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
alloc_new:
	if (args->bio == NULL) {
		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					min_t(int, args->nr_pages,
					      BIO_MAX_PAGES),
-					args->gfp);
+					gfp);
		if (args->bio == NULL)
			goto confused;
	}
	length = first_hole << blkbits;
	if (bio_add_page(args->bio, page, length, 0) < length) {
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
		goto alloc_new;
	}
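
/*
 * If the page no longer fits, the bio is submitted as-is and control
 * jumps back to alloc_new to start a fresh one; op_flags is reused, so a
 * read-ahead request stays read-ahead across the retry.
 */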
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
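
/*
 * The bio is flushed early when the filesystem reported a boundary block
 * (the next block is likely discontiguous) or when the page contains a
 * hole; otherwise it is held open so the next page can be merged into
 * the same request.
 */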
out:
	return args->bio;

confused:
	if (args->bio)
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	if (!PageUptodate(page))
		block_read_full_page(page, args->get_block);
	else
		unlock_page(page);
	goto out;
}

int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.get_block = get_block,
-		.gfp = readahead_gfp_mask(mapping),
+		.is_readahead = true,
	};
	unsigned page_idx;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
-					args.gfp)) {
+					readahead_gfp_mask(mapping))) {
			args.page = page;
			args.nr_pages = nr_pages - page_idx;
			args.bio = do_mpage_readpage(&args);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (args.bio)
-		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
+		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
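
/*
 * A typical caller, for context: a block-mapped filesystem's
 * aops->readpages() just forwards its get_block callback (shown here in
 * the ext2 style). Since ->readpages() is only invoked from the
 * read-ahead path, mpage_readpages() can hard-code is_readahead = true.
 */
static int ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}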
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.page = page,
		.nr_pages = 1,
		.get_block = get_block,
-		.gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL),
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
	return 0;
}
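
/*
 * The synchronous single-page counterpart: aops->readpage() goes through
 * mpage_readpage(), which leaves is_readahead false, so the gfp mask and
 * op_flags keep their blocking, non-advisory defaults.
 */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}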