return NULL;
}
+static void
+ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
+		      struct nfs_page *req,
+		      bool strict_iomode)
+{
+retry_strict:
+	pnfs_put_lseg(pgio->pg_lseg);
+	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+					   req->wb_context,
+					   0,
+					   NFS4_MAX_UINT64,
+					   IOMODE_READ,
+					   strict_iomode,
+					   GFP_KERNEL);
+	if (IS_ERR(pgio->pg_lseg)) {
+		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+		pgio->pg_lseg = NULL;
+	}
+
+	/* If we weren't strict about the iomode, got back an
+	 * IOMODE_RW segment, and the server wants READs to avoid
+	 * it, then retry with strict_iomode set.
+	 */
+	if (pgio->pg_lseg && !strict_iomode &&
+	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
+		strict_iomode = true;
+		goto retry_strict;
+	}
+}
+
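
The helper's control flow is easier to follow outside the kernel. Below is a
minimal userspace sketch of the same retry pattern: ask for a layout segment
of any iomode first, and if the "server" handed back an IOMODE_RW segment
flagged to avoid READ I/O, drop it and retry insisting on IOMODE_READ. Every
type and helper here (struct lseg, get_layout, avoid_read_on_rw, pg_get_read)
is a mock invented for the sketch, not a kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum iomode { IOMODE_READ = 1, IOMODE_RW = 2 };

struct lseg {
	enum iomode iomode;
	bool no_read_io;		/* "server wants READs elsewhere" */
};

/* Stand-in for pnfs_update_layout(): returns a cached-looking RW
 * segment unless the caller insists on IOMODE_READ. */
static struct lseg *get_layout(bool strict_iomode)
{
	struct lseg *l = malloc(sizeof(*l));

	if (l == NULL)
		return NULL;
	if (strict_iomode) {
		l->iomode = IOMODE_READ;
		l->no_read_io = false;
	} else {
		l->iomode = IOMODE_RW;	/* what a cached RW lseg would give */
		l->no_read_io = true;
	}
	return l;
}

static bool avoid_read_on_rw(const struct lseg *l)
{
	return l->iomode == IOMODE_RW && l->no_read_io;
}

/* Mirrors the shape of ff_layout_pg_get_read(). */
static struct lseg *pg_get_read(bool strict_iomode)
{
	struct lseg *l;

retry_strict:
	l = get_layout(strict_iomode);

	/* Non-strict pass produced an RW segment the server wants
	 * READs to avoid: drop it and go strict. */
	if (l && !strict_iomode && avoid_read_on_rw(l)) {
		free(l);		/* analogous to pnfs_put_lseg() */
		strict_iomode = true;
		goto retry_strict;
	}
	return l;
}

int main(void)
{
	struct lseg *l = pg_get_read(false);

	if (l) {
		printf("got %s segment\n",
		       l->iomode == IOMODE_READ ? "IOMODE_READ" : "IOMODE_RW");
		free(l);
	}
	return 0;
}

This is also how the rewritten ff_layout_pg_init_read() below drives the
helper: a non-strict pass when there is no cached lseg, and a strict pass
when the cached one is RW and flagged to avoid reads.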
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	int ds_idx;
/* Use full layout for now */
-	if (!pgio->pg_lseg || ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
-		pnfs_put_lseg(pgio->pg_lseg);
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-						   req->wb_context,
-						   0,
-						   NFS4_MAX_UINT64,
-						   IOMODE_READ,
-						   GFP_KERNEL);
-		if (IS_ERR(pgio->pg_lseg)) {
-			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
-			pgio->pg_lseg = NULL;
-		}
-	}
+	if (!pgio->pg_lseg)
+		ff_layout_pg_get_read(pgio, req, false);
+	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
+		ff_layout_pg_get_read(pgio, req, true);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
+						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
+						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
/*
 * iomode matching rules:
- * iomode	lseg	match
- * -----	-----	-----
- * ANY		READ	true
- * ANY		RW	true
- * RW		READ	false
- * RW		RW	true
- * READ		READ	true
- * READ		RW	true
+ * iomode	lseg	strict	match
+ *			iomode
+ * -----	-----	------	-----
+ * ANY		READ	N/A	true
+ * ANY		RW	N/A	true
+ * RW		READ	N/A	false
+ * RW		RW	N/A	true
+ * READ		READ	N/A	true
+ * READ		RW	true	false
+ * READ		RW	false	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
-		      const struct pnfs_layout_range *range)
+		      const struct pnfs_layout_range *range,
+		      bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
+	    (strict_iomode && range->iomode != ls_range->iomode) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;
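
The table can be checked mechanically. The sketch below reimplements just
the iomode half of pnfs_lseg_range_match() as a standalone predicate (the
range-intersection test is omitted) and asserts every row; lseg_match() and
the bare enum are inventions for the sketch, and the N/A rows are exercised
with strict off, since the kernel only passes strict_iomode on the READ path.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum iomode { IOMODE_ANY = 0, IOMODE_READ = 1, IOMODE_RW = 2 };

/* iomode half of the matching rules: an RW request is never satisfied
 * by a READ segment, and a strict request demands an exact match. */
static bool lseg_match(enum iomode want, enum iomode lseg, bool strict)
{
	if (want == IOMODE_RW && lseg != IOMODE_RW)
		return false;
	if (strict && want != lseg)
		return false;
	return true;
}

int main(void)
{
	/*         iomode        lseg        strict */
	assert(lseg_match(IOMODE_ANY,  IOMODE_READ, false));	/* true  */
	assert(lseg_match(IOMODE_ANY,  IOMODE_RW,   false));	/* true  */
	assert(!lseg_match(IOMODE_RW,  IOMODE_READ, false));	/* false */
	assert(lseg_match(IOMODE_RW,   IOMODE_RW,   false));	/* true  */
	assert(lseg_match(IOMODE_READ, IOMODE_READ, false));	/* true  */
	assert(!lseg_match(IOMODE_READ, IOMODE_RW,  true));	/* false */
	assert(lseg_match(IOMODE_READ, IOMODE_RW,   false));	/* true  */
	printf("all rows of the iomode table hold\n");
	return 0;
}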
*/
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
-	       struct pnfs_layout_range *range)
+	       struct pnfs_layout_range *range,
+	       bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
-		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
+		    pnfs_lseg_range_match(&lseg->pls_range, range,
+					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
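
To see why threading strict_iomode into the lookup matters, here is a mock
of the pnfs_find_lseg() walk over plain arrays: a lax IOMODE_READ search is
satisfied by a cached RW segment, while a strict one skips it. The flag
fields echo NFS_LSEG_VALID and NFS_LSEG_LAYOUTRETURN, but everything here
is a stand-in invented for the sketch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum iomode { IOMODE_READ = 1, IOMODE_RW = 2 };

struct mock_lseg {
	enum iomode iomode;
	bool valid;			/* NFS_LSEG_VALID analogue */
	bool returning;			/* NFS_LSEG_LAYOUTRETURN analogue */
};

/* iomode part of pnfs_lseg_range_match(); intersection test omitted. */
static bool range_match(const struct mock_lseg *ls, enum iomode want,
			bool strict)
{
	if (want == IOMODE_RW && ls->iomode != IOMODE_RW)
		return false;
	if (strict && want != ls->iomode)
		return false;
	return true;
}

/* First valid, non-returning match, as in pnfs_find_lseg(). */
static const struct mock_lseg *
find_lseg(const struct mock_lseg *segs, size_t n, enum iomode want,
	  bool strict)
{
	for (size_t i = 0; i < n; i++) {
		if (segs[i].valid && !segs[i].returning &&
		    range_match(&segs[i], want, strict))
			return &segs[i];
	}
	return NULL;
}

int main(void)
{
	const struct mock_lseg segs[] = {
		{ IOMODE_RW,   true, false },	/* cached RW segment */
		{ IOMODE_READ, true, false },
	};

	/* A lax READ lookup is satisfied by the RW segment at index 0... */
	printf("lax READ    -> index %td\n",
	       find_lseg(segs, 2, IOMODE_READ, false) - segs);
	/* ...a strict one skips it and lands on the READ segment. */
	printf("strict READ -> index %td\n",
	       find_lseg(segs, 2, IOMODE_READ, true) - segs);
	return 0;
}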
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
+		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		goto out_unlock;
	}

-	lseg = pnfs_find_lseg(lo, &arg);
+	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
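
For context, pnfs_update_layout() consults the cached layout before issuing
LAYOUTGET, so the new strict_iomode argument decides whether a cached RW
segment may satisfy a READ request or whether a fresh LAYOUTGET is forced.
A toy model of that cache-first flow, with stub helpers (find_cached,
send_layoutget, update_layout) invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

enum iomode { IOMODE_READ = 1, IOMODE_RW = 2 };

struct lseg { enum iomode iomode; };

static struct lseg cached = { IOMODE_RW };	/* pretend layout cache */

/* Stand-in for pnfs_find_lseg(): apply the strict_iomode rule to the
 * single cached segment. */
static struct lseg *find_cached(enum iomode want, bool strict)
{
	if (want == IOMODE_RW && cached.iomode != IOMODE_RW)
		return NULL;
	if (strict && want != cached.iomode)
		return NULL;
	return &cached;
}

/* Stand-in for the LAYOUTGET round trip to the server. */
static struct lseg *send_layoutget(enum iomode want)
{
	static struct lseg fresh;

	fresh.iomode = want;
	printf("LAYOUTGET sent (iomode %d)\n", want);
	return &fresh;
}

/* Cache first, RPC second -- the shape of pnfs_update_layout(). */
static struct lseg *update_layout(enum iomode want, bool strict)
{
	struct lseg *l = find_cached(want, strict);

	if (l) {
		printf("found cached lseg (iomode %d)\n", l->iomode);
		return l;	/* PNFS_UPDATE_LAYOUT_FOUND_CACHED path */
	}
	return send_layoutget(want);
}

int main(void)
{
	update_layout(IOMODE_READ, false);	/* cache hit on the RW lseg */
	update_layout(IOMODE_READ, true);	/* miss -> fresh LAYOUTGET */
	return 0;
}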
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
+						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
+						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);