kunmap_atomic(array);
}
+static void nfs_readdir_page_unlock_and_put(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
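+/*
+ * Grab the locked directory cache page at @index, but only keep it if
+ * its last cookie matches @cookie (i.e. it continues where the previous
+ * page left off); otherwise unlock and release it and return NULL.
+ */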
+static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
+ pgoff_t index, u64 cookie)
+{
+ struct page *page;
+
+ page = nfs_readdir_page_get_locked(mapping, index, cookie);
+ if (page) {
+ if (nfs_readdir_page_last_cookie(page) == cookie)
+ return page;
+ nfs_readdir_page_unlock_and_put(page);
+ }
+ return NULL;
+}
+
/* Perform conversion from xdr to cache array */
-static
-int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
- struct page **xdr_pages, struct page *page, unsigned int buflen)
+static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
+ struct nfs_entry *entry,
+ struct page **xdr_pages,
+ struct page *fillme, unsigned int buflen)
{
+ struct address_space *mapping = desc->file->f_mapping;
struct xdr_stream stream;
struct xdr_buf buf;
- struct page *scratch;
+ struct page *scratch, *new, *page = fillme;
int status;
scratch = alloc_page(GFP_KERNEL);
desc->dir_verifier);
status = nfs_readdir_add_to_array(entry, page);
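+ /*
+  * -ENOSPC means the array in the current page is full: try to keep
+  * filling into the next page of the directory cache. Any other status
+  * is handled by the loop condition below.
+  */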
+ if (status != -ENOSPC)
+ continue;
+
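+ /* Give up if the page being filled is no longer part of the directory's page cache. */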
+ if (page->mapping != mapping)
+ break;
+ new = nfs_readdir_page_get_next(mapping, page->index + 1,
+ entry->prev_cookie);
+ if (!new)
+ break;
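+ /*
+  * Drop the page we were filling before switching to the new one,
+  * unless it is the caller's @fillme page, which is never released here.
+  */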
+ if (page != fillme)
+ nfs_readdir_page_unlock_and_put(page);
+ page = new;
+ status = nfs_readdir_add_to_array(entry, page);
} while (!status && !entry->eof);
switch (status) {
break;
}
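+ /*
+  * If filling spilled past the caller's page, release the last
+  * continuation page; @fillme itself is left to the caller.
+  */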
+ if (page != fillme)
+ nfs_readdir_page_unlock_and_put(page);
+
put_page(scratch);
return status;
}