bool (*release_folio)(struct folio *, gfp_t);
void (*free_folio)(struct folio *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
- int (*migratepage)(struct address_space *, struct page *, struct page *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
int (*error_remove_page)(struct address_space *, struct page *);
release_folio: yes
free_folio: yes
direct_IO:
-migratepage: yes (both)
+migrate_folio: yes (both)
launder_folio: yes
is_partially_uptodate: yes
error_remove_page: yes
bool (*release_folio)(struct folio *, gfp_t);
void (*free_folio)(struct folio *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
- /* migrate the contents of a page to the specified target */
- int (*migratepage) (struct page *, struct page *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
int (*launder_folio) (struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
data directly between the storage and the application's address
space.
-``migrate_page``
+``migrate_folio``
This is used to compact the physical memory usage. If the VM
- wants to relocate a page (maybe off a memory card that is
- signalling imminent failure) it will pass a new page and an old
- page to this function. migrate_page should transfer any private
- data across and update any references that it has to the page.
+ wants to relocate a folio (maybe from a memory device that is
+ signalling imminent failure) it will pass a new folio and an old
+ folio to this function. migrate_folio should transfer any private
+ data across and update any references that it has to the folio.
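As a rough sketch of what this entry asks an implementation to do, the following is a hypothetical migrate_folio for an "examplefs" that attaches private data to its folios. The examplefs name is made up and is not part of the patch; folio_migrate_mapping(), folio_migrate_copy() and the folio private-data helpers are the folio counterparts of the old migrate_page_move_mapping()/migrate_page_copy() building blocks and are assumed to be available in the target kernel:

#include <linux/pagemap.h>
#include <linux/migrate.h>

/* Hypothetical filesystem used only for illustration. */
static int examplefs_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src,
		enum migrate_mode mode)
{
	int rc;

	/* Re-point the page cache entries from src to dst. */
	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Transfer the filesystem's private data to the new folio. */
	if (folio_test_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	/*
	 * Copy the contents and relevant flags across.  A real
	 * implementation may also need to honour 'mode', e.g. avoid
	 * blocking when it is MIGRATE_ASYNC.
	 */
	folio_migrate_copy(dst, src);
	return MIGRATEPAGE_SUCCESS;
}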
``launder_folio``
Called before freeing a folio - it writes back the dirty folio.
void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
int (*launder_folio)(struct folio *);
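For filesystems whose folios carry no private data the conversion can be simpler still: point the new hook at a generic helper and drop ->migratepage once callers no longer need it. A minimal sketch, assuming the same hypothetical "examplefs" and that the generic filemap_migrate_folio() helper (declared in <linux/pagemap.h> in later kernels) is available:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical filesystem used only for illustration. */
static const struct address_space_operations examplefs_aops = {
	/* read, write and dirty-tracking operations elided */
	.migrate_folio	= filemap_migrate_folio,
	/*
	 * No .migratepage: the dispatch code shown below tries
	 * ->migrate_folio before ->migratepage, so a converted
	 * filesystem can drop the old hook entirely.
	 */
};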
goto isolate_fail_put;
mapping = page_mapping(page);
- migrate_dirty = !mapping || mapping->a_ops->migratepage;
+ migrate_dirty = !mapping ||
+ mapping->a_ops->migrate_folio ||
+ mapping->a_ops->migratepage;
unlock_page(page);
if (!migrate_dirty)
goto isolate_fail_put;
if (!mapping)
rc = migrate_page(mapping, &dst->page, &src->page, mode);
- else if (mapping->a_ops->migratepage)
+ else if (mapping->a_ops->migrate_folio)
/*
- * Most pages have a mapping and most filesystems
- * provide a migratepage callback. Anonymous pages
+ * Most folios have a mapping and most filesystems
+ * provide a migrate_folio callback. Anonymous folios
* are part of swap space which also has its own
- * migratepage callback. This is the most common path
+ * migrate_folio callback. This is the most common path
* for page migration.
*/
+ rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+ mode);
+ else if (mapping->a_ops->migratepage)
rc = mapping->a_ops->migratepage(mapping, &dst->page,
&src->page, mode);
else