git.baikalelectronics.ru Git - kernel.git/commitdiff
mm/migrate: Convert migrate_page() to migrate_folio()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 6 Jun 2022 14:27:41 +0000 (10:27 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 2 Aug 2022 16:34:04 +0000 (12:34 -0400)
Convert all callers to pass a folio.  Most have the folio
already available.  Switch all users from aops->migratepage to
aops->migrate_folio.  Also turn the documentation into kerneldoc.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Sterba <dsterba@suse.com>
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
fs/btrfs/disk-io.c
fs/nfs/write.c
include/linux/migrate.h
mm/migrate.c
mm/migrate_device.c
mm/shmem.c
mm/swap_state.c

index 094f06b4ce3359142d8cfb27d7b00c3372e9fee9..8423df021b7138a02f77897e1b9cc98af743e35d 100644 (file)
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                         * However...!
                         *
                         * The mmu-notifier can be invalidated for a
-                        * migrate_page, that is alreadying holding the lock
-                        * on the page. Such a try_to_unmap() will result
+                        * migrate_folio, that is alreadying holding the lock
+                        * on the folio. Such a try_to_unmap() will result
                         * in us calling put_pages() and so recursively try
                         * to lock the page. We avoid that deadlock with
                         * a trylock_page() and in exchange we risk missing
index 4aeb68f8450e742e4c54ef7d9255299d4d87b795..64d9299218f2f04a87eb18e7eb11dc5b8ddfb2d8 100644 (file)
@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space *mapping,
        if (folio_get_private(src) &&
            !filemap_release_folio(src, GFP_KERNEL))
                return -EAGAIN;
-       return migrate_page(mapping, &dst->page, &src->page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
 #else
 #define btree_migrate_folio NULL
index 649b9e63345974ec02b1683a5c9c764512a7c3ba..69569696dde0bd82e6088cb83e4b499f7c373df8 100644 (file)
@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
                folio_wait_fscache(src);
        }
 
-       return migrate_page(mapping, &dst->page, &src->page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
 #endif
 
index 82c735ba6109f54002469adb312a460238028bf0..c9986d5da335feedff525a2d7cb9650b948423d1 100644 (file)
@@ -62,9 +62,8 @@ extern const char *migrate_reason_names[MR_TYPES];
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
-                       struct page *newpage, struct page *page,
-                       enum migrate_mode mode);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                unsigned long private, enum migrate_mode mode, int reason,
                unsigned int *ret_succeeded);
index 61cd8d270b030d5fabe8502885ee36b44471f0fc..77aeb7e12f62986a4c525e476356bb28cf754cd4 100644 (file)
@@ -593,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
  *                    Migration functions
  ***********************************************************/
 
-/*
- * Common logic to directly migrate a single LRU page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
+/**
+ * migrate_folio() - Simple folio migration.
+ * @mapping: The address_space containing the folio.
+ * @dst: The folio to migrate the data to.
+ * @src: The folio containing the current data.
+ * @mode: How to migrate the page.
  *
- * Pages are locked upon entry and exit.
+ * Common logic to directly migrate a single LRU folio suitable for
+ * folios that do not use PagePrivate/PagePrivate2.
+ *
+ * Folios are locked upon entry and exit.
  */
-int migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page,
-               enum migrate_mode mode)
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode)
 {
-       struct folio *newfolio = page_folio(newpage);
-       struct folio *folio = page_folio(page);
        int rc;
 
-       BUG_ON(folio_test_writeback(folio));    /* Writeback must be complete */
+       BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
 
-       rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
+       rc = folio_migrate_mapping(mapping, dst, src, 0);
 
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
        if (mode != MIGRATE_SYNC_NO_COPY)
-               folio_migrate_copy(newfolio, folio);
+               folio_migrate_copy(dst, src);
        else
-               folio_migrate_flags(newfolio, folio);
+               folio_migrate_flags(dst, src);
        return MIGRATEPAGE_SUCCESS;
 }
-EXPORT_SYMBOL(migrate_page);
+EXPORT_SYMBOL(migrate_folio);
 
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
@@ -671,7 +674,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
 
        head = folio_buffers(src);
        if (!head)
-               return migrate_page(mapping, &dst->page, &src->page, mode);
+               return migrate_folio(mapping, dst, src, mode);
 
        /* Check whether page does not have extra refs before we do more work */
        expected_count = folio_expected_refs(mapping, src);
@@ -848,7 +851,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
            !filemap_release_folio(src, GFP_KERNEL))
                return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
-       return migrate_page(mapping, &dst->page, &src->page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
 
 /*
@@ -875,7 +878,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
                struct address_space *mapping = folio_mapping(src);
 
                if (!mapping)
-                       rc = migrate_page(mapping, &dst->page, &src->page, mode);
+                       rc = migrate_folio(mapping, dst, src, mode);
                else if (mapping->a_ops->migrate_folio)
                        /*
                         * Most folios have a mapping and most filesystems
index 5052093d0262d708eb9b573154d1038d07a462eb..5dd97c39ca6ae1157b12fd6b3aa62510e3690bf8 100644 (file)
@@ -718,7 +718,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                        continue;
                }
 
-               r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
+               r = migrate_folio(mapping, page_folio(newpage),
+                               page_folio(page), MIGRATE_SYNC_NO_COPY);
                if (r != MIGRATEPAGE_SUCCESS)
                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
        }
index 28a62be1d41e5dc9b79fd53efbfd4507ea15fed5..15c61456e0875f8076ae7f2facd5310495f739d0 100644 (file)
@@ -3801,7 +3801,7 @@ const struct address_space_operations shmem_aops = {
        .write_end      = shmem_write_end,
 #endif
 #ifdef CONFIG_MIGRATION
-       .migratepage    = migrate_page,
+       .migrate_folio  = migrate_folio,
 #endif
        .error_remove_page = shmem_error_remove_page,
 };
index f5b6f563890805e8a500ccd73d54284388c288a8..0a2021fc55ade0fe98da715e33c78e3d2272e2ad 100644 (file)
@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
-       .migratepage    = migrate_page,
+       .migrate_folio  = migrate_folio,
 #endif
 };