git.baikalelectronics.ru Git - kernel.git/commitdiff
aio: Convert to migrate_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 6 Jun 2022 14:47:21 +0000 (10:47 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 2 Aug 2022 16:34:04 +0000 (12:34 -0400)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
fs/aio.c

index 3c249b93863274ccb6b1d10c05fa901861e5c848..a1911e86859c70e5d2d0d8ad869e66b1e513ab63 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-                       struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+                       struct folio *src, enum migrate_mode mode)
 {
        struct kioctx *ctx;
        unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                goto out;
        }
 
-       idx = old->index;
+       idx = src->index;
        if (idx < (pgoff_t)ctx->nr_pages) {
-               /* Make sure the old page hasn't already been changed */
-               if (ctx->ring_pages[idx] != old)
+               /* Make sure the old folio hasn't already been changed */
+               if (ctx->ring_pages[idx] != &src->page)
                        rc = -EAGAIN;
        } else
                rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                goto out_unlock;
 
        /* Writeback must be complete */
-       BUG_ON(PageWriteback(old));
-       get_page(new);
+       BUG_ON(folio_test_writeback(src));
+       folio_get(dst);
 
-       rc = migrate_page_move_mapping(mapping, new, old, 1);
+       rc = folio_migrate_mapping(mapping, dst, src, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
-               put_page(new);
+               folio_put(dst);
                goto out_unlock;
        }
 
        /* Take completion_lock to prevent other writes to the ring buffer
-        * while the old page is copied to the new.  This prevents new
+        * while the old folio is copied to the new.  This prevents new
         * events from being lost.
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);
-       migrate_page_copy(new, old);
-       BUG_ON(ctx->ring_pages[idx] != old);
-       ctx->ring_pages[idx] = new;
+       folio_migrate_copy(dst, src);
+       BUG_ON(ctx->ring_pages[idx] != &src->page);
+       ctx->ring_pages[idx] = &dst->page;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-       /* The old page is no longer accessible. */
-       put_page(old);
+       /* The old folio is no longer accessible. */
+       folio_put(src);
 
 out_unlock:
        mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ out:
        spin_unlock(&mapping->private_lock);
        return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
        .dirty_folio    = noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-       .migratepage    = aio_migratepage,
-#endif
+       .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)