git.baikalelectronics.ru Git - kernel.git/commitdiff
nilfs2: Convert nilfs_copy_back_pages() to use filemap_get_folios()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 4 Jun 2022 20:40:39 +0000 (16:40 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 29 Jun 2022 12:51:06 +0000 (08:51 -0400)
Use folios throughout.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
fs/nilfs2/page.c

index a8e88cc38e16a88a9346c36a5babca39c1517759..3267e96c256cabdf36f45ab1ce91fa57a0a4ddcb 100644 (file)
@@ -294,57 +294,57 @@ repeat:
 void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
 {
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        unsigned int i, n;
-       pgoff_t index = 0;
+       pgoff_t start = 0;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
 repeat:
-       n = pagevec_lookup(&pvec, smap, &index);
+       n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
        if (!n)
                return;
 
-       for (i = 0; i < pagevec_count(&pvec); i++) {
-               struct page *page = pvec.pages[i], *dpage;
-               pgoff_t offset = page->index;
-
-               lock_page(page);
-               dpage = find_lock_page(dmap, offset);
-               if (dpage) {
-                       /* overwrite existing page in the destination cache */
-                       WARN_ON(PageDirty(dpage));
-                       nilfs_copy_page(dpage, page, 0);
-                       unlock_page(dpage);
-                       put_page(dpage);
-                       /* Do we not need to remove page from smap here? */
+       for (i = 0; i < folio_batch_count(&fbatch); i++) {
+               struct folio *folio = fbatch.folios[i], *dfolio;
+               pgoff_t index = folio->index;
+
+               folio_lock(folio);
+               dfolio = filemap_lock_folio(dmap, index);
+               if (dfolio) {
+                       /* overwrite existing folio in the destination cache */
+                       WARN_ON(folio_test_dirty(dfolio));
+                       nilfs_copy_page(&dfolio->page, &folio->page, 0);
+                       folio_unlock(dfolio);
+                       folio_put(dfolio);
+                       /* Do we not need to remove folio from smap here? */
                } else {
-                       struct page *p;
+                       struct folio *f;
 
-                       /* move the page to the destination cache */
+                       /* move the folio to the destination cache */
                        xa_lock_irq(&smap->i_pages);
-                       p = __xa_erase(&smap->i_pages, offset);
-                       WARN_ON(page != p);
+                       f = __xa_erase(&smap->i_pages, index);
+                       WARN_ON(folio != f);
                        smap->nrpages--;
                        xa_unlock_irq(&smap->i_pages);
 
                        xa_lock_irq(&dmap->i_pages);
-                       p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
-                       if (unlikely(p)) {
+                       f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
+                       if (unlikely(f)) {
                                /* Probably -ENOMEM */
-                               page->mapping = NULL;
-                               put_page(page);
+                               folio->mapping = NULL;
+                               folio_put(folio);
                        } else {
-                               page->mapping = dmap;
+                               folio->mapping = dmap;
                                dmap->nrpages++;
-                               if (PageDirty(page))
-                                       __xa_set_mark(&dmap->i_pages, offset,
+                               if (folio_test_dirty(folio))
+                                       __xa_set_mark(&dmap->i_pages, index,
                                                        PAGECACHE_TAG_DIRTY);
                        }
                        xa_unlock_irq(&dmap->i_pages);
                }
-               unlock_page(page);
+               folio_unlock(folio);
        }
-       pagevec_release(&pvec);
+       folio_batch_release(&fbatch);
        cond_resched();
 
        goto repeat;