git.baikalelectronics.ru Git - kernel.git/commitdiff
hugetlb: Convert huge_add_to_page_cache() to use a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 1 Jun 2022 19:11:01 +0000 (15:11 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 29 Jun 2022 12:51:05 +0000 (08:51 -0400)
Remove the last caller of add_to_page_cache()

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
fs/hugetlbfs/inode.c
mm/hugetlb.c

index 62408047e8d7bf78d23775217f475f9048db8652..ae2524480f2395dd08dfc9b176621976cf9bc161 100644 (file)
@@ -759,7 +759,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
                SetHPageMigratable(page);
                /*
-                * unlock_page because locked by add_to_page_cache()
+                * unlock_page because locked by huge_add_to_page_cache()
                 * put_page() due to reference from alloc_huge_page()
                 */
                unlock_page(page);
index a57e1be41401b4105c27c7fc84bc54c1ac7cc555..33b2c27e7c61e5968f6db947f1d991aa9b1a138d 100644 (file)
@@ -5414,19 +5414,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx)
 {
+       struct folio *folio = page_folio(page);
        struct inode *inode = mapping->host;
        struct hstate *h = hstate_inode(inode);
-       int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+       int err;
 
-       if (err)
+       __folio_set_locked(folio);
+       err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+       if (unlikely(err)) {
+               __folio_clear_locked(folio);
                return err;
+       }
        ClearHPageRestoreReserve(page);
 
        /*
-        * set page dirty so that it will not be removed from cache/file
+        * mark folio dirty so that it will not be removed from cache/file
         * by non-hugetlbfs specific code paths.
         */
-       set_page_dirty(page);
+       folio_mark_dirty(folio);
 
        spin_lock(&inode->i_lock);
        inode->i_blocks += blocks_per_huge_page(h);