mm/shmem: convert shmem_alloc_and_acct_page to use a folio
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Fri, 13 May 2022 03:23:04 +0000 (20:23 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Fri, 13 May 2022 14:20:16 +0000 (07:20 -0700)
Convert shmem_alloc_hugepage() to return the folio that it uses and use a
folio throughout shmem_alloc_and_acct_page().  Continue to return a page
from shmem_alloc_and_acct_page() for now.

Link: https://lkml.kernel.org/r/20220504182857.4013401-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
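
The pattern in this patch: the inner allocator (now shmem_alloc_hugefolio()) hands back the struct folio it already works with, while the outer helper keeps its page-based return type by handing out &folio->page. The standalone C sketch below illustrates that shape only; the struct page/struct folio definitions and the alloc_mock_* helpers are mock-ups invented for the example, not the kernel's types.

/*
 * Simplified, userspace-only illustration of the conversion pattern in
 * this commit: allocate and initialise a "folio", but keep returning a
 * page pointer from the legacy-facing helper.  All types and helpers
 * here are mock-ups, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long flags;
};

/* In the kernel a folio's first member is its head page; mimic that. */
struct folio {
	struct page page;
	unsigned int order;
};

static struct folio *alloc_mock_folio(unsigned int order)
{
	struct folio *folio = calloc(1, sizeof(*folio));

	if (folio)
		folio->order = order;
	return folio;	/* return the folio, as shmem_alloc_hugefolio() now does */
}

/* Legacy-facing helper: works on a folio internally, returns a page. */
static struct page *alloc_mock_page(bool huge)
{
	struct folio *folio = alloc_mock_folio(huge ? 9 : 0);

	if (!folio)
		return NULL;
	folio->page.flags |= 1;	/* stand-in for __folio_set_locked() etc. */
	return &folio->page;	/* same trick as "return &folio->page;" in the diff */
}

int main(void)
{
	struct page *page = alloc_mock_page(true);

	printf("allocated page at %p (embedded in its folio)\n", (void *)page);
	free(page);	/* the page is the folio's first member, so this frees the folio */
	return 0;
}

Because the head page is the folio's first member, the page pointer and the folio pointer refer to the same address, which is what makes the temporary "return &folio->page" cheap.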
mm/shmem.c

index 668d054728bcc2acf8c2fb70896cc7755de5a6f1..d5b23932357d380fafff45838d82cdc74943fab5 100644
@@ -1523,7 +1523,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
        return result;
 }
 
-static struct page *shmem_alloc_hugepage(gfp_t gfp,
+static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
                struct shmem_inode_info *info, pgoff_t index)
 {
        struct vm_area_struct pvma;
@@ -1541,7 +1541,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
        shmem_pseudo_vma_destroy(&pvma);
        if (!folio)
                count_vm_event(THP_FILE_FALLBACK);
-       return &folio->page;
+       return folio;
 }
 
 static struct folio *shmem_alloc_folio(gfp_t gfp,
@@ -1568,7 +1568,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
                pgoff_t index, bool huge)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct page *page;
+       struct folio *folio;
        int nr;
        int err = -ENOSPC;
 
@@ -1580,13 +1580,13 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
                goto failed;
 
        if (huge)
-               page = shmem_alloc_hugepage(gfp, info, index);
+               folio = shmem_alloc_hugefolio(gfp, info, index);
        else
-               page = shmem_alloc_page(gfp, info, index);
-       if (page) {
-               __SetPageLocked(page);
-               __SetPageSwapBacked(page);
-               return page;
+               folio = shmem_alloc_folio(gfp, info, index);
+       if (folio) {
+               __folio_set_locked(folio);
+               __folio_set_swapbacked(folio);
+               return &folio->page;
        }
 
        err = -ENOMEM;
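
Returning &folio->page keeps the existing callers of shmem_alloc_and_acct_page() working unchanged until a later patch converts them; folio-aware code that receives such a page can recover the folio again (the kernel does this with page_folio()). The sketch below shows that round trip with the same mock types as above; the container_of() shim stands in for what page_folio() does for a head page and is not the kernel implementation.

/*
 * Companion sketch: recovering the folio from the page pointer the
 * helper returned.  Mock types again; container_of() mirrors what the
 * kernel's page_folio() does for a head page.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page {
	unsigned long flags;
};

struct folio {
	struct page page;
	unsigned int order;
};

int main(void)
{
	struct folio *folio = calloc(1, sizeof(*folio));
	struct page *page = &folio->page;	/* what the converted helper hands out */

	/* A page-based caller can still climb back to the folio when needed. */
	struct folio *back = container_of(page, struct folio, page);

	printf("round trip ok: %d\n", back == folio);
	free(folio);
	return 0;
}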