mm/shmem: convert shmem_add_to_page_cache to take a folio
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 13 May 2022 03:23:04 +0000 (20:23 -0700)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 13 May 2022 14:20:16 +0000 (07:20 -0700)
Shrinks shmem_add_to_page_cache() by 16 bytes.  All the callers grow,
but this is temporary as they will all be converted to folios soon.

Link: https://lkml.kernel.org/r/20220504182857.4013401-19-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
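
Each caller follows the same transitional pattern: the struct page it already holds is converted to its containing folio at the call boundary with page_folio(). A minimal sketch of that pattern (variable names are illustrative; page_folio() and shmem_add_to_page_cache() are the real identifiers from this commit):

	struct folio *folio;

	/* page_folio() returns the folio containing the page; for the
	 * head and order-0 pages shmem passes here, it is effectively a
	 * pointer conversion with no refcount change. */
	folio = page_folio(page);
	error = shmem_add_to_page_cache(folio, mapping, index,
					expected, gfp, charge_mm);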
mm/shmem.c

index 6cba5fd9b694de010a91ef14d75242462b633236..2e800d4e0d03a613bc4ee7009a28830aa9af437c 100644
@@ -696,36 +696,35 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_add_to_page_cache(struct folio *folio,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected, gfp_t gfp,
                                   struct mm_struct *charge_mm)
 {
-       XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
-       unsigned long nr = compound_nr(page);
+       XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
+       long nr = folio_nr_pages(folio);
        int error;
 
-       VM_BUG_ON_PAGE(PageTail(page), page);
-       VM_BUG_ON_PAGE(index != round_down(index, nr), page);
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-       VM_BUG_ON(expected && PageTransHuge(page));
+       VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+       VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
+       VM_BUG_ON(expected && folio_test_large(folio));
 
-       page_ref_add(page, nr);
-       page->mapping = mapping;
-       page->index = index;
+       folio_ref_add(folio, nr);
+       folio->mapping = mapping;
+       folio->index = index;
 
-       if (!PageSwapCache(page)) {
-               error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
+       if (!folio_test_swapcache(folio)) {
+               error = mem_cgroup_charge(folio, charge_mm, gfp);
                if (error) {
-                       if (PageTransHuge(page)) {
+                       if (folio_test_pmd_mappable(folio)) {
                                count_vm_event(THP_FILE_FALLBACK);
                                count_vm_event(THP_FILE_FALLBACK_CHARGE);
                        }
                        goto error;
                }
        }
-       cgroup_throttle_swaprate(page, gfp);
+       folio_throttle_swaprate(folio, gfp);
 
        do {
                xas_lock_irq(&xas);
@@ -737,16 +736,16 @@ static int shmem_add_to_page_cache(struct page *page,
                        xas_set_err(&xas, -EEXIST);
                        goto unlock;
                }
-               xas_store(&xas, page);
+               xas_store(&xas, folio);
                if (xas_error(&xas))
                        goto unlock;
-               if (PageTransHuge(page)) {
+               if (folio_test_pmd_mappable(folio)) {
                        count_vm_event(THP_FILE_ALLOC);
-                       __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
+                       __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
                }
                mapping->nrpages += nr;
-               __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
-               __mod_lruvec_page_state(page, NR_SHMEM, nr);
+               __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+               __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
 unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));
@@ -758,8 +757,8 @@ unlock:
 
        return 0;
 error:
-       page->mapping = NULL;
-       page_ref_sub(page, nr);
+       folio->mapping = NULL;
+       folio_ref_sub(folio, nr);
        return error;
 }
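
The assertions and statistics above swap page helpers for their folio equivalents. A quick reference for the substitutions made in this function (note that PageTransHuge() maps to two different folio tests depending on the question being asked):

	/*
	 * compound_order(page)         ->  folio_order(folio)
	 * compound_nr(page)            ->  folio_nr_pages(folio), returns long
	 * PageLocked(page)             ->  folio_test_locked(folio)
	 * PageSwapBacked(page)         ->  folio_test_swapbacked(folio)
	 * PageSwapCache(page)          ->  folio_test_swapcache(folio)
	 * PageTransHuge(page)          ->  folio_test_large(folio) when asking
	 *                                  "more than one page?", or
	 *                                  folio_test_pmd_mappable(folio) when
	 *                                  asking "PMD-sized?"
	 * __mod_lruvec_page_state()    ->  __lruvec_stat_mod_folio()
	 * cgroup_throttle_swaprate()   ->  folio_throttle_swaprate()
	 */

The VM_BUG_ON_PAGE(PageTail(page), page) check is dropped outright: a struct folio can never refer to a tail page, so the type now enforces what the assertion used to.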
 
@@ -1691,7 +1690,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-       struct page *page;
+       struct page *page = NULL;
+       struct folio *folio;
        swp_entry_t swap;
        int error;
 
@@ -1741,7 +1741,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                        goto failed;
        }
 
-       error = shmem_add_to_page_cache(page, mapping, index,
+       folio = page_folio(page);
+       error = shmem_add_to_page_cache(folio, mapping, index,
                                        swp_to_radix_entry(swap), gfp,
                                        charge_mm);
        if (error)
@@ -1792,6 +1793,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct mm_struct *charge_mm;
+       struct folio *folio;
        struct page *page;
        pgoff_t hindex = index;
        gfp_t huge_gfp;
@@ -1906,7 +1908,8 @@ alloc_nohuge:
        if (sgp == SGP_WRITE)
                __SetPageReferenced(page);
 
-       error = shmem_add_to_page_cache(page, mapping, hindex,
+       folio = page_folio(page);
+       error = shmem_add_to_page_cache(folio, mapping, hindex,
                                        NULL, gfp & GFP_RECLAIM_MASK,
                                        charge_mm);
        if (error)
@@ -2328,6 +2331,7 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        gfp_t gfp = mapping_gfp_mask(mapping);
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
        void *page_kaddr;
+       struct folio *folio;
        struct page *page;
        int ret;
        pgoff_t max_off;
@@ -2386,7 +2390,8 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        if (unlikely(pgoff >= max_off))
                goto out_release;
 
-       ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+       folio = page_folio(page);
+       ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
                                      gfp & GFP_RECLAIM_MASK, dst_mm);
        if (ret)
                goto out_release;
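
Each of the three call sites grows by one page_folio() conversion, which the commit message notes is temporary. A hypothetical alternative (not in this commit, sketched here only for contrast) would be a page-based wrapper that hides the conversion in one place, at the cost of keeping a page interface alive:

	/* Hypothetical wrapper, NOT part of this commit: a transitional
	 * page-based entry point that converts to a folio internally. */
	static int shmem_add_page_to_page_cache(struct page *page,
			struct address_space *mapping, pgoff_t index,
			void *expected, gfp_t gfp, struct mm_struct *charge_mm)
	{
		return shmem_add_to_page_cache(page_folio(page), mapping,
					       index, expected, gfp, charge_mm);
	}

The series instead makes each conversion explicit at the caller, so the remaining work stands out when those functions are themselves converted to take folios.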