mm/migrate: Use a folio in alloc_migration_target()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 4 Apr 2022 18:35:04 +0000 (14:35 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Thu, 7 Apr 2022 13:43:41 +0000 (09:43 -0400)
This removes the assumption that a large folio is of order
HPAGE_PMD_ORDER, and also lets us remove the call to
prep_transhuge_page() and a few hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
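
For illustration, a minimal sketch (not part of the patch) of how folio_order()
replaces the old PMD-order assumption; the helper name migration_order() is
hypothetical:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>	/* HPAGE_PMD_ORDER */

    /* Hypothetical helper, for illustration only. */
    static unsigned int migration_order(struct folio *folio)
    {
    	if (!folio_test_large(folio))
    		return 0;
    	/*
    	 * The old code used HPAGE_PMD_ORDER (9 on x86-64 with 4KiB pages)
    	 * for every PageTransHuge() page.  folio_order() instead reports
    	 * the folio's actual size, e.g. 4 for a 64KiB folio, so the
    	 * replacement allocation matches what is being migrated.
    	 */
    	return folio_order(folio);
    }

The folio helpers also operate on the head page directly, which is why the
hidden compound_head() lookups inside PageHuge(), PageTransHuge() and
page_to_nid() disappear from the function below.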
mm/migrate.c

index de175e2fdba5d8c4b91ca68460ecb13cf06a2c66..9894e90db0069bde6b101e2283023b7b53bdcc3c 100644 (file)
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+       struct folio *folio = page_folio(page);
        struct migration_target_control *mtc;
        gfp_t gfp_mask;
        unsigned int order = 0;
-       struct page *new_page = NULL;
+       struct folio *new_folio = NULL;
        int nid;
        int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
        gfp_mask = mtc->gfp_mask;
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
-               nid = page_to_nid(page);
+               nid = folio_nid(folio);
 
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(compound_head(page));
+       if (folio_test_hugetlb(folio)) {
+               struct hstate *h = page_hstate(&folio->page);
 
                gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
                return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
        }
 
-       if (PageTransHuge(page)) {
+       if (folio_test_large(folio)) {
                /*
                 * clear __GFP_RECLAIM to make the migration callback
                 * consistent with regular THP allocations.
                 */
                gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
-               order = HPAGE_PMD_ORDER;
+               order = folio_order(folio);
        }
-       zidx = zone_idx(page_zone(page));
+       zidx = zone_idx(folio_zone(folio));
        if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-       if (new_page && PageTransHuge(new_page))
-               prep_transhuge_page(new_page);
+       new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-       return new_page;
+       return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
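
For context, a sketch of how a caller of this era might hand
alloc_migration_target() its migration_target_control through the opaque
private argument; the wrapper name, node choice and reason code are
illustrative assumptions, not taken from this commit
(struct migration_target_control is defined in mm/internal.h):

    #include <linux/migrate.h>
    #include <linux/gfp.h>
    #include "internal.h"	/* struct migration_target_control */

    /*
     * Hypothetical wrapper, for illustration only: migrate every page on
     * @pages to @target_nid, letting alloc_migration_target() pick the
     * right order and zone for each source folio.
     */
    static int migrate_list_to_node(struct list_head *pages, int target_nid)
    {
    	struct migration_target_control mtc = {
    		.nid = target_nid,	/* NUMA_NO_NODE keeps the source node */
    		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
    	};

    	/*
    	 * mtc travels through migrate_pages() as an unsigned long and is
    	 * unpacked again at the top of alloc_migration_target().
    	 */
    	return migrate_pages(pages, alloc_migration_target, NULL,
    			     (unsigned long)&mtc, MIGRATE_SYNC,
    			     MR_MEMORY_HOTPLUG, NULL);
    }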