mm: replace hpage_nr_pages with thp_nr_pages
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Sat, 15 Aug 2020 00:30:37 +0000 (17:30 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Sat, 15 Aug 2020 02:56:56 +0000 (19:56 -0700)
The thp prefix is more frequently used than hpage, and we should be
consistent across the various functions.
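
For illustration only (a hedged sketch, not part of the patch): at a
typical call site the rename is purely textual.  The helper below is
hypothetical; it mirrors the mm/mlock.c hunk further down.

	/* Hypothetical caller, for illustration only. */
	static void example_clear_mlock_stat(struct page *page)
	{
		/* was: -hpage_nr_pages(page) before this patch */
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    -thp_nr_pages(page));
	}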

[akpm@linux-foundation.org: fix mm/migrate.c]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20200629151959.15779-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
20 files changed:
include/linux/huge_mm.h
include/linux/mm_inline.h
include/linux/pagemap.h
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/internal.h
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/page_io.c
mm/page_vma_mapped.c
mm/rmap.c
mm/swap.c
mm/swap_state.c
mm/swapfile.c
mm/vmscan.c
mm/workingset.c

index 9b33ac774fdda3f8ed44d3989a347d44e80173bb..229f986d535a126bfd835fb38891f06af6f17a33 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -271,9 +271,14 @@ static inline unsigned int thp_order(struct page *page)
        return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
 {
-       if (unlikely(PageTransHuge(page)))
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
+       if (PageHead(page))
                return HPAGE_PMD_NR;
        return 1;
 }
@@ -336,9 +341,9 @@ static inline unsigned int thp_order(struct page *page)
        return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+static inline int thp_nr_pages(struct page *page)
 {
-       VM_BUG_ON_PAGE(PageTail(page), page);
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 1;
 }
 
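A note on the new definition: thp_nr_pages() expects the head page of a
compound page.  The PageHead() test is what PageTransHuge() checked
internally, and the VM_BUG_ON_PGFLAGS() assertion (compiled in under
CONFIG_DEBUG_VM_PGFLAGS) catches callers that pass a tail page.  A
minimal sketch of a hypothetical caller, assuming the head page is
already in hand:

	/* Hypothetical helper, for illustration: bytes covered by @page. */
	static inline unsigned long example_page_bytes(struct page *page)
	{
		/* HPAGE_PMD_NR base pages for a THP, otherwise one page */
		return (unsigned long)thp_nr_pages(page) << PAGE_SHIFT;
	}
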
index 219bef41d87c334816d761b6f9b93b31bde99b54..8fc71e9d7bb079dab4b6057a062d5d34822a6cac 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-       update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-       update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
 
@@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
        list_del(&page->lru);
-       update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
 }
 
 /**
index d1f4eff605ad933fce09988f34b79f4ea2007400..7de11dcd534d6fdb171c382bf14e896ca0e6b5c3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
        if (PageHuge(head))
                return head;
 
-       return head + (index & (hpage_nr_pages(head) - 1));
+       return head + (index & (thp_nr_pages(head) - 1));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)
 
        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       rac->_batch_count = hpage_nr_pages(page);
+       rac->_batch_count = thp_nr_pages(page);
 
        return page;
 }
@@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
-               rac->_batch_count += hpage_nr_pages(page);
+               rac->_batch_count += thp_nr_pages(page);
 
                /*
                 * The page cache isn't using multi-index entries yet,
index b89581bf859c94e2a02e64c9512625ea91ff489e..176dcded298ee88517a5d7b9df4ab7520bde61e9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                mod_node_page_state(page_pgdat(page),
                                NR_ISOLATED_ANON + page_is_file_lru(page),
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
 isolate_success:
                list_add(&page->lru, &cc->migratepages);
index 8e75bce0346d2c9155e481d7907750b685a57078..653190943aa72cf27480153424bac5077cd3eaa6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
        if (PageHuge(page))
                return;
 
-       nr = hpage_nr_pages(page);
+       nr = thp_nr_pages(page);
 
        __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
index 39e58df6925d40225915c49842546cf88bae2e82..ae096ea7583fe72bfebfd2378914e04901d1d4a4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1637,7 +1637,7 @@ check_again:
                                        mod_node_page_state(page_pgdat(head),
                                                            NR_ISOLATED_ANON +
                                                            page_is_file_lru(head),
-                                                           hpage_nr_pages(head));
+                                                           thp_nr_pages(head));
                                }
                        }
                }
index 912bb1a1c10eac6025ab47043b8bcecddef130ff..10c677655912aedf246ea4356dbec4b16164cd53 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page);
 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
        if (TestClearPageMlocked(page)) {
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                /* Holding pmd lock, no change in irq context: __mod is safe */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
index 9d87082e64aa308a4a1bc22ed5ce10b4207d9fe1..b807952b4d431b60c52e2fef59a093ed6cc6f373 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5589,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
        struct lruvec *from_vec, *to_vec;
        struct pglist_data *pgdat;
-       unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+       unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
        int ret;
 
        VM_BUG_ON(from == to);
@@ -6682,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
  */
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
-       unsigned int nr_pages = hpage_nr_pages(page);
+       unsigned int nr_pages = thp_nr_pages(page);
        struct mem_cgroup *memcg = NULL;
        int ret = 0;
 
@@ -6912,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
                return;
 
        /* Force-charge the new page. The old one will be freed soon */
-       nr_pages = hpage_nr_pages(newpage);
+       nr_pages = thp_nr_pages(newpage);
 
        page_counter_charge(&memcg->memory, nr_pages);
        if (do_memsw_account())
@@ -7114,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
         * ancestor for the swap instead and transfer the memory+swap charge.
         */
        swap_memcg = mem_cgroup_id_get_online(memcg);
-       nr_entries = hpage_nr_pages(page);
+       nr_entries = thp_nr_pages(page);
        /* Get references for the tail pages, too */
        if (nr_entries > 1)
                mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
@@ -7158,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  */
 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 {
-       unsigned int nr_pages = hpage_nr_pages(page);
+       unsigned int nr_pages = thp_nr_pages(page);
        struct page_counter *counter;
        struct mem_cgroup *memcg;
        unsigned short oldid;
index c32ead89c9112279b464b6db0a7dcaceaafb9547..e9d5ab5d3ca097ed96fc7050d485dc6a2c07630b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1299,7 +1299,7 @@ static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
-       struct page *page;
+       struct page *page, *head;
        int ret = 0;
        LIST_HEAD(source);
 
@@ -1307,15 +1307,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
+               head = compound_head(page);
 
                if (PageHuge(page)) {
-                       struct page *head = compound_head(page);
                        pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_huge_page(head, &source);
                        continue;
                } else if (PageTransHuge(page))
-                       pfn = page_to_pfn(compound_head(page))
-                               + hpage_nr_pages(page) - 1;
+                       pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
 
                /*
                 * HWPoison pages have elevated reference counts so the migration would
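
The mm/memory_hotplug.c hunk above does slightly more than rename:
compound_head() is hoisted so the hugetlb and THP branches share "head",
and the THP case now computes the last pfn from "head" directly.  In
both branches pfn is advanced to the compound page's last subpage, so
the scan loop's pfn++ steps past the whole page.  A sketch of that skip
arithmetic, for illustration only (the hugetlb branch uses
compound_nr() instead, since hugetlb pages can be larger than a
PMD-sized THP):

	/* Illustrative sketch of the THP branch's pfn skip. */
	static unsigned long example_skip_thp(struct page *head)
	{
		return page_to_pfn(head) + thp_nr_pages(head) - 1;
	}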
index afaa09ff9f6cf8f328b2f2ddf0f63ef4dbcb2016..eddbe4e56c739f0504f64da6837e53bff2c38457 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
                        list_add_tail(&head->lru, pagelist);
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_lru(head),
-                               hpage_nr_pages(head));
+                               thp_nr_pages(head));
                } else if (flags & MPOL_MF_STRICT) {
                        /*
                         * Non-movable page may reach here.  And, there may be
index 5053439be6abeec4bdb15e74e259492211043f69..34a842a8eb6a7b85b191e58adca66002a1a5072c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
                        putback_lru_page(page);
                }
        }
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
         */
        expected_count += is_device_private_page(page);
        if (mapping)
-               expected_count += hpage_nr_pages(page) + page_has_private(page);
+               expected_count += thp_nr_pages(page) + page_has_private(page);
 
        return expected_count;
 }
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
-               nr_pages = hpage_nr_pages(src);
+               nr_pages = thp_nr_pages(src);
        }
 
        for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ out:
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
        }
 
        /*
@@ -1446,7 +1446,7 @@ retry:
                         * during migration.
                         */
                        is_thp = PageTransHuge(page);
-                       nr_subpages = hpage_nr_pages(page);
+                       nr_subpages = thp_nr_pages(page);
                        cond_resched();
 
                        if (PageHuge(page))
@@ -1670,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_lru(head),
-                       hpage_nr_pages(head));
+                       thp_nr_pages(head));
        }
 out_putpage:
        /*
@@ -2034,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
        page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
        /*
         * Isolating the page has taken another reference, so the
index f8736136fad7f84b1b78a6417751e823fada1b5b..93ca2bf30b4fd262fe4a4f432fa01ad96d145e74 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
        if (!TestClearPageMlocked(page))
                return;
 
-       mod_zone_page_state(page_zone(page), NR_MLOCK,
-                           -hpage_nr_pages(page));
+       mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
        count_vm_event(UNEVICTABLE_PGCLEARED);
        /*
         * The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
 
        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
        /*
         * Serialize with any parallel __split_huge_page_refcount() which
         * might otherwise copy PageMlocked to part of the tail pages before
-        * we clear it in the head page. It also stabilizes hpage_nr_pages().
+        * we clear it in the head page. It also stabilizes thp_nr_pages().
         */
        spin_lock_irq(&pgdat->lru_lock);
 
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
                goto unlock_out;
        }
 
-       nr_pages = hpage_nr_pages(page);
+       nr_pages = thp_nr_pages(page);
        __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
        if (__munlock_isolate_lru_page(page, true)) {
index f5e8bec8a8c7012726e4c0e0417c97defcb66dd0..454b70d8cda7b8fd7a31cc6323f5ab40d206dd45 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page)
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
 #endif
-       count_vm_events(PSWPOUT, hpage_nr_pages(page));
+       count_vm_events(PSWPOUT, thp_nr_pages(page));
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
index e65629c056e80b88bde8e00067ab226500f88bfd..5e77b269c330a29a6c5bebdf963bdcd0b08d3780 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn)
                return page_pfn == pfn;
 
        /* THP can be referenced by any subpage */
-       return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
+       return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
 }
 
 /**
index 6cce9ef06753b19313b209d9f975fa69ca4b62eb..4ace1e32f705364eec7b05218b6a66b84b4ea82a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page,
        }
 
        if (first) {
-               int nr = compound ? hpage_nr_pages(page) : 1;
+               int nr = compound ? thp_nr_pages(page) : 1;
                /*
                 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
                 * these counters are not modified in interrupt context, and
@@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-       int nr = compound ? hpage_nr_pages(page) : 1;
+       int nr = compound ? thp_nr_pages(page) : 1;
 
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        __SetPageSwapBacked(page);
@@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
@@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap,
index 9285e60c7d6e8949cf912b421c1ecd7951e560fb..d26c22baf7c516c47a111bbdf753b04178fc36ba 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-               (*pgmoved) += hpage_nr_pages(page);
+               (*pgmoved) += thp_nr_pages(page);
        }
 }
 
@@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 void lru_note_cost_page(struct page *page)
 {
        lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
-                     page_is_file_lru(page), hpage_nr_pages(page));
+                     page_is_file_lru(page), thp_nr_pages(page));
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
@@ -500,7 +500,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
                 * lock is held(spinlock), which implies preemption disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        lru_cache_add(page);
@@ -532,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
        int lru;
        bool active;
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        if (!PageLRU(page))
                return;
@@ -580,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
@@ -599,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
@@ -972,7 +972,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
index b73aabdfd35ad28bb7b07d41bef6585b84d073ae..d9d4a49f32415a347ff34a3e3c7543f378a2c15c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -130,7 +130,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = hpage_nr_pages(page);
+       unsigned long i, nr = thp_nr_pages(page);
        void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -183,7 +183,7 @@ void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
 {
        struct address_space *address_space = swap_address_space(entry);
-       int i, nr = hpage_nr_pages(page);
+       int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);
 
@@ -278,7 +278,7 @@ void delete_from_swap_cache(struct page *page)
        xa_unlock_irq(&address_space->i_pages);
 
        put_swap_page(page, entry);
-       page_ref_sub(page, hpage_nr_pages(page));
+       page_ref_sub(page, thp_nr_pages(page));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
index e653eea1eb88b6a301810f86719e381d227679d5..eb410d3c8de8c430047b74b9269d0268d2126f93 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1370,7 +1370,7 @@ void put_swap_page(struct page *page, swp_entry_t entry)
        unsigned char *map;
        unsigned int i, free_entries = 0;
        unsigned char val;
-       int size = swap_entry_size(hpage_nr_pages(page));
+       int size = swap_entry_size(thp_nr_pages(page));
 
        si = _swap_info_get(entry);
        if (!si)
index 738115ed75e23166e64935850908db20cace36c5..99e1796eb8336236d4eacefd2e51ca00d76786b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1354,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               stat->nr_pageout += hpage_nr_pages(page);
+                               stat->nr_pageout += thp_nr_pages(page);
 
                                if (PageWriteback(page))
                                        goto keep;
@@ -1862,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                SetPageLRU(page);
                lru = page_lru(page);
 
-               nr_pages = hpage_nr_pages(page);
+               nr_pages = thp_nr_pages(page);
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
 
@@ -2065,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                         * so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-                               nr_rotated += hpage_nr_pages(page);
+                               nr_rotated += thp_nr_pages(page);
                                list_add(&page->lru, &l_active);
                                continue;
                        }
index 8cbe4e3cbe5c181be697649216110eebc1af159a..92e66113a5779e6a68db8c8f90934fae36e7d39d 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -263,7 +263,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
        /* XXX: target_memcg can be NULL, go through lruvec */
        memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
        eviction = atomic_long_read(&lruvec->nonresident_age);
@@ -374,7 +374,7 @@ void workingset_refault(struct page *page, void *shadow)
                goto out;
 
        SetPageActive(page);
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
        inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
 
        /* Page was active prior to eviction */
@@ -411,7 +411,7 @@ void workingset_activation(struct page *page)
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
 out:
        rcu_read_unlock();
 }