git.baikalelectronics.ru Git - kernel.git/commitdiff
vmscan: Add check_move_unevictable_folios()
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Sat, 4 Jun 2022 21:39:09 +0000 (17:39 -0400)
committerMatthew Wilcox (Oracle) <willy@infradead.org>
Wed, 29 Jun 2022 12:51:06 +0000 (08:51 -0400)
Change the guts of check_move_unevictable_pages() over to use folios
and add check_move_unevictable_pages() as a wrapper.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
include/linux/swap.h
mm/vmscan.c

index 0c0fed1b348f20cc04e3c8c3fc8e0672bacca7c7..8672a7123ccdc0a4d5e704621a77477c275f4456 100644 (file)
@@ -438,7 +438,8 @@ static inline bool node_reclaim_enabled(void)
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
 }
 
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+void check_move_unevictable_folios(struct folio_batch *fbatch);
+void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern void kswapd_run(int nid);
 extern void kswapd_stop(int nid);
index f7d9a683e3a7d38fe1ffd0265b9f7d7acad938b4..04f8671caad9a9fa599d2b68f3363c90c094fdd7 100644 (file)
@@ -4790,45 +4790,57 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
+void check_move_unevictable_pages(struct pagevec *pvec)
+{
+       struct folio_batch fbatch;
+       unsigned i;
+
+       folio_batch_init(&fbatch);
+       for (i = 0; i < pvec->nr; i++) {
+               struct page *page = pvec->pages[i];
+
+               if (PageTransTail(page))
+                       continue;
+               folio_batch_add(&fbatch, page_folio(page));
+       }
+       check_move_unevictable_folios(&fbatch);
+}
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+
 /**
- * check_move_unevictable_pages - check pages for evictability and move to
- * appropriate zone lru list
- * @pvec: pagevec with lru pages to check
+ * check_move_unevictable_folios - Move evictable folios to appropriate zone
+ * lru list
+ * @fbatch: Batch of lru folios to check.
  *
- * Checks pages for evictability, if an evictable page is in the unevictable
+ * Checks folios for evictability, if an evictable folio is in the unevictable
  * lru list, moves it to the appropriate evictable lru list. This function
- * should be only used for lru pages.
+ * should be only used for lru folios.
  */
-void check_move_unevictable_pages(struct pagevec *pvec)
+void check_move_unevictable_folios(struct folio_batch *fbatch)
 {
        struct lruvec *lruvec = NULL;
        int pgscanned = 0;
        int pgrescued = 0;
        int i;
 
-       for (i = 0; i < pvec->nr; i++) {
-               struct page *page = pvec->pages[i];
-               struct folio *folio = page_folio(page);
-               int nr_pages;
-
-               if (PageTransTail(page))
-                       continue;
+       for (i = 0; i < fbatch->nr; i++) {
+               struct folio *folio = fbatch->folios[i];
+               int nr_pages = folio_nr_pages(folio);
 
-               nr_pages = thp_nr_pages(page);
                pgscanned += nr_pages;
 
-               /* block memcg migration during page moving between lru */
-               if (!TestClearPageLRU(page))
+               /* block memcg migration while the folio moves between lrus */
+               if (!folio_test_clear_lru(folio))
                        continue;
 
                lruvec = folio_lruvec_relock_irq(folio, lruvec);
-               if (page_evictable(page) && PageUnevictable(page)) {
-                       del_page_from_lru_list(page, lruvec);
-                       ClearPageUnevictable(page);
-                       add_page_to_lru_list(page, lruvec);
+               if (folio_evictable(folio) && folio_test_unevictable(folio)) {
+                       lruvec_del_folio(lruvec, folio);
+                       folio_clear_unevictable(folio);
+                       lruvec_add_folio(lruvec, folio);
                        pgrescued += nr_pages;
                }
-               SetPageLRU(page);
+               folio_set_lru(folio);
        }
 
        if (lruvec) {
@@ -4839,4 +4851,4 @@ void check_move_unevictable_pages(struct pagevec *pvec)
                count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
        }
 }
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+EXPORT_SYMBOL_GPL(check_move_unevictable_folios);