mm: remove last argument of reuse_swap_page()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Fri, 14 Jan 2022 22:06:44 +0000 (14:06 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 15 Jan 2022 14:30:28 +0000 (16:30 +0200)
None of the callers care about the total_map_swapcount() argument any more.

Link: https://lkml.kernel.org/r/20211220205943.456187-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
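
The patch is the common refactor of deleting an optional out-parameter once
every caller passes NULL for it. A minimal user-space sketch of the
before/after shape (hypothetical names, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Before: optional out-parameter, kept alive only for callers
     * that might pass a non-NULL pointer. */
    static bool can_reuse_old(int refcount, int *refcount_out)
    {
            if (refcount_out)
                    *refcount_out = refcount;
            return refcount == 1;
    }

    /* After: every caller passed NULL, so the parameter and the
     * dead branch it guarded are removed together. */
    static bool can_reuse_new(int refcount)
    {
            return refcount == 1;
    }

    int main(void)
    {
            printf("old=%d new=%d\n", can_reuse_old(2, NULL), can_reuse_new(2));
            return 0;
    }

With the parameter gone, the NULL test and the store behind it disappear from
the function body, which is what the mm/swapfile.c hunk below does.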
include/linux/swap.h
mm/huge_memory.c
mm/khugepaged.c
mm/memory.c
mm/swapfile.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index d1ea44b31f19f511108193f5344d5c6777083068..bdccbf1efa6198a2b9af64c6df0400f301795972 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -514,7 +514,7 @@ extern int __swp_swapcount(swp_entry_t entry);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
 extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
+extern bool reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
@@ -680,8 +680,8 @@ static inline int swp_swapcount(swp_entry_t entry)
        return 0;
 }
 
-#define reuse_swap_page(page, total_map_swapcount) \
-       (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
+#define reuse_swap_page(page) \
+       (page_trans_huge_mapcount(page, NULL) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {
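
The second hunk above updates the CONFIG_SWAP=n build, where reuse_swap_page()
is a macro stub rather than the function in mm/swapfile.c. A generic sketch of
that compile-out pattern (hypothetical names; only the #define mirrors the
kernel's approach):

    #include <stdbool.h>
    #include <stdio.h>

    /* #define HAVE_FEATURE */  /* define to link the real implementation */

    #ifdef HAVE_FEATURE
    bool feature_reusable(int refcount);    /* real function, defined elsewhere */
    #else
    /* Feature compiled out: the call site still compiles, but the "call"
     * collapses into an inline expression, just as reuse_swap_page(page)
     * becomes (page_trans_huge_mapcount(page, NULL) == 1) when CONFIG_SWAP=n. */
    #define feature_reusable(refcount) ((refcount) == 1)
    #endif

    int main(void)
    {
            printf("reusable=%d\n", feature_reusable(1));
            return 0;
    }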
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c0720697125e3fa3b4056412caf31..b61fbe95c856c7193e8608832ec333a8557fae06 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1322,7 +1322,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
         * We can only reuse the page if nobody else maps the huge page or it's
         * part.
         */
-       if (reuse_swap_page(page, NULL)) {
+       if (reuse_swap_page(page)) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9d40dd8890e581dde2cb17ac2d7f7b1c1280be8f..698ea19775ac3100a9c07af2ee54c1a1f89f18ab 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -681,7 +681,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        goto out;
                }
                if (!pte_write(pteval) && PageSwapCache(page) &&
-                               !reuse_swap_page(page, NULL)) {
+                               !reuse_swap_page(page)) {
                        /*
                         * Page is in the swap cache and cannot be re-used.
                         * It cannot be collapsed into a THP.
diff --git a/mm/memory.c b/mm/memory.c
index 5fea331b15607d67c8af2b82a0745800947eba4c..571d02f419baa4682515e48e9fcdb270988b8b1f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3627,7 +3627,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
-       if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+       if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                vmf->flags &= ~FAULT_FLAG_WRITE;
                ret |= VM_FAULT_WRITE;
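
In do_swap_page() above, a write fault on a freshly swapped-in page either
maps the page writable immediately or leaves it read-only for a later
copy-on-write fault. A simplified user-space model of that decision (fake
types and counts; the real PTE handling is considerably more involved):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_pte {
            bool writable;
            bool dirty;
    };

    /* Stand-in for reuse_swap_page(): one total reference means we are
     * the sole owner and may write to the page in place. */
    static bool sole_owner(int mapcount, int swapcount)
    {
            return mapcount + swapcount == 1;
    }

    /* On a write fault, grant write access right away when the page is
     * exclusively ours; otherwise a later fault makes a private copy. */
    static struct fake_pte map_swapped_in_page(bool write_fault,
                                               int mapcount, int swapcount)
    {
            struct fake_pte pte = { .writable = false, .dirty = false };

            if (write_fault && sole_owner(mapcount, swapcount)) {
                    pte.writable = true;
                    pte.dirty = true;
            }
            return pte;
    }

    int main(void)
    {
            struct fake_pte pte = map_swapped_in_page(true, 1, 0);
            printf("writable=%d dirty=%d\n", pte.writable, pte.dirty);
            return 0;
    }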
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e64207e2ef1dd46f1c85239c22cd4721b5901bf0..31d13a393cf0f2c3651033fada8152d1defda730 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1668,12 +1668,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
  * to it.  And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
- *
- * NOTE: total_map_swapcount should not be relied upon by the caller if
- * reuse_swap_page() returns false, but it may be always overwritten
- * (see the other implementation for CONFIG_SWAP=n).
  */
-bool reuse_swap_page(struct page *page, int *total_map_swapcount)
+bool reuse_swap_page(struct page *page)
 {
        int count, total_mapcount, total_swapcount;
 
@@ -1682,8 +1678,6 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
                return false;
        count = page_trans_huge_map_swapcount(page, &total_mapcount,
                                              &total_swapcount);
-       if (total_map_swapcount)
-               *total_map_swapcount = total_mapcount + total_swapcount;
        if (count == 1 && PageSwapCache(page) &&
            (likely(!PageTransCompound(page)) ||
             /* The remaining swap count will be freed soon */
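
The hunk is cut off here, but the function comment above it describes the rest
of the behaviour: when the caller holds the only reference, the page's swap
slot is freed as a side effect, since the stale on-disk copy will never be
read again. A rough model of that reuse-plus-free-swap logic (hypothetical
structure, not the kernel's types):

    #include <stdbool.h>
    #include <stdio.h>

    struct model_page {
            int mapcount;           /* page-table references */
            int swapcount;          /* swap-slot references */
            bool in_swapcache;
    };

    /* Loosely modeled on reuse_swap_page(): reuse is allowed only for the
     * sole reference, and reusing a swap-cache page frees its swap slot so
     * the never-to-be-read disk copy stops pinning swap space. */
    static bool model_reuse(struct model_page *page)
    {
            if (page->mapcount + page->swapcount != 1)
                    return false;
            if (page->in_swapcache) {
                    page->swapcount = 0;
                    page->in_swapcache = false;
            }
            return true;
    }

    int main(void)
    {
            struct model_page p = { .mapcount = 1, .swapcount = 0,
                                    .in_swapcache = true };
            printf("reused=%d in_swapcache=%d\n",
                   model_reuse(&p), p.in_swapcache);
            return 0;
    }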