git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: thp: don't need to drain lru cache when splitting and mlocking THP
author Yang Shi <yang.shi@linux.alibaba.com>
Wed, 3 Jun 2020 23:03:37 +0000 (16:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:49 +0000 (20:09 -0700)
Since commit 4f1d33f5ce94 ("mm/swap.c: flush lru pvecs on compound page
arrival") THP would not stay in pagevec anymore.  So the optimization made
by commit 3166d3d92f77 ("thp: increase split_huge_page() success rate"),
which tries to unpin munlocked THPs from pagevec by draining pagevec,
doesn't make sense anymore.

Draining lru cache before isolating THP in mlock path is also unnecessary.
634720d40076 ("mm, thp: fix mapped pages avoiding unevictable list on
mlock") added it and 28d7bdb0830c ("thp, mlock: do not mlock PTE-mapped
file huge pages") accidentally carried it over after the above
optimization went in.

Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Link: http://lkml.kernel.org/r/1585946493-7531-1-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c

index 6df182a18d2cc9369014f3aa3890145fdc8e26a5..fb357f02046a39d9ae8d185439a29501a00f199e 100644 (file)
@@ -1378,7 +1378,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                        goto skip_mlock;
                if (!trylock_page(page))
                        goto skip_mlock;
-               lru_add_drain();
                if (page->mapping && !PageDoubleMap(page))
                        mlock_vma_page(page);
                unlock_page(page);
@@ -2582,7 +2581,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
        int count, mapcount, extra_pins, ret;
-       bool mlocked;
        unsigned long flags;
        pgoff_t end;
 
@@ -2641,14 +2639,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                goto out_unlock;
        }
 
-       mlocked = PageMlocked(head);
        unmap_page(head);
        VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
-       /* Make sure the page is not on per-CPU pagevec as it takes pin */
-       if (mlocked)
-               lru_add_drain();
-
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irqsave(&pgdata->lru_lock, flags);