mm: khugepaged: make hugepage_vma_check() non-static
author    Yang Shi <shy828301@gmail.com>
          Thu, 19 May 2022 21:08:49 +0000 (14:08 -0700)
committer akpm <akpm@linux-foundation.org>
          Thu, 19 May 2022 21:08:49 +0000 (14:08 -0700)
hugepage_vma_check() could be reused by khugepaged_enter() and
khugepaged_enter_vma_merge(), but it is currently static in khugepaged.c.
Make it non-static and declare it in khugepaged.h so that both callers can
share a single eligibility check.
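
For illustration, once the helper is exported, any code that includes
khugepaged.h can gate THP registration on it.  A minimal sketch (the
helper and __khugepaged_enter() are the declarations added below; the
wrapper function itself is hypothetical):

	#include <linux/khugepaged.h>

	/* Hypothetical caller: hand the mm to khugepaged only when the
	 * shared helper says the VMA is eligible for collapse. */
	static void example_enter(struct vm_area_struct *vma,
				  unsigned long vm_flags)
	{
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}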

Link: https://lkml.kernel.org/r/20220510203222.24246-7-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <song@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/khugepaged.h
mm/khugepaged.c

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 0423d3619f26303b974a09ff12bb82173ed46e78..c340f6bb39d6e566db4ec2fc8b633beab950175d 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -3,8 +3,6 @@
 #define _LINUX_KHUGEPAGED_H
 
 #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-#include <linux/shmem_fs.h>
-
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern struct attribute_group khugepaged_attr_group;
@@ -12,6 +10,8 @@ extern struct attribute_group khugepaged_attr_group;
 extern int khugepaged_init(void);
 extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
+extern bool hugepage_vma_check(struct vm_area_struct *vma,
+                              unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
@@ -55,13 +55,11 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 static inline void khugepaged_enter(struct vm_area_struct *vma,
                                   unsigned long vm_flags)
 {
-       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
-               if ((khugepaged_always() ||
-                    (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
-                    (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
-                   !(vm_flags & VM_NOHUGEPAGE) &&
-                   !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+           khugepaged_enabled()) {
+               if (hugepage_vma_check(vma, vm_flags))
                        __khugepaged_enter(vma->vm_mm);
+       }
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b43b294ca5f8d190edc5e5a99d7b0c639c271aac..9ef626e2c750c85936c2af328594dacaf943fca5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -437,8 +437,8 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
        return atomic_read(&mm->mm_users) == 0;
 }
 
-static bool hugepage_vma_check(struct vm_area_struct *vma,
-                              unsigned long vm_flags)
+bool hugepage_vma_check(struct vm_area_struct *vma,
+                       unsigned long vm_flags)
 {
        if (!transhuge_vma_enabled(vma, vm_flags))
                return false;
@@ -508,20 +508,13 @@ void __khugepaged_enter(struct mm_struct *mm)
 void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                               unsigned long vm_flags)
 {
-       unsigned long hstart, hend;
-
-       /*
-        * khugepaged only supports read-only files for non-shmem files.
-        * khugepaged does not yet work on special mappings. And
-        * file-private shmem THP is not supported.
-        */
-       if (!hugepage_vma_check(vma, vm_flags))
-               return;
-
-       hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-       hend = vma->vm_end & HPAGE_PMD_MASK;
-       if (hstart < hend)
-               khugepaged_enter(vma, vm_flags);
+       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+           khugepaged_enabled() &&
+           (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
+            (vma->vm_end & HPAGE_PMD_MASK))) {
+               if (hugepage_vma_check(vma, vm_flags))
+                       __khugepaged_enter(vma->vm_mm);
+       }
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
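
For reference, the open-coded range test in the khugepaged_enter_vma_merge()
hunk above replaces the old hstart/hend locals: it rounds vm_start up and
vm_end down to PMD boundaries and proceeds only when the VMA still spans a
full aligned huge page.  A userspace sketch of that arithmetic, assuming the
common 2 MiB PMD size (the kernel derives HPAGE_PMD_MASK from
HPAGE_PMD_SHIFT; the addresses below are made up for illustration):

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(2UL << 20)		/* assume 2 MiB */
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long vm_start = 0x201000, vm_end = 0x700000;
		/* Round vm_start up and vm_end down to PMD boundaries. */
		unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		unsigned long hend = vm_end & HPAGE_PMD_MASK;

		/* Eligible iff a full aligned huge page fits in the VMA:
		 * here hstart = 0x400000 and hend = 0x600000, so it does. */
		printf("hstart=%#lx hend=%#lx eligible=%d\n",
		       hstart, hend, hstart < hend);
		return 0;
	}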