git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: merge parameters for change_protection()
authorPeter Xu <peterx@redhat.com>
Tue, 7 Apr 2020 03:05:45 +0000 (20:05 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Apr 2020 17:43:39 +0000 (10:43 -0700)
change_protection() is used by both the NUMA balancing and the mprotect()
code; there is one parameter for each of these callers (dirty_accountable
and prot_numa).  Further, these parameters are passed along the call chain:

  - change_protection_range()
  - change_p4d_range()
  - change_pud_range()
  - change_pmd_range()
  - ...

Now we introduce a flag for change_protection() and all these helpers to
replace these parameters.  Then we can avoid passing multiple parameters
multiple times along the way.

More importantly, it'll greatly simplify the work if we want to introduce
any new parameters to change_protection().  In the follow up patches, a
new parameter for userfaultfd write protection will be introduced.

No functional change at all.

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Denis Plotnikov <dplotnikov@virtuozzo.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Martin Cracauer <cracauer@cons.org>
Cc: Marty McFadden <mcfadden8@llnl.gov>
Cc: Maya Gokhale <gokhale2@llnl.gov>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Link: http://lkml.kernel.org/r/20200220163112.11409-7-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/huge_mm.h
include/linux/mm.h
mm/huge_memory.c
mm/mempolicy.c
mm/mprotect.c

index f2df2247026aff875f3cf6ded7aa5af6bb26cce0..cfbb0a87c5f0d02610275758df302968f860280e 100644 (file)
@@ -46,7 +46,7 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                         pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
-                       int prot_numa);
+                       unsigned long cp_flags);
 vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);
 
index be49e371e4b54fd19328b3128d3d9d7f25e1303d..c7d87ff5027b5a1cfebb3edd5ba6b31c5aeb2354 100644 (file)
@@ -1771,9 +1771,21 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks);
+
+/*
+ * Flags used by change_protection().  For now we make it a bitmap so
+ * that we can pass in multiple flags just like parameters.  However,
+ * for now all the callers only use one of the flags at a time.
+ */
+/* Whether we should allow dirty bit accounting */
+#define  MM_CP_DIRTY_ACCT                  (1UL << 0)
+/* Whether this protection change is for NUMA hints */
+#define  MM_CP_PROT_NUMA                   (1UL << 1)
+
 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end, pgprot_t newprot,
-                             int dirty_accountable, int prot_numa);
+                             unsigned long cp_flags);
 extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
index c1e7c71db1e65a2696ef70b6f12cc79d5d2474e3..dc12249af6dfda6a0cc2aeb828af839cd80ebad2 100644 (file)
@@ -1979,13 +1979,14 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
  *  - HPAGE_PMD_NR is protections changed and TLB flush necessary
  */
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long addr, pgprot_t newprot, int prot_numa)
+               unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pmd_t entry;
        bool preserve_write;
        int ret;
+       bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
        if (!ptl)
index 037e5f5481180a93bb35f6409ef070e0e11ea3bc..145be04b710872a4dcfce9c1868710ef6a554d2c 100644 (file)
@@ -627,7 +627,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
        int nr_updated;
 
-       nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
+       nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
        if (nr_updated)
                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 
index 0fee14b3941679eac5069206a31acb14c7cfe7ed..046e0889e65f5d9881f8aae4a89c4d830ccfb39c 100644 (file)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa)
+               unsigned long cp_flags)
 {
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
        int target_node = NUMA_NO_NODE;
+       bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
+       bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 
        /*
         * Can be called with only the mmap_sem for reading by
@@ -188,7 +190,7 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
 
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -229,7 +231,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        } else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
-                                               newprot, prot_numa);
+                                                             newprot, cp_flags);
 
                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
@@ -244,7 +246,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                             cp_flags);
                pages += this_pages;
 next:
                cond_resched();
@@ -260,7 +262,7 @@ next:
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                p4d_t *p4d, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        pud_t *pud;
        unsigned long next;
@@ -272,7 +274,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (pud++, addr = next, addr != end);
 
        return pages;
@@ -280,7 +282,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
 
 static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
-               pgprot_t newprot, int dirty_accountable, int prot_numa)
+               pgprot_t newprot, unsigned long cp_flags)
 {
        p4d_t *p4d;
        unsigned long next;
@@ -292,7 +294,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                pages += change_pud_range(vma, p4d, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (p4d++, addr = next, addr != end);
 
        return pages;
@@ -300,7 +302,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
 
 static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa)
+               unsigned long cp_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
@@ -317,7 +319,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_p4d_range(vma, pgd, addr, next, newprot,
-                                dirty_accountable, prot_numa);
+                                         cp_flags);
        } while (pgd++, addr = next, addr != end);
 
        /* Only flush the TLB if we actually modified any entries: */
@@ -330,14 +332,15 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 
 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
-                      int dirty_accountable, int prot_numa)
+                      unsigned long cp_flags)
 {
        unsigned long pages;
 
        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
-               pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+               pages = change_protection_range(vma, start, end, newprot,
+                                               cp_flags);
 
        return pages;
 }
@@ -459,7 +462,7 @@ success:
        vma_set_page_prot(vma);
 
        change_protection(vma, start, end, vma->vm_page_prot,
-                         dirty_accountable, 0);
+                         dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
 
        /*
         * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major