mm/mlock: use vma iterator and maple state instead of vma linked list
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 6 Sep 2022 19:49:02 +0000 (19:49 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
          Tue, 27 Sep 2022 02:46:24 +0000 (19:46 -0700)
Handle overflow checking in count_mm_mlocked_page_nr() differently.
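
Instead of stopping the walk when start + len would wrap, the count now clamps
the end of the range to ULONG_MAX. A minimal user-space sketch of that clamp
(plain C, outside the kernel; the helper name is illustrative, not from this
patch):

#include <limits.h>
#include <stdio.h>

/* Clamp [start, start + len) so the end never wraps past ULONG_MAX. */
static unsigned long range_end(unsigned long start, unsigned long len)
{
        if (ULONG_MAX - len < start)    /* start + len would overflow */
                return ULONG_MAX;
        return start + len;
}

int main(void)
{
        printf("%#lx\n", range_end(0x1000, 0x2000));    /* 0x3000 */
        printf("%#lx\n", range_end(ULONG_MAX - 4, 16)); /* clamped to ULONG_MAX */
        return 0;
}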

Link: https://lkml.kernel.org/r/20220906194824.2110408-58-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mlock.c

index b14e929084ccaa5b86d5c9c199d7054b10e0ccab..43d19a1f28eb37cbc66a309e83a4fb31cff07019 100644
@@ -471,6 +471,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
        unsigned long nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        int error;
+       MA_STATE(mas, &current->mm->mm_mt, start, start);
 
        VM_BUG_ON(offset_in_page(start));
        VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -479,13 +480,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
                return -EINVAL;
        if (end == start)
                return 0;
-       vma = find_vma(current->mm, start);
-       if (!vma || vma->vm_start > start)
+       vma = mas_walk(&mas);
+       if (!vma)
                return -ENOMEM;
 
-       prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;
+       else
+               prev = mas_prev(&mas, 0);
 
        for (nstart = start ; ; ) {
                vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
@@ -505,7 +507,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
                if (nstart >= end)
                        break;
 
-               vma = prev->vm_next;
+               vma = find_vma(prev->vm_mm, prev->vm_end);
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
@@ -526,24 +528,23 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 {
        struct vm_area_struct *vma;
        unsigned long count = 0;
+       unsigned long end;
+       VMA_ITERATOR(vmi, mm, start);
 
        if (mm == NULL)
                mm = current->mm;
 
-       vma = find_vma(mm, start);
-       if (vma == NULL)
-               return 0;
-
-       for (; vma ; vma = vma->vm_next) {
-               if (start >= vma->vm_end)
-                       continue;
-               if (start + len <=  vma->vm_start)
-                       break;
+       /* Don't overflow past ULONG_MAX */
+       if (unlikely(ULONG_MAX - len < start))
+               end = ULONG_MAX;
+       else
+               end = start + len;
+       for_each_vma_range(vmi, vma, end) {
                if (vma->vm_flags & VM_LOCKED) {
                        if (start > vma->vm_start)
                                count -= (start - vma->vm_start);
-                       if (start + len < vma->vm_end) {
-                               count += start + len - vma->vm_start;
+                       if (end < vma->vm_end) {
+                               count += end - vma->vm_start;
                                break;
                        }
                        count += vma->vm_end - vma->vm_start;
@@ -659,6 +660,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
  */
 static int apply_mlockall_flags(int flags)
 {
+       MA_STATE(mas, &current->mm->mm_mt, 0, 0);
        struct vm_area_struct *vma, *prev = NULL;
        vm_flags_t to_add = 0;
 
@@ -679,7 +681,7 @@ static int apply_mlockall_flags(int flags)
                        to_add |= VM_LOCKONFAULT;
        }
 
-       for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+       mas_for_each(&mas, vma, ULONG_MAX) {
                vm_flags_t newflags;
 
                newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
@@ -687,6 +689,7 @@ static int apply_mlockall_flags(int flags)
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+               mas_pause(&mas);
                cond_resched();
        }
 out:
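
For reference, a minimal kernel-style sketch (not part of this patch) of the
maple-state walk that replaces the vm_next loop in apply_mlockall_flags()
above. The helper name mm_for_each_vma_fixup() and the fixup callback are
hypothetical, and the caller is assumed to hold mmap_lock for writing;
mas_pause() makes the next iteration re-walk the tree, since the fixup may
change the VMA layout and cond_resched() may sleep:

#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/sched.h>

static void mm_for_each_vma_fixup(struct mm_struct *mm,
                                  void (*fixup)(struct vm_area_struct *vma))
{
        MA_STATE(mas, &mm->mm_mt, 0, 0);
        struct vm_area_struct *vma;

        mas_for_each(&mas, vma, ULONG_MAX) {
                fixup(vma);             /* may split or merge VMAs */
                mas_pause(&mas);        /* invalidate state before resuming */
                cond_resched();
        }
}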