exec: use VMA iterator instead of linked list
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 6 Sep 2022 19:48:56 +0000 (19:48 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 27 Sep 2022 02:46:21 +0000 (19:46 -0700)
Remove a use of the vm_next list by doing the initial lookup with the VMA
iterator and then using it to find the next entry.
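
A minimal sketch of the conversion pattern (not code from this patch; the
function name and parameters below are placeholders for illustration):

	#include <linux/mm.h>

	static struct vm_area_struct *first_two_vmas(struct mm_struct *mm,
						     unsigned long addr,
						     struct vm_area_struct **nextp)
	{
		VMA_ITERATOR(vmi, mm, addr);	/* iterator positioned at addr */
		struct vm_area_struct *vma;

		/* Replaces find_vma(mm, addr): first VMA ending above addr. */
		vma = vma_next(&vmi);

		/* Replaces vma->vm_next: the following VMA, or NULL. */
		*nextp = vma ? vma_next(&vmi) : NULL;

		return vma;
	}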

Link: https://lkml.kernel.org/r/20220906194824.2110408-42-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/fs/exec.c b/fs/exec.c
index 2b919b30dc97b775ffe69f1f1e67ac39c9d3d718..afe55d0c3bcfb4445f61c321a294a7071473ccad 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -683,6 +683,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
+       VMA_ITERATOR(vmi, mm, new_start);
+       struct vm_area_struct *next;
        struct mmu_gather tlb;
 
        BUG_ON(new_start > new_end);
@@ -691,7 +693,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
         * ensure there are no vmas between where we want to go
         * and where we are
         */
-       if (vma != find_vma(mm, new_start))
+       if (vma != vma_next(&vmi))
                return -EFAULT;
 
        /*
@@ -710,12 +712,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
+       next = vma_next(&vmi);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
@@ -724,7 +727,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * for the others its just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+                       next ? next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb);
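
One subtlety worth spelling out (a reading of the hunks above, not text
from the patch): both vma_next() calls consume the same iterator state.

	/*
	 * VMA_ITERATOR(vmi, mm, new_start) positions the iterator at
	 * new_start.
	 *
	 *   1st vma_next(&vmi) -> the stack VMA itself; anything else
	 *                         landing first fails the identity check
	 *                         and returns -EFAULT.
	 *   2nd vma_next(&vmi) -> the VMA after the stack, or NULL when
	 *                         the stack is the last mapping.
	 *
	 * next->vm_start (or USER_PGTABLES_CEILING when next is NULL)
	 * bounds how far free_pgd_range() may tear down page tables,
	 * matching the old vma->vm_next->vm_start linked-list walk.
	 */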