ipc/shm: use VMA iterator instead of linked list
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Tue, 6 Sep 2022 19:48:58 +0000 (19:48 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Sep 2022 02:46:21 +0000 (19:46 -0700)
The VMA iterator is faster than the linked list, and it can be walked
even when VMAs are being removed from the address space, so there's no
need to keep track of 'next'. (An illustrative sketch of the iterator
pattern follows the sign-off block below.)

Link: https://lkml.kernel.org/r/20220906194824.2110408-46-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
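
For context, here is a minimal sketch of the iteration pattern this patch
switches to (illustrative only, not the code from the commit): VMA_ITERATOR()
seeds a maple-tree walk at an address, for_each_vma() replaces chasing
vma->vm_next, and mas_pause() tells the iterator to re-walk after do_munmap()
may have modified the tree. The unmap_file_backed_vmas() helper name and the
vm_file check are assumptions made up for the example.

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Illustrative only: unmap every file-backed VMA at or above addr. */
static int unmap_file_backed_vmas(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, addr);            /* the walk starts at addr */

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        for_each_vma(vmi, vma) {
                if (!vma->vm_file)              /* stand-in for the real shm checks */
                        continue;

                /*
                 * With the old linked list, vma->vm_next had to be saved
                 * before unmapping because the VMA may be freed here; the
                 * iterator keeps its own position, so no 'next' is needed.
                 */
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);

                /*
                 * do_munmap() can rewrite the maple tree, so pause the
                 * iterator; the next step re-walks from the current index
                 * instead of trusting a possibly stale node.
                 */
                mas_pause(&vmi.mas);
        }

        mmap_write_unlock(mm);
        return 0;
}

In the diff below the same pattern appears twice: the first loop breaks out
with vma = vma_next(&vmi) once the shm segment is found, and the second loop
advances with vma_next() while unmapping any trailing fragments of the
segment.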
ipc/shm.c

index b3048ebd5c315c3768e376a87019c85bd8b86c0d..7d86f058fb861b8331faa0edd20fa890424561e7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1721,7 +1721,7 @@ long ksys_shmdt(char __user *shmaddr)
 #ifdef CONFIG_MMU
        loff_t size = 0;
        struct file *file;
-       struct vm_area_struct *next;
+       VMA_ITERATOR(vmi, mm, addr);
 #endif
 
        if (addr & ~PAGE_MASK)
@@ -1751,12 +1751,9 @@ long ksys_shmdt(char __user *shmaddr)
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
-       vma = find_vma(mm, addr);
 
 #ifdef CONFIG_MMU
-       while (vma) {
-               next = vma->vm_next;
-
+       for_each_vma(vmi, vma) {
                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
@@ -1774,6 +1771,7 @@ long ksys_shmdt(char __user *shmaddr)
                        file = vma->vm_file;
                        size = i_size_read(file_inode(vma->vm_file));
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
+                       mas_pause(&vmi.mas);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
@@ -1781,10 +1779,9 @@ long ksys_shmdt(char __user *shmaddr)
                         * searching for matching vma's.
                         */
                        retval = 0;
-                       vma = next;
+                       vma = vma_next(&vmi);
                        break;
                }
-               vma = next;
        }
 
        /*
@@ -1794,17 +1791,19 @@ long ksys_shmdt(char __user *shmaddr)
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
-               next = vma->vm_next;
-
                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
-                   (vma->vm_file == file))
+                   (vma->vm_file == file)) {
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
-               vma = next;
+                       mas_pause(&vmi.mas);
+               }
+
+               vma = vma_next(&vmi);
        }
 
 #else  /* CONFIG_MMU */
+       vma = vma_lookup(mm, addr);
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given
         */