git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: add VMA iterator
author Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 6 Sep 2022 19:48:46 +0000 (19:48 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Sep 2022 02:46:15 +0000 (19:46 -0700)
This thin layer of abstraction over the maple tree state is for iterating
over VMAs.  You can go forwards, go backwards or ask where the iterator
is.  Rename the existing vma_next() to __vma_next() -- it will be removed
by the end of this series.

Link: https://lkml.kernel.org/r/20220906194824.2110408-10-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
mm/mmap.c

index 896d04248e6623b8efaa82fc274faaa210218769..3701da1fac5fdd73d940b0265f8ec92c784876cc 100644 (file)
@@ -661,6 +661,38 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
        return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+       return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+       /*
+        * Uses vma_find() to get the first VMA when the iterator starts.
+        * Calling mas_next() could skip the first entry.
+        */
+       return vma_find(vmi, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+       return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+       return vmi->mas.index;
+}
+
+#define for_each_vma(__vmi, __vma)                                     \
+       while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end)                                \
+       while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+
 #ifdef CONFIG_SHMEM
 /*
  * The vma_is_shmem is not inline because it is used only by slow
index 425bc5f7d477c675a17c73b09311078cf52cae7c..d0b51fbdf5d4479e004c7712cb8b735231725546 100644 (file)
@@ -777,6 +777,27 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
 
 #endif /* CONFIG_LRU_GEN */
 
+struct vma_iterator {
+       struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr)                               \
+       struct vma_iterator name = {                                    \
+               .mas = {                                                \
+                       .tree = &(__mm)->mm_mt,                         \
+                       .index = __addr,                                \
+                       .node = MAS_START,                              \
+               },                                                      \
+       }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+               struct mm_struct *mm, unsigned long addr)
+{
+       vmi->mas.tree = &mm->mm_mt;
+       vmi->mas.index = addr;
+       vmi->mas.node = MAS_START;
+}
+
 struct mmu_gather;
 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
index 5115eea6a0e6c05506f1dc81aed7a363f145a7fe..20718645d82f2da5b78a31f3d5e1a1cca724ebcd 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -586,7 +586,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
 }
 
 /*
- * vma_next() - Get the next VMA.
+ * __vma_next() - Get the next VMA.
  * @mm: The mm_struct.
  * @vma: The current vma.
  *
@@ -594,7 +594,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
  *
  * Returns: The next VMA after @vma.
  */
-static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
+static inline struct vm_area_struct *__vma_next(struct mm_struct *mm,
                                         struct vm_area_struct *vma)
 {
        if (!vma)
@@ -1291,7 +1291,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        if (vm_flags & VM_SPECIAL)
                return NULL;
 
-       next = vma_next(mm, prev);
+       next = __vma_next(mm, prev);
        area = next;
        if (area && area->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;
@@ -2843,7 +2843,7 @@ static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end)
 {
-       struct vm_area_struct *next = vma_next(mm, prev);
+       struct vm_area_struct *next = __vma_next(mm, prev);
        struct mmu_gather tlb;
 
        lru_add_drain();
@@ -3051,7 +3051,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                if (error)
                        goto split_failed;
        }
-       vma = vma_next(mm, prev);
+       vma = __vma_next(mm, prev);
 
        if (unlikely(uf)) {
                /*