git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: mmu_notifier fix for tlb_end_vma
author: Nicholas Piggin <npiggin@gmail.com>
Thu, 23 Aug 2018 08:47:09 +0000 (18:47 +1000)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Aug 2018 18:55:58 +0000 (11:55 -0700)
The generic tlb_end_vma does not call invalidate_range mmu notifier, and
it resets the mmu_gather range, which means the notifier won't be
called on part of the range in case of an unmap that spans multiple
vmas.

ARM64 seems to be the only arch I could see that has notifiers and uses
the generic tlb_end_vma.  I have not actually tested it.

[ Catalin and Will point out that ARM64 currently only uses the
  notifiers for KVM, which doesn't use the ->invalidate_range()
  callback right now, so it's a bug, but one that happens to
  not affect them.  So not necessary for stable.  - Linus ]

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/asm-generic/tlb.h
mm/memory.c

index 3063125197adabb38876a9dc001986062db24658..b3353e21f3b3ec95220e1706bae89d3969d9e918 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
+#include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
        }
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+       if (!tlb->end)
+               return;
+
+       tlb_flush(tlb);
+       mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+       __tlb_reset_range(tlb);
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
 {
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 
 #define __tlb_end_vma(tlb, vma)                                        \
        do {                                                    \
-               if (!tlb->fullmm && tlb->end) {                 \
-                       tlb_flush(tlb);                         \
-                       __tlb_reset_range(tlb);                 \
-               }                                               \
+               if (!tlb->fullmm)                               \
+                       tlb_flush_mmu_tlbonly(tlb);             \
        } while (0)
 
 #ifndef tlb_end_vma
index 8b344297985546870c58d9fa7cbcc84689f94eb5..3ff4394a2e1b01ae9a4ace4b0cede070f8545645 100644 (file)
@@ -238,16 +238,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
        __tlb_reset_range(tlb);
 }
 
-static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-       if (!tlb->end)
-               return;
-
-       tlb_flush(tlb);
-       mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
-       __tlb_reset_range(tlb);
-}
-
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;