Revert "riscv: mm: notify remote harts about mmu cache updates"
author    Sergey Matyukevich <sergey.matyukevich@syntacore.com>
          Sun, 26 Feb 2023 15:01:36 +0000 (18:01 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 22 Mar 2023 12:34:00 +0000 (13:34 +0100)
commit 1fb567bb185923bbcd33373cc699809e8369841d upstream.

This reverts the remaining bits of commit 88329b081509 ("riscv: mm:
notify remote harts about mmu cache updates").

According to bug reports, the suggested approach to fixing stale TLB
entries is not sufficient. It needs to be replaced by a more robust solution.

Fixes: 88329b081509 ("riscv: mm: notify remote harts about mmu cache updates")
Reported-by: Zong Li <zong.li@sifive.com>
Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Cc: stable@vger.kernel.org
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230226150137.1919750-2-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/context.c
arch/riscv/mm/tlbflush.c
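
The hunks below remove a deferred-flush scheme: on a flush request, harts
not currently running the mm were only marked stale in tlb_stale_mask, and
each such hart paid a full-ASID flush the next time it switched to that mm.
A condensed C sketch of that flow, reconstructed from the hunks; the struct
fields and the flush_range() wrapper are simplified illustrative stand-ins,
not kernel code, and the inline sfence.vma makes it build for RISC-V
targets only:

/* Condensed sketch of the reverted deferred-flush scheme. */
struct mm_context {
	unsigned long id;              /* generation | hardware ASID */
	unsigned long tlb_stale_mask;  /* harts owing a deferred flush */
	unsigned long cpu_mask;        /* harts currently running this mm */
};

static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	/* sfence.vma x0, asid: drop all TLB entries tagged with asid */
	__asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
}

/* Flush request: flush harts running the mm now (omitted), defer the rest. */
static void flush_range(struct mm_context *ctx, int self)
{
	ctx->tlb_stale_mask = ~ctx->cpu_mask & ~(1UL << self);
}

/* Context switch: pay the deferred flush if this hart was marked stale. */
static void switch_mm_fast(struct mm_context *ctx, int cpu,
			   unsigned long asid_mask)
{
	if (ctx->tlb_stale_mask & (1UL << cpu)) {
		ctx->tlb_stale_mask &= ~(1UL << cpu);
		local_flush_tlb_all_asid(ctx->id & asid_mask);
	}
}

Per the bug reports cited above, harts could still run with stale
translations under this scheme, so it is dropped here rather than patched.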

diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 5ff1f19fd45c29b4fc7d2c8b44ac4984caf1e75c..0099dc1161683ddd1e3c45460309e331b7e6b0a7 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
        /* A local icache flush is needed before user execution can resume. */
        cpumask_t icache_stale_mask;
-       /* A local tlb flush is needed before user execution can resume. */
-       cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
 
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 907b9efd39a87dd3853c1f8f21f4baa2fdd1125c..801019381dea3fec53f50fe443fb350c3ab92c43 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
        ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma x0, %0"
-                       :
-                       : "r" (asid)
-                       : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-               unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma %0, %1"
-                       :
-                       : "r" (addr), "r" (asid)
-                       : "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                  do { } while (0)
 #define local_flush_tlb_page(addr)             do { } while (0)
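
The two ASID helpers removed above are not dropped: they move into
mm/tlbflush.c in the last hunk below, becoming private to the one file that
still calls them. Their operand use follows the RISC-V privileged spec for
sfence.vma rs1, rs2: rs1 selects a virtual address (x0 means all addresses)
and rs2 selects an ASID (x0 means all address spaces). For contrast, the
no-operand form used by local_flush_tlb_all() (definition as in the
kernel's tlbflush.h; the summary comments are ours):

/* sfence.vma rs1, rs2 operand semantics (RISC-V privileged spec):
 *   sfence.vma            -> flush everything
 *   sfence.vma x0, asid   -> flush all entries tagged with asid
 *   sfence.vma addr, asid -> flush entries mapping addr under asid
 */
static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}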
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d225979426f01f9a636d11890f9de6..7acbfbd14557e600ba69b888c7389fef64f72428 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ switch_mm_fast:
 
        if (need_flush_tlb)
                local_flush_tlb_all();
-#ifdef CONFIG_SMP
-       else {
-               cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-               if (cpumask_test_cpu(cpu, mask)) {
-                       cpumask_clear_cpu(cpu, mask);
-                       local_flush_tlb_all_asid(cntx & asid_mask);
-               }
-       }
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
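
The removed else-branch computed the hardware ASID as cntx & asid_mask: in
the riscv ASID allocator, mm->context.id packs an allocator generation in
the upper bits and the hardware ASID in the low asid_bits bits, so masking
recovers the value programmed into satp. Schematically (the field split is
probed from hardware at boot; the cntx2asid() helper name is illustrative,
not the kernel's):

/* Layout of mm->context.id in the riscv ASID allocator (schematic):
 *
 *      63 ... asid_bits | asid_bits-1 ... 0
 *     +------------------+------------------+
 *     |    generation    |  hardware ASID   |
 *     +------------------+------------------+
 */
static unsigned long asid_mask;	/* (1UL << asid_bits) - 1, set at boot */

static inline unsigned long cntx2asid(unsigned long cntx)
{
	return cntx & asid_mask;	/* illustrative helper, not kernel code */
}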
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ce7dfc81bb3fe386748557f44310aa4b1a86f3a9..37ed760d007c3eef9c302adda361cd4ad7d1db9d 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma x0, %0"
+                       :
+                       : "r" (asid)
+                       : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+               unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma %0, %1"
+                       :
+                       : "r" (addr), "r" (asid)
+                       : "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long size, unsigned long stride)
 {
-       struct cpumask *pmask = &mm->context.tlb_stale_mask;
        struct cpumask *cmask = mm_cpumask(mm);
        unsigned int cpuid;
        bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
        if (static_branch_unlikely(&use_asid_allocator)) {
                unsigned long asid = atomic_long_read(&mm->context.id);
 
-               /*
-                * TLB will be immediately flushed on harts concurrently
-                * executing this MM context. TLB flush on other harts
-                * is deferred until this MM context migrates there.
-                */
-               cpumask_setall(pmask);
-               cpumask_clear_cpu(cpuid, pmask);
-               cpumask_andnot(pmask, pmask, cmask);
-
                if (broadcast) {
                        sbi_remote_sfence_vma_asid(cmask, start, size, asid);
                } else if (size <= stride) {
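
The final hunk is cut off above inside the ASID-aware dispatch. For
orientation, the full decision chain reads, schematically (only the
broadcast branch and the size <= stride test are visible above; the two
local fallback branches are an assumption inferred from the helpers moved
into this file):

/* ASID-aware flush dispatch in __sbi_tlb_flush_range (schematic): */
if (broadcast)				/* mm is live on other harts */
	sbi_remote_sfence_vma_asid(cmask, start, size, asid);
else if (size <= stride)		/* a single page */
	local_flush_tlb_page_asid(start, asid);	/* assumed */
else					/* a larger range */
	local_flush_tlb_all_asid(asid);		/* assumed */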