With the optimizations introduced by commit a46cc7a90fd8
("powerpc/mm/radix: Improve TLB/PWC flushes"), flush_tlb_mm() no
longer flushes the page walk cache (PWC) with radix. This patch
introduces flush_all_mm(), which flushes everything, TLB and PWC, for
a given mm.
Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
[mpe: Add a WARN_ON_ONCE() in the empty hash routines]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
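
For context, a sketch of the intended kind of call site. This is
illustrative only and not part of the patch; example_invalidate_mm()
is a hypothetical helper:

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical caller: a driver that must drop every cached
 * translation for an mm before the hardware walks its page tables
 * again. On radix, flush_tlb_mm() alone would leave stale page
 * walk cache entries behind.
 */
static void example_invalidate_mm(struct mm_struct *mm)
{
	flush_all_mm(mm);	/* TLB + PWC on radix; WARNs once on hash */
}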
static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}
+static inline void hash__local_flush_all_mm(struct mm_struct *mm)
+{
+ /*
+ * There's no Page Walk Cache for hash, so what is needed is
+ * the same as flush_tlb_mm(), which doesn't really make sense
+ * with hash. So the only thing we could do is flush the
+ * entire LPID! Punt for now, as it's not being used.
+ */
+ WARN_ON_ONCE(1);
+}
+
+static inline void hash__flush_all_mm(struct mm_struct *mm)
+{
+ /*
+ * There's no Page Walk Cache for hash, so what is needed is
+ * the same as flush_tlb_mm(), which doesn't really make sense
+ * with hash. So the only thing we could do is flush the
+ * entire LPID! Punt for now, as it's not being used.
+ */
+ WARN_ON_ONCE(1);
+}
+
static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
+extern void radix__local_flush_all_mm(struct mm_struct *mm);
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
+extern void radix__flush_all_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
+#define radix__flush_all_mm(mm) radix__local_flush_all_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
#endif
return hash__local_flush_tlb_page(vma, vmaddr);
}
+static inline void local_flush_all_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__local_flush_all_mm(mm);
+ return hash__local_flush_all_mm(mm);
+}
+
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
...
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
+
+static inline void flush_all_mm(struct mm_struct *mm)
+{
+ if (radix_enabled())
+ return radix__flush_all_mm(mm);
+ return hash__flush_all_mm(mm);
+}
#else
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
+#define flush_all_mm(mm) local_flush_all_mm(mm)
#endif /* CONFIG_SMP */
/*
 * flush the page walk cache for the address
 */
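
Illustrative note, not part of the patch: with the macro fallback
above, call sites need no #ifdef of their own. A minimal sketch,
where example_teardown() is a hypothetical caller:

/* Hypothetical caller; identical source on SMP and UP builds. */
static void example_teardown(struct mm_struct *mm)
{
	/*
	 * CONFIG_SMP=y: the inline flush_all_mm() dispatches to
	 * radix__flush_all_mm() or hash__flush_all_mm().
	 * CONFIG_SMP=n: the preprocessor rewrites this call to
	 * local_flush_all_mm(mm).
	 */
	flush_all_mm(mm);
}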
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
-#ifndef CONFIG_SMP
-static void radix__local_flush_all_mm(struct mm_struct *mm)
+void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL(radix__local_flush_all_mm);
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
...
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
-static void radix__flush_all_mm(struct mm_struct *mm)
+void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
+EXPORT_SYMBOL(radix__flush_all_mm);
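
The EXPORT_SYMBOL() additions are what make the new helpers usable
from loadable modules: flush_all_mm() and local_flush_all_mm() are
inlines, so a module calling them links against the radix__ symbols
exported here. A hedged sketch of such a module; the example_drv
name is hypothetical:

#include <linux/module.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical module code: without the exports above, this would
 * fail to link when the driver is built as a module.
 */
static void example_drv_invalidate(struct mm_struct *mm)
{
	flush_all_mm(mm);
}
MODULE_LICENSE("GPL");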
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{