git.baikalelectronics.ru Git - kernel.git/commitdiff
powerpc/64s: add pte_needs_flush and huge_pmd_needs_flush
author: Nicholas Piggin <npiggin@gmail.com>
Thu, 1 Sep 2022 11:03:34 +0000 (21:03 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 8 Sep 2022 01:11:18 +0000 (11:11 +1000)
Allow PTE changes to avoid flushing the TLB when access permissions are
being relaxed, the dirty bit is being set, and the accessed bit is being
changed.

Relaxing access permissions and setting dirty and accessed bits do not
require a flush because the MMU will re-load the PTE and notice the
updates (it may also cause a spurious fault).

Clearing the accessed bit does not require a flush because of the
imprecise PTE accessed bit accounting that is already performed, as
documented in ptep_clear_flush_young().

This reduces TLB flushing for some mprotect(2) calls.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Link: https://lore.kernel.org/r/20220901110334.1618913-1-npiggin@gmail.com
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/tlbflush.h

index 486902aff040ca1c1bc347e8ebc51f477c038f1b..b7fd9b69e8282df79bbab2c5d73d05612e0379c9 100644 (file)
@@ -413,6 +413,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
  * event of it not getting flushed for a long time the delay
  * shouldn't really matter because there's no real memory
  * pressure for swapout to react to. ]
+ *
+ * Note: this optimisation also exists in pte_needs_flush() and
+ * huge_pmd_needs_flush().
  */
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define ptep_clear_flush_young ptep_test_and_clear_young
index 206f920fe5b9655f979d001ef4b893648babe2d0..67655cd6054563bbdcbd84396921a5f05ff419d4 100644 (file)
@@ -163,6 +163,62 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
         */
 }
 
+/*
+ * Decide whether a PTE transition (oldval -> newval) requires a TLB flush.
+ *
+ * Returns false only for transitions the radix MMU tolerates without an
+ * explicit invalidation: relaxing R/W/X permission, setting DIRTY or
+ * ACCESSED, or clearing ACCESSED (see ptep_clear_flush_young()).
+ * Expects user, present, leaf PTE values on both sides.
+ */
+static inline bool __pte_flags_need_flush(unsigned long oldval,
+                                         unsigned long newval)
+{
+       unsigned long delta = oldval ^ newval;
+
+       /*
+        * The return value of this function doesn't matter for hash,
+        * ptep_modify_prot_start() does a pte_update() which does or schedules
+        * any necessary hash table update and flush.
+        */
+       if (!radix_enabled())
+               return true;
+
+       /*
+        * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+        */
+       VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+       VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+       VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+       VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
+
+       /*
+        * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
+        *
+        * In theory, some changed software bits could be tolerated, in
+        * practice those should rarely if ever matter.
+        */
+
+       if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
+               return true;
+
+       /*
+        * If any of the above was present in old but cleared in new, flush.
+        * With the exception of _PAGE_ACCESSED, don't worry about flushing
+        * if that was cleared (see the comment in ptep_clear_flush_young()).
+        */
+       if ((delta & ~_PAGE_ACCESSED) & oldval)
+               return true;
+
+       return false;
+}
+
+/*
+ * PTE front-end for __pte_flags_need_flush(). The self-referential
+ * define lets generic mm code detect the arch override via #ifdef.
+ */
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+       return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+/*
+ * Huge-PMD front-end: PMD leaf entries use the same flag layout, so the
+ * PTE flag check applies unchanged. The self-referential define lets
+ * generic mm code detect the arch override via #ifdef.
+ */
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+       return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
+}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
+
 extern bool tlbie_capable;
 extern bool tlbie_enabled;