From 9aa0f076ddf2335d66da5bb1c578f2ee82cc8c0a Mon Sep 17 00:00:00 2001
From: James Hogan <james.hogan@imgtec.com>
Date: Tue, 19 Apr 2016 09:25:10 +0100
Subject: [PATCH] MIPS: mm: Don't do MTHC0 if XPA not present

Executing an MTHC0 instruction when XPA is not present triggers a
Reserved Instruction exception, so conditionalise the use of this
instruction when building the TLB handlers (build_update_entries()) and
in __update_tlb().
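
For context, cpu_has_xpa follows the usual cpu-features.h convention
and, unless a platform overrides it, reduces to an option-bit test
(sketch for illustration only, not part of this patch):

	/* default definition; platforms may provide their own */
	#ifndef cpu_has_xpa
	#define cpu_has_xpa	(cpu_data[0].options & MIPS_CPU_XPA)
	#endif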

This allows an XPA kernel to run on non-XPA hardware where that
instruction is not implemented, just as it can run on XPA-capable
hardware with XPA unused (via the noxpa kernel argument) or with XPA
not configured in hardware.
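
For reference, the noxpa argument is parsed early in boot and sets
mips_xpa_disabled, which the tlbex.c hunk below tests. Roughly (a
sketch of the existing handler in arch/mips/kernel/cpu-probe.c,
assuming the conventional __setup() pattern):

	/* set from the "noxpa" kernel command line argument */
	int mips_xpa_disabled;

	static int __init xpa_disable(char *s)
	{
		mips_xpa_disabled = 1;
		return 1;
	}
	__setup("noxpa", xpa_disable);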

[paul.burton@imgtec.com:
  - Rebase atop other TLB work.
  - Add "mm" to subject.
  - Handle the __kmap_pgprot case.]

Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13124/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 arch/mips/mm/init.c    |  8 +++++---
 arch/mips/mm/tlb-r4k.c |  6 ++++--
 arch/mips/mm/tlbex.c   | 16 ++++++++++------
 3 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 134c988bc61fb..9b58eb5fd0d57 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -112,9 +112,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
 #ifdef CONFIG_XPA
-	entrylo = (pte.pte_low & _PFNX_MASK);
-	writex_c0_entrylo0(entrylo);
-	writex_c0_entrylo1(entrylo);
+	if (cpu_has_xpa) {
+		entrylo = (pte.pte_low & _PFNX_MASK);
+		writex_c0_entrylo0(entrylo);
+		writex_c0_entrylo1(entrylo);
+	}
 #endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 61f1f1ba4a174..5a5c7fec645e8 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -339,10 +339,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
 		ptep++;
 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 588bde3bdbe7d..0efa6211cc409 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1030,17 +1030,21 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
 
-		uasm_i_lw(p, tmp, 0, ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, 0, ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		}
 
 		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
 
-		uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		}
 		return;
 	}
 
-- 
2.39.5