KVM: PPC: Book3S HV P9: implement hash host / hash guest support
author     Nicholas Piggin <npiggin@gmail.com>
           Fri, 28 May 2021 09:07:51 +0000 (19:07 +1000)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Thu, 10 Jun 2021 12:12:15 +0000 (22:12 +1000)
Implement support for hash guests under a hash host. This requires
saving and restoring the host SLB, and ensuring that the MMU is off
while switching into the guest SLB.
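
As a rough sketch (editorial illustration, not patch text; the names
are taken from the diff below, with the radix cases and error handling
omitted), the hash-host flow in kvmhv_vcpu_entry_p9() becomes:

	/* MMU must be off before touching guest SLB / partition state */
	__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);

	/* clear and invalidate the host SLB */
	save_clear_host_mmu(kvm);	/* -> slb_clear_invalidate_partition() */

	/* load the guest HPT MMU context (SLB, LPID, LPCR, ...) */
	switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);

	/* ... guest runs via kvmppc_p9_enter_guest() ... */

	/* switch back; reloads the bolted host SLB entries */
	switch_mmu_to_host(kvm, host_pidr);	/* -> slb_restore_bolted_realmode() */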

POWER9 and later CPUs now always go via the P9 path. The "fast" guest
mode is renamed to the P9 mode, consistent with its functionality and
with the rest of the naming.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-32-npiggin@gmail.com
arch/powerpc/include/asm/kvm_asm.h
arch/powerpc/kvm/book3s_64_entry.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_p9_entry.c

arch/powerpc/include/asm/kvm_asm.h
index e479487488f4738d60f24861fcb30a6dd6d933c9..fbbf3cec92e90710f667bcefc2cb67ba270f26c5 100644
 #define KVM_GUEST_MODE_SKIP    2
 #define KVM_GUEST_MODE_GUEST_HV        3
 #define KVM_GUEST_MODE_HOST_HV 4
-#define KVM_GUEST_MODE_HV_FAST 5 /* ISA >= v3.0 host radix */
+#define KVM_GUEST_MODE_HV_P9   5 /* ISA >= v3.0 path */
 
 #define KVM_INST_FETCH_FAILED  -1
 
arch/powerpc/kvm/book3s_64_entry.S
index 7322fea971e43b758b5c9a8d1509c5738eb42c37..983b8c18bc31e8b25179cf361a263c397f171e8c 100644
@@ -36,7 +36,7 @@
 kvmppc_hcall:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        lbz     r10,HSTATE_IN_GUEST(r13)
-       cmpwi   r10,KVM_GUEST_MODE_HV_FAST
+       cmpwi   r10,KVM_GUEST_MODE_HV_P9
        beq     kvmppc_p9_exit_hcall
 #endif
        ld      r10,PACA_EXGEN+EX_R13(r13)
@@ -68,7 +68,7 @@ kvmppc_interrupt:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        std     r10,HSTATE_SCRATCH0(r13)
        lbz     r10,HSTATE_IN_GUEST(r13)
-       cmpwi   r10,KVM_GUEST_MODE_HV_FAST
+       cmpwi   r10,KVM_GUEST_MODE_HV_P9
        beq     kvmppc_p9_exit_interrupt
        ld      r10,HSTATE_SCRATCH0(r13)
 #endif
@@ -183,8 +183,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 /*
  * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
  *
- * Enter the guest on a ISAv3.0 or later system where we have exactly
- * one vcpu per vcore, and the host is radix.
+ * Enter the guest on an ISAv3.0 or later system.
  */
 .balign        IFETCH_ALIGN_BYTES
 _GLOBAL(kvmppc_p9_enter_guest)
@@ -284,7 +283,7 @@ kvmppc_p9_exit_hcall:
 .balign        IFETCH_ALIGN_BYTES
 kvmppc_p9_exit_interrupt:
        /*
-        * If set to KVM_GUEST_MODE_HV_FAST but we're still in the
+        * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
         * hypervisor, that means we can't return from the entry stack.
         */
        rldicl. r10,r12,64-MSR_HV_LG,63
@@ -358,6 +357,12 @@ kvmppc_p9_exit_interrupt:
  * effort for a small bit of code. Lots of other things to do first.
  */
 kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+       /*
+        * Hash host doesn't try to recover MMU (requires host SLB reload)
+        */
+       b       .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
        /*
         * Clean up guest registers to give host a chance to run.
         */
arch/powerpc/kvm/book3s_hv.c
index 662f599bdc0e97a835f76b9542ac82f2c3121c00..045458e7192a48b9f10869f8bf7a3f7f25af7860 100644
@@ -4511,7 +4511,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
-               if (radix_enabled())
+               if (cpu_has_feature(CPU_FTR_ARCH_300))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
@@ -5599,6 +5599,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
                return -EPERM;
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -ENODEV;
+       if (!radix_enabled())
+               return -ENODEV;
 
        /* kvm == NULL means the caller is testing if the capability exists */
        if (kvm)
arch/powerpc/kvm/book3s_hv_p9_entry.c
index 4460f1c23a9dbe818f8f5bb4781c85e5ac787770..83f592eadcd2d8046ddd83c1dc42a405f43c4661 100644
@@ -130,7 +130,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
        isync();
 }
 
-static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
        isync();
        mtspr(SPRN_PID, pid);
@@ -139,6 +139,22 @@ static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
        isync();
        mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
        isync();
+
+       if (!radix_enabled())
+               slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+       if (!radix_enabled()) {
+               /*
+                * Hash host could save and restore host SLB entries to
+                * reduce SLB fault overheads of VM exits, but for now the
+                * existing code clears all entries and restores just the
+                * bolted ones when switching back to host.
+                */
+               slb_clear_invalidate_partition();
+       }
 }
 
 static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
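
The comment in save_clear_host_mmu() above alludes to a possible
optimisation: saving all valid host SLB entries on entry and replaying
them on exit, rather than clearing everything and letting SLB faults
repopulate the non-bolted entries. A minimal hypothetical sketch of
that idea, in terms of the mfslb()/mtslb() helpers this file already
uses for guest SLB state (the slb_save array and its management are
illustrative assumptions, not part of this patch):

	static void save_host_slb(struct slb_entry *slb_save, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			u64 slbee, slbev;

			mfslb(i, &slbee, &slbev);
			if (slbee & SLB_ESID_V) {
				/* keep the index in the low ESID bits for slbmte */
				slb_save[i].esid = slbee | i;
				slb_save[i].vsid = slbev;
			} else {
				slb_save[i].esid = 0;
			}
		}
	}

	static void restore_host_slb(const struct slb_entry *slb_save, int nr)
	{
		int i;

		/* replay only the entries that were valid when saved */
		for (i = 0; i < nr; i++)
			if (slb_save[i].esid & SLB_ESID_V)
				mtslb(slb_save[i].esid, slb_save[i].vsid);
	}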
@@ -271,16 +287,24 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
        mtspr(SPRN_AMOR, ~0UL);
 
-       local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_FAST;
+       local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+       /*
+        * Hash host, hash guest, or radix guest with prefetch bug, all have
+        * to disable the MMU before switching to guest MMU state.
+        */
+       if (!radix_enabled() || !kvm_is_radix(kvm) ||
+                       cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+       save_clear_host_mmu(kvm);
+
        if (kvm_is_radix(kvm)) {
-               if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-                       __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
                switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
                if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
                        __mtmsrd(0, 1); /* clear RI */
 
        } else {
-               __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
                switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
        }
 
@@ -468,7 +492,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
        mtspr(SPRN_HDEC, 0x7fffffff);
 
        save_clear_guest_mmu(kvm, vcpu);
-       switch_mmu_to_host_radix(kvm, host_pidr);
+       switch_mmu_to_host(kvm, host_pidr);
        local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
 
        /*