#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
-#define KVM_GUEST_MODE_HV_FAST 5 /* ISA >= v3.0 host radix */
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
#define KVM_INST_FETCH_FAILED -1
kvmppc_hcall:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
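	/*
	 * Editor's note: hcalls from a guest entered via the new P9 path
	 * are diverted to its dedicated exit handler here, before the
	 * legacy kvmppc_interrupt code touches any state.
	 */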
lbz r10,HSTATE_IN_GUEST(r13)
- cmpwi r10,KVM_GUEST_MODE_HV_FAST
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
beq kvmppc_p9_exit_hcall
#endif
ld r10,PACA_EXGEN+EX_R13(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
std r10,HSTATE_SCRATCH0(r13)
lbz r10,HSTATE_IN_GUEST(r13)
- cmpwi r10,KVM_GUEST_MODE_HV_FAST
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
beq kvmppc_p9_exit_interrupt
ld r10,HSTATE_SCRATCH0(r13)
#endif
/*
 * void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
*
- * Enter the guest on a ISAv3.0 or later system where we have exactly
- * one vcpu per vcore, and the host is radix.
+ * Enter the guest on an ISAv3.0 or later system.
*/
.balign IFETCH_ALIGN_BYTES
_GLOBAL(kvmppc_p9_enter_guest)
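	/*
	 * Editor's note: the C side of this series (kvmhv_vcpu_entry_p9)
	 * switches the MMU to guest state and sets HSTATE_IN_GUEST to
	 * KVM_GUEST_MODE_HV_P9 before calling this, which is the flag the
	 * exit paths above test.
	 */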
.balign IFETCH_ALIGN_BYTES
kvmppc_p9_exit_interrupt:
/*
- * If set to KVM_GUEST_MODE_HV_FAST but we're still in the
+ * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
* hypervisor, that means we can't return from the entry stack.
*/
rldicl. r10,r12,64-MSR_HV_LG,63
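	/*
	 * Editor's note: rotate MSR[HV] (from the interrupted MSR in r12)
	 * down to bit 63 and mask the rest; the record form sets cr0, so a
	 * non-zero result means the interrupt hit while still in the
	 * hypervisor.
	 */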
* effort for a small bit of code. Lots of other things to do first.
*/
kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+ /*
+ * Hash host doesn't try to recover MMU (requires host SLB reload)
+ */
+ b .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
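	/*
	 * Editor's note: the feature section above is live only while
	 * MMU_FTR_TYPE_RADIX is clear; boot-time feature fixups patch the
	 * "b ." out on radix hosts, so only hash hosts hang here instead of
	 * attempting the register cleanup below.
	 */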
/*
* Clean up guest registers to give host a chance to run.
*/
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- if (radix_enabled())
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
return -EPERM;
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return -ENODEV;
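+	/* this capability additionally requires a radix host MMU */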
+ if (!radix_enabled())
+ return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
if (kvm)
isync();
}
-static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
isync();
mtspr(SPRN_PID, pid);
isync();
mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
isync();
+
+ if (!radix_enabled())
+ slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+ if (!radix_enabled()) {
+ /*
+ * Hash host could save and restore host SLB entries to
+ * reduce SLB fault overheads of VM exits, but for now the
+ * existing code clears all entries and restores just the
+ * bolted ones when switching back to host.
+ */
+ slb_clear_invalidate_partition();
+ }
}
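
/*
 * Editor's illustration, not part of the patch: the comment above says a
 * hash host *could* snapshot its SLB entries on entry and replay them on
 * exit, rather than clearing everything and letting the non-bolted
 * entries fault back in.  A minimal sketch of that idea follows; struct
 * slb_snapshot, slb_save_all() and slb_reload_all() are hypothetical
 * names.  It assumes the kernel's mmu_slb_size, SLB_ESID_V and isync().
 */
struct slb_snapshot {
	u64 esid[64];	/* slbmfee result per slot (ESID | valid bit) */
	u64 vsid[64];	/* slbmfev result per slot */
};

static void slb_save_all(struct slb_snapshot *s)
{
	long i;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (s->esid[i]) : "r" (i));
		asm volatile("slbmfev %0,%1" : "=r" (s->vsid[i]) : "r" (i));
	}
}

/* Assumes the SLB was already emptied, e.g. by slb_clear_invalidate_partition(). */
static void slb_reload_all(const struct slb_snapshot *s)
{
	long i;

	for (i = 0; i < mmu_slb_size; i++) {
		if (!(s->esid[i] & SLB_ESID_V))
			continue;
		/* slbmte: VSID data in RS; ESID, valid bit and slot index in RB */
		asm volatile("slbmte %0,%1"
			     : : "r" (s->vsid[i]), "r" (s->esid[i] | i));
	}
	isync();
}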
static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
mtspr(SPRN_AMOR, ~0UL);
- local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_FAST;
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+ /*
+ * Hash host, hash guest, or radix guest with prefetch bug, all have
+ * to disable the MMU before switching to guest MMU state.
+ */
+ if (!radix_enabled() || !kvm_is_radix(kvm) ||
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
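+	/*
+	 * Only the radix host + radix guest + no-prefetch-bug combination
+	 * keeps translation enabled here; it merely clears MSR[RI] below,
+	 * once the guest MMU context has been installed.
+	 */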
+
+ save_clear_host_mmu(kvm);
+
if (kvm_is_radix(kvm)) {
- if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
__mtmsrd(0, 1); /* clear RI */
} else {
- __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
}
mtspr(SPRN_HDEC, 0x7fffffff);
save_clear_guest_mmu(kvm, vcpu);
- switch_mmu_to_host_radix(kvm, host_pidr);
+ switch_mmu_to_host(kvm, host_pidr);
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
/*