KVM: PPC: Book3S HV: add virtual mode handlers for HPT hcalls and page faults
author Nicholas Piggin <npiggin@gmail.com>
Fri, 28 May 2021 09:07:48 +0000 (19:07 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 10 Jun 2021 12:12:15 +0000 (22:12 +1000)
In order to support hash guests in the P9 path (which does not do real-mode
hcalls or page fault handling), these real-mode hash-specific interrupts need
to be implemented in virt mode.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-29-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
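
All seven HPT hcalls are dispatched the same way in kvmppc_pseries_do_hcall(); taking H_ENTER as the representative case, the shape of the new virt-mode path is roughly as follows (an annotated condensation of the first hunk below, with the post-switch check pulled in for context):

	case H_ENTER:
		/* PAPR arguments arrive in GPR4..GPR7: flags, pte_index, pteh, ptel */
		ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)		/* handler could not complete here */
			return RESUME_HOST;	/* punt the hcall to the host path */
		break;
	/* ... the other HPT hcalls (H_REMOVE, H_READ, H_PROTECT, H_BULK_REMOVE,
	 * H_CLEAR_REF, H_CLEAR_MOD) follow the same pattern ... */

	/* After the switch: H_TOO_HARD should never reach the guest. */
	WARN_ON_ONCE(ret == H_TOO_HARD);
	kvmppc_set_gpr(vcpu, 3, ret);	/* hcall status goes back to the guest in GPR3 */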

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9ba77747bf00595812514315100d202eaa4473c0..dee740a3ace9ea5ffa094476042d008557cd2503 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -939,6 +939,52 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                return RESUME_HOST;
 
        switch (req) {
+       case H_REMOVE:
+               ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_ENTER:
+               ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6),
+                                       kvmppc_get_gpr(vcpu, 7));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_READ:
+               ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_CLEAR_MOD:
+               ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_CLEAR_REF:
+               ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_PROTECT:
+               ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_BULK_REMOVE:
+               ret = kvmppc_h_bulk_remove(vcpu);
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+
        case H_CEDE:
                break;
        case H_PROD:
@@ -1138,6 +1184,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        default:
                return RESUME_HOST;
        }
+       WARN_ON_ONCE(ret == H_TOO_HARD);
        kvmppc_set_gpr(vcpu, 3, ret);
        vcpu->arch.hcall_needed = 0;
        return RESUME_GUEST;
@@ -1438,22 +1485,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
         * We get these next two if the guest accesses a page which it thinks
         * it has mapped but which is not actually present, either because
         * it is for an emulated I/O device or because the corresponding
-        * host page has been paged out.  Any other HDSI/HISI interrupts
-        * have been handled already.
+        * host page has been paged out.
+        *
+        * Any other HDSI/HISI interrupts have been handled already for P7/8
+        * guests. For POWER9 hash guests not using rmhandlers, basic hash
+        * fault handling is done here.
         */
-       case BOOK3S_INTERRUPT_H_DATA_STORAGE:
-               r = RESUME_PAGE_FAULT;
-               if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
+       case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+               unsigned long vsid;
+               long err;
+
+               if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
                        r = RESUME_GUEST; /* Just retry if it's the canary */
+                       break;
+               }
+
+               if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+                       /*
+                        * Radix doesn't require anything, and pre-ISAv3.0 hash
+                        * already attempted to handle this in rmhandlers. The
+                        * hash fault handling below is v3 only (it uses ASDR
+                        * via fault_gpa).
+                        */
+                       r = RESUME_PAGE_FAULT;
+                       break;
+               }
+
+               if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+                       kvmppc_core_queue_data_storage(vcpu,
+                               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               if (!(vcpu->arch.shregs.msr & MSR_DR))
+                       vsid = vcpu->kvm->arch.vrma_slb_v;
+               else
+                       vsid = vcpu->arch.fault_gpa;
+
+               err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+                               vsid, vcpu->arch.fault_dsisr, true);
+               if (err == 0) {
+                       r = RESUME_GUEST;
+               } else if (err == -1 || err == -2) {
+                       r = RESUME_PAGE_FAULT;
+               } else {
+                       kvmppc_core_queue_data_storage(vcpu,
+                               vcpu->arch.fault_dar, err);
+                       r = RESUME_GUEST;
+               }
                break;
-       case BOOK3S_INTERRUPT_H_INST_STORAGE:
+       }
+       case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+               unsigned long vsid;
+               long err;
+
                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
                vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
                        DSISR_SRR1_MATCH_64S;
-               if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
-                       vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
-               r = RESUME_PAGE_FAULT;
+               if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+                       /*
+                        * Radix doesn't require anything, and pre-ISAv3.0 hash
+                        * already attempted to handle this in rmhandlers. The
+                        * hash fault handling below is v3 only (it uses ASDR
+                        * via fault_gpa).
+                        */
+                       if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+                               vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+                       r = RESUME_PAGE_FAULT;
+                       break;
+               }
+
+               if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+                       kvmppc_core_queue_inst_storage(vcpu,
+                               vcpu->arch.fault_dsisr);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               if (!(vcpu->arch.shregs.msr & MSR_IR))
+                       vsid = vcpu->kvm->arch.vrma_slb_v;
+               else
+                       vsid = vcpu->arch.fault_gpa;
+
+               err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+                               vsid, vcpu->arch.fault_dsisr, false);
+               if (err == 0) {
+                       r = RESUME_GUEST;
+               } else if (err == -1) {
+                       r = RESUME_PAGE_FAULT;
+               } else {
+                       kvmppc_core_queue_inst_storage(vcpu, err);
+                       r = RESUME_GUEST;
+               }
                break;
+       }
+
        /*
         * This occurs if the guest executes an illegal instruction.
         * If the guest debug is disabled, generate a program interrupt
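
The two storage-interrupt cases above implement the same decision flow; summarised as a comment (a paraphrase of the HDSI hunk, not compilable code; the HISI case differs mainly in testing SRR1_ISI_NOPT, keying off MSR_IR, and queueing an instruction storage interrupt):

	/*
	 * HDSI on an ISA v3.0 hash guest, paraphrased from the hunk above:
	 *
	 *   fault_dsisr == HDSISR_CANARY      -> RESUME_GUEST (just retry)
	 *   radix guest or pre-ISAv3.0 CPU    -> RESUME_PAGE_FAULT (behaviour unchanged)
	 *   !(dsisr & (NOHPTE | PROTFAULT))   -> queue a DSI to the guest, RESUME_GUEST
	 *   otherwise:
	 *     vsid = (MSR_DR set) ? fault_gpa (ASDR) : vrma_slb_v
	 *     err  = kvmppc_hpte_hv_fault(vcpu, fault_dar, vsid, fault_dsisr, true)
	 *       err == 0       -> RESUME_GUEST      (retry the access)
	 *       err == -1, -2  -> RESUME_PAGE_FAULT (host page fault handling)
	 *       anything else  -> queue a DSI with DSISR = err, RESUME_GUEST
	 */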
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 7af7c70f14680e2e3530c1d174b26454519dd38c..8cc73abbf42b376cfa8a7fe50045442d04dceb8d 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -409,6 +409,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                                 vcpu->arch.pgdir, true,
                                 &vcpu->arch.regs.gpr[4]);
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
 
 #ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN     (*(u32 *)(&get_paca()->lock_token))
@@ -553,6 +554,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
        return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                  &vcpu->arch.regs.gpr[4]);
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
 
 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 {
@@ -671,6 +673,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
 
 long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn)
@@ -741,6 +744,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 
        return H_SUCCESS;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
 
 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
@@ -781,6 +785,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
        }
        return H_SUCCESS;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
 
 long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index)
@@ -829,6 +834,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
        unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
 
 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index)
@@ -876,6 +882,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
        unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
 
 static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
                          unsigned long gpa, int writing, unsigned long *hpa,
@@ -1294,3 +1301,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 
        return -1;              /* send fault up to host kernel mode */
 }
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
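
The EXPORT_SYMBOL_GPL additions are presumably needed because these handlers, so far reached only from the built-in real-mode code, are now also called from book3s_hv.c, which can be built as the kvm-hv module, whereas book3s_hv_rm_mmu.c is always built into the kernel image.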