git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: MMU: change page_fault_handle_page_track() arguments to kvm_page_fault
author: Paolo Bonzini <pbonzini@redhat.com>
Fri, 6 Aug 2021 08:21:58 +0000 (04:21 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:49 +0000 (03:44 -0400)
Add fields to struct kvm_page_fault corresponding to the arguments
of page_fault_handle_page_track().  The fields are initialized in the
callers, and page_fault_handle_page_track() now receives a pointer to
the struct kvm_page_fault and reads the values from its fields, instead
of taking them as separate arguments.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index 8d001b56f7b57ec51fcac356d9570b4cf4b4d581..a5c2d4069964fe426d54560b1c08bde222e3ce00 100644 (file)
@@ -132,6 +132,9 @@ struct kvm_page_fault {
 
        /* Input to FNAME(fetch), __direct_map and kvm_tdp_mmu_map.  */
        u8 max_level;
+
+       /* Shifted addr, or result of guest page table walk if addr is a gva.  */
+       gfn_t gfn;
 };
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
index 7685b4270d8c23dd00207d718b2511619650da81..41dc6796b80b71e99f425cf6c7999173806bd9de 100644 (file)
@@ -3846,20 +3846,19 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 }
 
 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
-                                        u32 error_code, gfn_t gfn)
+                                        struct kvm_page_fault *fault)
 {
-       if (unlikely(error_code & PFERR_RSVD_MASK))
+       if (unlikely(fault->rsvd))
                return false;
 
-       if (!(error_code & PFERR_PRESENT_MASK) ||
-             !(error_code & PFERR_WRITE_MASK))
+       if (!fault->present || !fault->write)
                return false;
 
        /*
         * guest is writing the page which is write tracked which can
         * not be fixed by page fault handler.
         */
-       if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+       if (kvm_page_track_is_active(vcpu, fault->gfn, KVM_PAGE_TRACK_WRITE))
                return true;
 
        return false;
@@ -3956,13 +3955,13 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
        bool map_writable;
 
-       gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long mmu_seq;
        kvm_pfn_t pfn;
        hva_t hva;
        int r;
 
-       if (page_fault_handle_page_track(vcpu, error_code, gfn))
+       fault->gfn = gpa >> PAGE_SHIFT;
+       if (page_fault_handle_page_track(vcpu, fault))
                return RET_PF_EMULATE;
 
        r = fast_page_fault(vcpu, gpa, error_code);
@@ -3976,11 +3975,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (kvm_faultin_pfn(vcpu, fault->prefault, gfn, gpa, &pfn, &hva,
+       if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, gpa, &pfn, &hva,
                            fault->write, &map_writable, &r))
                return r;
 
-       if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
+       if (handle_abnormal_pfn(vcpu, fault->is_tdp ? 0 : gpa,
+                               fault->gfn, pfn, ACC_ALL, &r))
                return r;
 
        r = RET_PF_RETRY;
index a39881a8ba785eca7cee418f6f8dbddafdb79f03..44a19dde5e70e275092d2898827f81b6628a7247 100644 (file)
@@ -869,7 +869,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                return RET_PF_RETRY;
        }
 
-       if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
+       fault->gfn = walker.gfn;
+       if (page_fault_handle_page_track(vcpu, fault)) {
                shadow_page_table_clear_flood(vcpu, addr);
                return RET_PF_EMULATE;
        }
@@ -891,11 +892,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (kvm_faultin_pfn(vcpu, fault->prefault, walker.gfn, addr, &pfn, &hva,
+       if (kvm_faultin_pfn(vcpu, fault->prefault, fault->gfn, addr, &pfn, &hva,
                            fault->write, &map_writable, &r))
                return r;
 
-       if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
+       if (handle_abnormal_pfn(vcpu, addr, fault->gfn, pfn, walker.pte_access, &r))
                return r;
 
        /*