git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86/mmu: Verify shadow walk doesn't terminate early in page faults
authorSean Christopherson <seanjc@google.com>
Mon, 6 Sep 2021 12:25:46 +0000 (20:25 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:52 +0000 (03:44 -0400)
WARN and bail if the shadow walk for faulting in a SPTE terminates early,
i.e. doesn't reach the expected level because the walk encountered a
terminal SPTE.  The shadow walks for page faults are subtle in that they
install non-leaf SPTEs (zapping leaf SPTEs if necessary!) in the loop
body, and consume the newly created non-leaf SPTE in the loop control,
e.g. __shadow_walk_next().  In other words, the walks guarantee that the
walk will stop if and only if the target level is reached by installing
non-leaf SPTEs to guarantee the walk remains valid.

Opportunistically use fault->goal_level instead of it.level in
FNAME(fetch) to further clarify that KVM always installs the leaf SPTE at
the target level.

Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210906122547.263316-1-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index 5ba0a844f5760f0744777c323eed1eb38a7ea42c..2ddbabad5bd2e8aedbcf22e6269272eebbfc2efc 100644 (file)
@@ -3012,6 +3012,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                        account_huge_nx_page(vcpu->kvm, sp);
        }
 
+       if (WARN_ON_ONCE(it.level != fault->goal_level))
+               return -EFAULT;
+
        ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
                           fault->write, fault->goal_level, base_gfn, fault->pfn,
                           fault->prefault, fault->map_writable);
index 6bc0dbc0bafff0d2ec37fd450b7342d04605d306..7a8a2d14a3c7dce1b521519f418264e847e73be3 100644 (file)
@@ -760,9 +760,12 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                }
        }
 
+       if (WARN_ON_ONCE(it.level != fault->goal_level))
+               return -EFAULT;
+
        ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write,
-                          it.level, base_gfn, fault->pfn, fault->prefault,
-                          fault->map_writable);
+                          fault->goal_level, base_gfn, fault->pfn,
+                          fault->prefault, fault->map_writable);
        if (ret == RET_PF_SPURIOUS)
                return ret;