KVM: X86: Move PTE present check from loop body to __shadow_walk_next()
author     Lai Jiangshan <laijs@linux.alibaba.com>
           Mon, 6 Sep 2021 12:25:47 +0000 (20:25 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Fri, 1 Oct 2021 07:44:46 +0000 (03:44 -0400)
So far, the loop bodies already ensure the PTE is present before calling
__shadow_walk_next(): some loop bodies simply break out of the walk as
soon as they encounter a !PRESENT SPTE, and the remaining ones, i.e.
FNAME(fetch) and __direct_map(), do not currently guard their walks with
is_shadow_present_pte(), but only because they install present non-leaf
SPTEs in the loop itself.
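
For illustration, the affected lockless walks currently have roughly
this shape (a simplified sketch of the pattern the hunks below remove,
not exact kernel code):

	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		/* ... consume iterator.sptep / spte ... */

		/* Pre-patch: every loop body stops the walk by hand. */
		if (!is_shadow_present_pte(spte))
			break;
	}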

But checking PTE presence in __shadow_walk_next() (which is called from
shadow_walk_okay()) is more prudent; walking past a !PRESENT SPTE would
mean reading the next-level SPTE from a garbage iter->shadow_addr.  It
also allows the is_shadow_present_pte() checks to be removed from the
loop bodies.
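
As a minimal, self-contained sketch of the new termination rule (the
masks, types, and helpers are simplified stand-ins for illustration
only, not the kernel's definitions):

	#include <stdbool.h>
	#include <stdint.h>

	#define PRESENT_MASK  0x1ull       /* stand-in for the shadow-present bit */
	#define LEAF_MASK     0x80ull      /* stand-in for the large/leaf bit */
	#define ADDR_MASK     (~0xfffull)  /* page-frame bits of an SPTE */

	struct walker {
		int      level;            /* 0 means the walk is over */
		uint64_t shadow_addr;      /* PA of the current table page */
	};

	static bool spte_present(uint64_t spte)
	{
		return spte & PRESENT_MASK;
	}

	static bool spte_last(uint64_t spte, int level)
	{
		return level == 1 || (spte & LEAF_MASK);
	}

	/* Shape of __shadow_walk_next() after this patch. */
	static void walk_next(struct walker *it, uint64_t spte)
	{
		/*
		 * Stop on !PRESENT as well as on a leaf: a non-present
		 * SPTE carries no valid next-level address, so descending
		 * through it would load the next SPTE from a garbage
		 * shadow_addr.
		 */
		if (!spte_present(spte) || spte_last(spte, it->level)) {
			it->level = 0;
			return;
		}

		it->shadow_addr = spte & ADDR_MASK;
		--it->level;
	}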

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210906122547.263316-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index 73aa15e893115b352fb0d4353c54c5d1e091f100..7ef9c001d1b6974626cc49279657158774b48627 100644 (file)
@@ -2220,7 +2220,7 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
                               u64 spte)
 {
-       if (is_last_spte(spte, iterator->level)) {
+       if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
                iterator->level = 0;
                return;
        }
@@ -3189,9 +3189,6 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
        for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
                sptep = iterator.sptep;
                *spte = old_spte;
-
-               if (!is_shadow_present_pte(old_spte))
-                       break;
        }
 
        return sptep;
@@ -3759,9 +3756,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
                spte = mmu_spte_get_lockless(iterator.sptep);
 
                sptes[leaf] = spte;
-
-               if (!is_shadow_present_pte(spte))
-                       break;
        }
 
        return leaf;
@@ -3877,11 +3871,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
        u64 spte;
 
        walk_shadow_page_lockless_begin(vcpu);
-       for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+       for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                clear_sp_write_flooding_count(iterator.sptep);
-               if (!is_shadow_present_pte(spte))
-                       break;
-       }
        walk_shadow_page_lockless_end(vcpu);
 }
 
index 08f466ac36ff11cc156a79ffe1c5e75b6b220c55..b908d2ff6d4c83db1d4c236996e77324f96d6c81 100644 (file)
@@ -1002,7 +1002,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
                        FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
                }
 
-               if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
+               if (!sp->unsync_children)
                        break;
        }
        write_unlock(&vcpu->kvm->mmu_lock);