rmap_remove(kvm, sptep);
}
-static void drop_large_spte(struct kvm *kvm, u64 *sptep)
+static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
{
struct kvm_mmu_page *sp;
sp = sptep_to_sp(sptep);
WARN_ON(sp->role.level == PG_LEVEL_4K);
drop_spte(kvm, sptep);
- kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
+
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
}
static void __link_shadow_page(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache, u64 *sptep,
- struct kvm_mmu_page *sp)
+ struct kvm_mmu_page *sp, bool flush)
{
u64 spte;
/*
* If an SPTE is present already, it must be a leaf and therefore
- * a large one. Drop it and flush the TLB before installing sp.
+ * a large one. Drop it, and flush the TLB if needed, before
+ * installing sp.
*/
if (is_shadow_present_pte(*sptep))
- drop_large_spte(kvm, sptep);
+ drop_large_spte(kvm, sptep, flush);
spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
struct kvm_mmu_page *sp)
{
- __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp);
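+ /* Pass flush=true: the TLB is always flushed here if a huge SPTE is replaced. */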
+ __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
}
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
u64 huge_spte = READ_ONCE(*huge_sptep);
struct kvm_mmu_page *sp;
+ bool flush = false;
u64 *sptep, spte;
gfn_t gfn;
int index;
* gfn-to-pfn translation since the SP is direct, so no need to
* modify them.
*
- * If a given SPTE points to a lower level page table, installing
- * such SPTEs would effectively unmap a potion of the huge page.
- * This is not an issue because __link_shadow_page() flushes the TLB
- * when the passed sp replaces a large SPTE.
+ * However, if a given SPTE points to a lower level page table,
+ * that lower level page table may only be partially populated.
+ * Installing such SPTEs would effectively unmap a portion of the
+ * huge page. Unmapping guest memory always requires a TLB flush
+ * since a subsequent operation on the unmapped regions would
+ * fail to detect the need to flush.
*/
- if (is_shadow_present_pte(*sptep))
+ if (is_shadow_present_pte(*sptep)) {
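+ /* Only an SPTE pointing to a lower level page table (see above) requires the flush. */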
+ flush |= !is_last_spte(*sptep, sp->role.level);
continue;
+ }
spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
mmu_spte_set(sptep, spte);
__rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
}
- __link_shadow_page(kvm, cache, huge_sptep, sp);
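+ /* Drop the huge SPTE and install the new page table, flushing only if the loop above found it necessary. */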
+ __link_shadow_page(kvm, cache, huge_sptep, sp, flush);
}
static int shadow_mmu_try_split_huge_page(struct kvm *kvm,