From 0723ca9d8bf98fb1ff6525e2733d183e249ad1c8 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Fri, 24 Sep 2021 04:52:23 -0400
Subject: [PATCH] KVM: MMU: mark page dirty in make_spte

This simplifies set_spte, which we want to remove, and unifies code
between the shadow MMU and the TDP MMU. The warning will be added back
later to make_spte as well.

There is a small disadvantage in the TDP MMU; it may unnecessarily mark
a page as dirty twice if two vCPUs end up mapping the same page twice.
However, this is a very small cost for a case that is already rare.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c     |  3 ---
 arch/x86/kvm/mmu/spte.c    |  3 +++
 arch/x86/kvm/mmu/tdp_mmu.c | 21 +--------------------
 3 files changed, 4 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8b6dc276935f7..5a757953b98b2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2688,9 +2688,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
 
-	if (spte & PT_WRITABLE_MASK)
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
-
 	if (*sptep == spte)
 		ret |= SET_SPTE_SPURIOUS;
 	else if (mmu_spte_update(sptep, spte))
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index a33c581aabd69..66be9452ded10 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -179,6 +179,9 @@ out:
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
 		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
 
+	if (spte & PT_WRITABLE_MASK)
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
 	*new_spte = spte;
 	return ret;
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 3bf85a8c7d156..b41b6f5ea82ba 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -542,26 +542,7 @@ static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
 					       struct tdp_iter *iter,
 					       u64 new_spte)
 {
-	struct kvm *kvm = vcpu->kvm;
-
-	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
-		return false;
-
-	/*
-	 * Use kvm_vcpu_gfn_to_memslot() instead of going through
-	 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
-	 */
-	if (is_writable_pte(new_spte)) {
-		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);
-
-		if (slot && kvm_slot_dirty_track_enabled(slot)) {
-			/* Enforced by kvm_mmu_hugepage_adjust. */
-			WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
-			mark_page_dirty_in_slot(kvm, slot, iter->gfn);
-		}
-	}
-
-	return true;
+	return tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, iter, new_spte);
 }
 
 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
-- 
2.39.5
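
Not part of the patch above: a minimal, self-contained userspace sketch of the
pattern the patch applies, namely moving the dirty-marking side effect into the
SPTE constructor (make_spte) so that neither caller (the shadow MMU's set_spte
nor the TDP MMU's tdp_mmu_map_set_spte_atomic) has to repeat it. Every name
below (make_spte_stub, mark_page_dirty_stub, the two caller stand-ins, the
PT_WRITABLE_MASK value) is a hypothetical stub for illustration, not the real
KVM API.

/*
 * Sketch only: models "mark dirty inside the SPTE constructor" with plain C
 * stubs; none of these functions are the real KVM implementations.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)	/* stand-in for the real bit definition */

/* Stub for kvm_vcpu_mark_page_dirty(); here it only logs the gfn. */
static void mark_page_dirty_stub(uint64_t gfn)
{
	printf("gfn %llu marked dirty\n", (unsigned long long)gfn);
}

/* "make_spte": builds the SPTE and, after the patch, also marks the page dirty. */
static uint64_t make_spte_stub(uint64_t gfn, bool writable)
{
	uint64_t spte = gfn << 12;

	if (writable)
		spte |= PT_WRITABLE_MASK;

	/* The side effect now lives in the constructor, not in each caller. */
	if (spte & PT_WRITABLE_MASK)
		mark_page_dirty_stub(gfn);

	return spte;
}

/* Both caller stand-ins (shadow MMU / TDP MMU paths) now just install the SPTE. */
static void shadow_mmu_map(uint64_t *sptep, uint64_t gfn, bool writable)
{
	*sptep = make_spte_stub(gfn, writable);
}

static void tdp_mmu_map(uint64_t *sptep, uint64_t gfn, bool writable)
{
	*sptep = make_spte_stub(gfn, writable);
}

int main(void)
{
	uint64_t a = 0, b = 0;

	shadow_mmu_map(&a, 42, true);	/* marks gfn 42 dirty once */
	tdp_mmu_map(&b, 42, true);	/* a second mapping marks it dirty again:
					   the "small cost" the commit message accepts */
	return 0;
}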