KVM: X86: Remove kvm_mmu_flush_or_zap()
author    Lai Jiangshan <laijs@linux.alibaba.com>
          Sat, 18 Sep 2021 00:56:31 +0000 (08:56 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 30 Sep 2021 08:27:09 +0000 (04:27 -0400)
Because local_flush is no longer acted upon, kvm_mmu_flush_or_zap() has
become a trivial wrapper around kvm_mmu_remote_flush_or_zap(). Remove
the wrapper and have its callers use kvm_mmu_remote_flush_or_zap()
directly.
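
For context (not part of this patch), the surviving helper is quoted
below as it reads around this point in arch/x86/kvm/mmu/mmu.c; the
first hunk of the diff shows only its tail. The comments are editorial
additions, not in the kernel source:

	static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
						struct list_head *invalid_list,
						bool remote_flush)
	{
		/* Nothing to do: no pages to zap and no flush requested. */
		if (!remote_flush && list_empty(invalid_list))
			return false;

		/*
		 * Committing zapped pages flushes remote TLBs as part of
		 * freeing them; otherwise issue the bare remote flush.
		 */
		if (!list_empty(invalid_list))
			kvm_mmu_commit_zap_page(kvm, invalid_list);
		else
			kvm_flush_remote_tlbs(kvm);
		return true;
	}

Since the wrapper ignored local_flush entirely and merely forwarded its
remaining arguments, calling the helper directly is a pure
simplification with no change in behavior.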

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210918005636.3675-6-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c031daa49638c62c6aed9e89c4345dd4f0801b76..443b67d94784e0c48b1124f4783bfc8b04dbe850 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1931,14 +1931,6 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
        return true;
 }
 
-static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
-                                struct list_head *invalid_list,
-                                bool remote_flush, bool local_flush)
-{
-       if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
-               return;
-}
-
 #ifdef CONFIG_KVM_MMU_AUDIT
 #include "mmu_audit.c"
 #else
@@ -2032,7 +2024,6 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;
        LIST_HEAD(invalid_list);
-       bool flush = false;
 
        while (mmu_unsync_walk(parent, &pages)) {
                bool protected = false;
@@ -2042,27 +2033,25 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 
                if (protected) {
                        kvm_flush_remote_tlbs(vcpu->kvm);
-                       flush = false;
                }
 
                for_each_sp(pages, sp, parents, i) {
                        kvm_unlink_unsync_page(vcpu->kvm, sp);
-                       flush |= kvm_sync_page(vcpu, sp, &invalid_list);
+                       kvm_sync_page(vcpu, sp, &invalid_list);
                        mmu_pages_clear_parents(&parents);
                }
                if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
-                       kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+                       kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
                        if (!can_yield) {
                                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                                return -EINTR;
                        }
 
                        cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
-                       flush = false;
                }
        }
 
-       kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+       kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
        return 0;
 }
 
@@ -5209,7 +5198,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
-       bool remote_flush, local_flush;
+       bool flush = false;
 
        /*
         * If we don't have indirect shadow pages, it means no page is
@@ -5218,8 +5207,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
                return;
 
-       remote_flush = local_flush = false;
-
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
        /*
@@ -5248,18 +5235,17 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                if (!spte)
                        continue;
 
-               local_flush = true;
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
                        if (gentry && sp->role.level != PG_LEVEL_4K)
                                ++vcpu->kvm->stat.mmu_pde_zapped;
                        if (need_remote_flush(entry, *spte))
-                               remote_flush = true;
+                               flush = true;
                        ++spte;
                }
        }
-       kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
+       kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
        kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        write_unlock(&vcpu->kvm->mmu_lock);
 }