KVM: X86: Don't flush current tlb on shadow page modification
author     Lai Jiangshan <laijs@linux.alibaba.com>
           Sat, 18 Sep 2021 00:56:30 +0000 (08:56 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 30 Sep 2021 08:27:09 +0000 (04:27 -0400)
After any shadow page modification, flushing the TLB only on the current
vCPU is odd, because other vCPUs' TLBs might still be stale.

In other words, if any mandatory TLB flush is required after a shadow page
modification, SET_SPTE_NEED_REMOTE_TLB_FLUSH or remote_flush should be
set and the TLBs of all vCPUs should be flushed.  There is no point in
flushing only the current TLB except when the request stems from the
vCPU's or pCPU's own activity.
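
For reference, the all-vCPU path this relies on is the existing
kvm_mmu_remote_flush_or_zap() helper in arch/x86/kvm/mmu/mmu.c (called in
the first hunk below).  The following is only an approximate sketch of that
helper around this series, shown to illustrate that remote_flush ends up
flushing every vCPU's TLB; consult the tree for the exact code.

	static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
						struct list_head *invalid_list,
						bool remote_flush)
	{
		if (!remote_flush && list_empty(invalid_list))
			return false;

		if (!list_empty(invalid_list))
			/* Committing the zap also flushes all vCPUs' TLBs. */
			kvm_mmu_commit_zap_page(kvm, invalid_list);
		else
			/* Flush every vCPU's TLB, not just the current one. */
			kvm_flush_remote_tlbs(kvm);
		return true;
	}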

If there were any bug where a mandatory TLB flush is required but
SET_SPTE_NEED_REMOTE_TLB_FLUSH/remote_flush fails to be set, this patch
would expose that bug in a more destructive way.  The related code paths
have been checked and no missing SET_SPTE_NEED_REMOTE_TLB_FLUSH has been
found so far.

Currently there is no optional TLB flushing left either, now that the
sync-page related code has been changed to flush the TLB in a timely
manner.  So this local flushing code can simply be removed.
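
With the local flush gone, kvm_mmu_flush_or_zap() reduces to a thin
wrapper.  The sketch below is reconstructed from the first hunk in mmu.c;
the now-unused local_flush parameter is presumably dropped by a follow-up
cleanup.

	static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
					 struct list_head *invalid_list,
					 bool remote_flush, bool local_flush)
	{
		/* Zap and/or flush all vCPUs' TLBs when needed ... */
		if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list,
						remote_flush))
			return;
		/* ... and no longer fall back to KVM_REQ_TLB_FLUSH_CURRENT. */
	}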

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20210918005636.3675-5-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4238fe3e91c24181b5f01dbbfc384556e99a89b9..c031daa49638c62c6aed9e89c4345dd4f0801b76 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1937,9 +1937,6 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 {
        if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
                return;
-
-       if (local_flush)
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -2149,7 +2146,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                break;
 
                        WARN_ON(!list_empty(&invalid_list));
-                       kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
 
                __clear_sp_write_flooding_count(sp);
@@ -2757,7 +2753,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write_fault)
                        ret = RET_PF_EMULATE;
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }
 
        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 64ccfc1fa5535cfdf993e5343662878db66484b5..7a5a24ca50e446cf253809734992759c7d24b41a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -959,7 +959,6 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
        if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write)
                        ret = RET_PF_EMULATE;
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }
 
        /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */