KVM: SVM: Don't bother checking for "running" AVIC when kicking for IPIs
author	Sean Christopherson <seanjc@google.com>
	Wed, 8 Dec 2021 01:52:21 +0000 (01:52 +0000)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Wed, 19 Jan 2022 17:14:42 +0000 (12:14 -0500)
Drop the avic_vcpu_is_running() check when waking vCPUs in response to a
VM-Exit due to incomplete IPI delivery.  The check isn't wrong per se, but
it's not 100% accurate in the sense that it doesn't guarantee that the vCPU
was one of the vCPUs that didn't receive the IPI.

The check isn't required for correctness as blocking == !running in this
context.
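
For reference, the equivalence holds because SVM's (un)blocking hooks toggle
the AVIC "running" bit.  A simplified sketch of the 5.16-era hooks this patch
is based on (avic_set_running() is the real helper; the comments are editorial
annotations, not kernel source):

        static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
        {
                /* Clears the IS_RUNNING bit in the vCPU's physical ID entry. */
                avic_set_running(vcpu, false);
        }

        static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
        {
                /* Restores the IS_RUNNING bit before the vCPU runs again. */
                avic_set_running(vcpu, true);
        }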

From a performance perspective, waking a live task is not expensive as the
only moderately costly operation is a locked operation to temporarily
disable preemption.  And if that is indeed a performance issue,
kvm_vcpu_is_blocking() would be a better check than poking into the AVIC.
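
If such a filter were ever justified, it would look roughly like the sketch
below.  This is a hypothetical variant, not part of this patch;
kvm_vcpu_is_blocking() is the generic helper added earlier in this series.

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
                                        GET_APIC_DEST_FIELD(icrh),
                                        icrl & APIC_DEST_MASK) &&
                    kvm_vcpu_is_blocking(vcpu))  /* skip vCPUs that aren't waiting */
                        kvm_vcpu_wake_up(vcpu);
        }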

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211208015236.1616697-12-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.h

index 40039477bebb22090145827374505864c3121f5d..2d8278167c0f0a88ee849467b96d7e04c9dcfc39 100644 (file)
@@ -295,13 +295,16 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       /*
+        * Wake any target vCPUs that are blocking, i.e. waiting for a wake
+        * event.  There's no need to signal doorbells, as hardware has handled
+        * vCPUs that were in guest at the time of the IPI, and vCPUs that have
+        * since entered the guest will have processed pending IRQs at VMRUN.
+        */
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               bool m = kvm_apic_match_dest(vcpu, source,
-                                            icrl & APIC_SHORT_MASK,
-                                            GET_APIC_DEST_FIELD(icrh),
-                                            icrl & APIC_DEST_MASK);
-
-               if (m && !avic_vcpu_is_running(vcpu))
+               if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
+                                       GET_APIC_DEST_FIELD(icrh),
+                                       icrl & APIC_DEST_MASK))
                        kvm_vcpu_wake_up(vcpu);
        }
 }
index 6d29421dd4bf3f4ed70c8775763866045bfd9053..fd0685b2ce55a411192b9c0321e4dc7777d87582 100644 (file)
@@ -573,17 +573,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
 
-static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-       u64 *entry = svm->avic_physical_id_cache;
-
-       if (!entry)
-               return false;
-
-       return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
-}
-
 int avic_ga_log_notifier(u32 ga_tag);
 void avic_vm_destroy(struct kvm *kvm);
 int avic_vm_init(struct kvm *kvm);