KVM: SVM: Skip AVIC and IRTE updates when loading blocking vCPU
author Sean Christopherson <seanjc@google.com>
Wed, 8 Dec 2021 01:52:24 +0000 (01:52 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 19 Jan 2022 17:14:44 +0000 (12:14 -0500)
Don't bother updating the Physical APIC table or IRTE when loading a vCPU
that is blocking, i.e. won't be marked IsRun{ning}=1, as the pCPU is
queried if and only if IsRunning is '1'.  If the vCPU was migrated, the
new pCPU will be picked up when avic_vcpu_load() is called by
svm_vcpu_unblocking().

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211208015236.1616697-15-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
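
[Context, not part of this patch: the unblocking path the message refers to is not shown in the diff below. A minimal sketch of that wrapper, assuming it simply reloads AVIC state on the pCPU the vCPU woke up on; the exact body in the tree may differ.]

/*
 * Sketch only (not part of this patch): assumes the unblocking hook
 * reloads AVIC state on the pCPU the vCPU woke up on, which is where
 * avic_vcpu_load() stuffs the new CPU into the Physical APIC table
 * entry and the IRTE.
 */
static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_load(vcpu, vcpu->cpu);
}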
arch/x86/kvm/svm/avic.c

index 368813320121db633410978694f3a2a9892ddc86..b7353e11da2e61ed39b42f53b2e25f6bb6bbc022 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -975,7 +975,6 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
-       bool is_blocking = kvm_vcpu_is_blocking(vcpu);
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -986,24 +985,25 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
                return;
 
+       /*
+        * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+        * is being scheduled in after being preempted.  The CPU entries in the
+        * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+        * If the vCPU was migrated, its new CPU value will be stuffed when the
+        * vCPU unblocks.
+        */
+       if (kvm_vcpu_is_blocking(vcpu))
+               return;
+
        entry = READ_ONCE(*(svm->avic_physical_id_cache));
        WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
-
-       entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
-
-       /*
-        * Don't mark the vCPU as running if its blocking, i.e. if the vCPU is
-        * preempted after svm_vcpu_blocking() but before KVM voluntarily
-        * schedules out the vCPU.
-        */
-       if (!is_blocking)
-               entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+       entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
-       avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, !is_blocking);
+       avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1012,8 +1012,12 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        entry = READ_ONCE(*(svm->avic_physical_id_cache));
-       if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
-               avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+
+       /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+       if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+               return;
+
+       avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
 
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);