KVM: SVM: Use kvm_vcpu_is_blocking() in AVIC load to handle preemption
author     Sean Christopherson <seanjc@google.com>
           Wed, 8 Dec 2021 01:52:23 +0000 (01:52 +0000)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 19 Jan 2022 17:14:43 +0000 (12:14 -0500)
Use kvm_vcpu_is_blocking() to determine whether or not the vCPU should be
marked running during avic_vcpu_load().  Drop avic_is_running, which
really should have been named "vcpu_is_not_blocking", as it tracked if
the vCPU was blocking, not if it was actually running, e.g. it was set
during svm_create_vcpu() when the vCPU was obviously not running.
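
For context, kvm_vcpu_is_blocking() is the generic helper introduced
earlier in this series.  A minimal sketch of its rcuwait-based shape
(an illustration of the assumed implementation, not a quote of the
exact upstream code):

	/* include/linux/kvm_host.h (sketch) */
	static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
	{
		/* True once the vCPU has armed its rcuwait in kvm_vcpu_block(). */
		return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
	}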

This is technically a teeny tiny functional change, as the vCPU will be
marked IsRunning=1 on being reloaded if the vCPU is preempted between
svm_vcpu_blocking() and prepare_to_rcuwait().  But that's a benign change
as the vCPU will be marked IsRunning=0 when KVM voluntarily schedules out
the vCPU.
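
To illustrate the window, a simplified timeline (the callback names are
the ones discussed above; the ordering within kvm_vcpu_block() is
assumed):

	svm_vcpu_blocking(vcpu)
	    -> avic_set_running(vcpu, false): IsRunning=0
	<vCPU preempted here>
	    -> avic_vcpu_put(): IsRunning stays 0
	    -> avic_vcpu_load() on reload: kvm_vcpu_is_blocking() is still
	       false (the rcuwait is not yet armed), so IsRunning=1
	prepare_to_rcuwait(&vcpu->wait)
	    -> kvm_vcpu_is_blocking() now returns true
	schedule()  /* KVM voluntarily schedules out the vCPU */
	    -> avic_vcpu_put(): IsRunning=0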

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211208015236.1616697-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 7efe034c44c17fad1690086281f31a8c85e11d13..368813320121db633410978694f3a2a9892ddc86 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -975,6 +975,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
+       bool is_blocking = kvm_vcpu_is_blocking(vcpu);
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -992,12 +993,17 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
 
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
-       if (svm->avic_is_running)
+
+       /*
+        * Don't mark the vCPU as running if it's blocking, i.e. if the vCPU is
+        * preempted after svm_vcpu_blocking() but before KVM voluntarily
+        * schedules out the vCPU.
+        */
+       if (!is_blocking)
                entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
-       avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
-                                       svm->avic_is_running);
+       avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, !is_blocking);
 }
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1018,11 +1024,9 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
  */
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
-       struct vcpu_svm *svm = to_svm(vcpu);
        int cpu = get_cpu();
 
        WARN_ON(cpu != vcpu->cpu);
-       svm->avic_is_running = is_run;
 
        if (kvm_vcpu_apicv_active(vcpu)) {
                if (is_run)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bf0c3a67d83643c09ee78afc4c275d8acd1062aa..d21ef0381d29914db6ff5d1a641058949c1411a4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1440,12 +1440,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        if (err)
                goto error_free_vmsa_page;
 
-       /* We initialize this flag to true to make sure that the is_running
-        * bit would be set the first time the vcpu is loaded.
-        */
-       if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
-               svm->avic_is_running = true;
-
        svm->msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->msrpm) {
                err = -ENOMEM;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fd0685b2ce55a411192b9c0321e4dc7777d87582..5178f6b245e15132b15926e5e10568514213df40 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -225,7 +225,6 @@ struct vcpu_svm {
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;
-       bool avic_is_running;
 
        /*
         * Per-vcpu list of struct amd_svm_iommu_ir: