KVM: Move x86 VMX's posted interrupt list_head to vcpu_vmx
author    Sean Christopherson <seanjc@google.com>
          Wed, 8 Dec 2021 01:52:16 +0000 (01:52 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 19 Jan 2022 17:14:38 +0000 (12:14 -0500)
Move the seemingly generic blocked_vcpu_list from kvm_vcpu to vcpu_vmx, and
rename the list and all associated variables to clarify that it tracks
the set of vCPUs that need to be poked on a posted interrupt to the wakeup
vector.  The list is not used to track _all_ vCPUs that are blocking, and
the term "blocked" can be misleading as it may refer to a blocking
condition in the host or the guest, whereas the PI wakeup case is
specifically for the vCPUs that are actively blocking from within the
guest.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211208015236.1616697-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
include/linux/kvm_host.h
virt/kvm/kvm_main.c

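The heart of the patch is the kernel's intrusive-list idiom: with the
list_head moved into vcpu_vmx, pi_wakeup_handler() in the diff below can
iterate vcpu_vmx structures directly instead of walking generic kvm_vcpu
entries and converting each one. For readers unfamiliar with the idiom,
here is a minimal standalone userspace sketch of that pattern; the list
helpers are simplified re-implementations, and pi_on is an illustrative
stand-in for the real posted-interrupt descriptor, not the kernel's code.

/*
 * Standalone sketch (userspace, illustrative names) of the pattern the
 * patch adopts: embed the list_head in the containing vcpu_vmx so that
 * list_for_each_entry() hands back the container directly.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                             \
	for (pos = container_of((head)->next, __typeof__(*pos), member);   \
	     &pos->member != (head);                                        \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct kvm_vcpu { int id; };             /* generic state: no list_head */

struct vcpu_vmx {
	struct kvm_vcpu vcpu;            /* generic part, first member  */
	int pi_on;                       /* stand-in for pi_desc ON bit */
	struct list_head pi_wakeup_list; /* links into the per-CPU list */
};

int main(void)
{
	struct list_head wakeup_vcpus_on_cpu;    /* one list per pCPU */
	struct vcpu_vmx a = { .vcpu = { .id = 0 }, .pi_on = 1 };
	struct vcpu_vmx b = { .vcpu = { .id = 1 }, .pi_on = 0 };
	struct vcpu_vmx *vmx;

	INIT_LIST_HEAD(&wakeup_vcpus_on_cpu);
	list_add_tail(&a.pi_wakeup_list, &wakeup_vcpus_on_cpu);
	list_add_tail(&b.pi_wakeup_list, &wakeup_vcpus_on_cpu);

	/* Mirrors pi_wakeup_handler(): kick only vCPUs with ON set. */
	list_for_each_entry(vmx, &wakeup_vcpus_on_cpu, pi_wakeup_list) {
		if (vmx->pi_on)
			printf("kick vCPU %d\n", vmx->vcpu.id);
	}
	return 0;
}

Built with any C99-or-later compiler, this prints "kick vCPU 0": only the
entry whose ON bit is set gets kicked, which is the same check the renamed
pi_wakeup_handler() performs with pi_test_on().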
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 63e2399f353a2384c733880858863bb881403a7c..901eea44cf24baa7e3070452922053e79bf21173 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -19,7 +19,7 @@
  * wake the target vCPUs.  vCPUs are removed from the list and the notification
  * vector is reset when the vCPU is scheduled in.
  */
-static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
+static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
 /*
  * Protect the per-CPU list with a per-CPU spinlock to handle task migration.
  * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the
@@ -27,7 +27,7 @@ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
  * CPU.  IRQs must be disabled when taking this lock, otherwise deadlock will
  * occur if a wakeup IRQ arrives and attempts to acquire the lock.
  */
-static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
 
 static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
 {
@@ -51,6 +51,7 @@ static int pi_try_set_control(struct pi_desc *pi_desc, u64 old, u64 new)
 void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct pi_desc old, new;
        unsigned long flags;
        unsigned int dest;
@@ -86,9 +87,9 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
         * current pCPU if the task was migrated.
         */
        if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
-               raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
-               list_del(&vcpu->blocked_vcpu_list);
-               raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
+               raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+               list_del(&vmx->pi_wakeup_list);
+               raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
        }
 
        dest = cpu_physical_id(cpu);
@@ -142,15 +143,16 @@ static bool vmx_can_use_vtd_pi(struct kvm *kvm)
 static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct pi_desc old, new;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
-       list_add_tail(&vcpu->blocked_vcpu_list,
-                     &per_cpu(blocked_vcpu_on_cpu, vcpu->cpu));
-       raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
+       raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+       list_add_tail(&vmx->pi_wakeup_list,
+                     &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
+       raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
 
        WARN(pi_desc->sn, "PI descriptor SN field set before blocking");
 
@@ -199,24 +201,23 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
  */
 void pi_wakeup_handler(void)
 {
-       struct kvm_vcpu *vcpu;
        int cpu = smp_processor_id();
+       struct vcpu_vmx *vmx;
 
-       raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-       list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
-                       blocked_vcpu_list) {
-               struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+       raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
+       list_for_each_entry(vmx, &per_cpu(wakeup_vcpus_on_cpu, cpu),
+                           pi_wakeup_list) {
 
-               if (pi_test_on(pi_desc))
-                       kvm_vcpu_kick(vcpu);
+               if (pi_test_on(&vmx->pi_desc))
+                       kvm_vcpu_kick(&vmx->vcpu);
        }
-       raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+       raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
 }
 
 void __init pi_init_cpu(int cpu)
 {
-       INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
-       raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+       INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
+       raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
 }
 
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
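Taken together, the two renamed DEFINE_PER_CPU variables and pi_init_cpu()
above amount to a per-CPU table of (list, lock) pairs, initialized once per
CPU. A rough userspace analog, with NR_CPUS, the explicit arrays, and the
pthread spinlock standing in for the kernel's per-CPU machinery:

/*
 * Rough userspace analog of the per-CPU state in posted_intr.c: the
 * kernel's DEFINE_PER_CPU variables become explicit arrays, and
 * pi_init_cpu() becomes a loop at startup.  All names illustrative.
 */
#include <pthread.h>

#define NR_CPUS 8                        /* illustrative fixed CPU count */

struct list_head { struct list_head *next, *prev; };

static struct list_head wakeup_vcpus_on_cpu[NR_CPUS];
static pthread_spinlock_t wakeup_vcpus_on_cpu_lock[NR_CPUS];

static void pi_init_cpu(int cpu)
{
	struct list_head *head = &wakeup_vcpus_on_cpu[cpu];

	head->next = head->prev = head;  /* INIT_LIST_HEAD() */
	pthread_spin_init(&wakeup_vcpus_on_cpu_lock[cpu],
			  PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pi_init_cpu(cpu);
	return 0;
}

The real lock is a raw spinlock taken with IRQs disabled, per the comment
in the first hunk; the pthread spinlock here only models the data layout,
not that IRQ discipline.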
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3a9a49a87b9dfb24443cd3cf7afc5876c05be20c..1840898bb184ab1f262e902223db329b7571e484 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6943,6 +6943,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
        BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
        vmx = to_vmx(vcpu);
 
+       INIT_LIST_HEAD(&vmx->pi_wakeup_list);
+
        err = -ENOMEM;
 
        vmx->vpid = allocate_vpid();
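The INIT_LIST_HEAD() added above sits in vmx_create_vcpu() right after the
existing BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0). With kvm_vcpu
as the first member, the generic and VMX views of a vCPU share the same
address, so container_of() degenerates to a pointer cast. A small sketch of
that relationship, using illustrative stand-in types rather than the
kernel's definitions:

/*
 * Sketch of the container relationship the BUILD_BUG_ON in
 * vmx_create_vcpu() enforces.  Types are illustrative stand-ins;
 * the kernel's to_vmx() is equivalent in effect.
 */
#include <assert.h>
#include <stddef.h>

struct kvm_vcpu { int id; };

struct vcpu_vmx {
	struct kvm_vcpu vcpu;    /* must be first: offsetof(...) == 0 */
	struct { int on; } pi_desc;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

int main(void)
{
	struct vcpu_vmx vmx = { .vcpu = { .id = 7 } };

	/* Mirrors the kernel's BUILD_BUG_ON: vcpu must sit at offset 0. */
	_Static_assert(offsetof(struct vcpu_vmx, vcpu) == 0,
		       "kvm_vcpu must be the first member of vcpu_vmx");

	/* Round-trip: generic pointer back to the VMX container. */
	assert(to_vmx(&vmx.vcpu) == &vmx);
	assert((void *)&vmx.vcpu == (void *)&vmx);
	return 0;
}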
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f8fc7441baea93da21762f92b93e45e45849cf81..7f2c82e7f38f86073549795c256f6451700faa58 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -317,6 +317,9 @@ struct vcpu_vmx {
        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;
 
+       /* Used if this vCPU is waiting for PI notification wakeup. */
+       struct list_head pi_wakeup_list;
+
        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5c3c67b6318fa18f16b07ecd2555fdc717d1e370..f079820f52b50c7785174b3c36c6b421b64ff74d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -309,8 +309,6 @@ struct kvm_vcpu {
        u64 requests;
        unsigned long guest_debug;
 
-       struct list_head blocked_vcpu_list;
-
        struct mutex mutex;
        struct kvm_run *run;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c301cd8d583e9c1d777b17ea56b9c6cf667fafad..5a1164483e6c990b1c98b716615c3ef3f323ea53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -427,8 +427,6 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 #endif
        kvm_async_pf_vcpu_init(vcpu);
 
-       INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
-
        kvm_vcpu_set_in_spin_loop(vcpu, false);
        kvm_vcpu_set_dy_eligible(vcpu, false);
        vcpu->preempted = false;