@@ ... @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	u64 entry;
 	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
+	bool is_blocking = kvm_vcpu_is_blocking(vcpu);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ ... @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
-	if (svm->avic_is_running)
+
+	/*
+	 * Don't mark the vCPU as running if it's blocking, i.e. if the vCPU
+	 * is preempted after svm_vcpu_blocking() but before KVM voluntarily
+	 * schedules out the vCPU.
+	 */
+	if (!is_blocking)
 		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 
 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
-	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
-					svm->avic_is_running);
+	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, !is_blocking);
 }
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
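
For reference, the helper the new code keys off of is tiny: kvm_vcpu_is_blocking()
reports whether the vCPU has entered its blocking window. A minimal sketch of the
include/linux/kvm_host.h definition from the same series (quoted from memory, so
verify against your tree):

    static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
    {
            return rcuwait_active(&vcpu->wait);
    }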
@@ ... @@ static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
  */
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu = get_cpu();
 
 	WARN_ON(cpu != vcpu->cpu);
-	svm->avic_is_running = is_run;
 
 	if (kvm_vcpu_apicv_active(vcpu)) {
 		if (is_run)
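
The is_run argument comes from the vCPU block/unblock hooks, which is where the
preemption window described in the new avic_vcpu_load() comment opens: once
svm_vcpu_blocking() has run, the task can be scheduled out and back in before it
actually halts. A sketch of the pre-existing callers, from memory and untouched
by this patch:

    void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
    {
            avic_set_running(vcpu, false);
    }

    void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
    {
            avic_set_running(vcpu, true);
    }

In that window, avic_vcpu_load() now derives the IsRunning decision from the
vCPU's actual blocking state instead of a separately maintained per-vCPU flag.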
@@ ... @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	if (err)
 		goto error_free_vmsa_page;
 
-	/* We initialize this flag to true to make sure that the is_running
-	 * bit would be set the first time the vcpu is loaded.
-	 */
-	if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
-		svm->avic_is_running = true;
-
 	svm->msrpm = svm_vcpu_alloc_msrpm();
 	if (!svm->msrpm) {
 		err = -ENOMEM;
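
With the flag gone, vCPU creation needs no special-casing either: a freshly
created vCPU is not blocking, so the first avic_vcpu_load() already sets
IsRunning, which is what the removed initialization existed to guarantee. A
rough trace of that first-load path (call chain from memory, assuming the usual
create-then-load ordering; not part of the patch):

    /*
     * kvm_arch_vcpu_create()          vCPU exists, not blocking
     *   ...
     * vcpu_load()
     *   -> svm_vcpu_load()
     *     -> avic_vcpu_load()         kvm_vcpu_is_blocking() == false,
     *                                 so IsRunning is set on first load
     */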