KVM: VMX: Move preemption timer <=> hrtimer dance to common x86
author		Sean Christopherson <seanjc@google.com>
		Wed, 8 Dec 2021 01:52:17 +0000 (01:52 +0000)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Wed, 19 Jan 2022 17:14:39 +0000 (12:14 -0500)
Handle the switch to/from the hypervisor/software timer when a vCPU is
blocking in common x86 instead of in VMX.  Even though VMX is the only
user of a hypervisor timer, the logic and all functions involved are
generic x86 (unless future CPUs do something completely different and
implement a hypervisor timer that runs regardless of mode).
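
For context on what the "dance" involves: the VMX preemption timer counts
down only while the CPU is in guest mode, so before a vCPU blocks, the armed
APIC timer deadline must be re-armed on a host hrtimer (the "software
timer"), and moved back when the vCPU resumes.  A loose sketch of the
software-timer side follows; cancel_hv_timer() and start_sw_timer() are
hypothetical helper names standing in for lapic.c internals, and the real
logic in arch/x86/kvm/lapic.c also handles periodic/oneshot/tscdeadline
modes and pending-interrupt races.

/* Illustrative sketch only, not the actual lapic.c implementation. */
void sketch_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Nothing to do if the deadline isn't on the hardware timer. */
	if (!apic->lapic_timer.hv_timer_in_use)
		return;

	/*
	 * Disarm the VMX preemption timer; it cannot fire while the
	 * vCPU is out of guest mode.
	 */
	cancel_hv_timer(apic);

	/*
	 * Re-arm the remaining deadline on a host hrtimer so that its
	 * expiration can still wake the blocked vCPU.
	 */
	start_sw_timer(apic);
}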

Handling the switch in common x86 will allow for the elimination of the
pre/post_block hooks, and also lets KVM switch back to the hypervisor
timer if and only if it was in use (without additional params).  Add a
comment explaining why the switch cannot be deferred to kvm_sched_out()
or kvm_vcpu_block().
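
To make that ordering concrete, here is a condensed sketch of the blocking
path this patch creates (simplified from the x86.c hunk below, with the
srcu juggling and mp_state handling elided):

/* Simplified sketch, not the verbatim kernel code. */
static int vcpu_block_sketch(struct kvm_vcpu *vcpu)
{
	bool hv_timer = kvm_lapic_hv_timer_in_use(vcpu);

	/*
	 * Hand off to the hrtimer *before* halt-polling: the vCPU never
	 * enters guest mode while polling, so a still-armed preemption
	 * timer could never expire and terminate the poll.  Deferring
	 * the hand-off to kvm_sched_out() or kvm_vcpu_block() would be
	 * too late for the same reason: halt-polling runs first, and it
	 * can only observe an expired timer if the hrtimer is already
	 * armed.
	 */
	if (hv_timer)
		kvm_lapic_switch_to_sw_timer(vcpu);

	kvm_vcpu_halt(vcpu);	/* halt-polls, then blocks */

	/* Restore the hardware timer iff it was in use before blocking. */
	if (hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);
	return 0;
}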

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211208015236.1616697-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1840898bb184ab1f262e902223db329b7571e484..70be166833ac25f06282c0965c0b212347a08522 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7568,16 +7568,12 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 
 static int vmx_pre_block(struct kvm_vcpu *vcpu)
 {
-       if (kvm_lapic_hv_timer_in_use(vcpu))
-               kvm_lapic_switch_to_sw_timer(vcpu);
-
        return 0;
 }
 
 static void vmx_post_block(struct kvm_vcpu *vcpu)
 {
-       if (kvm_x86_ops.set_hv_timer)
-               kvm_lapic_switch_to_hv_timer(vcpu);
+
 }
 
 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
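
Applying the hunk above leaves both VMX hooks as do-nothing stubs, which,
per the changelog, is what clears the way for eliminating the
pre/post_block hooks entirely.  The post-patch bodies reduce to:

static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void vmx_post_block(struct kvm_vcpu *vcpu)
{

}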
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 49ff85e966d7deaec23b2223cd9d1d4558f27a72..943706a09e6ea9a06ef611da517f03a6ec2cd7d1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10146,8 +10146,21 @@ out:
 
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
+       bool hv_timer;
+
        if (!kvm_arch_vcpu_runnable(vcpu) &&
            (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
+               /*
+                * Switch to the software timer before halt-polling/blocking as
+                * the guest's timer may be a break event for the vCPU, and the
+                * hypervisor timer runs only when the CPU is in guest mode.
+                * Switch before halt-polling so that KVM recognizes an expired
+                * timer before blocking.
+                */
+               hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
+               if (hv_timer)
+                       kvm_lapic_switch_to_sw_timer(vcpu);
+
                srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                        kvm_vcpu_halt(vcpu);
@@ -10155,6 +10168,9 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                        kvm_vcpu_block(vcpu);
                vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
+               if (hv_timer)
+                       kvm_lapic_switch_to_hv_timer(vcpu);
+
                if (kvm_x86_ops.post_block)
                        static_call(kvm_x86_post_block)(vcpu);
 
@@ -10349,6 +10365,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        r = -EINTR;
                        goto out;
                }
+               /*
+                * It should be impossible for the hypervisor timer to be in
+                * use before KVM has ever run the vCPU.
+                */
+               WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
                kvm_vcpu_block(vcpu);
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;