KVM: x86: do not report a vCPU as preempted outside instruction boundaries
author     Paolo Bonzini <pbonzini@redhat.com>
           Tue, 7 Jun 2022 14:09:03 +0000 (10:09 -0400)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 8 Jun 2022 08:21:07 +0000 (04:21 -0400)
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access.  A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
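
For context, the guest side of the PV TLB flush feature skips the flush IPI
for vCPUs that the host has marked preempted and instead asks the host to
flush on the next guest entry.  A simplified, illustrative sketch of that
logic (modeled on kvm_flush_tlb_multi() in arch/x86/kernel/kvm.c; the name
pv_flush_tlb_multi is used here only for illustration, this is not part of
this patch):

    /* Guest: flush remote TLBs, deferring the flush for preempted vCPUs. */
    static void pv_flush_tlb_multi(const struct cpumask *cpumask,
                                   const struct flush_tlb_info *info)
    {
            struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
            int cpu;

            cpumask_copy(flushmask, cpumask);
            for_each_cpu(cpu, flushmask) {
                    struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
                    u8 state = READ_ONCE(src->preempted);

                    /*
                     * If the host reports the vCPU as preempted, do not send
                     * an IPI; ask the host to flush on the next guest entry
                     * and drop the vCPU from the IPI mask.
                     */
                    if ((state & KVM_VCPU_PREEMPTED) &&
                        try_cmpxchg(&src->preempted, &state,
                                    state | KVM_VCPU_FLUSH_TLB))
                            __cpumask_clear_cpu(cpu, flushmask);
            }

            native_flush_tlb_multi(flushmask, info);
    }

If the preempted vCPU was scheduled out in the middle of a host-side memory
access that had already translated a guest virtual address, skipping the IPI
is unsafe: the access completes with a translation that the guest believes
has been flushed.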

To avoid this, only report a vCPU as preempted if it is certain that the guest
is at an instruction boundary.  A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* to be an instruction boundary except for
external interrupts.
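
The deferred flush requested by the guest is honored by the host when it
records steal time on the next entry into the guest; reporting the vCPU as
preempted only at an instruction boundary guarantees that no partially
translated access can complete before that flush.  Roughly (a simplified
sketch of the existing path in record_steal_time() in arch/x86/kvm/x86.c,
which actually uses an exception-handled xchgb on the guest mapping; not
part of this patch):

    /* Host, on the next entry to the guest, while recording steal time: */
    if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
            kvm_vcpu_flush_tlb_guest(vcpu);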

It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed.  However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses.  Even
though the TLB flush issue only applies to virtual memory addresses,
it's very much preferable to be conservative.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 959d66b9be94d0230445faa642fc41e380bf5552..3a240a64ac68f4274f093b99b54f4a8914f9372d 100644 (file)
@@ -653,6 +653,7 @@ struct kvm_vcpu_arch {
        u64 ia32_misc_enable_msr;
        u64 smbase;
        u64 smi_count;
+       bool at_instruction_boundary;
        bool tpr_access_reporting;
        bool xsaves_enabled;
        bool xfd_no_write_intercept;
@@ -1300,6 +1301,8 @@ struct kvm_vcpu_stat {
        u64 nested_run;
        u64 directed_yield_attempted;
        u64 directed_yield_successful;
+       u64 preemption_reported;
+       u64 preemption_other;
        u64 guest_mode;
 };
 
index 478e6ee81d8838b43a8972608769c98cc9c6560a..921fcb85a9cdb9cecf0fbbcadc1f4dffc383728b 100644 (file)
@@ -4263,6 +4263,8 @@ out:
 
 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
+       if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
+               vcpu->arch.at_instruction_boundary = true;
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
index f5aeade623d6125f1de75cf7e69605dba70191d2..14e01178a753c5b0c8f35a401ff6da4a059b5fb0 100644 (file)
@@ -6547,6 +6547,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
                return;
 
        handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+       vcpu->arch.at_instruction_boundary = true;
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
index a8bb635cb76b24ca44a7cf185bcc3b4cd5b5ec21..25a517206c4d33fa2130904f22a541d9eaa0e680 100644 (file)
@@ -296,6 +296,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, nested_run),
        STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+       STATS_DESC_COUNTER(VCPU, preemption_reported),
+       STATS_DESC_COUNTER(VCPU, preemption_other),
        STATS_DESC_ICOUNTER(VCPU, guest_mode)
 };
 
@@ -4625,6 +4627,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        struct kvm_memslots *slots;
        static const u8 preempted = KVM_VCPU_PREEMPTED;
 
+       /*
+        * The vCPU can be marked preempted if and only if the VM-Exit was on
+        * an instruction boundary and will not trigger guest emulation of any
+        * kind (see vcpu_run).  Vendor specific code controls (conservatively)
+        * when this is true, for example allowing the vCPU to be marked
+        * preempted if and only if the VM-Exit was due to a host interrupt.
+        */
+       if (!vcpu->arch.at_instruction_boundary) {
+               vcpu->stat.preemption_other++;
+               return;
+       }
+
+       vcpu->stat.preemption_reported++;
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
@@ -10424,6 +10439,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->arch.l1tf_flush_l1d = true;
 
        for (;;) {
+               /*
+                * If another guest vCPU requests a PV TLB flush in the middle
+                * of instruction emulation, the rest of the emulation could
+                * use a stale page translation. Assume that any code after
+                * this point can start executing an instruction.
+                */
+               vcpu->arch.at_instruction_boundary = false;
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {