git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86: Move "apicv_active" into "struct kvm_lapic"
author: Sean Christopherson <seanjc@google.com>
Tue, 14 Jun 2022 23:05:47 +0000 (23:05 +0000)
committer: Paolo Bonzini <pbonzini@redhat.com>
Mon, 20 Jun 2022 10:21:24 +0000 (06:21 -0400)
Move the per-vCPU apicv_active flag into KVM's local APIC instance.
APICv is fully dependent on an in-kernel local APIC, but that's not at
all clear when reading the current code due to the flag being stored in
the generic kvm_vcpu_arch struct.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220614230548.3852141-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 16acc54d49a73d0cc1985de51bc231724df5d845..1038ccb7056a39685cc190e5359d89c7acc7bb2b 100644 (file)
@@ -663,7 +663,6 @@ struct kvm_vcpu_arch {
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
-       bool apicv_active;
        bool load_eoi_exitmap_pending;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
index cc0da5671eb97f8082e3d645c4b8da01b95b238a..43c42a58029590fdaac1995c31c124c54f0c04ac 100644 (file)
@@ -519,14 +519,11 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 
 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 {
-       struct kvm_vcpu *vcpu;
-
-       vcpu = apic->vcpu;
-
-       if (unlikely(vcpu->arch.apicv_active)) {
+       if (unlikely(apic->apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-               static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+               static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
+                                                           apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -543,19 +540,15 @@ EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
 
 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 {
-       struct kvm_vcpu *vcpu;
-
        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;
 
-       vcpu = apic->vcpu;
-
        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
-       if (unlikely(vcpu->arch.apicv_active))
+       if (unlikely(apic->apicv_active))
                static_call_cond(kvm_x86_hwapic_isr_update)(vec);
        else {
                ++apic->isr_count;
@@ -590,12 +583,9 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 
 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 {
-       struct kvm_vcpu *vcpu;
        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;
 
-       vcpu = apic->vcpu;
-
        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
@@ -603,7 +593,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
-       if (unlikely(vcpu->arch.apicv_active))
+       if (unlikely(apic->apicv_active))
                static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
@@ -1584,7 +1574,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;
 
-               if (vcpu->arch.apicv_active)
+               if (apic->apicv_active)
                        bitmap = apic->regs + APIC_IRR;
 
                if (apic_test_vector(vec, bitmap))
@@ -1701,7 +1691,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
        if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
 
-       if (!from_timer_fn && vcpu->arch.apicv_active) {
+       if (!from_timer_fn && apic->apicv_active) {
                WARN_ON(kvm_get_running_vcpu() != vcpu);
                kvm_apic_inject_pending_timer_irqs(apic);
                return;
@@ -2379,7 +2369,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (vcpu->arch.apicv_active) {
+       if (apic->apicv_active) {
                /* irr_pending is always true when apicv is activated. */
                apic->irr_pending = true;
                apic->isr_count = 1;
@@ -2454,7 +2444,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        vcpu->arch.pv_eoi.msr_val = 0;
        apic_update_ppr(apic);
-       if (vcpu->arch.apicv_active) {
+       if (apic->apicv_active) {
                static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
                static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
                static_call_cond(kvm_x86_hwapic_isr_update)(-1);
@@ -2734,7 +2724,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
        kvm_apic_update_apicv(vcpu);
        apic->highest_isr_cache = -1;
-       if (vcpu->arch.apicv_active) {
+       if (apic->apicv_active) {
                static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
                static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
                static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
index 65bb2a8cf145954967769ce93a0b08924bc9448e..e09ad97f3250b5c913749c809f12177c28c23224 100644 (file)
@@ -48,6 +48,7 @@ struct kvm_lapic {
        struct kvm_timer lapic_timer;
        u32 divide_count;
        struct kvm_vcpu *vcpu;
+       bool apicv_active;
        bool sw_enabled;
        bool irr_pending;
        bool lvt0_in_nmi_mode;
@@ -204,7 +205,7 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
 
 static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.apic && vcpu->arch.apicv_active;
+       return vcpu->arch.apic && vcpu->arch.apic->apicv_active;
 }
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
index 2ba0c62df8fb6f6cda1d77581422f657ca2db4c9..136298cfb3fb57d62d912b46d82569153243df58 100644 (file)
@@ -3465,12 +3465,13 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vector)
 {
        /*
-        * vcpu->arch.apicv_active must be read after vcpu->mode.
+        * apic->apicv_active must be read after vcpu->mode.
         * Pairs with smp_store_release in vcpu_enter_guest.
         */
        bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
 
-       if (!READ_ONCE(vcpu->arch.apicv_active)) {
+       /* Note, this is called iff the local APIC is in-kernel. */
+       if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
                /* Process the interrupt via inject_pending_event */
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
index 380c8e7c1fb8606e8026a6a396bdb662d9d36086..2609bcc8c130c907c10e33f2a351ee3054a50bd6 100644 (file)
@@ -4099,7 +4099,8 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
        if (!r)
                return 0;
 
-       if (!vcpu->arch.apicv_active)
+       /* Note, this is called iff the local APIC is in-kernel. */
+       if (!vcpu->arch.apic->apicv_active)
                return -1;
 
        if (pi_test_and_set_pir(vector, &vmx->pi_desc))
index db35d40bf996f62f3b3f2c4c959996b798b8d33a..00e23dc518e091ac7c5e24a4bd3c93b24b9bbb2f 100644 (file)
@@ -9460,7 +9460,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
        if (!lapic_in_kernel(vcpu))
                return;
 
-       if (vcpu->arch.apicv_active)
+       if (vcpu->arch.apic->apicv_active)
                return;
 
        if (!vcpu->arch.apic->vapic_addr)
@@ -9913,6 +9913,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 {
+       struct kvm_lapic *apic = vcpu->arch.apic;
        bool activate;
 
        if (!lapic_in_kernel(vcpu))
@@ -9923,10 +9924,10 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 
        activate = kvm_vcpu_apicv_activated(vcpu);
 
-       if (vcpu->arch.apicv_active == activate)
+       if (apic->apicv_active == activate)
                goto out;
 
-       vcpu->arch.apicv_active = activate;
+       apic->apicv_active = activate;
        kvm_apic_update_apicv(vcpu);
        static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
 
@@ -9936,7 +9937,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
         * still active when the interrupt got accepted. Make sure
         * inject_pending_event() is called to check for that.
         */
-       if (!vcpu->arch.apicv_active)
+       if (!apic->apicv_active)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 out:
@@ -11379,7 +11380,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                 * will ensure the vCPU gets the correct state before VM-Entry.
                 */
                if (enable_apicv) {
-                       vcpu->arch.apicv_active = true;
+                       vcpu->arch.apic->apicv_active = true;
                        kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
                }
        } else