git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: arm/arm64: Handle VGICv2 save/restore from the main VGIC code
author: Christoffer Dall <christoffer.dall@linaro.org>
Thu, 22 Dec 2016 19:39:10 +0000 (20:39 +0100)
committer: Marc Zyngier <marc.zyngier@arm.com>
Mon, 19 Mar 2018 10:53:20 +0000 (10:53 +0000)
We can program the GICv2 hypervisor control interface logic directly
from the core vgic code and can instead do the save/restore directly
from the flush/sync functions, which can lead to a number of future
optimizations.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/kvm/hyp/switch.c
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kvm/hyp/switch.c
virt/kvm/arm/hyp/vgic-v2-sr.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h

index aac025783ee8a42b554094401035f1b7331f5e04..882b9b9e0077a5363c46f431b168f1e5a78300b6 100644 (file)
@@ -92,16 +92,12 @@ static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_save_state(vcpu);
-       else
-               __vgic_v2_save_state(vcpu);
 }
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_restore_state(vcpu);
-       else
-               __vgic_v2_restore_state(vcpu);
 }
 
 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
index 949f2e77ae58a24917f92dec48b67898e5ed7040..febe417b8b4efa1d4a8920febabdfd1bbcc7fcae 100644 (file)
@@ -120,8 +120,6 @@ typeof(orig) * __hyp_text fname(void)                                       \
        return val;                                                     \
 }
 
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
index 67c66b4e237e1685d3fd22d46600387ff6935bbe..31badf6e91e8a37c9c62a5c293d29a93922c5943 100644 (file)
@@ -196,16 +196,12 @@ static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_save_state(vcpu);
-       else
-               __vgic_v2_save_state(vcpu);
 }
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_restore_state(vcpu);
-       else
-               __vgic_v2_restore_state(vcpu);
 }
 
 static bool __hyp_text __true_value(void)
index a91b0d2b9249803c91723bd4d1de210a94b83376..0bbafdfd4adbebef1e1611b0f56ef6ab51d6085d 100644 (file)
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
-{
-       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
-       u64 elrsr;
-       int i;
-
-       elrsr = readl_relaxed(base + GICH_ELRSR0);
-       if (unlikely(used_lrs > 32))
-               elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;
-
-       for (i = 0; i < used_lrs; i++) {
-               if (elrsr & (1UL << i))
-                       cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-               else
-                       cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
-
-               writel_relaxed(0, base + GICH_LR0 + (i * 4));
-       }
-}
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-       struct vgic_dist *vgic = &kvm->arch.vgic;
-       void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
-
-       if (!base)
-               return;
-
-       if (used_lrs) {
-               cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
-               save_lrs(vcpu, base);
-               writel_relaxed(0, base + GICH_HCR);
-       } else {
-               cpu_if->vgic_apr = 0;
-       }
-}
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-       struct vgic_dist *vgic = &kvm->arch.vgic;
-       void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-       int i;
-       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
-
-       if (!base)
-               return;
-
-       if (used_lrs) {
-               writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
-               writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-               for (i = 0; i < used_lrs; i++) {
-                       writel_relaxed(cpu_if->vgic_lr[i],
-                                      base + GICH_LR0 + (i * 4));
-               }
-       }
-}
-
 #ifdef CONFIG_ARM64
 /*
  * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
index bb305d49cfddeea67dd5cbddb28f9a8d5d715d3d..1e5f3eb6973d78b04629253a30f1e2ad35f915d4 100644 (file)
@@ -421,6 +421,69 @@ out:
        return ret;
 }
 
+static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
+{
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+       u64 elrsr;
+       int i;
+
+       elrsr = readl_relaxed(base + GICH_ELRSR0);
+       if (unlikely(used_lrs > 32))
+               elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;
+
+       for (i = 0; i < used_lrs; i++) {
+               if (elrsr & (1UL << i))
+                       cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
+               else
+                       cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+
+               writel_relaxed(0, base + GICH_LR0 + (i * 4));
+       }
+}
+
+void vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+       void __iomem *base = vgic->vctrl_base;
+       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+
+       if (!base)
+               return;
+
+       if (used_lrs) {
+               cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+               save_lrs(vcpu, base);
+               writel_relaxed(0, base + GICH_HCR);
+       } else {
+               cpu_if->vgic_apr = 0;
+       }
+}
+
+void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+       void __iomem *base = vgic->vctrl_base;
+       u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+       int i;
+
+       if (!base)
+               return;
+
+       if (used_lrs) {
+               writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+               writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+               for (i = 0; i < used_lrs; i++) {
+                       writel_relaxed(cpu_if->vgic_lr[i],
+                                      base + GICH_LR0 + (i * 4));
+               }
+       }
+}
+
 void vgic_v2_load(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
index c7c5ef190afa0c3984d5b9051f59ca72c696eb20..12e2a28f437ed1d8768f6735820fd72767b22261 100644 (file)
@@ -749,11 +749,19 @@ next:
                vgic_clear_lr(vcpu, count);
 }
 
+static inline void vgic_save_state(struct kvm_vcpu *vcpu)
+{
+       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+               vgic_v2_save_state(vcpu);
+}
+
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+       vgic_save_state(vcpu);
+
        WARN_ON(vgic_v4_sync_hwstate(vcpu));
 
        /* An empty ap_list_head implies used_lrs == 0 */
@@ -765,6 +773,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
        vgic_prune_ap_list(vcpu);
 }
 
+static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+       if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+               vgic_v2_restore_state(vcpu);
+}
+
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -780,13 +794,16 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
         * this.
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
-               return;
+               goto out;
 
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+out:
+       vgic_restore_state(vcpu);
 }
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu)
index 12c37b89f7a38212c5eec4a115b9e2bb20d28fdc..89b9547fba27f3963679f5be243d9f7cb2e41fe7 100644 (file)
@@ -176,6 +176,9 @@ void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
 
+void vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
 static inline void vgic_get_irq_kref(struct vgic_irq *irq)
 {
        if (irq->intid < VGIC_MIN_LPI)