KVM: arm64: Minor cleanup of hyp variables used in host
author David Brazdil <dbrazdil@google.com>
Tue, 8 Dec 2020 14:24:50 +0000 (14:24 +0000)
committer Marc Zyngier <maz@kernel.org>
Tue, 22 Dec 2020 10:49:01 +0000 (10:49 +0000)
Small cleanup moving declarations of hyp-exported variables to
kvm_host.h and using macros to avoid having to refer to them with
kvm_nvhe_sym() in host.

No functional change intended.

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201208142452.87237-5-dbrazdil@google.com
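
The aliasing pattern the message refers to looks roughly like this (a minimal sketch based on the kvm_nvhe_sym()/CHOOSE_NVHE_SYM() macros in arch/arm64/include/asm/kvm_asm.h; the exact definitions vary between kernel versions):

/* nVHE hyp objects have all of their symbols prefixed at build time. */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

/*
 * Sketch only: host code resolves the plain name to the prefixed copy,
 * while hyp code itself sees the unprefixed symbol.
 */
#ifndef __KVM_NVHE_HYPERVISOR__
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
#else
#define CHOOSE_NVHE_SYM(sym)	sym
#endif

/*
 * The pattern this patch moves into kvm_host.h: declare the prefixed
 * symbol once, then let host code use the natural name.
 */
extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)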
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
arch/arm64/kvm/va_layout.c

arch/arm64/include/asm/kvm_host.h
index 828d50d40dc2f38c1b0852506af9001f622bd8c2..bce2452b305c0905c92d03a43b32cee43662c1d7 100644 (file)
@@ -260,6 +260,12 @@ struct kvm_host_psci_config {
 extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
 #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
 
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
arch/arm64/kvm/arm.c
index 6a2f4e01b04f3d417333ad33684bc14076e9801d..836ca763b91d7bdedea38a446536dfff47efc156 100644 (file)
@@ -65,8 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -1602,7 +1600,7 @@ static void init_cpu_logical_map(void)
         * allow any other CPUs from the `possible` set to boot.
         */
        for_each_online_cpu(cpu)
-               kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+               hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
 static bool init_psci_relay(void)
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e20fdafe100846d7471ab775adda0b..2997aa156d8e5c2d17ad241e1abcc388c6116276 100644 (file)
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-       if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+       if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
                hyp_panic();
 
-       return __cpu_logical_map[cpu];
+       return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
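
With the array exported under the hyp_cpu_logical_map alias, EL2 code can translate MPIDR values back into logical CPU indices through cpu_logical_map(). A hypothetical helper in that spirit (similar to what the nVHE PSCI relay needs; find_cpu_id() and the UINT_MAX sentinel are illustrative and not part of this patch, and the usual nVHE hyp headers are assumed):

/*
 * Illustrative EL2-side helper: map an MPIDR back to its logical CPU
 * index via the hyp-owned copy of the logical map.
 */
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Skip the search for the reserved invalid-hwid pattern. */
	if (mpidr != INVALID_HWID) {
		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_logical_map(i) == mpidr)
				return i;
		}
	}

	return UINT_MAX;	/* no such CPU */
}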
arch/arm64/kvm/va_layout.c
index 914732b88c69f55cd63af2b8b67d223dea3807a9..70fcd6a12fe1f1f1f7830e0f4370aff3b9ce98c4 100644 (file)
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }
 
 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into a EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-       extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
        u64 kern_va, hyp_va;
 
        /* Compute the offset from the hyp VA and PA of a random symbol. */
        kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
-       CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+       hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }
 
 /*
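
Since init_hyp_physvirt_offset() stores PA minus hyp VA, the EL2 address conversions fall out directly. A sketch of how the offset is consumed (the helper names below are illustrative, not the in-tree nVHE helpers):

/* hyp_physvirt_offset == PA - hyp_VA, so the conversions at EL2 are: */
static inline phys_addr_t hyp_virt_to_phys(void *va)
{
	return (phys_addr_t)va + hyp_physvirt_offset;
}

static inline void *hyp_phys_to_virt(phys_addr_t pa)
{
	return (void *)(pa - hyp_physvirt_offset);
}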