KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
author    Marc Zyngier <marc.zyngier@arm.com>
          Sun, 3 Dec 2017 19:28:56 +0000 (19:28 +0000)
committer Marc Zyngier <marc.zyngier@arm.com>
          Mon, 19 Mar 2018 13:03:33 +0000 (13:03 +0000)
kvm_vgic_global_state is part of the read-only section, and is
usually accessed using PC-relative address generation (adrp + add).

It is thus useless to use kern_hyp_va() on it, and actively problematic
if kern_hyp_va() becomes non-idempotent. On the other hand, there is
no way the compiler can guarantee that such an access is always
PC-relative.

So let's bite the bullet and provide our own accessor.
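
To make the failure mode concrete, here is a minimal sketch (not part of
the patch; example_vgic_is_v2() is an invented name, everything else it
uses already exists in the tree this applies to):

    #include <kvm/arm_vgic.h>   /* kvm_vgic_global_state, struct vgic_global */
    #include <asm/kvm_hyp.h>    /* __hyp_text */
    #include <asm/kvm_mmu.h>    /* kern_hyp_va(), hyp_symbol_addr() */

    static bool __hyp_text example_vgic_is_v2(void)
    {
            /*
             * Old pattern: the compiler may materialise
             * &kvm_vgic_global_state either PC-relatively (adrp + add,
             * already a valid HYP address) or as an absolute kernel VA
             * loaded from a constant pool.  kern_hyp_va() only helps in
             * the second case, and corrupts the first one as soon as it
             * stops being idempotent:
             *
             *      struct vgic_global *g = kern_hyp_va(&kvm_vgic_global_state);
             *
             * New pattern: force a PC-relative computation, which is
             * correct wherever the code is actually mapped.
             */
            struct vgic_global *g = hyp_symbol_addr(kvm_vgic_global_state);

            return g->type == VGIC_V2;
    }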

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/include/asm/kvm_mmu.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c

arch/arm/include/asm/kvm_mmu.h
index de1b919404e43a3c01cc8511b92862d4fa7c3f42..93395b7c2322ce9e16df59eb9631cbb164ca6a98 100644
  */
 #define kern_hyp_va(kva)       (kva)
 
+/* Contrary to arm64, there is no need to generate a PC-relative address */
+#define hyp_symbol_addr(s)                                             \
+       ({                                                              \
+               typeof(s) *addr = &(s);                                 \
+               addr;                                                   \
+       })
+
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
  */
arch/arm64/include/asm/kvm_mmu.h
index 0656c79d968fdbef669b3ba706cb0f0605ed10aa..021d3a8117a8b0e915fda54412f37349eb4f3c39 100644
@@ -110,6 +110,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)         ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)                                             \
+       ({                                                              \
+               typeof(s) *addr;                                        \
+               asm("adrp       %0, %1\n"                               \
+                   "add        %0, %0, :lo12:%1\n"                     \
+                   : "=r" (addr) : "S" (&s));                          \
+               addr;                                                   \
+       })
+
 /*
  * We currently only support a 40bit IPA.
  */
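
As a usage sketch (again not part of the patch; example_count_lrs() is an
invented name, and the includes from the earlier sketch are assumed), the
restriction in the comment above can be read as: symbols go through
hyp_symbol_addr(), anything reached through a run-time pointer still goes
through kern_hyp_va():

    #include <linux/kvm_host.h> /* struct kvm_vcpu, struct kvm */

    static int __hyp_text example_count_lrs(struct kvm_vcpu *vcpu)
    {
            /* A symbol: adrp/add can compute its HYP address directly. */
            struct vgic_global *vgic = hyp_symbol_addr(kvm_vgic_global_state);

            /*
             * A pointer obtained at run time: there is no symbol for the
             * assembler to reference, so the kernel VA it holds must be
             * translated with kern_hyp_va() before being dereferenced.
             */
            struct kvm *kvm = kern_hyp_va(vcpu->kvm);

            (void)kvm;          /* illustrative only */
            return vgic->nr_lr;
    }
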
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 97f357ea9c7284f1dedd0c8b03fd2f6a588a73a3..10eb2e96b3e6b2a6b09877d45d59ca9001d9e746 100644
@@ -60,7 +60,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
                return -1;
 
        rd = kvm_vcpu_dabt_get_rd(vcpu);
-       addr  = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
+       addr  = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
        addr += fault_ipa - vgic->vgic_cpu_base;
 
        if (kvm_vcpu_dabt_iswrite(vcpu)) {
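
Note that the outer kern_hyp_va() is deliberately kept: only the inner
translation of &kvm_vgic_global_state was redundant, while vcpu_base_va is
a kernel VA stored inside the structure and still needs translating before
it can be used at EL2.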