KVM: arm64: pmu: Restore compilation when HW_PERF_EVENTS isn't selected
Author:     Marc Zyngier <maz@kernel.org>
AuthorDate: Mon, 16 May 2022 12:02:24 +0000 (13:02 +0100)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Mon, 16 May 2022 12:42:41 +0000 (13:42 +0100)
Moving kvm_pmu_events into the vcpu (and referring to it) broke the
somewhat unusual case where the kernel has no support for a PMU
at all.

In order to solve this, move things around a bit so that we can
easily avoid referring to the pmu structure outside of PMU-aware
code. As a bonus, pmu.c isn't compiled in when HW_PERF_EVENTS
isn't selected.

Reported-by: kernel test robot <lkp@intel.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/202205161814.KQHpOzsJ-lkp@intel.com
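
The shape of the fix is the kernel's usual configuration-stub pattern:
declarations that only make sense with the feature on live behind the
Kconfig symbol, and no-op stubs keep every caller compiling when it is
off. A minimal standalone sketch of that pattern follows, condensed into
one file so it builds on its own; struct vcpu and pmu_switch_to_guest()
are invented names for illustration, not the real KVM symbols. Build
with and without -DCONFIG_HW_PERF_EVENTS: the caller never changes.

#include <stdbool.h>
#include <stdio.h>

struct vcpu { int pmu_events; };

#ifdef CONFIG_HW_PERF_EVENTS
/* "Real" implementation, standing in for what pmu.c provides. */
static bool pmu_switch_to_guest(struct vcpu *v)
{
	v->pmu_events = 1;
	return true;
}
#else
/* Stub: the whole call compiles away when the PMU code is not built. */
static inline bool pmu_switch_to_guest(struct vcpu *v)
{
	(void)v;
	return false;
}
#endif

int main(void)
{
	struct vcpu v = { 0 };

	/* Identical caller in both configurations. */
	printf("pmu active: %d\n", pmu_switch_to_guest(&v));
	return 0;
}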
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/Makefile
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/switch.c
include/kvm/arm_pmu.h

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index efca5a63bdaf89229b90b93f96eb9111536bb32f..ef774b8955bcaa73330398db3d353c1e772b1373 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -789,10 +789,6 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
-
-struct kvm_pmu_events *kvm_get_pmu_events(void);
-void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
-void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #else
 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
@@ -824,8 +820,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_has_mte(kvm)                                       \
        (system_supports_mte() &&                               \
         test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
-#define kvm_vcpu_has_pmu(vcpu)                                 \
-       (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 261644b1a6bb44f3e56a3e9a12843212160c3497..aa127ae9f675b406c4f55f83de2291ba5d62b920 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o \
-        vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
+        vgic-sys-reg-v3.o fpsimd.o pkvm.o \
         arch_timer.o trng.o vmid.o \
         vgic/vgic.o vgic/vgic-init.o \
         vgic/vgic-irqfd.o vgic/vgic-v2.o \
@@ -22,7 +22,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
         vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_HW_PERF_EVENTS)  += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS)  += pmu-emul.o pmu.o
 
 always-y := hyp_constants.h hyp-constants.s
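
(The line above uses the usual Kbuild idiom: kvm-$(CONFIG_HW_PERF_EVENTS)
expands to kvm-y when the option is y, adding the objects to the kvm
object list, and to the unused variable kvm- otherwise, so pmu-emul.o and
pmu.o simply fall out of the build. Moving pmu.o from the unconditional
kvm-y list onto this line is what stops pmu.c from being built when
HW_PERF_EVENTS is not selected.)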
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index aa1b15e9d5d9b5ca79b46401f7fc0358298dcebb..b3821c430ec9b666292f838efa149f3e1c8ee82f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -751,19 +751,6 @@ static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-/*
- * Updates the vcpu's view of the pmu events for this cpu.
- * Must be called before every vcpu run after disabling interrupts, to ensure
- * that an interrupt cannot fire and update the structure.
- */
-static void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu)
-{
-       if (has_vhe() || !kvm_vcpu_has_pmu(vcpu))
-               return;
-
-       vcpu->arch.pmu.events = *kvm_get_pmu_events();
-}
-
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
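
(The helper deleted here is not lost: it returns as the
kvm_pmu_update_vcpu_events() macro in the arm_pmu.h hunk below, with the
same comment and logic, so its caller on the vcpu-run path needs no
change while arm.c itself no longer touches vcpu->arch.pmu.)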
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index ff7b29fb978780b4c9fc44e49b6aa92b0f813efe..c7cd2036a75e99abeff50d5958f65e0004687760 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -123,6 +123,7 @@ static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 /**
  * Disable host events, enable guest events
  */
+#ifdef CONFIG_HW_PERF_EVENTS
 static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
@@ -149,6 +150,10 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+#else
+#define __pmu_switch_to_guest(v)       ({ false; })
+#define __pmu_switch_to_host(v)        do {} while (0)
+#endif
 
 /**
  * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
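
(The two no-PMU stubs above deliberately take different shapes. A GNU C
statement expression such as ({ false; }) yields a value, so
__pmu_switch_to_guest() can still appear in an if () condition exactly
like the real function; do {} while (0) is the classic way to make a
no-op statement macro safe before a trailing semicolon. A tiny standalone
sketch with invented names; the statement expression is a GNU C
extension, accepted by GCC and Clang as used throughout the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins mirroring the stubs above. */
#define switch_to_guest(v)	({ false; })	 /* expression: usable in if () */
#define switch_to_host(v)	do { } while (0) /* statement: swallows the ';' */

int main(void)
{
	int vcpu = 0;

	if (switch_to_guest(&vcpu))	/* reads exactly like the real call */
		printf("guest events enabled\n");
	else
		printf("no PMU support built in\n");

	switch_to_host(&vcpu);		/* compiles to nothing */
	return 0;
}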
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 35a0903cae321247b0bb263d3dfd349956b11c24..c0b868ce6a8f2cb6f2ee741902fd419e290449f1 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -72,6 +72,25 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+
+#define kvm_vcpu_has_pmu(vcpu)                                 \
+       (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called before every vcpu run after disabling interrupts, to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu)                               \
+       do {                                                            \
+               if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))               \
+                       vcpu->arch.pmu.events = *kvm_get_pmu_events();  \
+       } while (0)
+
 #else
 struct kvm_pmu {
 };
@@ -133,6 +152,11 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return 0;
 }
 
+#define kvm_vcpu_has_pmu(vcpu)         ({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif
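
(One last detail in the arm_pmu.h hunk: the real
kvm_pmu_update_vcpu_events() is a macro rather than a static inline,
presumably because it dereferences vcpu->arch.pmu.events and arm_pmu.h is
included before struct kvm_vcpu is complete; a macro defers that
requirement to each expansion site, while the no-op stubs, which never
look inside their argument, can remain ordinary inlines. A standalone
sketch of the trade-off, with invented names:

#include <stdio.h>

struct widget;				/* incomplete, like a forward-declared vcpu */

/* Fine as an inline: only a pointer is taken, nothing dereferenced. */
static inline void widget_stub(struct widget *w) { (void)w; }

/* Must be a macro here: (w)->count only compiles where the type is complete. */
#define widget_bump(w)	do { (w)->count++; } while (0)

struct widget { int count; };		/* completed later, as kvm_host.h would */

int main(void)
{
	struct widget w = { 0 };

	widget_stub(&w);
	widget_bump(&w);		/* expands here, where struct widget is known */
	printf("count = %d\n", w.count);
	return 0;
}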