git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: x86: Move kvm_ops_static_call_update() to x86.c
author Like Xu <likexu@tencent.com>
Tue, 29 Mar 2022 23:50:51 +0000 (23:50 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 13 Apr 2022 17:37:44 +0000 (13:37 -0400)
The kvm_ops_static_call_update() is defined in kvm_host.h. That's
completely unnecessary, it should have exactly one caller,
kvm_arch_hardware_setup().  Move the helper to x86.c and have it do the
actual memcpy() of the ops in addition to the static call updates.  This
will also allow for cleanly giving kvm_pmu_ops static_call treatment.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
[sean: Move memcpy() into the helper and rename accordingly]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220329235054.3534728-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index e9c9a23d6623dc2fa65b7a5361b5ff7574bd7fd0..25d1aac74c550a62bde095e7d8449f669cc7e5e3 100644 (file)
@@ -1563,20 +1563,6 @@ extern struct kvm_x86_ops kvm_x86_ops;
 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
 #include <asm/kvm-x86-ops.h>
 
-static inline void kvm_ops_static_call_update(void)
-{
-#define __KVM_X86_OP(func) \
-       static_call_update(kvm_x86_##func, kvm_x86_ops.func);
-#define KVM_X86_OP(func) \
-       WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
-#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
-#define KVM_X86_OP_OPTIONAL_RET0(func) \
-       static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
-                                          (void *)__static_call_return0);
-#include <asm/kvm-x86-ops.h>
-#undef __KVM_X86_OP
-}
-
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
index 10ad1029f69a41fb3d9e935abe970b38d7e8f83f..d22fc56d8bb88d57c6ee65a57827aacba3320e64 100644 (file)
@@ -11618,6 +11618,22 @@ void kvm_arch_hardware_disable(void)
        drop_user_return_notifiers();
 }
 
+static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
+{
+       memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
+
+#define __KVM_X86_OP(func) \
+       static_call_update(kvm_x86_##func, kvm_x86_ops.func);
+#define KVM_X86_OP(func) \
+       WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
+#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
+#define KVM_X86_OP_OPTIONAL_RET0(func) \
+       static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
+                                          (void *)__static_call_return0);
+#include <asm/kvm-x86-ops.h>
+#undef __KVM_X86_OP
+}
+
 int kvm_arch_hardware_setup(void *opaque)
 {
        struct kvm_x86_init_ops *ops = opaque;
@@ -11632,8 +11648,7 @@ int kvm_arch_hardware_setup(void *opaque)
        if (r != 0)
                return r;
 
-       memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
-       kvm_ops_static_call_update();
+       kvm_ops_update(ops);
 
        kvm_register_perf_callbacks(ops->handle_intel_pt_intr);