KVM: x86: Factor out x86 instruction emulation with decoding
author    Wei Huang <wei.huang2@amd.com>
          Tue, 26 Jan 2021 08:18:28 +0000 (03:18 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 4 Feb 2021 10:27:27 +0000 (05:27 -0500)
Move the instruction decode part out of x86_emulate_instruction() so it
can be used in other places. Also move kvm_clear_exception_queue() inside
the if-statement, as it doesn't apply when KVM is coming back from
userspace.
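
For illustration only (not part of this change), a vendor intercept handler
could use the new helper to decode first, inspect the result, and then finish
emulation with EMULTYPE_NO_DECODE so the decode step is not repeated. The
handler name and the opcode filter below are placeholder assumptions; the
sketch assumes it runs from KVM-internal context where kvm_vcpu and the
emulation context are available:

        /* Hypothetical caller: decode, inspect, then emulate without re-decoding. */
        static int example_handle_intercept(struct kvm_vcpu *vcpu)
        {
                struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

                /* Decode the current guest instruction without emulating it yet. */
                if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
                        return 0;       /* fall back to the normal exception path */

                /* Placeholder filter: only act on a two-byte 0x0F 0x01 opcode. */
                if (ctxt->opcode_len != 2 || ctxt->b != 0x01)
                        return 0;

                /* Complete emulation, skipping the decode that was already done. */
                return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_DECODE, NULL, 0);
        }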

Co-developed-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
Message-Id: <20210126081831.570253-2-wei.huang2@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fae4ccbd767b2c21fc116fe07751b4aafdaff930..ae8efb0442cfd30929f5db569c2d991a17a4bfd4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7317,6 +7317,42 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
        return false;
 }
 
+/*
+ * Decode the instruction to be emulated. Return EMULATION_OK on success.
+ */
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+                                   void *insn, int insn_len)
+{
+       int r = EMULATION_OK;
+       struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+
+       init_emulate_ctxt(vcpu);
+
+       /*
+        * We will reenter on the same instruction since we do not set
+        * complete_userspace_io. This does not handle watchpoints yet,
+        * those would be handled in the emulate_ops.
+        */
+       if (!(emulation_type & EMULTYPE_SKIP) &&
+           kvm_vcpu_check_breakpoint(vcpu, &r))
+               return r;
+
+       ctxt->interruptibility = 0;
+       ctxt->have_exception = false;
+       ctxt->exception.vector = -1;
+       ctxt->perm_ok = false;
+
+       ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
+
+       r = x86_decode_insn(ctxt, insn, insn_len);
+
+       trace_kvm_emulate_insn_start(vcpu);
+       ++vcpu->stat.insn_emulation;
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
+
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len)
 {
@@ -7336,32 +7372,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
         */
        write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
        vcpu->arch.write_fault_to_shadow_pgtable = false;
-       kvm_clear_exception_queue(vcpu);
 
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
-               init_emulate_ctxt(vcpu);
-
-               /*
-                * We will reenter on the same instruction since
-                * we do not set complete_userspace_io.  This does not
-                * handle watchpoints yet, those would be handled in
-                * the emulate_ops.
-                */
-               if (!(emulation_type & EMULTYPE_SKIP) &&
-                   kvm_vcpu_check_breakpoint(vcpu, &r))
-                       return r;
-
-               ctxt->interruptibility = 0;
-               ctxt->have_exception = false;
-               ctxt->exception.vector = -1;
-               ctxt->perm_ok = false;
-
-               ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
-               r = x86_decode_insn(ctxt, insn, insn_len);
+               kvm_clear_exception_queue(vcpu);
 
-               trace_kvm_emulate_insn_start(vcpu);
-               ++vcpu->stat.insn_emulation;
+               r = x86_decode_emulated_instruction(vcpu, emulation_type,
+                                                   insn, insn_len);
                if (r != EMULATION_OK)  {
                        if ((emulation_type & EMULTYPE_TRAP_UD) ||
                            (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 5214b3be30ad2afc3b86438a314c9999b654f5ed..f88045a8af53332abcded2c38d708cd6d597f437 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -273,6 +273,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
 bool kvm_vector_hashing_enabled(void);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+                                   void *insn, int insn_len);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);