git.baikalelectronics.ru Git - kernel.git/commitdiff
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 16 Jan 2022 14:15:14 +0000 (16:15 +0200)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 16 Jan 2022 14:15:14 +0000 (16:15 +0200)
Pull kvm updates from Paolo Bonzini:
 "RISCV:

   - Use common KVM implementation of MMU memory caches

   - SBI v0.2 support for Guest

   - Initial KVM selftests support

   - Fix to avoid spurious virtual interrupts after clearing hideleg CSR

   - Update email address for Anup and Atish

  ARM:

   - Simplification of the 'vcpu first run' by integrating it into KVM's
     'pid change' flow

   - Refactoring of the FP and SVE state tracking, also leading to a
     simpler state and less shared data between EL1 and EL2 in the nVHE
     case

   - Tidy up the header file usage for the nvhe hyp object

   - New HYP unsharing mechanism, finally allowing pages to be unmapped
     from the Stage-1 EL2 page-tables

   - Various pKVM cleanups around refcounting and sharing

   - A couple of vgic fixes for bugs that would trigger once the vcpu
     xarray rework is merged, but not sooner

   - Add minimal support for ARMv8.7's PMU extension

   - Rework kvm_pgtable initialisation ahead of the NV work

   - New selftest for IRQ injection

   - Teach selftests about the lack of default IPA space and page sizes

   - Expand sysreg selftest to deal with Pointer Authentication

   - The usual bunch of cleanups and doc updates

  s390:

   - fix SIGP sense/start/stop inconsistencies

   - cleanups

  x86:

   - Further cleanup of some function prototypes

   - improved gfn_to_pfn_cache with proper invalidation, used by Xen
     emulation

   - add KVM_IRQ_ROUTING_XEN_EVTCHN and event channel delivery

   - completely remove potential TOCTOU (time-of-check/time-of-use)
     races in nested SVM consistency checks

   - update some PMCs on emulated instructions

   - Intel AMX support (joint work between Thomas and Intel)

   - large MMU cleanups

   - module parameter to disable PMU virtualization

   - cleanup register cache

   - first part of halt handling cleanups

   - Hyper-V enlightened MSR bitmap support for nested hypervisors

  Generic:

   - clean up Makefiles

   - introduce CONFIG_HAVE_KVM_DIRTY_RING

   - optimize memslot lookup using a tree (sketched briefly below)

   - optimize vCPU array usage by converting to xarray"
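
The memslot item deserves a short illustration: lookup moves from a linear
scan to a gfn-ordered tree walk. The sketch below is purely illustrative,
with invented names (slot_node, slot_lookup); the merged code builds on the
kernel's rb-tree and interval-tree primitives instead.

/* Illustrative gfn-ordered memslot search; all names are hypothetical. */
struct slot_node {
	u64 base_gfn;
	u64 npages;
	struct slot_node *left, *right;
};

static struct slot_node *slot_lookup(struct slot_node *node, u64 gfn)
{
	while (node) {
		if (gfn < node->base_gfn)
			node = node->left;	/* gfn below this slot */
		else if (gfn >= node->base_gfn + node->npages)
			node = node->right;	/* gfn above this slot */
		else
			return node;		/* gfn inside this slot */
	}
	return NULL;				/* no slot covers gfn */
}

Each lookup is then O(log n) in the number of memslots rather than O(n),
which matters for VMs with many slots.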

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (268 commits)
  x86/fpu: Fix inline prefix warnings
  selftest: kvm: Add amx selftest
  selftest: kvm: Move struct kvm_x86_state to header
  selftest: kvm: Reorder vcpu_load_state steps for AMX
  kvm: x86: Disable interception for IA32_XFD on demand
  x86/fpu: Provide fpu_sync_guest_vmexit_xfd_state()
  kvm: selftests: Add support for KVM_CAP_XSAVE2
  kvm: x86: Add support for getting/setting expanded xstate buffer
  x86/fpu: Add uabi_size to guest_fpu
  kvm: x86: Add CPUID support for Intel AMX
  kvm: x86: Add XCR0 support for Intel AMX
  kvm: x86: Disable RDMSR interception of IA32_XFD_ERR
  kvm: x86: Emulate IA32_XFD_ERR for guest
  kvm: x86: Intercept #NM for saving IA32_XFD_ERR
  x86/fpu: Prepare xfd_err in struct fpu_guest
  kvm: x86: Add emulation for IA32_XFD
  x86/fpu: Provide fpu_update_guest_xfd() for IA32_XFD emulation
  kvm: x86: Enable dynamic xfeatures at KVM_SET_CPUID2
  x86/fpu: Provide fpu_enable_guest_xfd_features() for KVM
  x86/fpu: Add guest support to xfd_enable_feature()
  ...

31 files changed:
.mailmap
MAINTAINERS
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kvm/Kconfig
arch/arm64/kvm/Makefile
arch/arm64/kvm/arm.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/reset.c
arch/mips/include/asm/kvm_host.h
arch/powerpc/kvm/Kconfig
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.h
arch/x86/kernel/process.c
arch/x86/kvm/Kconfig
arch/x86/kvm/debugfs.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx_ops.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --cc .mailmap
Simple merge
diff --cc MAINTAINERS
Simple merge
diff --cc arch/arm64/include/asm/kvm_host.h
Simple merge
diff --cc arch/arm64/include/asm/sysreg.h
Simple merge
diff --cc arch/arm64/kernel/fpsimd.c
Simple merge
diff --cc arch/arm64/kvm/Kconfig
index e9761d84f982e8afb80380ffc53a1a84d8e661ce,f1f8fc069a9705201877473bf549b48ed0504929..8a5fbbf084dfe41a1bdc9597f9028c4c56b340c6
@@@ -39,7 -39,7 +39,8 @@@ menuconfig KVM
        select HAVE_KVM_IRQ_BYPASS
        select HAVE_KVM_VCPU_RUN_PID_CHANGE
        select SCHED_INFO
 +      select GUEST_PERF_EVENTS if PERF_EVENTS
+       select INTERVAL_TREE
        help
          Support hosting virtualized guest machines.
  
diff --cc arch/arm64/kvm/Makefile
index 0bcc378b796154c99f9e1dd31e8ec38d7cd30882,39b11a4f9063eec0c9e9bbefc087e3e5fdc8a550..91861fd8b897cb7f48494c88a5c2327a573e3650
@@@ -10,12 -10,10 +10,10 @@@ include $(srctree)/virt/kvm/Makefile.kvm
  obj-$(CONFIG_KVM) += kvm.o
  obj-$(CONFIG_KVM) += hyp/
  
- kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
-        $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
-        arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 -kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
++kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o \
-        vgic-sys-reg-v3.o fpsimd.o pmu.o \
+        vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
         arch_timer.o trng.o\
         vgic/vgic.o vgic/vgic-init.o \
         vgic/vgic-irqfd.o vgic/vgic-v2.o \
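
The new include of $(srctree)/virt/kvm/Makefile.kvm, together with the
deleted $(KVM)/... objects above, is the "clean up Makefiles" item from the
summary: the common KVM objects move into one shared fragment instead of
being repeated per architecture. A sketch of such a fragment, assuming it
carries exactly the objects dropped from this Makefile (the merged file may
differ in detail):

# Sketch of a shared virt/kvm/Makefile.kvm fragment (assumed contents).
KVM ?= ../../../virt/kvm

kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
	 $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o

Each architecture then only appends its own objects to kvm-y, as the arm64
hunk above now does.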
diff --cc arch/arm64/kvm/arm.c
Simple merge
diff --cc arch/arm64/kvm/handle_exit.c
Simple merge
diff --cc arch/arm64/kvm/pmu-emul.c
Simple merge
diff --cc arch/arm64/kvm/reset.c
index 27386f0d81e456e09d2b2001d4ccd0fdf4829553,25e0041d840b7f06f2d1632d341b7568f596f3d1..ecc40c8cd6f643301c4c3a76f9eb6933ca8350e7
@@@ -103,10 -105,11 +105,11 @@@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu
         * set_sve_vls().  Double-check here just to be sure:
         */
        if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
 -                  vl > SVE_VL_ARCH_MAX))
 +                  vl > VL_ARCH_MAX))
                return -EIO;
  
-       buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
+       reg_sz = vcpu_sve_state_size(vcpu);
+       buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
        if (!buf)
                return -ENOMEM;
  
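
The allocation above now sizes the buffer via vcpu_sve_state_size() rather
than recomputing SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)) at the call site, so
guest SVE state has a single authoritative size. A sketch of the
relationship, using the same macros as the removed line (the real helper
lives in the arm64 headers and may differ in detail):

/* Sketch: buffer size for a guest's SVE registers at max vector length. */
static size_t sve_state_size_for_vl(unsigned int max_vl)
{
	unsigned int vq = sve_vq_from_vl(max_vl);	/* vector quadwords */

	return SVE_SIG_REGS_SIZE(vq);			/* Z/P/FFR register block */
}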
diff --cc arch/mips/include/asm/kvm_host.h
Simple merge
diff --cc arch/powerpc/kvm/Kconfig
Simple merge
diff --cc arch/x86/include/asm/cpufeatures.h
Simple merge
diff --cc arch/x86/include/asm/kvm_host.h
Simple merge
diff --cc arch/x86/kernel/fpu/core.c
Simple merge
diff --cc arch/x86/kernel/fpu/xstate.h
Simple merge
diff --cc arch/x86/kernel/process.c
Simple merge
diff --cc arch/x86/kvm/Kconfig
Simple merge
diff --cc arch/x86/kvm/debugfs.c
Simple merge
diff --cc arch/x86/kvm/emulate.c
Simple merge
diff --cc arch/x86/kvm/mmu/spte.c
Simple merge
diff --cc arch/x86/kvm/pmu.c
index 0c2133eb4cf69fbd3280b3c444fb4722f1da5445,8abdadb7e22ad0b565c17a8cc142da4bdb047adf..261b39cbef6ea52c77473a097839cf1f08438387
@@@ -55,43 -55,41 +55,41 @@@ static void kvm_pmi_trigger_fn(struct irq_work *irq_work
        kvm_pmu_deliver_pmi(vcpu);
  }
  
- static void kvm_perf_overflow(struct perf_event *perf_event,
-                             struct perf_sample_data *data,
-                             struct pt_regs *regs)
+ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
  {
-       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
  
-       if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
-               __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-       }
+       /* Ignore counters that have been reprogrammed already. */
+       if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
+               return;
+       __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+       kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+       if (!pmc->intr)
+               return;
+       /*
+        * Inject PMI. If vcpu was in a guest mode during NMI PMI
+        * can be ejected on a guest mode re-entry. Otherwise we can't
+        * be sure that vcpu wasn't executing hlt instruction at the
+        * time of vmexit and is not going to re-enter guest mode until
+        * woken up. So we should wake it, but this is impossible from
+        * NMI context. Do it from irq work instead.
+        */
 -      if (in_pmi && !kvm_is_in_guest())
++      if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
+               irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
+       else
+               kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
  }
  
- static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-                                  struct perf_sample_data *data,
-                                  struct pt_regs *regs)
+ static void kvm_perf_overflow(struct perf_event *perf_event,
+                             struct perf_sample_data *data,
+                             struct pt_regs *regs)
  {
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-       if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
-               __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
  
-               /*
-                * Inject PMI. If vcpu was in a guest mode during NMI PMI
-                * can be ejected on a guest mode re-entry. Otherwise we can't
-                * be sure that vcpu wasn't executing hlt instruction at the
-                * time of vmexit and is not going to re-enter guest mode until
-                * woken up. So we should wake it, but this is impossible from
-                * NMI context. Do it from irq work instead.
-                */
-               if (!kvm_handling_nmi_from_guest(pmc->vcpu))
-                       irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
-               else
-                       kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
-       }
+       __kvm_perf_overflow(pmc, true);
  }
  
  static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
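
The refactoring above splits __kvm_perf_overflow() out of the perf callback
so the overflow-plus-PMI logic can also run outside a perf NMI, e.g. when
KVM bumps counters while emulating instructions (the "update some PMCs on
emulated instructions" item in the summary). A hedged sketch of such a
software-increment caller, with a hypothetical name and a simplified wrap
check (the merged code masks to the counter width):

/* Illustrative only: a software increment reusing __kvm_perf_overflow(). */
static void sketch_pmc_incr(struct kvm_pmc *pmc)
{
	pmc->counter++;
	if (!pmc->counter)			/* wrapped: counter overflowed */
		__kvm_perf_overflow(pmc, false);	/* false: not in NMI context */
}

Passing in_pmi=false takes the kvm_make_request(KVM_REQ_PMI) branch
directly, since no NMI-context restrictions apply there.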
diff --cc arch/x86/kvm/svm/sev.c
Simple merge
diff --cc arch/x86/kvm/svm/svm.c
index 9079d2fdc12e58628a6ef1f91cbdff81a25757f5,c3d9006478a424c5bfad6a2cc29baa3d3b8cdf83..46bcc706f25740b3f0a1956a4d2124fda095306d
@@@ -3931,9 -3964,10 +3964,10 @@@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu
                vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
                vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
        }
+       vcpu->arch.regs_dirty = 0;
  
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 -              kvm_before_interrupt(vcpu);
 +              kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
  
        kvm_load_host_xsave_state(vcpu);
        stgi();
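
kvm_before_interrupt() now records which kind of event is being forwarded,
which is what lets kvm_handling_nmi_from_guest() (used in the pmu.c hunk
above) tell an NMI apart from an ordinary IRQ. A sketch of the interface
shape implied by these call sites; the enum and field names are assumptions,
not checked against the merged x86.h:

/* Sketch of the implied interface; names here are assumptions. */
enum kvm_intr_type {
	KVM_HANDLING_IRQ = 1,		/* non-zero, so 0 means "none" */
	KVM_HANDLING_NMI,
};

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
					enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}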
diff --cc arch/x86/kvm/vmx/vmx.c
Simple merge
diff --cc arch/x86/kvm/vmx/vmx_ops.h
index 35d9324c2f2a52f30c6495b7e34ab36bf7ad945a,67f745250e50ae329b5cfe6d27504b6cff4ce856..5e7f4122578014a630b3ba99d82e464b6dd7bf7a
@@@ -95,10 -118,16 +120,12 @@@ do_exception
                     "3:\n\t"
  
                     /* VMREAD faulted.  As above, except push '1' for @fault. */
 -                   ".pushsection .fixup, \"ax\"\n\t"
 -                   "4: push $1\n\t"
 -                   "push %2\n\t"
 -                   "jmp 2b\n\t"
 -                   ".popsection\n\t"
 -                   _ASM_EXTABLE(1b, 4b)
 -                   : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
 +                   _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
 +
 +                   : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
        return value;
+ #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
  }
  
  static __always_inline u16 vmcs_read16(unsigned long field)
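
The hunk above drops the hand-rolled .fixup trampoline in favour of
_ASM_EXTABLE_TYPE_REG, which lets the exception-table machinery set the
output register directly when VMREAD faults. The new #endif closes a
CONFIG_CC_HAS_ASM_GOTO_OUTPUT guard: with a compiler that supports output
operands in asm goto, the fault path can be expressed as a plain C label. A
generic sketch of that pattern follows; it is illustrative only (a mov
cannot fault, whereas the real code pairs VMREAD with an extable entry that
redirects to the label):

/* Sketch of "asm goto with outputs" (needs GCC 11+ or recent Clang). */
static inline long read_field(long field)
{
	long value;

	asm goto("mov %[f], %[v]"	/* real code: VMREAD + extable entry */
		 : [v] "=r" (value)
		 : [f] "r" (field)
		 :			/* no clobbers */
		 : fault);
	return value;
fault:
	return 0;			/* faulting read reports 0 */
}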
diff --cc arch/x86/kvm/x86.c
Simple merge
diff --cc arch/x86/kvm/x86.h
Simple merge
diff --cc include/linux/kvm_host.h
Simple merge
diff --cc virt/kvm/kvm_main.c
Simple merge