Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 8 May 2017 19:37:56 +0000 (12:37 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 8 May 2017 19:37:56 +0000 (12:37 -0700)
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - HYP mode stub supports kexec/kdump on 32-bit
   - improved PMU support
   - virtual interrupt controller performance improvements
   - support for userspace virtual interrupt controller (slower, but
     necessary for KVM on the weird Broadcom SoCs used by the Raspberry
     Pi 3)

  MIPS:
   - basic support for hardware virtualization (ImgTec P5600/P6600/I6400
     and Cavium Octeon III)

  PPC:
   - in-kernel acceleration for VFIO

  s390:
   - support for guests without storage keys
   - adapter interruption suppression

  x86:
   - usual range of nVMX improvements, notably nested EPT support for
     accessed and dirty bits
   - emulation of CPL3 CPUID faulting

  generic:
   - first part of VCPU thread request API
   - kvm_stat improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (227 commits)
  kvm: nVMX: Don't validate disabled secondary controls
  KVM: put back #ifndef CONFIG_S390 around kvm_vcpu_kick
  Revert "KVM: Support vCPU-based gfn->hva cache"
  tools/kvm: fix top level makefile
  KVM: x86: don't hold kvm->lock in KVM_SET_GSI_ROUTING
  KVM: Documentation: remove VM mmap documentation
  kvm: nVMX: Remove superfluous VMX instruction fault checks
  KVM: x86: fix emulation of RSM and IRET instructions
  KVM: mark requests that need synchronization
  KVM: return if kvm_vcpu_wake_up() did wake up the VCPU
  KVM: add explicit barrier to kvm_vcpu_kick
  KVM: perform a wake_up in kvm_make_all_cpus_request
  KVM: mark requests that do not need a wakeup
  KVM: remove #ifndef CONFIG_S390 around kvm_vcpu_wake_up
  KVM: x86: always use kvm_make_request instead of set_bit
  KVM: add kvm_{test,clear}_request to replace {test,clear}_bit
  s390: kvm: Cpu model support for msa6, msa7 and msa8
  KVM: x86: remove irq disablement around KVM_SET_CLOCK/KVM_GET_CLOCK
  kvm: better MWAIT emulation for guests
  KVM: x86: virtualize cpuid faulting
  ...
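
Several commits in this batch rework the generic vCPU request API
(kvm_{test,clear}_request, explicit kick barriers, wakeup marking). A
minimal sketch of the resulting request/kick pattern, using only helpers
present in this tree; the two wrapper functions are hypothetical names
for illustration:

	#include <linux/kvm_host.h>

	/* Requester side: set the request bit, then kick the target
	 * vCPU. kvm_vcpu_kick() provides the barrier that pairs with
	 * the request check in the vCPU run loop. */
	static void example_make_request(struct kvm_vcpu *vcpu)
	{
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/* Run-loop side: kvm_check_request() tests and clears the
	 * request bit in one step. */
	static void example_handle_requests(struct kvm_vcpu *vcpu)
	{
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			/* ... perform the requested work ... */
		}
	}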

21 files changed:
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mm/mmu.c
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/sys_regs.c
arch/mips/Kconfig
arch/mips/kernel/cpu-probe.c
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_hv.c
arch/s390/kvm/gaccess.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
include/kvm/arm_vgic.h
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h

index 314eb6abe1ff9879272fdae542c2e0043f3759eb,8966e59806aaaa154ca5ffbc160f5fac1b526009..8a31906bdc9b94ca153f7ea2bc811666e6b9524b
@@@ -1121,21 -1138,10 +1138,13 @@@ static void cpu_hyp_reinit(void
                 */
                __cpu_init_stage2();
        } else {
-               if (__hyp_get_vectors() == hyp_default_vectors)
-                       cpu_init_hyp_mode(NULL);
+               cpu_init_hyp_mode(NULL);
        }
 +
 +      if (vgic_present)
 +              kvm_vgic_init_cpu_hardware();
  }
  
- static void cpu_hyp_reset(void)
- {
-       if (!is_kernel_in_hyp_mode())
-               __cpu_reset_hyp_mode(hyp_default_vectors,
-                                    kvm_get_idmap_start());
- }
  static void _kvm_arch_hardware_enable(void *discard)
  {
        if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
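
For readability, the net effect of the hunk above on cpu_hyp_reinit()
is roughly the following (a simplified sketch, not a verbatim copy of
the merged file):

	static void cpu_hyp_reinit(void)
	{
		if (is_kernel_in_hyp_mode()) {
			/* VHE: only stage-2 needs (re)initialising. */
			__cpu_init_stage2();
		} else {
			/* The HYP stub rework for kexec/kdump makes it
			 * safe to reinstall the vectors unconditionally. */
			cpu_init_hyp_mode(NULL);
		}

		if (vgic_present)
			kvm_vgic_init_cpu_hardware();
	}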
Simple merge
index 347cca9657838bb9f937cbeb8ad9d6bcaae6de6f,e98a2b5c4e8506f3b79a4a84560ee29875e64bde..31af3cb59a60c56fb7b428486c7d356a2ddd22b4
@@@ -1639,10 -1637,7 +1641,13 @@@ void __init paging_init(const struct ma
  
        empty_zero_page = virt_to_page(zero_page);
        __flush_dcache_page(NULL, empty_zero_page);
+       /* Compute the virt/idmap offset, mostly for the sake of KVM */
+       kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
  }
 +
 +void __init early_mm_init(const struct machine_desc *mdesc)
 +{
 +      build_mem_type_table();
 +      early_paging_init(mdesc);
 +}
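
Note on the hunk above: kimage_voffset is the delta between a kernel
virtual address and its identity-mapped (physical-alias) address, so a
hypothetical translation helper built on it would look like this (not
part of this merge; for illustration only):

	/* Hypothetical helper: map a kernel VA to its idmap alias
	 * using the offset computed in paging_init() above. */
	static inline unsigned long example_virt_to_idmap(unsigned long va)
	{
		return va - kimage_voffset;
	}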
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index d5c5c911821ae85006488913cf72625630ece940,4bafb0a0c8b5b778cbdff2d28ac16a2dcac7da88..aeb3feb9de534bd5e9d8fc2da9770a594f6adaa2
@@@ -273,9 -273,13 +273,13 @@@ static void kvm_s390_cpu_feat_init(void
                              kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
 -              __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
 +              __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.ppno);
  
+       if (test_facility(146)) /* MSA8 */
+               __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kma);
        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
        /*
Simple merge
index 1a471e5f963f8a31f45b6d971c2b262848876067,e7d929103f4af6297f47d093017c3ead20c44c36..c5fd459c404367405f36d728951982322bc5ada6
@@@ -3462,13 -3444,11 +3442,9 @@@ static int hardware_enable(void
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
        }
-       cr4_set_bits(X86_CR4_VMXE);
-       if (vmm_exclusive) {
-               kvm_cpu_vmxon(phys_addr);
-               ept_sync_global();
-       }
+       kvm_cpu_vmxon(phys_addr);
+       ept_sync_global();
  
 -      native_store_gdt(this_cpu_ptr(&host_gdt));
 -
        return 0;
  }
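
With vmm_exclusive gone, hardware_enable() now enters VMX operation
unconditionally. For context, kvm_cpu_vmxon() at this point in the tree
does roughly the following (simplified sketch, not the literal vmx.c
implementation):

	/* Simplified sketch of kvm_cpu_vmxon(): CR4.VMXE must be set
	 * before VMXON and stays set while the CPU is in VMX operation. */
	static void example_cpu_vmxon(u64 vmxon_region_pa)
	{
		cr4_set_bits(X86_CR4_VMXE);
		asm volatile ("vmxon %0" : : "m"(vmxon_region_pa));
	}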
  
@@@ -10266,21 -10163,11 +10162,23 @@@ static int prepare_vmcs02(struct kvm_vc
  
        }
  
 +      if (enable_pml) {
 +              /*
 +               * Conceptually we want to copy the PML address and index from
 +               * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
 +               * since we always flush the log on each vmexit, this happens
 +               * to be equivalent to simply resetting the fields in vmcs02.
 +               */
 +              ASSERT(vmx->pml_pg);
 +              vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 +              vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 +      }
 +
        if (nested_cpu_has_ept(vmcs12)) {
-               kvm_mmu_unload(vcpu);
-               nested_ept_init_mmu_context(vcpu);
+               if (nested_ept_init_mmu_context(vcpu)) {
+                       *entry_failure_code = ENTRY_FAIL_DEFAULT;
+                       return 1;
+               }
        } else if (nested_cpu_has2(vmcs12,
                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                vmx_flush_tlb_ept_only(vcpu);
Simple merge
Simple merge
index b637d9c7afe3ff51b9e8dfdcd947ee37ef2df029,025b57d5787edabb568ff1415eb1fdc6c6e262d5..a65757aab6d32cef8fff2306e80fb6df0486bbc7
  
  #include "vgic.h"
  
- /*
-  * Call this function to convert a u64 value to an unsigned long * bitmask
-  * in a way that works on both 32-bit and 64-bit LE and BE platforms.
-  *
-  * Warning: Calling this function may modify *val.
-  */
- static unsigned long *u64_to_bitmask(u64 *val)
- {
- #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-       *val = (*val >> 32) | (*val << 32);
- #endif
-       return (unsigned long *)val;
- }
 +static inline void vgic_v2_write_lr(int lr, u32 val)
 +{
 +      void __iomem *base = kvm_vgic_global_state.vctrl_base;
 +
 +      writel_relaxed(val, base + GICH_LR0 + (lr * 4));
 +}
 +
 +void vgic_v2_init_lrs(void)
 +{
 +      int i;
 +
 +      for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
 +              vgic_v2_write_lr(i, 0);
 +}
 +
- void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
  {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
  
@@@ -206,10 -160,10 +175,10 @@@ void vgic_v2_set_vmcr(struct kvm_vcpu *
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
 -      vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
 -              GICH_VMCR_PRIMASK_MASK;
 +      vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
 +               GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
  
-       vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+       cpu_if->vgic_vmcr = vmcr;
  }
  
  void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
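
The PRIMASK change above reflects that the GICH_VMCR field holds only
the upper bits of the 8-bit GICV_PMR priority. A pair of hypothetical
round-trip helpers makes the shift explicit (GICV_PMR_PRIORITY_SHIFT is
3 in the GICv2 headers):

	/* Hypothetical helpers, for illustration only. */
	static inline u32 example_pmr_to_primask(u32 pmr)
	{
		return ((pmr >> GICV_PMR_PRIORITY_SHIFT) <<
			GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
	}

	static inline u32 example_primask_to_pmr(u32 vmcr)
	{
		return ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
	}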
Simple merge
index 6cf557e9f71807f05017d10af8074bd4416c151c,44445dac0835479832e739bc820c086d7ea06ec6..799fd651b2605d92ae3982c2e314b36d6486a8db
@@@ -137,7 -129,8 +136,9 @@@ int vgic_v2_map_resources(struct kvm *k
  int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type);
  
 +void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
  
  static inline void vgic_get_irq_kref(struct vgic_irq *irq)
  {