git.baikalelectronics.ru Git - kernel.git/commitdiff
Merge branch 'kvm-older-features' into HEAD
author Paolo Bonzini <pbonzini@redhat.com>
Fri, 8 Apr 2022 16:43:40 +0000 (12:43 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 13 Apr 2022 17:37:17 +0000 (13:37 -0400)
Merge branch for features that did not make it into 5.18:

* New ioctls to get/set TSC frequency for a whole VM (a hedged userspace
  sketch follows this list)

* Allow userspace to opt out of hypercall patching
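
A minimal userspace sketch of the first item above, assuming the feature is
advertised as KVM_CAP_VM_TSC_CONTROL and that the existing KVM_GET_TSC_KHZ /
KVM_SET_TSC_KHZ ioctls are now also accepted on a VM file descriptor; error
handling is omitted and the 2.5 GHz value is arbitrary:

    /* Hedged sketch, not taken from this merge. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

        /* KVM_CAP_VM_TSC_CONTROL is assumed from the feature description above. */
        if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) > 0) {
            /* The frequency applies to every vCPU created afterwards. */
            ioctl(vm, KVM_SET_TSC_KHZ, 2500000);
            printf("vm tsc: %d kHz\n", ioctl(vm, KVM_GET_TSC_KHZ, 0));
        }
        return 0;
    }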

Nested virtualization improvements for AMD:

* Support for "nested nested" optimizations (nested vVMLOAD/VMSAVE,
  nested vGIF)

* Allow AVIC to co-exist with a nested guest running

* Fixes for LBR virtualization when a nested guest is running,
  and nested LBR virtualization support

* PAUSE filtering for nested hypervisors

Guest support:

* Decoupling of vcpu_is_preempted from PV spinlocks

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Documentation/virt/kvm/api.rst
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/x86.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile

Simple merge
Simple merge
index a22deb58f86d2e6d2f5709af95671be9c63c39c6,774d924aeda8d386a3a62ac41f17857610755b54..d0bb2b3fb305f92e1254152764dc5c7e785cea1c
@@@ -752,6 -752,41 +752,42 @@@ static void kvm_crash_shutdown(struct p
  }
  #endif
  
 -"ret;"
+ #if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
+ bool __kvm_vcpu_is_preempted(long cpu);
+ __visible bool __kvm_vcpu_is_preempted(long cpu)
+ {
+       struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+       return !!(src->preempted & KVM_VCPU_PREEMPTED);
+ }
+ PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+ #else
+ #include <asm/asm-offsets.h>
+ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
+ /*
+  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
+  * restoring to/from the stack.
+  */
+ asm(
+ ".pushsection .text;"
+ ".global __raw_callee_save___kvm_vcpu_is_preempted;"
+ ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+ "__raw_callee_save___kvm_vcpu_is_preempted:"
++ASM_ENDBR
+ "movq __per_cpu_offset(,%rdi,8), %rax;"
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+ "setne        %al;"
++ASM_RET
+ ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
+ ".popsection");
+ #endif
  static void __init kvm_guest_init(void)
  {
        int i;
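
The kvm.c hunk above is part of the vcpu_is_preempted decoupling noted in the
commit message: the __kvm_vcpu_is_preempted implementation (plain C for 32-bit
or !SMP, a hand-written callee-save thunk for x86-64) now lives outside the
PV-spinlock-only section, so the preemption check stays available to other
callers.  As a hedged illustration of such a caller, the fragment below shows
the kind of busy-wait loop that consults it; vcpu_is_preempted() is the real
kernel API that dispatches through pv_ops to the thunk above, while
struct example_lock and its owner_cpu field are hypothetical and exist only
for this sketch:

    /* Illustrative kernel-side fragment, not from this merge: give up
     * spinning once the lock holder's vCPU has been preempted by the host. */
    #include <linux/sched.h>        /* vcpu_is_preempted() */
    #include <linux/processor.h>    /* cpu_relax() */

    struct example_lock {           /* hypothetical bookkeeping, illustration only */
        int owner_cpu;              /* CPU holding the lock, -1 if free */
    };

    static bool spin_until_owner_preempted(struct example_lock *lock)
    {
        int owner;

        while ((owner = READ_ONCE(lock->owner_cpu)) >= 0) {
            if (vcpu_is_preempted(owner))
                return true;        /* owner scheduled out: stop burning cycles */
            cpu_relax();
        }
        return false;               /* lock was released normally */
    }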
Simple merge
Simple merge
index 547ba00ef64fc3d12f2e3457221b2b96685688e0,7a066cf926926bd567c47c17cf4ceada5a213785..10ad1029f69a41fb3d9e935abe970b38d7e8f83f
@@@ -3106,14 -3101,15 +3101,14 @@@ static int kvm_guest_time_update(struc
  
        vcpu->hv_clock.flags = pvclock_flags;
  
-       if (vcpu->pv_time_enabled)
-               kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
-       if (vcpu->xen.vcpu_info_set)
-               kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
-                                      offsetof(struct compat_vcpu_info, time));
-       if (vcpu->xen.vcpu_time_info_set)
-               kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
+       if (vcpu->pv_time.active)
+               kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
+       if (vcpu->xen.vcpu_info_cache.active)
+               kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
+                                       offsetof(struct compat_vcpu_info, time));
+       if (vcpu->xen.vcpu_time_info_cache.active)
+               kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
 -      if (!v->vcpu_idx)
 -              kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 +      kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
  }
  
Simple merge
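
The x86.c hunk above folds the three per-vCPU pvclock targets (the kvmclock
area plus the two Xen caches) into the renamed kvm_setup_guest_pvclock()
helper, keyed on each cache's .active flag, and calls kvm_hv_setup_tsc_page()
unconditionally rather than only for vCPU 0.  For context, here is a hedged
guest-side sketch of how such a published pvclock area is consumed; the struct
layout follows the pvclock ABI, the retry loop is the standard version-counter
protocol, and the function name is illustrative:

    /* Hedged sketch (guest side, not from this merge): read the time info
     * that the host publishes.  The version field acts as a sequence counter:
     * an odd value, or one that changed between the two reads, means the host
     * was mid-update, so retry. */
    #include <stdint.h>

    struct pvclock_vcpu_time_info {
        uint32_t version;
        uint32_t pad0;
        uint64_t tsc_timestamp;
        uint64_t system_time;
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
        uint8_t  flags;
        uint8_t  pad[2];
    } __attribute__((packed));

    static uint64_t pvclock_read_system_time(volatile struct pvclock_vcpu_time_info *ti)
    {
        uint32_t version;
        uint64_t system_time;

        do {
            version = ti->version;
            __asm__ volatile("" ::: "memory");  /* keep the reads ordered */
            system_time = ti->system_time;
            __asm__ volatile("" ::: "memory");
        } while ((version & 1) || version != ti->version);

        return system_time;  /* guest ns at the moment tsc_timestamp was sampled */
    }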