git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously...
authorVitaly Kuznetsov <vkuznets@redhat.com>
Wed, 10 Jun 2020 17:55:32 +0000 (19:55 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 11 Jun 2020 16:35:19 +0000 (12:35 -0400)
'Page not present' event may or may not get injected depending on
guest's state. If the event wasn't injected, there is no need to
inject the corresponding 'page ready' event as the guest may get
confused. E.g. Linux thinks that the corresponding 'page not present'
event wasn't delivered *yet* and allocates a 'dummy entry' for it.
This entry is never freed.

Note, 'wakeup all' events have no corresponding 'page not present'
event and always get injected.

s390 seems to always be able to inject 'page not present', so the
change is effectively a nop there.

Suggested-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610175532.779793-2-vkuznets@redhat.com>
Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=208081
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/kvm-s390.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/async_pf.c

index 3d554887794ebdad8f61b648b32cecf8ff396c80..cee3cb6455a29885605c54c14a6bce32bff2fae4 100644 (file)
@@ -978,7 +978,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
index 06bde4bad20593e92debf16beb9b82040af7319b..33fea4488ef39abaa475b58bcba199aee1ee94d0 100644 (file)
@@ -3923,11 +3923,13 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
        }
 }
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
 {
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+       return true;
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
index 7030f222125954a6e98622404f35b0fc3de6744e..f8998e97457f46e53d63f51ef4adb308d15e22fc 100644 (file)
@@ -1670,7 +1670,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
                                       unsigned long *vcpu_bitmap);
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
index 172843a8c3141e3f9cae679d810a9e79439ff29c..290784ba63e410c51ec2bb17fb9875520e4c0cfc 100644 (file)
@@ -10511,7 +10511,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
        return kvm_arch_interrupt_allowed(vcpu);
 }
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
 {
        struct x86_exception fault;
@@ -10528,6 +10528,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                fault.address = work->arch.token;
                fault.async_page_fault = true;
                kvm_inject_page_fault(vcpu, &fault);
+               return true;
        } else {
                /*
                 * It is not possible to deliver a paravirtualized asynchronous
@@ -10538,6 +10539,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                 * fault is retried, hopefully the page will be ready in the host.
                 */
                kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+               return false;
        }
 }
 
@@ -10555,7 +10557,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
        trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
-       if (kvm_pv_async_pf_enabled(vcpu) &&
+       if ((work->wakeup_all || work->notpresent_injected) &&
+           kvm_pv_async_pf_enabled(vcpu) &&
            !apf_put_user_ready(vcpu, work->arch.token)) {
                vcpu->arch.apf.pageready_pending = true;
                kvm_apic_set_irq(vcpu, &irq, NULL);
index e2f82131bb3e9e93d31f7ae787e4c80ed2d57a91..62ec926c78a0ec9fa4e7541debb5220c7740f380 100644 (file)
@@ -206,6 +206,7 @@ struct kvm_async_pf {
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool   wakeup_all;
+       bool notpresent_injected;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
index ba080088da769c180b920bd0f56b07eaad47da6c..a36828fbf40a6da5544d59d930df2abd55bf0d36 100644 (file)
@@ -189,7 +189,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
-       kvm_arch_async_page_not_present(vcpu, work);
+       work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
 
        schedule_work(&work->work);