__clear_sp_write_flooding_count(sptep_to_sp(spte));
}
+/*
+ * The vCPU is required when finding indirect shadow pages; the shadow
+ * page may already exist and syncing it needs the vCPU pointer in
+ * order to read guest page tables. Direct shadow pages are never
+ * unsync, thus @vcpu can be NULL if @role.direct is true.
+ */
static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
struct kvm_vcpu *vcpu,
gfn_t gfn,
goto out;
if (sp->unsync) {
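+			/*
+			 * Unsync pages are possible only for indirect MMUs,
+			 * and syncing requires a vCPU to read the guest page
+			 * tables; a NULL @vcpu here means the caller violated
+			 * the contract described above.
+			 */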
+ if (KVM_BUG_ON(!vcpu, kvm))
+ break;
+
/*
* The page is good, but is stale. kvm_sync_page does
* get the latest guest state, but (unlike mmu_unsync_children)
return sp;
}
+/* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
struct kvm_vcpu *vcpu,
struct shadow_page_caches *caches,