 }
 
 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                        unsigned int pte_access, bool write_fault, int level,
+                        unsigned int pte_access, bool write_fault,
                         gfn_t gfn, kvm_pfn_t pfn, bool speculative,
                         bool host_writable)
 {
         struct kvm_mmu_page *sp = sptep_to_sp(sptep);
+        int level = sp->role.level;
         int was_rmapped = 0;
         int ret = RET_PF_FIXED;
         bool flush = false;
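With the level read from sp->role.level inside mmu_set_spte() itself, the explicit parameter is redundant and every caller can simply stop passing it. The direct prefetch path no longer forwards sp->role.level: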
                 return -1;
 
         for (i = 0; i < ret; i++, gfn++, start++) {
-                mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
+                mmu_set_spte(vcpu, start, access, false, gfn,
                              page_to_pfn(pages[i]), true, true);
                 put_page(pages[i]);
         }
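The direct map path drops fault->goal_level from its call: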
                 return -EFAULT;
 
         ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
-                           fault->write, fault->goal_level, base_gfn, fault->pfn,
+                           fault->write, base_gfn, fault->pfn,
                            fault->prefault, fault->map_writable);
         if (ret == RET_PF_SPURIOUS)
                 return ret;
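The shadow-paging prefetch path had been hard-coding PG_LEVEL_4K; that literal disappears along with the parameter: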
          * we call mmu_set_spte() with host_writable = true because
          * pte_prefetch_gfn_to_pfn always gets a writable pfn.
          */
-        mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
+        mmu_set_spte(vcpu, spte, pte_access, false, gfn, pfn,
                      true, true);
         kvm_release_pfn_clean(pfn);
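The final caller, which passes the guest walker's pte_access, loses fault->goal_level as well: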
                 return -EFAULT;
 
         ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write,
-                           fault->goal_level, base_gfn, fault->pfn,
-                           fault->prefault, fault->map_writable);
+                           base_gfn, fault->pfn, fault->prefault,
+                           fault->map_writable);
         if (ret == RET_PF_SPURIOUS)
                 return ret;
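Why this works at every call site: an SPTE pointer already identifies the 4KiB table page that holds it, and the kvm_mmu_page describing that page records the level in its role, so passing level separately only created a second source of truth. Below is a minimal userspace sketch of that derivation, assuming simplified stand-ins throughout: mock_mmu_page, mock_sptep_to_sp(), mock_set_spte(), and the page registry are invented here for illustration, whereas the kernel's real sptep_to_sp() resolves the containing page through the SPTE's physical address and the struct page backing the table.

/*
 * Sketch only, NOT kernel code: recover a table page's metadata from a
 * pointer to one of its SPTEs, so the level need not be passed around.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Stand-in for struct kvm_mmu_page: metadata describing one table page. */
struct mock_mmu_page {
        int level;              /* stand-in for sp->role.level */
        uint64_t *spt;          /* page-aligned 4KiB page of SPTEs */
};

/*
 * Stand-in for the kernel's struct-page back-pointer: maps a table page's
 * base address to its metadata.
 */
#define MAX_PAGES 16
static struct mock_mmu_page *registry[MAX_PAGES];
static int nr_pages;

static struct mock_mmu_page *mock_alloc_sp(int level)
{
        struct mock_mmu_page *sp = malloc(sizeof(*sp));

        sp->level = level;
        sp->spt = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        registry[nr_pages++] = sp;
        return sp;
}

/* Stand-in for sptep_to_sp(): mask the SPTE pointer down to its page. */
static struct mock_mmu_page *mock_sptep_to_sp(uint64_t *sptep)
{
        uintptr_t base = (uintptr_t)sptep & ~(uintptr_t)(PAGE_SIZE - 1);

        for (int i = 0; i < nr_pages; i++)
                if ((uintptr_t)registry[i]->spt == base)
                        return registry[i];
        return NULL;
}

/* Shaped like the new mmu_set_spte(): no level parameter needed. */
static void mock_set_spte(uint64_t *sptep, uint64_t value)
{
        struct mock_mmu_page *sp = mock_sptep_to_sp(sptep);

        printf("installing SPTE at level %d\n", sp->level);
        *sptep = value;
}

int main(void)
{
        /* In the kernel, PG_LEVEL_4K == 1. */
        struct mock_mmu_page *sp = mock_alloc_sp(1);

        mock_set_spte(&sp->spt[42], 0x1234);    /* prints "level 1" */
        return 0;
}

The payoff is visible in the prefetch hunk above: the PG_LEVEL_4K literal, a value the callee can now compute for itself, is gone, and a caller can no longer pass a level that disagrees with the page actually being written.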