kvm->arch.tdp_mmu_enabled = true;
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+ INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
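The list initialized above needs a backing field in the VM-wide state. Below is a minimal sketch of the assumed declaration in struct kvm_arch (arch/x86/include/asm/kvm_host.h); the comment wording and the placement next to tdp_mmu_roots are assumptions, not the patch's exact hunk:

	/* List of struct kvm_mmu_pages being used as roots. */
	struct list_head tdp_mmu_roots;

	/*
	 * Assumed companion field: every non-root struct kvm_mmu_page
	 * allocated by the TDP MMU for this VM, so the pages can be found
	 * and freed when their SPTEs are zapped or the VM is torn down.
	 */
	struct list_head tdp_mmu_pages;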
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
u64 *pt;
+ struct kvm_mmu_page *sp;
u64 old_child_spte;
int i;
	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
if (was_present && !was_leaf && (pfn_changed || !is_present)) {
pt = spte_to_child_pt(old_spte, level);
+ sp = sptep_to_sp(pt);
+
+ list_del(&sp->link);
		for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
			old_child_spte = READ_ONCE(*(pt + i));
			WRITE_ONCE(*(pt + i), 0);
			handle_changed_spte(kvm, as_id,
				gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
				old_child_spte, 0, level - 1);
		}

		kvm_flush_remote_tlbs_with_address(kvm, gfn,
						   KVM_PAGES_PER_HPAGE(level));
free_page((unsigned long)pt);
+ kmem_cache_free(mmu_page_header_cache, sp);
}
}
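The teardown above leans on sptep_to_sp() to get from the raw child page table back to its struct kvm_mmu_page before unlinking and freeing it. Here is a hedged sketch of how that lookup is typically done in KVM, assuming the allocator stashes the header pointer in the page-table page's struct page::private; the helper name is illustrative, not the real definition:

/*
 * Illustrative only: recover the shadow-page header from any SPTE pointer
 * inside a page-table page, assuming set_page_private() linked the two at
 * allocation time.
 */
static struct kvm_mmu_page *sptep_to_sp_sketch(u64 *sptep)
{
	struct page *page = virt_to_page(sptep);

	return (struct kvm_mmu_page *)page_private(page);
}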
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu *mmu = vcpu->arch.mmu;
struct tdp_iter iter;
- struct kvm_mmu_memory_cache *pf_pt_cache =
- &vcpu->arch.mmu_shadow_page_cache;
+ struct kvm_mmu_page *sp;
u64 *child_pt;
u64 new_spte;
int ret;
}
if (!is_shadow_present_pte(iter.old_spte)) {
- child_pt = kvm_mmu_memory_cache_alloc(pf_pt_cache);
+ sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
+ list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
+ child_pt = sp->spt;
clear_page(child_pt);
new_spte = make_nonleaf_spte(child_pt,
!shadow_accessed_mask);
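With this change the fault path gets both the header and the page-table page from alloc_tdp_mmu_page() instead of pulling a bare page out of the shadow-page cache, which is what lets the teardown hunk return the header with kmem_cache_free(). The following is a rough sketch of what such a helper is assumed to do, built only from the caches and fields visible in this patch; the role setup is a placeholder, not the patch's actual code:

/*
 * Sketch of an alloc_tdp_mmu_page()-style helper (assumed, not verbatim):
 * take a struct kvm_mmu_page and a page-table page from the vCPU's memory
 * caches, link the page back to its header so sptep_to_sp() works, and
 * record the gfn and level the page maps.
 */
static struct kvm_mmu_page *alloc_tdp_mmu_page_sketch(struct kvm_vcpu *vcpu,
						      gfn_t gfn, int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->gfn = gfn;
	sp->role.level = level;	/* placeholder for the full role computation */

	return sp;
}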