static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
struct kvm_mmu_role_regs *regs,
union kvm_mmu_role new_role)
{
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
+
+ context->mmu_role.as_u64 = new_role.as_u64;
+
if (!____is_cr0_pg(regs))
nonpaging_init_context(vcpu, context);
else if (____is_efer_lma(regs))
context->shadow_root_level = new_role.base.level;
- context->mmu_role.as_u64 = new_role.as_u64;
reset_shadow_zero_bits_mask(vcpu, context);
}

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
struct kvm_mmu_role_regs *regs)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, regs, new_role);
+ shadow_mmu_init_context(vcpu, context, regs, new_role);
}
__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, &regs, new_role);
+ shadow_mmu_init_context(vcpu, context, &regs, new_role);
/*
* Redo the shadow bits, the reset done by shadow_mmu_init_context()
* (above) is skipped when the MMU role doesn't change.
*/
reset_shadow_zero_bits_mask(vcpu, context);
}

if (new_role.as_u64 == context->mmu_role.as_u64)
return;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
context->shadow_root_level = level;
context->nx = true;
context->invlpg = ept_invlpg;
context->root_level = level;
context->direct_map = false;
- context->mmu_role.as_u64 = new_role.as_u64;
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
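
Taken together, the hunks above move the "did the MMU role change?" check out of the callers and into the init function itself, and commit the new role before the mode-specific setup runs so that setup code can consult it. Below is a minimal standalone sketch of that pattern; the mmu_role, mmu_ctx and mmu_reinit names are hypothetical stand-ins for illustration, not KVM's actual types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for KVM's union kvm_mmu_role: role bits packed
 * so that "did anything relevant change?" is a single u64 compare. */
union mmu_role {
	struct {
		uint64_t level    : 4;
		uint64_t cr0_pg   : 1;
		uint64_t efer_lma : 1;
	};
	uint64_t as_u64;
};

struct mmu_ctx {
	union mmu_role role;
	int root_level;
};

/* Mirrors the patch's shape: the no-change check lives in the init
 * function itself, and the new role is committed *before* the
 * mode-specific setup, so that setup code may read ctx->role. */
static void mmu_reinit(struct mmu_ctx *ctx, union mmu_role new_role)
{
	if (new_role.as_u64 == ctx->role.as_u64)
		return;				/* nothing changed, skip the reset */

	ctx->role.as_u64 = new_role.as_u64;	/* commit the role early */

	/* mode-specific setup, analogous to the paging*_init_context calls */
	ctx->root_level = (int)new_role.level;
	printf("reinit: level=%d paging=%d\n", ctx->root_level,
	       (int)new_role.cr0_pg);
}

int main(void)
{
	struct mmu_ctx ctx = { .role.as_u64 = 0 };
	union mmu_role r = { .level = 4, .cr0_pg = 1 };

	mmu_reinit(&ctx, r);	/* role changed: runs the setup */
	mmu_reinit(&ctx, r);	/* identical role: early return, no work */
	return 0;
}

As in the patch, callers invoke the init function unconditionally and let the early return handle the no-op case.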