KVM: x86/mmu: cleanup computation of MMU roles for shadow paging
author    Paolo Bonzini <pbonzini@redhat.com>
          Thu, 10 Feb 2022 12:33:26 +0000 (07:33 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 29 Apr 2022 16:49:55 +0000 (12:49 -0400)
Pass the already-computed CPU role instead of recomputing it.
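
For illustration only (not part of the patch): a minimal, self-contained
sketch of the "compute once, pass down" pattern this change applies. The
names below are simplified stand-ins, not the kernel's actual kvm_calc_*
helpers or role unions.

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the CPU role derived from guest state. */
    struct cpu_role {
            bool efer_lma;          /* long mode active */
            bool cr4_la57;          /* 5-level paging enabled */
    };

    struct mmu_role {
            int level;              /* root page-table level */
    };

    /* Derived once by the caller from the raw register state. */
    static struct cpu_role calc_cpu_role(bool efer_lma, bool cr4_la57)
    {
            struct cpu_role r = { .efer_lma = efer_lma, .cr4_la57 = cr4_la57 };
            return r;
    }

    /* After the refactor: consumes the precomputed CPU role instead of
     * rederiving it from the registers, mirroring what
     * kvm_calc_shadow_mmu_root_page_role() does in the diff below. */
    static struct mmu_role calc_shadow_root_role(struct cpu_role cpu_role)
    {
            struct mmu_role role;

            if (!cpu_role.efer_lma)
                    role.level = 3; /* PT32E_ROOT_LEVEL */
            else if (cpu_role.cr4_la57)
                    role.level = 5; /* PT64_ROOT_5LEVEL */
            else
                    role.level = 4; /* PT64_ROOT_4LEVEL */
            return role;
    }

    int main(void)
    {
            /* Compute the CPU role once... */
            struct cpu_role cpu_role = calc_cpu_role(true, false);
            /* ...then hand the result to every consumer. */
            struct mmu_role mmu_role = calc_shadow_root_role(cpu_role);

            printf("root level: %d\n", mmu_role.level); /* prints 4 */
            return 0;
    }

Beyond saving a redundant computation, this makes it explicit at the call
sites that cpu_role and mmu_role come from a single derivation.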

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 0b97aee6b735c7e96eba09375811348ef6aa1718..3f4777849040536963132ccd6dbb49667c4e7fb3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4846,15 +4846,14 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-                                  const struct kvm_mmu_role_regs *regs)
+                                  union kvm_mmu_role cpu_role)
 {
-       union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
        union kvm_mmu_role role;
 
        role = cpu_role;
-       if (!____is_efer_lma(regs))
+       if (!cpu_role.ext.efer_lma)
                role.base.level = PT32E_ROOT_LEVEL;
-       else if (____is_cr4_la57(regs))
+       else if (cpu_role.ext.cr4_la57)
                role.base.level = PT64_ROOT_5LEVEL;
        else
                role.base.level = PT64_ROOT_4LEVEL;
@@ -4893,16 +4892,15 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
        struct kvm_mmu *context = &vcpu->arch.root_mmu;
        union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
        union kvm_mmu_role mmu_role =
-               kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
+               kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);
 
        shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
 }
 
 static union kvm_mmu_role
 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-                                  const struct kvm_mmu_role_regs *regs)
+                                  union kvm_mmu_role cpu_role)
 {
-       union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
        union kvm_mmu_role role;
 
        WARN_ON_ONCE(cpu_role.base.direct);
@@ -4922,7 +4920,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
                .efer = efer,
        };
        union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
-       union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
+       union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
 
        shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
        kvm_mmu_new_pgd(vcpu, nested_cr3);