From b1186a50bf77834d9342e6c836b396520fe09180 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@google.com>
Date: Tue, 22 Jun 2021 10:57:09 -0700
Subject: [PATCH] KVM: x86/mmu: Rename "nxe" role bit to "efer_nx" for macro
 shenanigans

Rename "nxe" to "efer_nx" so that future macro magic can use the pattern
<reg>_<bit> for all CR0, CR4, and EFER bits that are included in the role.
Using "efer_nx" also makes it clear that the role bit reflects EFER.NX,
not the NX bit in the corresponding PTE.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-25-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 Documentation/virt/kvm/mmu.rst            | 4 ++--
 arch/x86/include/asm/kvm_host.h           | 4 ++--
 arch/x86/kvm/mmu/mmu.c                    | 2 +-
 arch/x86/kvm/mmu/mmutrace.h               | 2 +-
 tools/lib/traceevent/plugins/plugin_kvm.c | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
index ddbb23998742c..f60f5488e1219 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/mmu.rst
@@ -180,8 +180,8 @@ Shadow pages contain the following information:
   role.gpte_is_8_bytes:
     Reflects the size of the guest PTE for which the page is valid, i.e. '1'
     if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
-  role.nxe:
-    Contains the value of efer.nxe for which the page is valid.
+  role.efer_nx:
+    Contains the value of efer.nx for which the page is valid.
   role.cr0_wp:
     Contains the value of cr0.wp for which the page is valid.
   role.smep_andnot_wp:
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 250915da1681d..520140eed4239 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -274,7 +274,7 @@ struct kvm_kernel_irq_routing_entry;
  * by indirect shadow page can not be more than 15 bits.
  *
  * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
- * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ * @efer_nx, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
  */
 union kvm_mmu_page_role {
 	u32 word;
@@ -285,7 +285,7 @@ union kvm_mmu_page_role {
 		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
-		unsigned nxe:1;
+		unsigned efer_nx:1;
 		unsigned cr0_wp:1;
 		unsigned smep_andnot_wp:1;
 		unsigned smap_andnot_wp:1;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 81992ba2899fb..25f23de89cdff 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4567,7 +4567,7 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 	union kvm_mmu_role role = {0};
 
 	role.base.access = ACC_ALL;
-	role.base.nxe = ____is_efer_nx(regs);
+	role.base.efer_nx = ____is_efer_nx(regs);
 	role.base.cr0_wp = ____is_cr0_wp(regs);
 	role.base.smm = is_smm(vcpu);
 	role.base.guest_mode = is_guest_mode(vcpu);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index e798489b56b55..efbad33a06457 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -40,7 +40,7 @@
 			 role.direct ? " direct" : "",			\
 			 access_str[role.access],			\
 			 role.invalid ? " invalid" : "",		\
-			 role.nxe ? "" : "!",				\
+			 role.efer_nx ? "" : "!",			\
 			 role.ad_disabled ? "!" : "",			\
 			 __entry->root_count,				\
 			 __entry->unsync ? "unsync" : "sync", 0);	\
diff --git a/tools/lib/traceevent/plugins/plugin_kvm.c b/tools/lib/traceevent/plugins/plugin_kvm.c
index 51ceeb9147eb6..9ce7b4b68e3fe 100644
--- a/tools/lib/traceevent/plugins/plugin_kvm.c
+++ b/tools/lib/traceevent/plugins/plugin_kvm.c
@@ -366,7 +366,7 @@ union kvm_mmu_page_role {
 		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
-		unsigned nxe:1;
+		unsigned efer_nx:1;
 		unsigned cr0_wp:1;
 		unsigned smep_and_not_wp:1;
 		unsigned smap_and_not_wp:1;
@@ -403,7 +403,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
 			       access_str[role.access],
 			       role.invalid ? " invalid" : "",
 			       role.cr4_pae ? "" : "!",
-			       role.nxe ? "" : "!",
+			       role.efer_nx ? "" : "!",
 			       role.cr0_wp ? "" : "!",
 			       role.smep_and_not_wp ? " smep" : "",
 			       role.smap_and_not_wp ? " smap" : "",
-- 
2.39.5