KVM: x86/mmu: Rename rmap zap helpers to eliminate "unmap" wrapper
author Sean Christopherson <seanjc@google.com>
Fri, 15 Jul 2022 22:42:24 +0000 (22:42 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 28 Jul 2022 17:22:17 +0000 (13:22 -0400)
Rename kvm_unmap_rmap() and kvm_zap_rmap() to kvm_zap_rmap() and
__kvm_zap_rmap() respectively to show that the former "unmap" helper is
just a wrapper for the "zap" helper, i.e. that the two do exactly the
same thing; one exists only to deal with its caller passing in more params.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220715224226.3749507-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 61c32d8d1f6dac9f6936e14b0cd21e9ad1ff0387..00be88e0a5f7f5282aecd62e16b33addee8b60c8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1383,17 +1383,17 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
        return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
-static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-                        const struct kvm_memory_slot *slot)
+static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+                          const struct kvm_memory_slot *slot)
 {
        return pte_list_destroy(kvm, rmap_head);
 }
 
-static bool kvm_unmap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-                          struct kvm_memory_slot *slot, gfn_t gfn, int level,
-                          pte_t unused)
+static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+                        struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                        pte_t unused)
 {
-       return kvm_zap_rmap(kvm, rmap_head, slot);
+       return __kvm_zap_rmap(kvm, rmap_head, slot);
 }
 
 static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1529,7 +1529,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
        bool flush = false;
 
        if (kvm_memslots_have_rmaps(kvm))
-               flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmap);
+               flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
 
        if (is_tdp_mmu_enabled(kvm))
                flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -6004,7 +6004,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
                        if (WARN_ON_ONCE(start >= end))
                                continue;
 
-                       flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmap,
+                       flush = slot_handle_level_range(kvm, memslot, __kvm_zap_rmap,
                                                        PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
                                                        start, end - 1, true, flush);
                }
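Context note (not part of the patch): the reason the wrapper survives the
rename is that the two helpers have to satisfy two different callback shapes.
The sketch below paraphrases the callback typedefs in arch/x86/kvm/mmu/mmu.c
of this era; typedef names, parameter names and const qualifiers are
approximations rather than quotes from the tree.

/* Shape used by kvm_handle_gfn_range(); per-gfn details are passed along. */
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
                               struct kvm_memory_slot *slot, gfn_t gfn,
                               int level, pte_t pte);

/* Shape used by slot_handle_level_range(); only kvm, rmap head and slot. */
typedef bool (*slot_level_handler)(struct kvm *kvm,
                                   struct kvm_rmap_head *rmap_head,
                                   const struct kvm_memory_slot *slot);

/*
 * After the patch, kvm_zap_rmap() matches rmap_handler_t and simply drops
 * the extra gfn/level/pte arguments before calling __kvm_zap_rmap(), which
 * matches slot_level_handler and does the real work via pte_list_destroy().
 */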