KVM: selftests: Open code and drop 'struct kvm_vm' accessors
author     Sean Christopherson <seanjc@google.com>
           Thu, 17 Feb 2022 00:51:20 +0000 (16:51 -0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Sat, 11 Jun 2022 15:47:24 +0000 (11:47 -0400)
Drop a variety of 'struct kvm_vm' accessors that wrap a single variable
now that tests can simply reference the variable directly.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
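
The conversion is mechanical: each dropped accessor returned exactly one
'struct kvm_vm' field, so callers now dereference that field directly.
Below is a minimal standalone sketch of the before/after pattern; it is
illustrative only (trimmed struct, hypothetical values), not the
selftests code itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Trimmed stand-in for the selftests' struct kvm_vm; only fields whose
 * accessors this commit drops are shown (kvm_fd and fd are handled the
 * same way).
 */
struct kvm_vm {
	unsigned int page_size;
	unsigned int page_shift;
	uint64_t max_gfn;
	int kvm_fd;
	int fd;
};

/* Before: each accessor is a one-line wrapper around a single field. */
static unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

static uint64_t vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int main(void)
{
	/* Hypothetical values: 4KiB pages, 48-bit guest physical space. */
	struct kvm_vm vm = {
		.page_size  = 4096,
		.page_shift = 12,
		.max_gfn    = ((1ULL << 48) >> 12) - 1,
	};

	/* Old style: every read goes through an accessor. */
	uint64_t max_gpa_old = vm_get_max_gfn(&vm) << vm_get_page_shift(&vm);

	/* New style (this commit): reference the field directly. */
	uint64_t max_gpa_new = vm.max_gfn << vm.page_shift;

	printf("max_gpa: old=%#" PRIx64 " new=%#" PRIx64 "\n",
	       max_gpa_old, max_gpa_new);
	return 0;
}
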
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/max_guest_memory_test.c
tools/testing/selftests/kvm/memslot_modification_stress_test.c
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 2027208e7d1092e6e6d3b396ba2795fa0f163575..808a36dbf0c04010fd42aae5a4446d41cbf2c319 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -221,7 +221,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        perf_test_set_wr_fract(vm, p->wr_fract);
 
-       guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
+       guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
        pages_per_slot = host_num_pages / p->slots;
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 906e893375df8b3e5d5c5ff036d07b6e9eab2715..ca584b9bf5c030c87ed9a3a8793588f6cb61e7aa 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -713,21 +713,20 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        vm = create_vm(mode, &vcpu,
                       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
 
-       guest_page_size = vm_get_page_size(vm);
+       guest_page_size = vm->page_size;
        /*
         * A little more than 1G of guest page sized pages.  Cover the
         * case where the size is not aligned to 64 pages.
         */
-       guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-                                  vm_get_page_shift(vm))) + 3;
+       guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 
        host_page_size = getpagesize();
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
        if (!p->phys_offset) {
-               guest_test_phys_mem = (vm_get_max_gfn(vm) -
-                                      guest_num_pages) * guest_page_size;
+               guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
+                                     guest_page_size;
                guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
        } else {
                guest_test_phys_mem = p->phys_offset;
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 5741a999aca17a83109642b422f849a96d4bcd8b..45f536f993996ce947c55ae6e4a7d312b12ade9b 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -592,13 +592,7 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
-unsigned int vm_get_page_size(struct kvm_vm *vm);
-unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
-uint64_t vm_get_max_gfn(struct kvm_vm *vm);
-int vm_get_kvm_fd(struct kvm_vm *vm);
-int vm_get_fd(struct kvm_vm *vm);
-
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
 unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
 unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 8706ae35844495947303c19b085c4b6ef9a40fcb..0f8792aa036683f976a070a1f9038a77cb6511f5 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -260,7 +260,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
        /* Align down GPA of the testing memslot */
        if (!p->phys_offset)
-               guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+               guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
                                       guest_page_size;
        else
                guest_test_phys_mem = p->phys_offset;
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index ec4642f79fa31b05085cedc550b4a78af3e50471..548c3c366bf55483487e405410c8b1d2d847c238 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1827,36 +1827,11 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
        return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
 }
 
-unsigned int vm_get_page_size(struct kvm_vm *vm)
-{
-       return vm->page_size;
-}
-
-unsigned int vm_get_page_shift(struct kvm_vm *vm)
-{
-       return vm->page_shift;
-}
-
 unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
 {
        return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
 }
 
-uint64_t vm_get_max_gfn(struct kvm_vm *vm)
-{
-       return vm->max_gfn;
-}
-
-int vm_get_kvm_fd(struct kvm_vm *vm)
-{
-       return vm->kvm_fd;
-}
-
-int vm_get_fd(struct kvm_vm *vm)
-{
-       return vm->fd;
-}
-
 static unsigned int vm_calc_num_pages(unsigned int num_pages,
                                      unsigned int page_shift,
                                      unsigned int new_page_shift,
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index a8c7b2785cc4090b18830e2d94f80f67777dc485..2649595194ade0d5b9bfe888c357afcee8e18629 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -159,7 +159,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
        pta->vm = vm;
 
        /* Put the test region at the top guest physical memory. */
-       region_end_gfn = vm_get_max_gfn(vm) + 1;
+       region_end_gfn = vm->max_gfn + 1;
 
 #ifdef __x86_64__
        /*
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 8f34c5aca42036e5ec0396e6faaacd34821e8457..9a6e4f3ad6b57ba2fa867d6975025ae5cbd8d8e9 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -65,8 +65,7 @@ static void *vcpu_worker(void *data)
        struct kvm_sregs sregs;
        struct kvm_regs regs;
 
-       vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
-                     vm_get_page_size(vm));
+       vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
 
        /* Snapshot regs before the first run. */
        vcpu_regs_get(vcpu, &regs);
@@ -104,7 +103,7 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
        TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
 
        nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
-                       ~((uint64_t)vm_get_page_size(vm) - 1);
+                       ~((uint64_t)vm->page_size - 1);
        TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
 
        for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
@@ -220,7 +219,7 @@ int main(int argc, char *argv[])
 
        vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
 
-       max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
+       max_gpa = vm->max_gfn << vm->page_shift;
        TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
 
        fd = kvm_memfd_alloc(slot_size, hugepages);
@@ -230,7 +229,7 @@ int main(int argc, char *argv[])
        TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
 
        /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
-       for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+       for (i = 0; i < slot_size; i += vm->page_size)
                ((uint8_t *)mem)[i] = 0xaa;
 
        gpa = 0;
@@ -249,7 +248,7 @@ int main(int argc, char *argv[])
                for (i = 0; i < slot_size; i += size_1gb)
                        __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
 #else
-               for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+               for (i = 0; i < slot_size; i += vm->page_size)
                        virt_pg_map(vm, gpa + i, gpa + i);
 #endif
        }
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 1f9036cdcaa93439cb61df700a647be87592acc3..6ee7e1dde40430edef52b4699074b397b70783fa 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -75,7 +75,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
         * Add the dummy memslot just below the perf_test_util memslot, which is
         * at the top of the guest physical address space.
         */
-       gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
+       gpa = perf_test_args.gpa - pages * vm->page_size;
 
        for (i = 0; i < nr_modifications; i++) {
                usleep(delay);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index af13c48f0f304737a1c71a98be078408eb0608f5..6df5a6356181516b14094476474133c9892a8978 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -121,7 +121,7 @@ void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
        if (vcpu)
                ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
        else
-               ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+               ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
 
        TEST_ASSERT(ret == -1 && errno == E2BIG,
                    "%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"