git.baikalelectronics.ru Git - kernel.git/commitdiff
KVM: selftests: Rename vm_vcpu_add* helpers to better show relationships
author: Sean Christopherson <seanjc@google.com>
Wed, 16 Feb 2022 17:56:24 +0000 (09:56 -0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Sat, 11 Jun 2022 15:47:04 +0000 (11:47 -0400)
Rename vm_vcpu_add() to __vm_vcpu_add(), and vm_vcpu_add_default() to
vm_vcpu_add() to show the relationship between the newly minted
vm_vcpu_add() and __vm_vcpu_add().

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
23 files changed:
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/psci_test.c
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
tools/testing/selftests/kvm/aarch64/vgic_init.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/include/aarch64/processor.h
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/kvm_binary_stats_test.c
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/kvm/lib/s390x/processor.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/set_memory_region_test.c
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
tools/testing/selftests/kvm/x86_64/set_sregs_test.c
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c

index 5476bb465b784effc63bd9d2c1dc4ce4ee80d5b8..fbb0c714211ded610fe5d7ad9086562a8bc76cca 100644 (file)
@@ -418,7 +418,7 @@ static void run_test(struct vcpu_config *c)
 
        vm = vm_create_barebones();
        prepare_vcpu_init(c, &init);
-       vm_vcpu_add(vm, 0);
+       __vm_vcpu_add(vm, 0);
        aarch64_vcpu_setup(vm, 0, &init);
        finalize_vcpu(vm, 0, c);
 
index fa4e6c3343d7a1e5db18567e9aad08016042d782..347cb5c130e2a1f780db44315ff0e42e316d95f1 100644 (file)
@@ -84,8 +84,8 @@ static struct kvm_vm *setup_vm(void *guest_code)
        vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
        init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
 
-       aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code);
-       aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code);
+       aarch64_vcpu_add(vm, VCPU_ID_SOURCE, &init, guest_code);
+       aarch64_vcpu_add(vm, VCPU_ID_TARGET, &init, guest_code);
 
        return vm;
 }
index 1757f44dd3e23fdc2f1e1f0a950e7e53c231a577..1dd856a58f5d000de558207713477837b7ce8e4f 100644 (file)
@@ -26,12 +26,12 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init1,
 
        vm = vm_create_barebones();
 
-       vm_vcpu_add(vm, 0);
+       __vm_vcpu_add(vm, 0);
        ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
        if (ret)
                goto free_exit;
 
-       vm_vcpu_add(vm, 1);
+       __vm_vcpu_add(vm, 1);
        ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
 
 free_exit:
@@ -51,8 +51,8 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
 
        vm = vm_create_barebones();
 
-       vm_vcpu_add(vm, 0);
-       vm_vcpu_add(vm, 1);
+       __vm_vcpu_add(vm, 0);
+       __vm_vcpu_add(vm, 1);
 
        ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
        if (ret)
index c5866c3f45167a640c3b4336b6fc4b6ff9a88c39..451f65b199adb41ed65210ef8c5770200ca600f0 100644 (file)
@@ -331,7 +331,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type)
 
        /* Add the rest of the VCPUs */
        for (i = 1; i < NR_VCPUS; ++i)
-               vm_vcpu_add_default(v.vm, i, guest_code);
+               vm_vcpu_add(v.vm, i, guest_code);
 
        ret = run_vcpu(v.vm, 3);
        TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
@@ -418,17 +418,17 @@ static void test_v3_typer_accesses(void)
 
        v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 
-       vm_vcpu_add_default(v.vm, 3, guest_code);
+       vm_vcpu_add(v.vm, 3, guest_code);
 
        v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
                                "attempting to read GICR_TYPER of non created vcpu");
 
-       vm_vcpu_add_default(v.vm, 1, guest_code);
+       vm_vcpu_add(v.vm, 1, guest_code);
 
        v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
                                "read GICR_TYPER before GIC initialized");
 
-       vm_vcpu_add_default(v.vm, 2, guest_code);
+       vm_vcpu_add(v.vm, 2, guest_code);
 
        kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                            KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
@@ -559,7 +559,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
 
        /* Add the rest of the VCPUs */
        for (i = 1; i < NR_VCPUS; ++i)
-               vm_vcpu_add_default(v.vm, i, guest_code);
+               vm_vcpu_add(v.vm, i, guest_code);
 
        kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                            KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
index 23e0c727e375de09305755dfbe93632a5d00a6f3..1a5c01c650441bc157e2ecab48282c9187ed0824 100644 (file)
@@ -676,7 +676,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
        vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
 
        log_mode_create_vm_done(vm);
-       vm_vcpu_add_default(vm, vcpuid, guest_code);
+       vm_vcpu_add(vm, vcpuid, guest_code);
        return vm;
 }
 
index 29f6ca51408fad7814166fa4425be24d6ab43da1..be2763ecb6e7aa079ecf3328d1cfd6701cca500a 100644 (file)
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
 
        pr_debug("%s: [%d] start vcpus\n", __func__, run);
        for (i = 0; i < VCPU_NUM; ++i) {
-               vm_vcpu_add_default(vm, i, guest_code);
+               vm_vcpu_add(vm, i, guest_code);
                payloads[i].vm = vm;
                payloads[i].index = i;
 
index 9dad391b4fecaecc8c23701476402c20e24acb10..f774609f7848049524b64491c08ccf14819d8538 100644 (file)
@@ -64,9 +64,8 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint
 }
 
 void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
-                                         struct kvm_vcpu_init *init,
-                                         void *guest_code);
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+                                 struct kvm_vcpu_init *init, void *guest_code);
 
 struct ex_regs {
        u64 regs[31];
index 622b09ec23dda63039e02c7bf5f12449a15ca7ee..2c7a8a91ebe28c88f2c2d791287d3fad677f23d8 100644 (file)
@@ -288,7 +288,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
@@ -659,9 +659,8 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                  void *guest_code);
 
-static inline struct kvm_vcpu *vm_vcpu_add_default(struct kvm_vm *vm,
-                                                  uint32_t vcpu_id,
-                                                  void *guest_code)
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+                                          void *guest_code)
 {
        return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
 }
index edeb08239036b066e9ca0df36eb8f4a95ef37d51..407e9ea8e6f3d2a2a19859b6c1ff6b6c6b2720ab 100644 (file)
@@ -223,7 +223,7 @@ int main(int argc, char *argv[])
        for (i = 0; i < max_vm; ++i) {
                vms[i] = vm_create_barebones();
                for (j = 0; j < max_vcpu; ++j)
-                       vm_vcpu_add(vms[i], j);
+                       __vm_vcpu_add(vms[i], j);
        }
 
        /* Check stats read for every VM and VCPU */
index acc92703f563b560816f24ea6f2de3466fc9a6e8..3ae0237e96b2eee09b3339b5c16903f3c3eaefc5 100644 (file)
@@ -32,7 +32,7 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
 
        for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
                /* This asserts that the vCPU was created. */
-               vm_vcpu_add(vm, i);
+               __vm_vcpu_add(vm, i);
 
        kvm_vm_free(vm);
 }
index 2b169b4ec29e5856b1bdea5e5be6aad7b961c22e..5b95fa2cce182168e22e216bae6e462d7fcc3795 100644 (file)
@@ -314,16 +314,15 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t in
                indent, "", pstate, pc);
 }
 
-struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
-                                         struct kvm_vcpu_init *init,
-                                         void *guest_code)
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+                                 struct kvm_vcpu_init *init, void *guest_code)
 {
        size_t stack_size = vm->page_size == 4096 ?
                                        DEFAULT_STACK_PGS * vm->page_size :
                                        vm->page_size;
        uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                                              DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
-       struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpu_id);
+       struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
 
        aarch64_vcpu_setup(vm, vcpu_id, init);
 
@@ -336,7 +335,7 @@ struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                  void *guest_code)
 {
-       return aarch64_vcpu_add_default(vm, vcpu_id, NULL, guest_code);
+       return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
 }
 
 void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
index 752731cf4292a14310b7db2ba043e0d2054c29b8..c2a99f26e9baa8fbf2dff499a49a8b2e8c2a029b 100644 (file)
@@ -328,7 +328,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
        for (i = 0; i < nr_vcpus; ++i) {
                uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
 
-               vm_vcpu_add_default(vm, vcpuid, guest_code);
+               vm_vcpu_add(vm, vcpuid, guest_code);
        }
 
        return vm;
@@ -397,7 +397,7 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
 {
        kvm_vm_restart(vm);
 
-       return vm_vcpu_add(vm, 0);
+       return __vm_vcpu_add(vm, 0);
 }
 
 /*
@@ -1065,7 +1065,7 @@ static int vcpu_mmap_sz(void)
  * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
  * No additional vCPU setup is done.  Returns the vCPU.
  */
-struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
        struct kvm_vcpu *vcpu;
 
index 5946101144eb6d0ea8355811a77f3fed1860bc82..ba5761843c76ca0005ebabcc82bd5e1c8dc61e2f 100644 (file)
@@ -287,7 +287,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        struct kvm_mp_state mps;
        struct kvm_vcpu *vcpu;
 
-       vcpu = vm_vcpu_add(vm, vcpu_id);
+       vcpu = __vm_vcpu_add(vm, vcpu_id);
        riscv_vcpu_mmu_setup(vm, vcpu_id);
 
        /*
index cf759844b226b4a0dede08aa2b356f069150d4e7..f8170e97eeb7b7ad4dacd2d9ca74cea0dcdc7648 100644 (file)
@@ -170,7 +170,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                                     DEFAULT_GUEST_STACK_VADDR_MIN);
 
-       vcpu = vm_vcpu_add(vm, vcpu_id);
+       vcpu = __vm_vcpu_add(vm, vcpu_id);
 
        /* Setup guest registers */
        vcpu_regs_get(vm, vcpu_id, &regs);
index bafa8ec5456908ed5dcb3ffe4d9315eb01838fb6..f89d67101bf155db5c5e33a2358e9ff04d6c8b13 100644 (file)
@@ -643,7 +643,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
                                     DEFAULT_GUEST_STACK_VADDR_MIN);
 
-       vcpu = vm_vcpu_add(vm, vcpu_id);
+       vcpu = __vm_vcpu_add(vm, vcpu_id);
        vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
        vcpu_setup(vm, vcpu_id);
 
index 1274bbb0e30b9891343adad7dc2dee9c79ec7961..d832fc12984ed2f711b46fc60413141752698694 100644 (file)
@@ -315,7 +315,7 @@ static void test_zero_memory_regions(void)
        pr_info("Testing KVM_RUN with zero added memory regions\n");
 
        vm = vm_create_barebones();
-       vcpu = vm_vcpu_add(vm, 0);
+       vcpu = __vm_vcpu_add(vm, 0);
 
        vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
        vcpu_run(vm, vcpu->id);
index 75303fe8359d1a05d7d5374c1986ffd5ad7cfbe6..fd3533582509a827aae32db63b16af0c12dddc45 100644 (file)
@@ -275,7 +275,7 @@ int main(int ac, char **av)
 
        /* Add the rest of the VCPUs */
        for (i = 1; i < NR_VCPUS; ++i)
-               vm_vcpu_add_default(vm, i, guest_code);
+               vm_vcpu_add(vm, i, guest_code);
 
        steal_time_init(vm);
 
index 2faa43336131d99d14d4dbb0eab8df9b6590133d..ffa9e267188c62622afcceb515027af992cb1fe6 100644 (file)
@@ -369,7 +369,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
 
        vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
 
-       vcpu = vm_vcpu_add_default(vm, 0, guest_code);
+       vcpu = vm_vcpu_add(vm, 0, guest_code);
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, vcpu->id);
 
index 9ba3cd4e7f20b7fba754ca0d2a9215f8c549f56b..e637098940302abb1dbc3b7ae9ea4b97a2dee45f 100644 (file)
@@ -92,9 +92,9 @@ static struct kvm_vm *create_vm(void)
 static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
 {
        if (bsp_code)
-               vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
+               vm_vcpu_add(vm, vcpuid, guest_bsp_vcpu);
        else
-               vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
+               vm_vcpu_add(vm, vcpuid, guest_not_bsp_vcpu);
 }
 
 static void run_vm_bsp(uint32_t bsp_vcpu)
index 8a5c1f76287c0f9ea446bd404da3e9ba44a3d2d5..2e67df3a95ba50e2b37432fdd4982865eee4a95d 100644 (file)
@@ -95,7 +95,7 @@ int main(int argc, char *argv[])
         * the vCPU model, i.e. without doing KVM_SET_CPUID2.
         */
        vm = vm_create_barebones();
-       vcpu = vm_vcpu_add(vm, 0);
+       vcpu = __vm_vcpu_add(vm, 0);
 
        vcpu_sregs_get(vm, vcpu->id, &sregs);
 
index 245fd07553902d4c952c347ab7dc51ba8819b159..ec418b823273e1607d08406dfe79309b84388695 100644 (file)
@@ -56,7 +56,7 @@ static struct kvm_vm *sev_vm_create(bool es)
        vm = vm_create_barebones();
        sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
-               vm_vcpu_add(vm, i);
+               __vm_vcpu_add(vm, i);
        if (es)
                start.policy |= SEV_POLICY_ES;
        sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
@@ -75,7 +75,7 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus)
                return vm;
 
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
-               vm_vcpu_add(vm, i);
+               __vm_vcpu_add(vm, i);
 
        return vm;
 }
@@ -182,7 +182,7 @@ static void test_sev_migrate_parameters(void)
        sev_es_vm = sev_vm_create(/* es= */ true);
        sev_es_vm_no_vmsa = vm_create_barebones();
        sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
-       vm_vcpu_add(sev_es_vm_no_vmsa, 1);
+       __vm_vcpu_add(sev_es_vm_no_vmsa, 1);
 
        ret = __sev_migrate_from(sev_vm, sev_es_vm);
        TEST_ASSERT(
@@ -278,7 +278,7 @@ static void test_sev_mirror(bool es)
 
        /* Check that we can complete creation of the mirror VM.  */
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
-               vm_vcpu_add(dst_vm, i);
+               __vm_vcpu_add(dst_vm, i);
 
        if (es)
                sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
index ea70ca2e63c3c7c6d5489740aceef84fc61cec30..2411215e7ae87036e4dd5c24aaf23d81a20bd06f 100644 (file)
@@ -51,10 +51,10 @@ static void *run_vcpu(void *_cpu_nr)
        static bool first_cpu_done;
        struct kvm_vcpu *vcpu;
 
-       /* The kernel is fine, but vm_vcpu_add_default() needs locking */
+       /* The kernel is fine, but vm_vcpu_add() needs locking */
        pthread_spin_lock(&create_lock);
 
-       vcpu = vm_vcpu_add_default(vm, vcpu_id, guest_code);
+       vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
 
        if (!first_cpu_done) {
                first_cpu_done = true;
index afbbc40df88408f205c23994e78b0786ba787014..8b366652be3117fbd7459bb6298bf0a17549ac75 100644 (file)
@@ -425,7 +425,7 @@ int main(int argc, char *argv[])
 
        virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
 
-       vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
+       vm_vcpu_add(vm, SENDER_VCPU_ID, sender_guest_code);
 
        test_data_page_vaddr = vm_vaddr_alloc_page(vm);
        data =