vm = vm_create_barebones();
prepare_vcpu_init(c, &init);
- vm_vcpu_add(vm, 0);
+ __vm_vcpu_add(vm, 0);
aarch64_vcpu_setup(vm, 0, &init);
finalize_vcpu(vm, 0, c);
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
- aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code);
- aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code);
+ aarch64_vcpu_add(vm, VCPU_ID_SOURCE, &init, guest_code);
+ aarch64_vcpu_add(vm, VCPU_ID_TARGET, &init, guest_code);
return vm;
}
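To make the renames concrete, a minimal sketch (illustrative only, assuming a regularly created vm with guest memory rather than a barebones one) contrasting the two aarch64 entry points:

	struct kvm_vcpu *bare, *cooked;

	/* Bare variant: KVM_CREATE_VCPU only; no stack, no KVM_ARM_VCPU_INIT. */
	bare = __vm_vcpu_add(vm, 0);

	/* Full variant: allocates a guest stack, initializes the vCPU, and
	 * points its PC at guest_code; a NULL init falls back to the
	 * preferred target. */
	cooked = aarch64_vcpu_add(vm, 1, NULL, guest_code);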
vm = vm_create_barebones();
- vm_vcpu_add(vm, 0);
+ __vm_vcpu_add(vm, 0);
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
if (ret)
goto free_exit;
- vm_vcpu_add(vm, 1);
+ __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
free_exit:
vm = vm_create_barebones();
- vm_vcpu_add(vm, 0);
- vm_vcpu_add(vm, 1);
+ __vm_vcpu_add(vm, 0);
+ __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
if (ret)
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(v.vm, i, guest_code);
+ vm_vcpu_add(v.vm, i, guest_code);
ret = run_vcpu(v.vm, 3);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
- vm_vcpu_add_default(v.vm, 3, guest_code);
+ vm_vcpu_add(v.vm, 3, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
"attempting to read GICR_TYPER of non created vcpu");
- vm_vcpu_add_default(v.vm, 1, guest_code);
+ vm_vcpu_add(v.vm, 1, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
"read GICR_TYPER before GIC initialized");
- vm_vcpu_add_default(v.vm, 2, guest_code);
+ vm_vcpu_add(v.vm, 2, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(v.vm, i, guest_code);
+ vm_vcpu_add(v.vm, i, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
log_mode_create_vm_done(vm);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
+ vm_vcpu_add(vm, vcpuid, guest_code);
return vm;
}
pr_debug("%s: [%d] start vcpus\n", __func__, run);
for (i = 0; i < VCPU_NUM; ++i) {
- vm_vcpu_add_default(vm, i, guest_code);
+ vm_vcpu_add(vm, i, guest_code);
payloads[i].vm = vm;
payloads[i].index = i;
}
void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
- struct kvm_vcpu_init *init,
- void *guest_code);
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ struct kvm_vcpu_init *init, void *guest_code);
struct ex_regs {
u64 regs[31];
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code);
-static inline struct kvm_vcpu *vm_vcpu_add_default(struct kvm_vm *vm,
- uint32_t vcpu_id,
- void *guest_code)
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
{
return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}
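A usage sketch for the common-code wrapper (caller code assumed, not from the patch): arch-neutral tests go through vm_vcpu_add() and never see vm_arch_vcpu_add() directly:

	/* Each architecture supplies vm_arch_vcpu_add(); callers stay generic. */
	struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);

	vcpu_run(vm, vcpu->id);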
for (i = 0; i < max_vm; ++i) {
vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j)
- vm_vcpu_add(vms[i], j);
+ __vm_vcpu_add(vms[i], j);
}
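The creation loop above is mirrored by a read-out pass over the same VM/vCPU matrix; a hedged sketch of that pass (the stats-fd helpers are assumed to come from the same selftests library):

	for (i = 0; i < max_vm; ++i) {
		stats_test(vm_get_stats_fd(vms[i]));              /* per-VM stats */
		for (j = 0; j < max_vcpu; ++j)
			stats_test(vcpu_get_stats_fd(vms[i], j)); /* per-vCPU stats */
	}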
/* Check stats read for every VM and VCPU */
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
kvm_vm_free(vm);
}
indent, "", pstate, pc);
}
-struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
- struct kvm_vcpu_init *init,
- void *guest_code)
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ struct kvm_vcpu_init *init, void *guest_code)
{
size_t stack_size = vm->page_size == 4096 ?
DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
- struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpu_id);
+ struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
aarch64_vcpu_setup(vm, vcpu_id, init);
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
- return aarch64_vcpu_add_default(vm, vcpu_id, NULL, guest_code);
+ return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}
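Passing a NULL init here is deliberate: the aarch64 setup path is expected to fall back to the host's preferred target when no kvm_vcpu_init is supplied. A hedged paraphrase of that fallback (reconstructed, not copied from the patch):

	struct kvm_vcpu_init default_init = { .target = -1, };

	if (!init)
		init = &default_init;
	if (init->target == -1) {
		struct kvm_vcpu_init preferred;

		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}
	vcpu_ioctl(vm, vcpu_id, KVM_ARM_VCPU_INIT, init);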
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
for (i = 0; i < nr_vcpus; ++i) {
uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
- vm_vcpu_add_default(vm, vcpuid, guest_code);
+ vm_vcpu_add(vm, vcpuid, guest_code);
}
return vm;
{
kvm_vm_restart(vm);
- return vm_vcpu_add(vm, 0);
+ return __vm_vcpu_add(vm, 0);
}
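That restart wrapper is what state-save tests lean on to tear down and rebuild a VM around preserved vCPU state. A hedged sketch of the typical flow (the wrapper's name and the x86 state helpers are assumptions, not shown in the patch):

	struct kvm_x86_state *state = vcpu_save_state(vm, vcpu->id);

	kvm_vm_release(vm);                   /* drop fds, keep the VM layout */
	vcpu = vm_recreate_with_one_vcpu(vm); /* kvm_vm_restart() + __vm_vcpu_add(vm, 0) */
	vcpu_load_state(vm, vcpu->id, state);
	kvm_x86_state_cleanup(state);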
/*
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
*/
-struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
struct kvm_vcpu *vcpu;
struct kvm_mp_state mps;
struct kvm_vcpu *vcpu;
- vcpu = vm_vcpu_add(vm, vcpu_id);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vm, vcpu_id);
/*
stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_GUEST_STACK_VADDR_MIN);
- vcpu = vm_vcpu_add(vm, vcpu_id);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
vcpu_regs_get(vm, vcpu_id, &regs);
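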
stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
DEFAULT_GUEST_STACK_VADDR_MIN);
- vcpu = vm_vcpu_add(vm, vcpu_id);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
vcpu_setup(vm, vcpu_id);
pr_info("Testing KVM_RUN with zero added memory regions\n");
vm = vm_create_barebones();
- vcpu = vm_vcpu_add(vm, 0);
+ vcpu = __vm_vcpu_add(vm, 0);
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
vcpu_run(vm, vcpu->id);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(vm, i, guest_code);
+ vm_vcpu_add(vm, i, guest_code);
steal_time_init(vm);
vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
- vcpu = vm_vcpu_add_default(vm, 0, guest_code);
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, vcpu->id);
static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
{
if (bsp_code)
- vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
+ vm_vcpu_add(vm, vcpuid, guest_bsp_vcpu);
else
- vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
+ vm_vcpu_add(vm, vcpuid, guest_not_bsp_vcpu);
}
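A hedged caller sketch for the helper above (the loop bound and bsp_vcpu are illustrative): exactly one vCPU gets the BSP guest code, the rest run the non-BSP variant:

	for (i = 0; i < N_VCPUS; i++)
		add_x86_vcpu(vm, i, i == bsp_vcpu);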
static void run_vm_bsp(uint32_t bsp_vcpu)
* the vCPU model, i.e. without doing KVM_SET_CPUID2.
*/
vm = vm_create_barebones();
- vcpu = vm_vcpu_add(vm, 0);
+ vcpu = __vm_vcpu_add(vm, 0);
vcpu_sregs_get(vm, vcpu->id, &sregs);
vm = vm_create_barebones();
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
if (es)
start.policy |= SEV_POLICY_ES;
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
return vm;
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
return vm;
}
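These factories feed the migration checks; a hedged sketch of one round trip (aux_vm_create() and sev_migrate_from() are assumed helpers from this test file):

	struct kvm_vm *src = sev_vm_create(/* es= */ false);
	struct kvm_vm *dst = aux_vm_create(/* with_vcpus= */ true);

	sev_migrate_from(dst, src);   /* dst adopts src's encryption context */
	kvm_vm_free(src);
	kvm_vm_free(dst);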
sev_es_vm = sev_vm_create(/* es= */ true);
sev_es_vm_no_vmsa = vm_create_barebones();
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
- vm_vcpu_add(sev_es_vm_no_vmsa, 1);
+ __vm_vcpu_add(sev_es_vm_no_vmsa, 1);
ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT(
/* Check that we can complete creation of the mirror VM. */
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(dst_vm, i);
+ __vm_vcpu_add(dst_vm, i);
if (es)
sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
static bool first_cpu_done;
struct kvm_vcpu *vcpu;
- /* The kernel is fine, but vm_vcpu_add_default() needs locking */
+ /* The kernel is fine, but vm_vcpu_add() needs locking */
pthread_spin_lock(&create_lock);
- vcpu = vm_vcpu_add_default(vm, vcpu_id, guest_code);
+ vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
if (!first_cpu_done) {
first_cpu_done = true;
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
- vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
+ vm_vcpu_add(vm, SENDER_VCPU_ID, sender_guest_code);
test_data_page_vaddr = vm_vaddr_alloc_page(vm);
data =