}
struct vcpu_info {
- struct kvm_vm *vm;
- uint32_t id;
+ struct kvm_vcpu *vcpu;
uint64_t start_gpa;
uint64_t end_gpa;
};
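The new field works because the selftest vCPU object carries both its own ID and a back-pointer to the owning VM, which is why the worker below can keep using vcpu->vm and vcpu->id unchanged. For reference, the relevant part of that object looks roughly like the sketch below (an approximation for illustration, not the authoritative definition and not part of this patch):

/* Rough shape of the selftest vCPU object this patch starts using. */
struct kvm_vcpu {
	uint32_t id;         /* still usable with (vm, id) style helpers */
	int fd;              /* the vCPU's KVM file descriptor */
	struct kvm_vm *vm;   /* back-pointer, so callers need not pass the VM */
	struct kvm_run *run; /* shared run structure */
	/* ... */
};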
static void *vcpu_worker(void *data)
{
- struct vcpu_info *vcpu = data;
+ struct vcpu_info *info = data;
+ struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
struct kvm_regs regs;
- vcpu_args_set(vm, vcpu->id, 3, vcpu->start_gpa, vcpu->end_gpa,
+ vcpu_args_set(vm, vcpu->id, 3, info->start_gpa, info->end_gpa,
vm_get_page_size(vm));
/* Snapshot regs before the first run. */
return NULL;
}
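Note that the added vcpu_args_set() call above still uses the (vm, vcpu->id) form; only the source of the ID changes in this hunk. Once the library helpers themselves are converted to take the vCPU object directly, as the broader rework does, the call collapses to something like the sketch below (an assumption about where the API ends up, not part of this hunk):

/* Sketch: the same call once vcpu_args_set() takes a struct kvm_vcpu *. */
vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
	      vm_get_page_size(vm));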
-static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
- uint64_t end_gpa)
+static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
+ uint64_t start_gpa, uint64_t end_gpa)
{
struct vcpu_info *info;
uint64_t gpa, nr_bytes;
TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
- info[i].vm = vm;
- info[i].id = i;
+ info[i].vcpu = vcpus[i];
info[i].start_gpa = gpa;
info[i].end_gpa = gpa + nr_bytes;
pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
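The nr_bytes calculation is elided above; the assertion and the loop only make sense if each worker is handed an equal, page-aligned slice of [start_gpa, end_gpa). A plausible shape for that calculation is sketched below (an assumption shown only to make the loop bounds readable, not code from this patch):

/* Assumed split: give each vCPU an equal, page-aligned slice of the range. */
nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
	   ~((uint64_t)vm_get_page_size(vm) - 1);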
uint64_t max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
+ struct kvm_vcpu **vcpus;
pthread_t *threads;
struct kvm_vm *vm;
void *mem;
}
}
- vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+ vcpus = malloc(nr_vcpus * sizeof(*vcpus));
+ TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
+
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
}
atomic_set(&rendezvous, nr_vcpus + 1);
- threads = spawn_workers(vm, start_gpa, gpa);
+ threads = spawn_workers(vm, vcpus, start_gpa, gpa);
+
+ free(vcpus);
+ vcpus = NULL;
pr_info("Running with %lugb of guest memory and %u vCPUs\n",
(gpa - start_gpa) / size_1gb, nr_vcpus);
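The caller-allocated vcpus array exists because the new creation helper hands the vCPU objects back through an out-parameter instead of leaving the test to look them up by ID. The prototype implied by the call above is roughly the following (illustration only, not part of this patch):

/* Approximate prototype implied by the vm_create_with_vcpus() call above. */
struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, void *guest_code,
				    struct kvm_vcpu *vcpus[]);

Once spawn_workers() has copied each pointer into its vcpu_info, the array itself is no longer needed, which is why it can be freed as soon as the worker threads have been spawned.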