"KVM_ADJUST_MAPPINGS",
};
-struct vcpu_args {
- int vcpu_id;
- bool vcpu_write;
-};
-
struct test_args {
struct kvm_vm *vm;
uint64_t guest_test_virt_mem;
uint64_t large_num_pages;
uint64_t host_pages_per_lpage;
enum vm_mem_backing_src_type src_type;
- struct vcpu_args vcpu_args[KVM_MAX_VCPUS];
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static void guest_code(int vcpu_id)
+static void guest_code(bool do_write)
{
struct test_args *p = &test_args;
- struct vcpu_args *vcpu_args = &p->vcpu_args[vcpu_id];
enum test_stage *current_stage = &guest_test_stage;
uint64_t addr;
int i, j;
- /* Make sure vCPU args data structure is not corrupt */
- GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
while (true) {
addr = p->guest_test_virt_mem;
*/
case KVM_CREATE_MAPPINGS:
for (i = 0; i < p->large_num_pages; i++) {
- if (vcpu_args->vcpu_write)
+ if (do_write)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
static void *vcpu_worker(void *data)
{
- int ret;
- struct vcpu_args *vcpu_args = data;
struct kvm_vm *vm = test_args.vm;
- int vcpu_id = vcpu_args->vcpu_id;
- struct kvm_run *run;
+ struct kvm_vcpu *vcpu = data;
+ bool do_write = !(vcpu->id % 2);
struct timespec start;
struct timespec ts_diff;
enum test_stage stage;
+ int ret;
- vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
- run = vcpu_state(vm, vcpu_id);
+ vcpu_args_set(vm, vcpu->id, 1, do_write);
while (!READ_ONCE(host_quit)) {
ret = sem_wait(&test_stage_updated);
return NULL;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
- ret = _vcpu_run(vm, vcpu_id);
+ ret = _vcpu_run(vm, vcpu->id);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
- exit_reason_str(run->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
- pr_debug("Got sync event from vCPU %d\n", vcpu_id);
+ pr_debug("Got sync event from vCPU %d\n", vcpu->id);
stage = READ_ONCE(*current_stage);
/*
 * Here we can know the execution time of every
 * single vcpu running in different test stages.
 */
pr_debug("vCPU %d has completed stage %s\n"
"execution time is: %ld.%.9lds\n\n",
- vcpu_id, test_stage_string[stage],
+ vcpu->id, test_stage_string[stage],
ts_diff.tv_sec, ts_diff.tv_nsec);
ret = sem_post(&test_stage_completed);
{
int ret;
struct test_params *p = arg;
- struct vcpu_args *vcpu_args;
enum vm_mem_backing_src_type src_type = p->src_type;
uint64_t large_page_size = get_backing_src_pagesz(src_type);
uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
uint64_t alignment;
void *host_test_mem;
struct kvm_vm *vm;
- int vcpu_id;
/* Align up the test memory size */
alignment = max(large_page_size, guest_page_size);
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
vm = __vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
- guest_num_pages, 0, guest_code, NULL);
+ guest_num_pages, 0, guest_code,
+ test_args.vcpus);
/* Align down GPA of the testing memslot */
if (!p->phys_offset)
test_args.host_pages_per_lpage = large_page_size / host_page_size;
test_args.src_type = src_type;
- for (vcpu_id = 0; vcpu_id < KVM_MAX_VCPUS; vcpu_id++) {
- vcpu_args = &test_args.vcpu_args[vcpu_id];
- vcpu_args->vcpu_id = vcpu_id;
- vcpu_args->vcpu_write = !(vcpu_id % 2);
- }
-
/* Add an extra memory slot with specified backing src type */
vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
static void run_test(enum vm_guest_mode mode, void *arg)
{
- int ret;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
- int vcpu_id;
struct timespec start;
struct timespec ts_diff;
+ int ret, i;
/* Create VM with vCPUs and make some pre-initialization */
vm = pre_init_before_test(mode, arg);
host_quit = false;
*current_stage = KVM_BEFORE_MAPPINGS;
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
- &test_args.vcpu_args[vcpu_id]);
- }
+ for (i = 0; i < nr_vcpus; i++)
+ pthread_create(&vcpu_threads[i], NULL, vcpu_worker,
+ test_args.vcpus[i]);
vcpus_complete_new_stage(*current_stage);
pr_info("Started all vCPUs successfully\n");
/* Tell the vcpu thread to quit */
host_quit = true;
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ for (i = 0; i < nr_vcpus; i++) {
ret = sem_post(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_post");
}
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
- pthread_join(vcpu_threads[vcpu_id], NULL);
+ for (i = 0; i < nr_vcpus; i++)
+ pthread_join(vcpu_threads[i], NULL);
ret = sem_destroy(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_destroy");