struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
/* Currently, any exit from the guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
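For reference, the converted run loop now reduces to this shape (a minimal sketch; the stage handling is the surrounding test's):

	struct ucall uc;

	vcpu_run(vcpu);				/* asserts that KVM_RUN succeeded */
	switch (get_ucall(vcpu, &uc)) {		/* decode the guest's ucall, if any */
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	default:
		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}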
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* The timer IRQ IDs should be the same for all vCPUs, so query only vCPU-0 */
- vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL,
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
- vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL,
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
sync_global_to_guest(vm, ptimer_irq);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
for (i = 0; i < nr_vcpus; i++)
- vcpu_init_descriptor_tables(vm, vcpus[i]->id);
+ vcpu_init_descriptor_tables(vcpus[i]);
ucall_init(vm, NULL);
test_init_timer_irq(vm);
{
uint64_t id_aa64dfr0;
- vcpu_get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
return id_aa64dfr0 & 0xf;
}
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
if (debug_version(vcpu) < 6) {
print_skip("Armv8 debug architecture not supported.");
ESR_EC_SVC64, guest_svc_handler);
for (stage = 0; stage < 11; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
"Stage %d: Unexpected sync ucall, got %lx",
init->features[s->feature / 32] |= 1 << (s->feature % 32);
}
-static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
{
struct reg_sublist *s;
int feature;
for_each_sublist(c, s) {
if (s->finalize) {
feature = s->feature;
- vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
}
}
}
vm = vm_create_barebones();
prepare_vcpu_init(c, &init);
vcpu = __vm_vcpu_add(vm, 0);
- aarch64_vcpu_setup(vm, vcpu->id, &init);
- finalize_vcpu(vm, vcpu->id, c);
+ aarch64_vcpu_setup(vcpu, &init);
+ finalize_vcpu(vcpu, c);
- reg_list = vcpu_get_reg_list(vm, vcpu->id);
+ reg_list = vcpu_get_reg_list(vcpu);
if (fixup_core_regs)
core_reg_fixup();
bool reject_reg = false;
int ret;
- ret = __vcpu_get_reg(vm, vcpu->id, reg_list->reg[i], &addr);
+ ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
if (ret) {
printf("%s: Failed to get ", config_name(c));
print_reg(c, reg.id);
for_each_sublist(c, s) {
if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
reject_reg = true;
- ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret != -1 || errno != EPERM) {
printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
print_reg(c, reg.id);
}
if (!reject_reg) {
- ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret) {
printf("%s: Failed to set ", config_name(c));
print_reg(c, reg.id);
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
- vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_ARM_VCPU_PVTIME_CTRL,
+ vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
}
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
/* First 'read' should be an upper limit of the features supported */
- vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
/* Test a 'write' by disabling all the features of the register map */
- ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, 0);
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
TEST_ASSERT(ret == 0,
"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
reg_info->reg, errno);
- vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
* Avoid this check if all the bits are occupied.
*/
if (reg_info->max_feat_bit < 63) {
- ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
errno, reg_info->reg);
* Before starting the VM, the test clears all the bits.
* Check if that's still the case.
*/
- vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n",
reg_info->reg);
* the registers and should return EBUSY. Set the registers and check for
* the expected errno.
*/
- ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
TEST_ASSERT(ret != 0 && errno == EBUSY,
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
errno, reg_info->reg);
test_fw_regs_before_vm_start(vcpu);
while (!guest_done) {
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
test_guest_stage(&vm, &vcpu);
break;
.mp_state = KVM_MP_STATE_STOPPED,
};
- vcpu_mp_state_set(vcpu->vm, vcpu->id, &mp_state);
+ vcpu_mp_state_set(vcpu, &mp_state);
}
static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
{
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
- if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT)
+ vcpu_run(vcpu);
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT)
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
uc.args[1]);
}
{
uint64_t obs_pc, obs_x0;
- vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc);
- vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
"unexpected target cpu pc: %lx (expected: %lx)",
*/
vcpu_power_off(target);
- vcpu_get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
- vcpu_args_set(vm, source->id, 1, target_mpidr & MPIDR_HWID_BITMASK);
+ vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
+ vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
enter_guest(source);
- if (get_ucall(vm, source->id, &uc) != UCALL_DONE)
+ if (get_ucall(source, &uc) != UCALL_DONE)
TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
assert_vcpu_reset(target);
vm = vm_create_barebones();
vcpu0 = __vm_vcpu_add(vm, 0);
- ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
+ ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
vcpu1 = __vm_vcpu_add(vm, 1);
- ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);
+ ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
vcpu0 = __vm_vcpu_add(vm, 0);
vcpu1 = __vm_vcpu_add(vm, 1);
- ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
+ ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
- ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);
+ ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
{
ucall_init(vcpu->vm, NULL);
- return __vcpu_run(vcpu->vm, vcpu->id) ? -errno : 0;
+ return __vcpu_run(vcpu) ? -errno : 0;
}
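Worth noting: __vcpu_run() returns the raw ioctl() result, i.e. -1 with errno set on failure, which is why the wrapper above propagates -errno:

	int ret = __vcpu_run(vcpu);	/* raw KVM_RUN; no assert, no EINTR retry */

	return ret ? -errno : 0;	/* surface the errno, not the bare -1 */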
static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
/* Setup the guest args page (so it gets the args). */
args_gva = vm_vaddr_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
- vcpu_args_set(vm, vcpu->id, 1, args_gva);
+ vcpu_args_set(vcpu, 1, args_gva);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
struct ucall uc;
- uint64_t actual_ucall = get_ucall(vcpu->vm, vcpu->id, &uc);
+ uint64_t actual_ucall = get_ucall(vcpu, &uc);
TEST_ASSERT(expected_ucall == actual_ucall,
"Guest exited unexpectedly (expected ucall %" PRIu64
while (spin_wait_for_next_iteration(&current_iteration)) {
switch (READ_ONCE(iteration_work)) {
case ITERATION_ACCESS_MEMORY:
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
assert_ucall(vcpu, UCALL_SYNC);
break;
case ITERATION_MARK_IDLE:
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx;
struct kvm_run *run = vcpu->run;
struct timespec start;
clock_gettime(CLOCK_MONOTONIC, &start);
/* Let the guest access its memory */
- ret = _vcpu_run(vm, vcpu->id);
+ ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vm, vcpu->id, NULL) != UCALL_SYNC) {
+ if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
TEST_ASSERT(false,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t pages_count = 0;
struct kvm_run *run;
int current_iteration = READ_ONCE(iteration);
clock_gettime(CLOCK_MONOTONIC, &start);
- ret = _vcpu_run(vm, vcpu->id);
+ ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
pr_debug("vCPU %d updated last completed iteration to %d\n",
- vcpu->id, vcpu_last_completed_iteration[vcpu_idx]);
+ vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);
if (current_iteration) {
pages_count += vcpu_args->pages;
TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
"vcpu run failed: errno=%d", err);
- TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
/* Only have one vcpu */
- count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu->vm, vcpu->id),
+ count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
slot, bitmap, num_pages, &fetch_index);
cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
struct kvm_run *run = vcpu->run;
/* A ucall-sync or ring-full event is allowed */
- if (get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC) {
+ if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
/* We should allow this to continue */
;
} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
sigmask->len = 8;
pthread_sigmask(0, NULL, sigset);
sigdelset(sigset, SIG_IPI);
- vcpu_ioctl(vm, vcpu->id, KVM_SET_SIGNAL_MASK, sigmask);
+ vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
sigemptyset(sigset);
sigaddset(sigset, SIG_IPI);
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
- ret = __vcpu_run(vm, vcpu->id);
+ ret = __vcpu_run(vcpu);
if (ret == -1 && errno == EINTR) {
int sig = -1;
sigwait(sigset, &sig);
struct kvm_vcpu *vcpu = arg;
struct kvm_run *run = vcpu->run;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
__func__, run->exit_reason,
#define MPIDR_HWID_BITMASK (0xff00fffffful)
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, void *guest_code);
bool *ps4k, bool *ps16k, bool *ps64k);
void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
typedef void(*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
continue; \
else
-struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpu_id);
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);
void _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, const char *name, void *arg);
#define vm_ioctl(vm, cmd, arg) _vm_ioctl(vm, cmd, #cmd, arg)
-int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd,
+int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd,
void *arg);
-void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd,
+void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd,
const char *name, void *arg);
-#define vcpu_ioctl(vm, vcpuid, cmd, arg) \
- _vcpu_ioctl(vm, vcpuid, cmd, #cmd, arg)
+#define vcpu_ioctl(vcpu, cmd, arg) \
+ _vcpu_ioctl(vcpu, cmd, #cmd, arg)
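The #cmd stringification is what lets the asserting wrapper name the failing ioctl. A hedged usage sketch:

	struct kvm_mp_state mp_state;

	/* Expands to _vcpu_ioctl(vcpu, KVM_GET_MP_STATE, "KVM_GET_MP_STATE", &mp_state). */
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, &mp_state);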
/*
* Looks up and returns the value corresponding to the capability
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
-struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
-int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_run *vcpu_state(struct kvm_vcpu *vcpu);
+void vcpu_run(struct kvm_vcpu *vcpu);
+int _vcpu_run(struct kvm_vcpu *vcpu);
-static inline int __vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
+static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
- return __vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+ return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}
-void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
-struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
-static inline void vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
- uint32_t cap, uint64_t arg0)
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
+ uint64_t arg0)
{
struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
- vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_cap);
+ vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vcpu_guest_debug_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *debug)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_GUEST_DEBUG, debug);
+ vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}
-static inline void vcpu_mp_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_MP_STATE, mp_state);
+ vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
-static inline void vcpu_mp_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, mp_state);
+ vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}
-static inline void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_regs *regs)
+static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_REGS, regs);
+ vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}
-static inline void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_regs *regs)
+static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_REGS, regs);
+ vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
-static inline void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs)
+static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_SREGS, sregs);
+ vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
-static inline void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs)
+static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_SREGS, sregs);
+ vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
-static inline int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs)
+static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- return __vcpu_ioctl(vm, vcpuid, KVM_SET_SREGS, sregs);
+ return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
-static inline void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_fpu *fpu)
+static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
+ vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
-static inline void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_fpu *fpu)
+static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
+ vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}
-static inline int __vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid,
- uint64_t reg_id, void *addr)
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
- struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr };
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
- return __vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+ return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
-static inline int __vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid,
- uint64_t reg_id, uint64_t val)
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
- struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
- return __vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+ return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
-static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid,
- uint64_t reg_id, void *addr)
+static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
- struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr };
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
- vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+ vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
-static inline void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid,
- uint64_t reg_id, uint64_t val)
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
- struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val };
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
- vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+ vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
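Usage sketch for the one-reg accessors; the MPIDR read mirrors the PSCI test above, while "reg_id" is a placeholder:

	uint64_t mpidr;
	int ret;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &mpidr);	/* asserts on failure */
	ret = __vcpu_set_reg(vcpu, reg_id, 0);				/* returns the raw ioctl result */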
#ifdef __KVM_HAVE_VCPU_EVENTS
-static inline void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_VCPU_EVENTS, events);
+ vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
-static inline void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_VCPU_EVENTS, events);
+ vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
-static inline void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_NESTED_STATE, state);
+ vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
-static inline int __vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
- return __vcpu_ioctl(vm, vcpuid, KVM_SET_NESTED_STATE, state);
+ return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
-static inline void vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_NESTED_STATE, state);
+ vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
-static inline int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid)
+static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
- int fd = __vcpu_ioctl(vm, vcpuid, KVM_GET_STATS_FD, NULL);
+ int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
return fd;
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}
-int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr);
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr)
+{
+ return __kvm_has_device_attr(vcpu->fd, group, attr);
+}
-static inline void vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid,
- uint32_t group, uint64_t attr)
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr)
{
- int ret = __vcpu_has_device_attr(vm, vcpuid, group, attr);
+ kvm_has_device_attr(vcpu->fd, group, attr);
+}
- TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_HAS_DEVICE_ATTR, ret));
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ return __kvm_device_attr_get(vcpu->fd, group, attr, val);
+}
+
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ kvm_device_attr_get(vcpu->fd, group, attr, val);
+}
+
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ return __kvm_device_attr_set(vcpu->fd, group, attr, val);
+}
+
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ kvm_device_attr_set(vcpu->fd, group, attr, val);
}
-int __vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val);
-void vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val);
-int __vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val);
-void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val);
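With the wrappers routing straight to the vCPU fd, the arch timer query from the first hunk becomes (sketch):

	uint64_t vtimer_irq;

	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);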
int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
return fd;
}
-void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
+void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
/*
* VM VCPU Args Set
*
* Input Args:
* vm - Virtual Machine
- * vcpuid - VCPU ID
* num - number of arguments
* ... - arguments, each of type uint64_t
*
*
* Return: None
*
- * Sets the first @num function input registers of the VCPU with @vcpuid,
- * per the C calling convention of the architecture, to the values given
- * as variable args. Each of the variable args is expected to be of type
- * uint64_t. The maximum @num can be is specific to the architecture.
+ * Sets the first @num input parameters for the function at @vcpu's entry point,
+ * per the C calling convention of the architecture, to the values given as
+ * variable args. Each of the variable args is expected to be of type uint64_t.
+ * The maximum value of @num is architecture-specific.
*/
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
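Usage sketch, mirroring the call sites converted above (each variadic argument must be a uint64_t):

	vcpu_args_set(vcpu, 1, args_gva);	/* one guest arg: the args page GVA */
	vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);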
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
memcpy(&(g), _p, sizeof(g)); \
})
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);
-
-/*
- * VM VCPU Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by @vcpuid, within the VM
- * given by @vm, to the FILE stream given by @stream.
- */
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
uint8_t indent);
-static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
uint8_t indent)
{
- vcpu_arch_dump(stream, vm, vcpuid, indent);
+ vcpu_arch_dump(stream, vcpu, indent);
}
/*
*
* Input Args:
* vm - Virtual Machine
- * vcpuid - The id of the VCPU to add to the VM.
+ * vcpu_id - The id of the VCPU to add to the VM.
* guest_code - The vCPU's entry point
*/
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
extern struct hv_enlightened_vmcs *current_evmcs;
extern struct hv_vp_assist_page *current_vp_assist;
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
{
return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
-struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_x86_state *state);
+struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
+void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);
const struct kvm_msr_list *kvm_get_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);
-static inline void vcpu_msrs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
struct kvm_msrs *msrs)
{
- int r = __vcpu_ioctl(vm, vcpuid, KVM_GET_MSRS, msrs);
+ int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);
TEST_ASSERT(r == msrs->nmsrs,
"KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
-static inline void vcpu_msrs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_msrs *msrs)
+static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
- int r = __vcpu_ioctl(vm, vcpuid, KVM_SET_MSRS, msrs);
+ int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);
TEST_ASSERT(r == msrs->nmsrs,
"KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
-static inline void vcpu_debugregs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
struct kvm_debugregs *debugregs)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_DEBUGREGS, debugregs);
+ vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
-static inline void vcpu_debugregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
struct kvm_debugregs *debugregs)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_DEBUGREGS, debugregs);
+ vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
-static inline void vcpu_xsave_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
struct kvm_xsave *xsave)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE, xsave);
+ vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
-static inline void vcpu_xsave2_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
struct kvm_xsave *xsave)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE2, xsave);
+ vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
-static inline void vcpu_xsave_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
struct kvm_xsave *xsave)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_XSAVE, xsave);
+ vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
-static inline void vcpu_xcrs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
struct kvm_xcrs *xcrs)
{
- vcpu_ioctl(vm, vcpuid, KVM_GET_XCRS, xcrs);
+ vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
-static inline void vcpu_xcrs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_xcrs *xcrs)
+static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_XCRS, xcrs);
+ vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}
struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu);
-static inline int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid)
{
- return __vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid);
+ return __vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
}
-static inline void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid)
{
- vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid);
+ vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
}
struct kvm_cpuid_entry2 *
return kvm_get_supported_cpuid_index(function, 0);
}
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value);
+uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
-static inline void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid,
- uint64_t msr_index, uint64_t msr_value)
+static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
+ uint64_t msr_value)
{
- int r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
+ int r = _vcpu_set_msr(vcpu, msr_index, msr_value);
TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
}
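MSR accessor sketch; MSR_EFER and EFER_NX come from the shared msr-index.h and are assumptions here, not part of this patch:

	uint64_t efer = vcpu_get_msr(vcpu, MSR_EFER);

	vcpu_set_msr(vcpu, MSR_EFER, efer | EFER_NX);	/* asserts exactly one MSR was written */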
};
void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr);
-void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
- uint64_t pte);
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr);
+void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr, uint64_t pte);
/*
* get_cpuid() - find matching CPUID entry and return pointer to it.
uint64_t a3);
struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
-struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
void vm_xsave_req_perm(int bit);
enum pg_level {
static void vcpu_stats_test(struct kvm_vcpu *vcpu)
{
- int stats_fd = vcpu_get_stats_fd(vcpu->vm, vcpu->id);
+ int stats_fd = vcpu_get_stats_fd(vcpu);
stats_test(stats_fd);
close(stats_fd);
static void *vcpu_worker(void *data)
{
- struct kvm_vm *vm = test_args.vm;
struct kvm_vcpu *vcpu = data;
bool do_write = !(vcpu->id % 2);
struct timespec start;
enum test_stage stage;
int ret;
- vcpu_args_set(vm, vcpu->id, 1, do_write);
+ vcpu_args_set(vcpu, 1, do_write);
while (!READ_ONCE(host_quit)) {
ret = sem_wait(&test_stage_updated);
return NULL;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
- ret = _vcpu_run(vm, vcpu->id);
+ ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(vcpu->run->exit_reason));
}
}
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init)
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
+ struct kvm_vm *vm = vcpu->vm;
uint64_t sctlr_el1, tcr_el1;
if (!init)
init->target = preferred.target;
}
- vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
/*
* Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
* registers, which the variable argument list macros do.
*/
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
- vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
- vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
/* Configure base granule size */
switch (vm->mode) {
tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
uint64_t pstate, pc;
- vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
- vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
indent, "", pstate, pc);
DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
- aarch64_vcpu_setup(vm, vcpu_id, init);
+ aarch64_vcpu_setup(vcpu, init);
- vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
- vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
return vcpu;
}
return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
int i;
va_start(ap, num);
for (i = 0; i < num; i++) {
- vcpu_set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
- va_arg(ap, uint64_t));
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
+ va_arg(ap, uint64_t));
}
va_end(ap);
;
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
+ if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
return;
if (uc.args[2]) /* valid_ec */ {
handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
- vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access");
memcpy(&gva, run->mmio.data, sizeof(gva));
- memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
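For context, the guest half of this handshake is the MMIO store shown just above get_ucall(): the guest writes the GVA of its on-stack ucall struct to the trap address, the resulting 8-byte MMIO write exit is decoded here, and the struct is copied out via addr_gva2hva(). Guest side, verbatim:

	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;	/* traps to userspace as an MMIO write */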
return (void *) ((uintptr_t) region->host_alias + offset);
}
-/*
- * VM Create IRQ Chip
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return: None
- *
- * Creates an interrupt controller chip for the VM specified by vm.
- */
+/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
vm->has_irqchip = true;
}
-
-/*
- * VM VCPU State
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return:
- * Pointer to structure that describes the state of the VCPU.
- *
- * Locates and returns a pointer to a structure that describes the
- * state of the VCPU with the given vcpuid.
- */
-struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_run *vcpu_state(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
-
return vcpu->run;
}
-/*
- * VM VCPU Run
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return: None
- *
- * Switch to executing the code for the VCPU given by vcpuid, within the VM
- * given by vm.
- */
-void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
-{
- int ret = _vcpu_run(vm, vcpuid);
- TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
-}
-
-int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
+int _vcpu_run(struct kvm_vcpu *vcpu)
{
int rc;
do {
- rc = __vcpu_run(vm, vcpuid);
+ rc = __vcpu_run(vcpu);
} while (rc == -1 && errno == EINTR);
- assert_on_unhandled_exception(vm, vcpuid);
+ assert_on_unhandled_exception(vcpu);
return rc;
}
-void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+/*
+ * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
+ * Assert if KVM returns an error.
+ */
+void vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int ret = _vcpu_run(vcpu);
+
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
+}
+
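A quick sketch of how the three run helpers relate:

	int rc;

	vcpu_run(vcpu);		/* retries on EINTR, asserts on any other error */
	rc = _vcpu_run(vcpu);	/* retries on EINTR, returns the final result   */
	rc = __vcpu_run(vcpu);	/* one raw KVM_RUN ioctl; no retry, no assert   */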
+void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
int ret;
vcpu->run->immediate_exit = 1;
- ret = __vcpu_run(vm, vcpuid);
+ ret = __vcpu_run(vcpu);
vcpu->run->immediate_exit = 0;
TEST_ASSERT(ret == -1 && errno == EINTR,
}
/*
- * VM VCPU Get Reg List
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args:
- * None
- *
- * Return:
- * A pointer to an allocated struct kvm_reg_list
- *
* Get the list of guest registers which are supported for
- * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
+ * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer;
+ * it is the caller's responsibility to free the list.
*/
-struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
int ret;
- ret = __vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
+ ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
+
reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
reg_list->n = reg_list_n.n;
- vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
+ vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
return reg_list;
}
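Caller-side sketch; the list is heap-allocated per the comment above, and "process" is a placeholder:

	struct kvm_reg_list *reg_list = vcpu_get_reg_list(vcpu);
	__u64 i;

	for (i = 0; i < reg_list->n; i++)
		process(reg_list->reg[i]);
	free(reg_list);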
-int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
- unsigned long cmd, void *arg)
+int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, void *arg)
{
- struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
-
return ioctl(vcpu->fd, cmd, arg);
}
-void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd,
- const char *name, void *arg)
+void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, const char *name,
+ void *arg)
{
- int ret = __vcpu_ioctl(vm, vcpuid, cmd, arg);
+ int ret = __vcpu_ioctl(vcpu, cmd, arg);
TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));
}
-void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
+void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
- uint32_t size = vm->dirty_ring_size;
+ uint32_t page_size = vcpu->vm->page_size;
+ uint32_t size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
if (!vcpu->dirty_gfns) {
void *addr;
- addr = mmap(NULL, size, PROT_READ,
- MAP_PRIVATE, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
- addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
- MAP_PRIVATE, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
- addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
vcpu->dirty_gfns = addr;
return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}
-int __vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val)
-{
- return __kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-
-void vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val)
-{
- kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-
-int __vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val)
-{
- return __kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-
-void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val)
-{
- kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-
-int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr)
-{
- return __kvm_has_device_attr(vcpu_get(vm, vcpuid)->fd, group, attr);
-}
-
/*
* IRQ related functions.
*/
virt_dump(stream, vm, indent + 4);
}
fprintf(stream, "%*sVCPUs:\n", indent, "");
+
list_for_each_entry(vcpu, &vm->vcpus, list)
- vcpu_dump(stream, vm, vcpu->id, indent + 2);
+ vcpu_dump(stream, vcpu, indent + 2);
}
/* Known KVM exit reasons */
vcpu_args->gpa = pta->gpa;
}
- vcpu_args_set(vm, vcpus[i]->id, 1, i);
+ vcpu_args_set(vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
i, vcpu_args->gpa, vcpu_args->gpa +
}
}
-void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
+void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
+ struct kvm_vm *vm = vcpu->vm;
unsigned long satp;
/*
satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
satp |= SATP_MODE_48;
- vcpu_set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
+ vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_riscv_core core;
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
- vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
fprintf(stream,
" MODE: 0x%lx\n", core.mode);
struct kvm_vcpu *vcpu;
vcpu = __vm_vcpu_add(vm, vcpu_id);
- riscv_vcpu_mmu_setup(vm, vcpu_id);
+ riscv_vcpu_mmu_setup(vcpu);
/*
* With SBI HSM support in KVM RISC-V, all secondary VCPUs
* are powered-on using the KVM_SET_MP_STATE ioctl().
*/
mps.mp_state = KVM_MP_STATE_RUNNABLE;
- r = __vcpu_ioctl(vm, vcpu_id, KVM_SET_MP_STATE, &mps);
+ r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps);
TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);
/* Set up the guest's global pointer to match the host's */
asm volatile (
"add %0, gp, zero" : "=r" (current_gp) : : "memory");
- vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
/* Setup stack pointer and program counter of guest */
- vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp),
- stack_vaddr + stack_size);
- vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc),
- (unsigned long)guest_code);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
/* Setup default exception vector of guest */
- vcpu_set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec),
- (unsigned long)guest_unexp_trap);
+ vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
return vcpu;
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
uint64_t id = RISCV_CORE_REG(regs.a0);
id = RISCV_CORE_REG(regs.a7);
break;
}
- vcpu_set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
+ vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
}
va_end(ap);
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
(vm_vaddr_t)&uc, 0, 0, 0, 0, 0);
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
switch (run->riscv_sbi.function_id) {
case KVM_RISCV_SELFTESTS_SBI_UCALL:
- memcpy(&ucall, addr_gva2hva(vm,
- run->riscv_sbi.args[0]), sizeof(ucall));
+ memcpy(&ucall,
+ addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]),
+ sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
break;
case KVM_RISCV_SELFTESTS_SBI_UNEXP:
- vcpu_dump(stderr, vm, vcpu_id, 2);
+ vcpu_dump(stderr, vcpu, 2);
TEST_ASSERT(0, "Unexpected trap taken by guest");
break;
default:
uint64_t diag318_info;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
- vcpu_regs_get(vm, vcpu_id, &regs);
+ vcpu_regs_get(vcpu, &regs);
regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
- vcpu_regs_set(vm, vcpu_id, &regs);
+ vcpu_regs_set(vcpu, &regs);
- vcpu_sregs_get(vm, vcpu_id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.crs[0] |= 0x00040000; /* Enable floating point regs */
sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */
- vcpu_sregs_set(vm, vcpu_id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
- run = vcpu_state(vm, vcpu_id);
+ run = vcpu->run;
run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */
run->psw_addr = (uintptr_t)guest_code;
return vcpu;
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
num);
va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
for (i = 0; i < num; i++)
regs.gprs[i + 2] = va_arg(ap, uint64_t);
- vcpu_regs_set(vm, vcpuid, &regs);
+ vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
- struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
-
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
- memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]),
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]),
sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}
-static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
- uint64_t vaddr)
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
+ struct kvm_vcpu *vcpu,
+ uint64_t vaddr)
{
uint16_t index[4];
uint64_t *pml4e, *pdpe, *pde;
* If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
* the XD flag (bit 63) is reserved.
*/
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
if ((sregs.efer & EFER_NX) == 0) {
rsvd_mask |= PTE_NX_MASK;
}
return &pte[index[0]];
}
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr)
{
- uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+ uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
return *(uint64_t *)pte;
}
-void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
- uint64_t pte)
+void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr, uint64_t pte)
{
- uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
*(uint64_t *)new_pte = pte;
}
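Usage sketch: read-modify-write a guest PTE. PTE_NX_MASK is the same mask used in the reserved-bit logic above; "gva" is a placeholder:

	uint64_t pte = vm_get_page_table_entry(vm, vcpu, gva);

	vm_set_page_table_entry(vm, vcpu, gva, pte | PTE_NX_MASK);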
kvm_seg_fill_gdt_64bit(vm, segp);
}
-static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
+static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
struct kvm_sregs sregs;
/* Set mode specific system register values. */
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.idt.limit = 0;
}
sregs.cr3 = vm->pgd;
- vcpu_sregs_set(vm, vcpuid, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
}
#define CPUID_XFD_BIT (1 << 4)
DEFAULT_GUEST_STACK_VADDR_MIN);
vcpu = __vm_vcpu_add(vm, vcpu_id);
- vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
- vcpu_setup(vm, vcpu_id);
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_setup(vm, vcpu);
/* Setup guest general purpose registers */
- vcpu_regs_get(vm, vcpu_id, ®s);
+ vcpu_regs_get(vcpu, ®s);
regs.rflags = regs.rflags | 0x2;
regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
regs.rip = (unsigned long) guest_code;
- vcpu_regs_set(vm, vcpu_id, ®s);
+ vcpu_regs_set(vcpu, ®s);
/* Setup the MP state */
mp_state.mp_state = 0;
- vcpu_mp_state_set(vm, vcpu_id, &mp_state);
+ vcpu_mp_state_set(vcpu, &mp_state);
return vcpu;
}
return buffer.entry.data;
}
-/*
- * VM VCPU CPUID Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU id
- *
- * Output Args: None
- *
- * Return: KVM CPUID (KVM_GET_CPUID2)
- *
- * Set the VCPU's CPUID.
- */
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid2 *cpuid;
int max_ent;
max_ent = cpuid->nent;
for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
- rc = __vcpu_ioctl(vm, vcpuid, KVM_GET_CPUID2, cpuid);
+ rc = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
if (!rc)
break;
return entry;
}
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
struct {
struct kvm_msrs header;
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
- vcpu_msrs_get(vm, vcpuid, &buffer.header);
+ vcpu_msrs_get(vcpu, &buffer.header);
return buffer.entry.data;
}
-int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
struct {
struct kvm_msrs header;
buffer.entry.index = msr_index;
buffer.entry.data = msr_value;
- return __vcpu_ioctl(vm, vcpuid, KVM_SET_MSRS, &buffer.header);
+ return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}
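The underscore variant leaves error handling to the caller; a hedged sketch of the return convention (KVM_SET_MSRS reports the number of MSRs processed, so success for a single entry is 1, not 0):

	uint64_t val = vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST);
	int r = _vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);

	/* One MSR in the buffer, so KVM should report one MSR written. */
	TEST_ASSERT(r == 1, "KVM_SET_MSRS failed, r: %i errno: %i", r, errno);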
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
num);
va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, ®s);
+ vcpu_regs_get(vcpu, ®s);
if (num >= 1)
regs.rdi = va_arg(ap, uint64_t);
if (num >= 6)
regs.r9 = va_arg(ap, uint64_t);
- vcpu_regs_set(vm, vcpuid, ®s);
+ vcpu_regs_set(vcpu, ®s);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_regs regs;
struct kvm_sregs sregs;
- fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
+ fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
fprintf(stream, "%*sregs:\n", indent + 2, "");
- vcpu_regs_get(vm, vcpuid, ®s);
+ vcpu_regs_get(vcpu, ®s);
regs_dump(stream, ®s, indent + 4);
fprintf(stream, "%*ssregs:\n", indent + 2, "");
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs_dump(stream, &sregs, indent + 4);
}
return false;
}
-static void vcpu_save_xsave_state(struct kvm_vm *vm, uint32_t vcpuid,
+static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
struct kvm_x86_state *state)
{
- int size = vm_check_cap(vm, KVM_CAP_XSAVE2);
+ int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
if (size) {
state->xsave = malloc(size);
- vcpu_xsave2_get(vm, vcpuid, state->xsave);
+ vcpu_xsave2_get(vcpu, state->xsave);
} else {
state->xsave = malloc(sizeof(struct kvm_xsave));
- vcpu_xsave_get(vm, vcpuid, state->xsave);
+ vcpu_xsave_get(vcpu, state->xsave);
}
}
-struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
{
const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
struct kvm_x86_state *state;
* kernel with KVM_RUN. Complete IO prior to migrating state
* to a new VM.
*/
- vcpu_run_complete_io(vm, vcpuid);
+ vcpu_run_complete_io(vcpu);
state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
- vcpu_events_get(vm, vcpuid, &state->events);
- vcpu_mp_state_get(vm, vcpuid, &state->mp_state);
- vcpu_regs_get(vm, vcpuid, &state->regs);
- vcpu_save_xsave_state(vm, vcpuid, state);
+ vcpu_events_get(vcpu, &state->events);
+ vcpu_mp_state_get(vcpu, &state->mp_state);
+ vcpu_regs_get(vcpu, &state->regs);
+ vcpu_save_xsave_state(vcpu, state);
if (kvm_check_cap(KVM_CAP_XCRS))
- vcpu_xcrs_get(vm, vcpuid, &state->xcrs);
+ vcpu_xcrs_get(vcpu, &state->xcrs);
- vcpu_sregs_get(vm, vcpuid, &state->sregs);
+ vcpu_sregs_get(vcpu, &state->sregs);
if (nested_size) {
state->nested.size = sizeof(state->nested_);
- vcpu_nested_state_get(vm, vcpuid, &state->nested);
+ vcpu_nested_state_get(vcpu, &state->nested);
TEST_ASSERT(state->nested.size <= nested_size,
"Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
state->nested.size, nested_size);
state->msrs.nmsrs = msr_list->nmsrs;
for (i = 0; i < msr_list->nmsrs; i++)
state->msrs.entries[i].index = msr_list->indices[i];
- vcpu_msrs_get(vm, vcpuid, &state->msrs);
+ vcpu_msrs_get(vcpu, &state->msrs);
- vcpu_debugregs_get(vm, vcpuid, &state->debugregs);
+ vcpu_debugregs_get(vcpu, &state->debugregs);
return state;
}
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
+void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
- vcpu_sregs_set(vm, vcpuid, &state->sregs);
- vcpu_msrs_set(vm, vcpuid, &state->msrs);
+ vcpu_sregs_set(vcpu, &state->sregs);
+ vcpu_msrs_set(vcpu, &state->msrs);
if (kvm_check_cap(KVM_CAP_XCRS))
- vcpu_xcrs_set(vm, vcpuid, &state->xcrs);
+ vcpu_xcrs_set(vcpu, &state->xcrs);
- vcpu_xsave_set(vm, vcpuid, state->xsave);
- vcpu_events_set(vm, vcpuid, &state->events);
- vcpu_mp_state_set(vm, vcpuid, &state->mp_state);
- vcpu_debugregs_set(vm, vcpuid, &state->debugregs);
- vcpu_regs_set(vm, vcpuid, &state->regs);
+ vcpu_xsave_set(vcpu, state->xsave);
+ vcpu_events_set(vcpu, &state->events);
+ vcpu_mp_state_set(vcpu, &state->mp_state);
+ vcpu_debugregs_set(vcpu, &state->debugregs);
+ vcpu_regs_set(vcpu, &state->regs);
if (state->nested.size)
- vcpu_nested_state_set(vm, vcpuid, &state->nested);
+ vcpu_nested_state_set(vcpu, &state->nested);
}
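These two helpers bracket the save/restore tests further down; the canonical migration flow, sketched from the hunks below:

	state = vcpu_save_state(vcpu);
	kvm_vm_release(vm);

	/* Restore into a freshly recreated vCPU on the same VM. */
	vcpu = vm_recreate_with_one_vcpu(vm);
	vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
	vcpu_load_state(vcpu, state);
	kvm_x86_state_cleanup(state);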
void kvm_x86_state_cleanup(struct kvm_x86_state *state)
DEFAULT_CODE_SELECTOR);
}
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
+ struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.idt.base = vm->idt;
sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
sregs.gdt.base = vm->gdt;
sregs.gdt.limit = getpagesize() - 1;
kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
- vcpu_sregs_set(vm, vcpuid, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
handlers[vector] = (vm_vaddr_t)handler;
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) {
+ if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
uint64_t vector = uc.args[0];
TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
return cpuid;
}
-void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 *cpuid_full;
struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
cpuid_full->nent = nent + cpuid_hv->nent;
}
- vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+ vcpu_set_cpuid(vcpu, cpuid_full);
}
-struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 *cpuid;
cpuid = allocate_kvm_cpuid2();
- vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
return cpuid;
}
: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory");
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
- vcpu_regs_get(vm, vcpu_id, ®s);
- memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
+ vcpu_regs_get(vcpu, ®s);
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi),
sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
uint64_t address:40;
uint64_t reserved_63_52:12;
};
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
uint16_t evmcs_ver;
- vcpu_enable_cap(vm, vcpu_id, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
(unsigned long)&evmcs_ver);
/* KVM should return supported EVMCS version range */
}
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id)
+static void run_vcpu(struct kvm_vcpu *vcpu)
{
- vcpu_run(vm, vcpu_id);
- ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}
static void *vcpu_worker(void *data)
struct kvm_sregs sregs;
struct kvm_regs regs;
- vcpu_args_set(vm, vcpu->id, 3, info->start_gpa, info->end_gpa,
+ vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
vm_get_page_size(vm));
/* Snapshot regs before the first run. */
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
- vcpu_regs_set(vm, vcpu->id, ®s);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_regs_set(vcpu, ®s);
+ vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
sregs.cr0 ^= X86_CR0_WP;
#endif
- vcpu_sregs_set(vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
return NULL;
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
struct kvm_run *run;
int ret;
/* Let the guest access its memory until a stop signal is received */
while (READ_ONCE(run_vcpus)) {
- ret = _vcpu_run(vm, vcpu->id);
+ ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC)
+ if (get_ucall(vcpu, NULL) == UCALL_SYNC)
continue;
TEST_ASSERT(false,
struct ucall uc;
while (1) {
- vcpu_run(data->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(data->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == 0,
"Unexpected sync ucall, got %lx",
pthread_create(&migration_thread, NULL, migration_worker, 0);
for (i = 0; !done; i++) {
- vcpu_run(vm, vcpu->id);
- TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+ vcpu_run(vcpu);
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Guest failed?");
/*
if (!vcpu)
vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
else
- vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo);
+ vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}
static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
if (!vcpu)
return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
else
- return __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo);
+ return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
struct ucall uc; \
int __stage = (stage); \
\
- vcpu_run(__vcpu->vm, __vcpu->id); \
- get_ucall(__vcpu->vm, __vcpu->id, &uc); \
+ vcpu_run(__vcpu); \
+ get_ucall(__vcpu, &uc); \
ASSERT_EQ(uc.cmd, UCALL_SYNC); \
ASSERT_EQ(uc.args[1], __stage); \
}) \
{
uint64_t eval_reg;
- vcpu_get_reg(vcpu->vm, vcpu->id, id, &eval_reg);
+ vcpu_get_reg(vcpu, id, &eval_reg);
TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
}
irq_state.len = sizeof(buf);
irq_state.buf = (unsigned long)buf;
- irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_GET_IRQ_STATE, &irq_state);
+ irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state);
/*
* irqs contains the number of retrieved interrupts. Any interrupt
* (notably, the emergency call interrupt we have injected) should
struct kvm_regs regs;
struct kvm_fpu fpu;
- vcpu_regs_get(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(!memcmp(®s.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");
- vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu);
+ vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
/* sync regs */
struct kvm_fpu fpu;
/* KVM_GET_SREGS */
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
"cr14 == 0xC2000000 (KVM_GET_SREGS)");
TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
- vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu);
+ vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!fpu.fpc, "fpc == 0");
test_one_reg(vcpu, KVM_REG_S390_GBEA, 1);
irq_state.buf = (unsigned long)buf;
irq->type = KVM_S390_INT_EMERGENCY;
irq->u.emerg.code = vcpu->id;
- irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_SET_IRQ_STATE, &irq_state);
+ irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
}
ksft_print_msg("Testing normal reset\n");
vm = create_vm(&vcpu);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
inject_irq(vcpu);
- vcpu_ioctl(vm, vcpu->id, KVM_S390_NORMAL_RESET, 0);
+ vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, 0);
/* fields that must be cleared */
assert_normal(vcpu);
ksft_print_msg("Testing initial reset\n");
vm = create_vm(&vcpu);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
inject_irq(vcpu);
- vcpu_ioctl(vm, vcpu->id, KVM_S390_INITIAL_RESET, 0);
+ vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, 0);
/* fields that must be cleared */
assert_normal(vcpu);
ksft_print_msg("Testing clear reset\n");
vm = create_vm(&vcpu);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
inject_irq(vcpu);
- vcpu_ioctl(vm, vcpu->id, KVM_S390_CLEAR_RESET, 0);
+ vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, 0);
/* fields that must be cleared */
assert_normal(vcpu);
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->s390_sieic.icptcode, run->s390_sieic.ipa,
run->s390_sieic.ipb);
- vcpu_regs_get(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs);
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
}
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
- vcpu_regs_get(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs);
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
run->kvm_dirty_regs = 0;
run->s.regs.gprs[11] = 0xDEADBEEF;
run->s.regs.diag318 = 0x4B1D;
- rv = _vcpu_run(vcpu->vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
struct ucall uc; \
int __stage = (stage); \
\
- vcpu_run(__vcpu->vm, __vcpu->id); \
- get_ucall(__vcpu->vm, __vcpu->id, &uc); \
+ vcpu_run(__vcpu); \
+ get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) { \
TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1], \
(const char *)uc.args[0], uc.args[2], uc.args[3]); \
* has been deleted or while it is being moved.
*/
while (1) {
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_IO) {
- cmd = get_ucall(vcpu->vm, vcpu->id, &uc);
+ cmd = get_ucall(vcpu, &uc);
if (cmd != UCALL_SYNC)
break;
run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit reason = %d", run->exit_reason);
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
/*
* On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
vcpu = __vm_vcpu_add(vm, 0);
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vcpu->vm, st_gva[i]);
- ret = _vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME,
+ ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
(ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
- vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
+ vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
.attr = KVM_ARM_VCPU_PVTIME_IPA,
};
- return !__vcpu_ioctl(vcpu->vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev);
+ return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
.addr = (uint64_t)&st_ipa,
};
- vcpu_ioctl(vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev);
+ vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vm, st_gva[i]);
st_ipa = (ulong)st_gva[i] | 1;
- ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
st_ipa = (ulong)st_gva[i];
- vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+ vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
- ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}
{
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
for (i = 0; i < NR_VCPUS; ++i) {
steal_time_init(vcpus[i], i);
- vcpu_args_set(vm, vcpus[i]->id, 1, i);
+ vcpu_args_set(vcpus[i], 1, i);
/* First VCPU run initializes steal-time */
run_vcpu(vcpus[i]);
static void check_preconditions(struct kvm_vcpu *vcpu)
{
- if (!__vcpu_has_device_attr(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL,
- KVM_VCPU_TSC_OFFSET))
+ if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET))
return;
print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
{
- vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL,
- KVM_VCPU_TSC_OFFSET, &test->tsc_offset);
+ vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET,
+ &test->tsc_offset);
}
static uint64_t guest_read_system_counter(struct test_case *test)
setup_system_counter(vcpu, test);
start = host_read_guest_system_counter(test);
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
end = host_read_guest_system_counter(test);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, start, end);
break;
return;
default:
TEST_ASSERT(0, "unhandled ucall %ld\n",
- get_ucall(vcpu->vm, vcpu->id, &uc));
+ get_ucall(vcpu, &uc));
}
}
}
}
run = vcpu->run;
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
/* Register #NM handler */
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
/* amx cfg for guest_code */
/* xsave data for guest_code */
xsavedata = vm_vaddr_alloc_pages(vm, 3);
memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize());
- vcpu_args_set(vm, vcpu->id, 3, amx_cfg, tiledata, xsavedata);
+ vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xsavedata);
for (stage = 1; ; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
* size minus the 8K AMX size.
*/
amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
void *amx_start = (void *)state->xsave + amx_offset;
void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
/* Only check TMM0 register, 1 tile */
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
- vcpu_load_state(vm, vcpu->id, state);
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
- vcpu_regs_get(vm, vcpu->id, ®s2);
+ vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
{
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
u32 eax, ebx, x;
/* Setting unmodified CPUID is allowed */
- rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+ rc = __vcpu_set_cpuid(vcpu, cpuid);
TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
/* Changing CPU features is forbidden */
ent = get_cpuid(cpuid, 0x7, 0);
ebx = ent->ebx;
ent->ebx--;
- rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+ rc = __vcpu_set_cpuid(vcpu, cpuid);
TEST_ASSERT(rc, "Changing CPU features should fail");
ent->ebx = ebx;
eax = ent->eax;
x = eax & 0xff;
ent->eax = (eax & ~0xffu) | (x - 1);
- rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+ rc = __vcpu_set_cpuid(vcpu, cpuid);
TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
ent->eax = eax;
}
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
supp_cpuid = kvm_get_supported_cpuid();
- cpuid2 = vcpu_get_cpuid(vm, vcpu->id);
+ cpuid2 = vcpu_get_cpuid(vcpu);
compare_cpuids(supp_cpuid, cpuid2);
vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
- vcpu_args_set(vm, vcpu->id, 1, cpuid_gva);
+ vcpu_args_set(vcpu, 1, cpuid_gva);
for (stage = 0; stage < 3; stage++)
run_vcpu(vcpu, stage);
run = vcpu->run;
while (1) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
/* emulate hypervisor clearing CR4.OSXSAVE */
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.cr4 &= ~X86_CR4_OSXSAVE;
- vcpu_sregs_set(vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
break;
case UCALL_ABORT:
TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
{
struct kvm_regs regs;
- vcpu_regs_get(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
regs.rip += insn_len;
- vcpu_regs_set(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_set(vcpu, ®s);
}
int main(void)
/* Test software BPs - int3 */
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == BP_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(sw_bp),
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
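/* DR7: bit 10 (0x400) is reserved-as-1; bit 2*i+1 is the global enable (G<i>) for this breakpoint. */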
debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
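/* DR7 bits 16+4i..19+4i: R/W=0b01 with LEN=0b11, i.e. break on a 4-byte data write (0xd per slot). */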
debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
(0x000d0000UL << (4*i));
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
KVM_GUESTDBG_BLOCKIRQ;
debug.arch.debugreg[7] = 0x00000400;
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == target_rip &&
memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[7] = 0x400 | DR7_GD;
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | DR6_BD;
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
/* Disable all debug controls, run to the end */
memset(&debug, 0, sizeof(debug));
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
+ vcpu_guest_debug_set(vcpu, &debug);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
- cmd = get_ucall(vm, vcpu->id, &uc);
+ cmd = get_ucall(vcpu, &uc);
TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
kvm_vm_free(vm);
* contained an flds instruction that is 2 bytes in
* length (i.e. no prefix, no SIB, no displacement).
*/
- vcpu_regs_get(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
regs.rip += 2;
- vcpu_regs_set(vcpu->vm, vcpu->id, ®s);
+ vcpu_regs_set(vcpu, ®s);
}
}
}
struct ucall uc;
if (vcpu->run->exit_reason == KVM_EXIT_IO &&
- get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
+ get_ucall(vcpu, &uc) == UCALL_ABORT) {
do_guest_assert(&uc);
}
}
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE,
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE);
}
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
break;
case UCALL_ABORT:
entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
set_cpuid(cpuid, entry);
- vcpu_set_cpuid(vm, vcpu->id, cpuid);
+ vcpu_set_cpuid(vcpu, cpuid);
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, PAGE_SIZE);
- pte = vm_get_page_table_entry(vm, vcpu->id, MEM_REGION_GVA);
- vm_set_page_table_entry(vm, vcpu->id, MEM_REGION_GVA, pte | (1ull << 36));
+ pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA);
+ vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36));
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
process_exit_on_emulation_error(vcpu);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE");
{
struct kvm_vcpu_events events;
- vcpu_events_get(vcpu->vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
events.nmi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
- vcpu_events_set(vcpu->vm, vcpu->id, &events);
+ vcpu_events_set(vcpu, &events);
}
static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
struct kvm_regs regs1, regs2;
struct kvm_x86_state *state;
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_set_hv_cpuid(vm, vcpu->id);
- vcpu_enable_evmcs(vm, vcpu->id);
- vcpu_load_state(vm, vcpu->id, state);
+ vcpu_set_hv_cpuid(vcpu);
+ vcpu_enable_evmcs(vcpu);
+ vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
- vcpu_regs_get(vm, vcpu->id, ®s2);
+ vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
exit(KSFT_SKIP);
}
- vcpu_set_hv_cpuid(vm, vcpu->id);
- vcpu_enable_evmcs(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
+ vcpu_enable_evmcs(vcpu);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
for (stage = 1;; stage++) {
run = vcpu->run;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
static void setup_ud_vector(struct kvm_vcpu *vcpu)
{
vm_init_descriptor_tables(vcpu->vm);
- vcpu_init_descriptor_tables(vcpu->vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
}
struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
break;
u64 tsc_freq, r1, r2, t1, t2;
s64 delta_ns;
- tsc_freq = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TSC_FREQUENCY);
+ tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY);
TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
/* For increased accuracy, take the mean of rdtsc() before and after the ioctl */
r1 = rdtsc();
- t1 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT);
+ t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
r1 = (r1 + rdtsc()) / 2;
nop_loop();
r2 = rdtsc();
- t2 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT);
+ t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
r2 = (r2 + rdtsc()) / 2;
TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
run = vcpu->run;
- vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n");
- vcpu_args_set(vm, vcpu->id, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
+ vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
host_check_tsc_msr_rdtsc(vcpu);
for (stage = 1;; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
int ret;
if (vcpu)
- ret = __vcpu_ioctl(vm, vcpu->id, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+ ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
else
ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
/* Test vCPU ioctl version */
test_hv_cpuid_e2big(vm, vcpu);
- hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, false);
free(hv_cpuid_entries);
print_skip("Enlightened VMCS is unsupported");
goto do_sys;
}
- vcpu_enable_evmcs(vm, vcpu->id);
- hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id);
+ vcpu_enable_evmcs(vcpu);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, true);
free(hv_cpuid_entries);
"failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
TEST_ASSERT(set_cpuid(cpuid, dbg),
"failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
- vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+ vcpu_set_cpuid(vcpu, cpuid);
}
static void guest_test_msrs_access(void)
memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
msr = addr_gva2hva(vm, msr_gva);
- vcpu_args_set(vm, vcpu->id, 1, msr_gva);
- vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
+ vcpu_args_set(vcpu, 1, msr_gva);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
- vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
best = kvm_get_supported_hv_cpuid();
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
run = vcpu->run;
* Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
* capability enabled and guest visible CPUID bit unset.
*/
- vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_SYNIC2, 0);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
break;
case 22:
feat.eax |= HV_MSR_SYNIC_AVAILABLE;
else
pr_debug("Stage %d: finish\n", stage);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == 0,
"Unexpected stage: %ld (0 expected)\n",
vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
/* Hypercall input/output */
hcall_params = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
- vcpu_args_set(vm, vcpu->id, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
- vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
+ vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
- vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
best = kvm_get_supported_hv_cpuid();
else
pr_debug("Stage %d: finish\n", stage);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == 0,
"Unexpected stage: %ld (0 expected)\n",
}
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
run = vcpu->run;
vcpu_alloc_svm(vm, &nested_gva);
- vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
vm_ioctl(vm, KVM_GET_CLOCK, &start);
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
vm_ioctl(vm, KVM_GET_CLOCK, &end);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, &start, &end);
break;
pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
pvti_gpa = addr_gva2gpa(vm, pvti_gva);
- vcpu_args_set(vm, vcpu->id, 2, pvti_gpa, pvti_gva);
+ vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);
enter_guest(vcpu);
kvm_vm_free(vm);
struct ucall uc;
while (true) {
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_PR_MSR:
pr_msr(&uc);
break;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- vcpu_enable_cap(vm, vcpu->id, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
+ vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
best = kvm_get_supported_cpuid();
clear_kvm_cpuid_features(best);
- vcpu_set_cpuid(vm, vcpu->id, best);
+ vcpu_set_cpuid(vcpu, best);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
enter_guest(vcpu);
/* Map 1gb page without a backing memlot. */
__virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
/* Guest access to the 1gb page should trigger MMIO. */
TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
* returns the struct that contains the entry being modified. Eww.
*/
*cpuid_reg = evil_cpuid_val;
- vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
/*
* Add a dummy memslot to coerce KVM into bumping the MMIO generation.
/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
- cmd = get_ucall(vm, vcpu->id, NULL);
+ cmd = get_ucall(vcpu, NULL);
TEST_ASSERT(cmd == UCALL_DONE,
"Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
exit_reason_str(run->exit_reason), cmd);
struct ucall uc;
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true);
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- get_ucall(vcpu->vm, vcpu->id, &uc);
+ get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
struct kvm_run *run = vcpu->run;
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false);
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
run->exit_reason,
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- msr_platform_info = vcpu_get_msr(vm, vcpu->id, MSR_PLATFORM_INFO);
- vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO,
- msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+ msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vcpu, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
test_msr_platform_info_enabled(vcpu);
test_msr_platform_info_disabled(vcpu);
- vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO, msr_platform_info);
+ vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info);
kvm_vm_free(vm);
struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- get_ucall(vcpu->vm, vcpu->id, &uc);
+ get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu", uc.cmd);
return uc.args[1];
vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
TEST_ASSERT(!sanity_check_pmu(vcpu),
"Guest should not be able to use disabled PMU.");
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
if (!sanity_check_pmu(vcpu)) {
print_skip("Guest PMU is not functional");
for (stage = 0; stage < 2; stage++) {
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
memcpy(&sregs, orig, sizeof(sregs));
sregs.cr4 |= feature_bit;
- rc = _vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
/* Sanity check that KVM didn't change anything. */
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
}
vm = vm_create_barebones();
vcpu = __vm_vcpu_add(vm, 0);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.cr4 |= calc_cr4_feature_bits(vm);
cr4 = sregs.cr4;
- rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
sregs.cr4, cr4);
/* Create a "real" VM and verify APIC_BASE can be set. */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.apic_base = 1 << 10;
- rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
sregs.apic_base);
sregs.apic_base = 1 << 11;
- rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
sregs.apic_base);
{
struct kvm_vcpu_events events;
- vcpu_events_get(vcpu->vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
events.smi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_SMM;
- vcpu_events_set(vcpu->vm, vcpu->id, &events);
+ vcpu_events_set(vcpu, &events);
}
int main(int argc, char *argv[])
memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
sizeof(smi_handler));
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_SMBASE, SMRAM_GPA);
+ vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);
if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
if (nested_svm_supported())
if (!nested_gva)
pr_info("will skip SMM test with VMX enabled\n");
- vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
memset(®s, 0, sizeof(regs));
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
stage_reported = regs.rax & 0xff;
if (stage == 10)
inject_smi(vcpu);
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
kvm_vm_release(vm);
vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
- vcpu_load_state(vm, vcpu->id, state);
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
if (nested_svm_supported())
if (!nested_gva)
pr_info("will skip nested state checks\n");
- vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
- vcpu_load_state(vm, vcpu->id, state);
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
- vcpu_regs_get(vm, vcpu->id, ®s2);
+ vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
vcpu_alloc_svm(vm, &svm_gva);
- vcpu_args_set(vm, vcpu->id, 1, svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
run = vcpu->run;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
break;
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
} else {
idt_alt_vm = 0;
}
- vcpu_args_set(vm, vcpu->id, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
+ vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
memset(&debug, 0, sizeof(debug));
- vcpu_guest_debug_set(vm, vcpu->id, &debug);
+ vcpu_guest_debug_set(vcpu, &debug);
struct kvm_run *run = vcpu->run;
struct ucall uc;
alarm(2);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
alarm(0);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld, vals = 0x%lx 0x%lx 0x%lx", (const char *)uc.args[0],
__FILE__, uc.args[1], uc.args[2], uc.args[3], uc.args[4]);
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_svm(vm, &svm_gva);
- vcpu_args_set(vm, vcpu->id, 1, svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
/* Request and verify all valid register sets. */
/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs.regs);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
- vcpu_events_get(vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Set and verify various register values. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
"apic_base sync regs value incorrect 0x%llx.",
run->s.regs.sregs.apic_base);
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs.regs);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
- vcpu_events_get(vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Clear kvm_dirty_regs bits, verify new s.regs values are
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xDEADBEEF;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xAAAA;
regs.rbx = 0xBAC0;
- vcpu_regs_set(vm, vcpu->id, ®s);
- rv = _vcpu_run(vm, vcpu->id);
+ vcpu_regs_set(vcpu, ®s);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
run->s.regs.regs.rbx = 0xBBBB;
- rv = _vcpu_run(vm, vcpu->id);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(regs.rbx == 0xBBBB + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
run = vcpu->run;
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
- vcpu_run(vm, vcpu->id);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Expected KVM_EXIT_IO, got: %u (%s)\n",
TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
"Expected IN from port %d from L2, got port %d",
ARBITRARY_IO_PORT, run->io.port);
- vcpu_events_get(vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
events.triple_fault.pending = true;
- vcpu_events_set(vm, vcpu->id, &events);
+ vcpu_events_set(vcpu, &events);
run->immediate_exit = true;
- vcpu_run_complete_io(vm, vcpu->id);
+ vcpu_run_complete_io(vcpu);
- vcpu_events_get(vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
"Triple fault event invalid");
TEST_ASSERT(events.triple_fault.pending,
"No triple fault pending");
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
break;
case UCALL_ABORT:
#define GUEST_STEP (UNITY * 4)
#define ROUND(x) ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x) ROUND(rdmsr(x))
-#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, vcpu->id, x))
+#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
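A quick worked example of the rounding macro (valid only because UNITY is a power of two, which the mask trick requires):

/*
 * ROUND(x) rounds x to the nearest multiple of UNITY: adding UNITY / 2
 * biases halfway cases up, then & -UNITY clears the low-order bits.
 * E.g. with UNITY == 8: ROUND(11) == (11 + 4) & ~7 == 8, while
 * ROUND(13) == (13 + 4) & ~7 == 16.
 */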
static void guest_code(void)
{
{
struct ucall uc;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
* Host: writes to MSR_IA32_TSC set the host-side offset
* and therefore do not change MSR_IA32_TSC_ADJUST.
*/
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, HOST_ADJUST + val);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
run_vcpu(vcpu, 3);
/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, UNITY * 123456);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST), UNITY * 123456);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
/* Restore previous value. */
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, val);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
if (!first_cpu_done) {
first_cpu_done = true;
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, TEST_TSC_OFFSET);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC, TEST_TSC_OFFSET);
}
pthread_spin_unlock(&create_lock);
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
goto out;
memset(®s, 0, sizeof(regs));
while (1) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- if (get_ucall(vm, vcpu->id, &uc))
+ if (get_ucall(vcpu, &uc))
break;
TEST_ASSERT(run->io.port == 0x80,
* scope from a testing perspective as it's not ABI in any way,
* i.e. it really is abusing internal KVM knowledge.
*/
- vcpu_regs_get(vm, vcpu->id, ®s);
+ vcpu_regs_get(vcpu, ®s);
if (regs.rcx == 2)
regs.rcx = 1;
if (regs.rcx == 3)
regs.rcx = 8192;
memset((void *)run + run->io.data_offset, 0xaa, 4096);
- vcpu_regs_set(vm, vcpu->id, ®s);
+ vcpu_regs_set(vcpu, ®s);
}
switch (uc.cmd) {
struct ucall uc;
if (vcpu->run->exit_reason == KVM_EXIT_IO &&
- get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
+ get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_FAIL("%s at %s:%ld",
(const char *)uc.args[0], __FILE__, uc.args[1]);
}
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE,
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE);
}
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
break;
case UCALL_ABORT:
static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
uint32_t msr_index)
{
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
process_rdmsr(vcpu, msr_index);
}
static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
uint32_t msr_index)
{
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
process_wrmsr(vcpu, msr_index);
}
static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
return process_ucall(vcpu);
}
static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
process_ucall_done(vcpu);
}
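These one-line wrappers keep every host-side step in run-then-decode shape; a typical sequence (names reused from this excerpt) reads:

	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
	run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
	if (run_guest_then_process_ucall(vcpu) != UCALL_DONE)
		TEST_FAIL("Unexpected ucall");

so host control flow mirrors the guest's MSR accesses step for step.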
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, NULL);
if (process_ucall(vcpu) != UCALL_DONE) {
{
struct ucall uc;
- switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("Guest assertion not met");
break;
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
while (1) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR:
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
prepare_virtualize_apic_accesses(vmx, vm);
- vcpu_args_set(vm, vcpu->id, 2, vmx_pages_gva, high_gpa);
+ vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
while (!done) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
if (apic_access_addr == high_gpa) {
TEST_ASSERT(run->exit_reason ==
KVM_EXIT_INTERNAL_ERROR,
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
if (run->io.port == PORT_L0_EXIT)
break;
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
run = vcpu->run;
/* Add an extra memory slot for testing dirty logging */
while (!done) {
memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
{
struct kvm_run *run = vcpu->run;
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n",
static struct kvm_sregs sregs;
if (!sregs.cr0)
- vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = !!set;
- vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
}
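The static sregs is fetched at most once: an all-zero CR0 doubles as the "not yet initialized" flag, which is safe because a configured vCPU never reports CR0 == 0. A sketch of the whole helper these hunks appear to touch (name assumed):

	static void __set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
	{
		static struct kvm_sregs sregs;

		if (!sregs.cr0)		/* CR0 is non-zero once the vCPU is set up */
			vcpu_sregs_get(vcpu, &sregs);
		sregs.tr.unusable = !!set;
		vcpu_sregs_set(vcpu, &sregs);
	}

Marking TR unusable is sufficient to make the guest state invalid, forcing KVM to emulate until the bit is cleared again.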
static void set_invalid_guest_state(struct kvm_vcpu *vcpu)
TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig);
- vcpu_events_get(vcpu->vm, vcpu->id, &events);
+ vcpu_events_get(vcpu, &events);
/*
* If an exception is pending, attempt KVM_RUN with invalid guest,
get_set_sigalrm_vcpu(vcpu);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
run = vcpu->run;
* emulating invalid guest state for L2.
*/
memset(&sregs, 0, sizeof(sregs));
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = 1;
- vcpu_sregs_set(vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
break;
case UCALL_ABORT:
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
- tsc_khz = __vcpu_ioctl(vm, vcpu->id, KVM_GET_TSC_KHZ, NULL);
+ tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
/* scale down L1's TSC frequency */
- vcpu_ioctl(vm, vcpu->id, KVM_SET_TSC_KHZ,
- (void *) (tsc_khz / l1_scale_factor));
+ vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor));
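Two quirks here: KVM_GET_TSC_KHZ reports the frequency as the ioctl return value, hence the comparison against -1 rather than an out parameter, and KVM_SET_TSC_KHZ takes the new frequency directly in the argument slot, hence the (void *) cast of a plain integer.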
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *) uc.args[0]);
case UCALL_SYNC:
}
/* testcase 1, set capabilities when we have PDCM bit */
- vcpu_set_cpuid(vm, vcpu->id, cpuid);
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
+ vcpu_set_cpuid(vcpu, cpuid);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
/* check capabilities can be retrieved with KVM_GET_MSR */
- ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
/* check that whatever we write with KVM_SET_MSR is _not_ modified */
- vcpu_run(vm, vcpu->id);
- ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+ vcpu_run(vcpu);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
/* testcase 2, check valid LBR formats are accepted */
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0);
- ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), 0);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0);
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
- ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
/* testcase 3, check invalid LBR format is rejected */
/* Note: on Arch LBR capable platforms, the LBR_FMT field in the perf
 * capabilities MSR is 0x3f; use a truly invalid format, 0x30, to avoid
 * a spurious failure. */
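/* _vcpu_set_msr() returns the raw KVM_SET_MSRS result, i.e. the number
 * of MSRs actually written, so ret == 0 means the bogus value was
 * rejected as expected.
 */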
- ret = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0x30);
+ ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30);
TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
printf("Completed perf capability tests.\n");
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (stage = 1;; stage++) {
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]);
stage, uc.args[4], uc.args[5]);
}
- state = vcpu_save_state(vm, vcpu->id);
+ state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
- vcpu_regs_get(vm, vcpu->id, ®s1);
+ vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
- vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
- vcpu_load_state(vm, vcpu->id, state);
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
- vcpu_regs_get(vm, vcpu->id, ®s2);
+ vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
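In outline, the save/restore (pseudo-migration) idiom exercised above, using only names visible in this excerpt:

	state = vcpu_save_state(vcpu);			/* snapshot vCPU state      */
	vcpu_regs_get(vcpu, &regs1);			/* baseline GPRs            */
	kvm_vm_release(vm);				/* drop the source VM's fds */
	vcpu = vm_recreate_with_one_vcpu(vm);		/* same memory, fresh vCPU  */
	vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
	vcpu_load_state(vcpu, state);			/* restore the snapshot     */
	vcpu_regs_get(vcpu, &regs2);
	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), "GPRs changed");

Registers are captured before the source VM is released and compared after the restore, so any state KVM fails to carry across shows up as a GPR mismatch.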
void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state)
{
- vcpu_nested_state_set(vcpu->vm, vcpu->id, state);
+ vcpu_nested_state_set(vcpu, state);
}
void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
{
int rv;
- rv = __vcpu_nested_state_set(vcpu->vm, vcpu->id, state);
+ rv = __vcpu_nested_state_set(vcpu, state);
TEST_ASSERT(rv == -1 && errno == expected_errno,
"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
strerror(expected_errno), expected_errno, rv, strerror(errno),
test_nested_state(vcpu, state);
/* Enable VMX in the guest CPUID. */
- vcpu_set_cpuid(vcpu->vm, vcpu->id, kvm_get_supported_cpuid());
+ vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
state->flags &= KVM_STATE_NESTED_EVMCS;
if (have_evmcs) {
test_nested_state_expect_einval(vcpu, state);
- vcpu_enable_evmcs(vcpu->vm, vcpu->id);
+ vcpu_enable_evmcs(vcpu);
}
test_nested_state(vcpu, state);
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = 0;
test_nested_state(vcpu, state);
- vcpu_nested_state_get(vcpu->vm, vcpu->id, state);
+ vcpu_nested_state_get(vcpu, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
"Size must be between %ld and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size);
TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
cpuid->entries[i].ecx &= ~CPUID_VMX;
- vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+ vcpu_set_cpuid(vcpu, cpuid);
cpuid->entries[i].ecx |= CPUID_VMX;
}
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
vcpu->id, r);
fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id);
- vcpu_run(vcpu->vm, vcpu->id);
+ vcpu_run(vcpu);
exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
vcpu->id, exit_reason, exit_reason_str(exit_reason));
- if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false,
"vCPU %u exited with error: %s.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n"
vm = vm_create_with_one_vcpu(¶ms[0].vcpu, halter_guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, params[0].vcpu->id);
+ vcpu_init_descriptor_tables(params[0].vcpu);
vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
params[0].data = data;
params[1].data = data;
- vcpu_args_set(vm, params[0].vcpu->id, 1, test_data_page_vaddr);
- vcpu_args_set(vm, params[1].vcpu->id, 1, test_data_page_vaddr);
+ vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
+ vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
params[0].pipis_rcvd = pipis_rcvd;
} while (1);
}
-static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val)
+static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
{
struct kvm_vcpu *vcpu = x->vcpu;
struct kvm_lapic_state xapic;
* all bits are valid and should not be modified by KVM (ignoring the
* fact that vectors 0-15 are technically illegal).
*/
- vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
*((u32 *)&xapic.regs[APIC_IRR]) = val;
*((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32;
- vcpu_ioctl(vm, vcpu->id, KVM_SET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
- vcpu_run(vm, vcpu->id);
- ASSERT_EQ(get_ucall(vm, vcpu->id, &uc), UCALL_SYNC);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
ASSERT_EQ(uc.args[1], val);
- vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic)
ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
}
-static void __test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val)
+static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
- ____test_icr(vm, x, val | APIC_ICR_BUSY);
- ____test_icr(vm, x, val & ~(u64)APIC_ICR_BUSY);
+ ____test_icr(x, val | APIC_ICR_BUSY);
+ ____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}
-static void test_icr(struct kvm_vm *vm, struct xapic_vcpu *x)
+static void test_icr(struct xapic_vcpu *x)
{
struct kvm_vcpu *vcpu = x->vcpu;
uint64_t icr, i, j;
icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
- __test_icr(vm, x, icr | i);
+ __test_icr(x, icr | i);
icr = APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
- __test_icr(vm, x, icr | i);
+ __test_icr(x, icr | i);
/*
* Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
icr = APIC_INT_ASSERT | 0xff;
for (i = vcpu->id + 1; i < 0xff; i++) {
for (j = 0; j < 8; j++)
- __test_icr(vm, x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8));
+ __test_icr(x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8));
}
/* And again with a shorthand destination for all types of IPIs. */
icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT;
for (i = 0; i < 8; i++)
- __test_icr(vm, x, icr | (i << 8));
+ __test_icr(x, icr | (i << 8));
/* And a few garbage values, just make sure each is an IRQ (blocked). */
- __test_icr(vm, x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
- __test_icr(vm, x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
- __test_icr(vm, x, -1ull & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
}
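__test_icr() sends each value twice, once with and once without APIC_ICR_BUSY, because the ICR's bit 12 (Delivery Status) is read-only status in xAPIC and was dropped entirely in x2APIC; that is why the xAPIC readback above is compared against val & ~APIC_ICR_BUSY. For reference, assuming the usual apicdef.h encoding:

	#define APIC_ICR_BUSY	(1 << 12)	/* xAPIC ICR Delivery Status */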
int main(int argc, char *argv[])
int i;
vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code);
- test_icr(vm, &x);
+ test_icr(&x);
kvm_vm_free(vm);
/*
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false;
- cpuid = vcpu_get_cpuid(vm, x.vcpu->id);
+ cpuid = vcpu_get_cpuid(x.vcpu);
for (i = 0; i < cpuid->nent; i++) {
if (cpuid->entries[i].function == 1)
break;
}
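/* CPUID.01H:ECX[21] is the x2APIC feature flag; clearing it leaves the guest with xAPIC only. */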
cpuid->entries[i].ecx &= ~BIT(21);
- vcpu_set_cpuid(vm, x.vcpu->id, cpuid);
+ vcpu_set_cpuid(x.vcpu, cpuid);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
- test_icr(vm, &x);
+ test_icr(&x);
kvm_vm_free(vm);
}
{
if (vinfo)
printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
- vcpu_dump(stdout, vcpu->vm, vcpu->id, 0);
+ vcpu_dump(stdout, vcpu, 0);
TEST_FAIL("IRQ delivery timed out");
}
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = VCPU_INFO_ADDR,
};
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &vi);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);
struct kvm_xen_vcpu_attr pvclock = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
.u.gpa = PVTIME_ADDR,
};
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &pvclock);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, vcpu->id);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
.u.gpa = RUNSTATE_ADDR,
};
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &st);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
}
int irq_fd[2] = { -1, -1 };
inj.u.evtchn.flags = 0;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
}
vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0;
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1];
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 4:
0x6b6b - rs->time[RUNSTATE_offline];
rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
rst.u.runstate.time_offline;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 5:
rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
rst.u.runstate.time_blocked = 0x6b6b;
rst.u.runstate.time_offline = 0x5a;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 6:
case 14:
memset(&tmr, 0, sizeof(tmr));
tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
"Timer port not returned");
TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
printf("Testing restored oneshot timer\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
evtchn_irq_expected = true;
alarm(1);
break;
printf("Testing SCHEDOP_poll wake on masked event\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1);
break;
evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
/* Read it back and check the pending time is reported correctly */
tmr.u.timer.expires_ns = 0;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
"Timer not reported pending");
alarm(1);
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
/* Read timer and check it is no longer pending */
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
shinfo->evtchn_pending[0] = 0;
evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1);
break;
struct kvm_xen_vcpu_attr rst = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
};
- vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);
if (verbose) {
printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_set_hv_cpuid(vm, vcpu->id);
+ vcpu_set_hv_cpuid(vcpu);
struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, vcpu->id);
+ vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_XEN) {
ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, vcpu->id, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
exit(KSFT_SKIP);
}
- xss_val = vcpu_get_msr(vm, vcpu->id, MSR_IA32_XSS);
+ xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0,
"MSR_IA32_XSS should be initialized to zero\n");
- vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, xss_val);
+ vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
/*
* At present, KVM only supports a guest IA32_XSS value of 0. Verify
*/
xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS);
for (i = 0; i < MSR_BITS; ++i) {
- r = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, 1ull << i);
+ r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i);
/*
* Setting a list of MSRs returns the entry that "faulted", or