kvm_vm_free(vm);
}
-static void enable_system_suspend(struct kvm_vm *vm)
-{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_ARM_SYSTEM_SUSPEND,
- };
-
- vm_enable_cap(vm, &cap);
-}
-
static void guest_test_system_suspend(void)
{
uint64_t ret;
struct kvm_vm *vm;
vm = setup_vm(guest_test_system_suspend);
- enable_system_suspend(vm);
+ vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);
vcpu_power_off(vm, VCPU_ID_TARGET);
run = vcpu_state(vm, VCPU_ID_SOURCE);
struct timespec get_dirty_log_total = (struct timespec){0};
struct timespec vcpu_dirty_total = (struct timespec){0};
struct timespec avg;
- struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
- if (dirty_log_manual_caps) {
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = dirty_log_manual_caps;
- vm_enable_cap(vm, &cap);
- }
+ if (dirty_log_manual_caps)
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+ dirty_log_manual_caps);
arch_setup_vm(vm, nr_vcpus);
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
- struct kvm_enable_cap cap = {};
u64 manual_caps;
manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = manual_caps;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}
static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
return ret;
}
-static inline int __vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
- return __vm_ioctl(vm, KVM_ENABLE_CAP, cap);
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
-static inline void vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
- vm_ioctl(vm, KVM_ENABLE_CAP, cap);
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
static inline void vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
- struct kvm_enable_cap *cap)
+ uint32_t cap, uint64_t arg0)
{
- vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, cap);
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_cap);
}
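
For orientation, a minimal before/after sketch of a call site under the converted helpers. This is not part of the patch itself; the cap names and the VCPU_ID/MAX_VCPU_ID constants are borrowed from the tests further down purely as example values.

        /* Before this patch, every call site filled in the struct by hand: */
        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_MAX_VCPU_ID,
                .args = { MAX_VCPU_ID },
        };
        vm_enable_cap(vm, &cap);                        /* old signature */

        /* After this patch, the wrappers build the struct from (cap, arg0): */
        vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
        vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

        /* __vm_enable_cap() still returns the raw ioctl result, so negative
         * tests can check for failure, as the KVM_CAP_MAX_VCPU_ID test below
         * does: */
        int ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
        TEST_ASSERT(ret < 0, "Raising KVM_CAP_MAX_VCPU_ID again should fail");
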
static inline void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
- struct kvm_enable_cap cap = { 0 };
-
- cap.cap = KVM_CAP_DIRTY_LOG_RING;
- cap.args[0] = ring_size;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
vm->dirty_ring_size = ring_size;
}
{
uint16_t evmcs_ver;
- struct kvm_enable_cap enable_evmcs_cap = {
- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
- .args[0] = (unsigned long)&evmcs_ver
- };
-
- vcpu_enable_cap(vm, vcpu_id, &enable_evmcs_cap);
+ vcpu_enable_cap(vm, vcpu_id, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ (unsigned long)&evmcs_ver);
/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
int main(int argc, char *argv[])
{
- struct kvm_enable_cap emul_failure_cap = {
- .cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
- .args[0] = 1,
- };
struct kvm_cpuid_entry2 *entry;
struct kvm_cpuid2 *cpuid;
struct kvm_vm *vm;
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
- vm_enable_cap(vm, &emul_failure_cap);
+ vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
MEM_REGION_GPA, MEM_REGION_SLOT,
static void test_fix_hypercall_disabled(void)
{
- struct kvm_enable_cap cap = {0};
struct kvm_vm *vm;
vm = vm_create_default(VCPU_ID, 0, guest_main);
setup_ud_vector(vm);
- cap.cap = KVM_CAP_DISABLE_QUIRKS2;
- cap.args[0] = KVM_X86_QUIRK_FIX_HYPERCALL_INSN;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
+ KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
ud_expected = true;
sync_global_to_guest(vm, ud_expected);
};
struct kvm_cpuid2 *best;
vm_vaddr_t msr_gva;
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
struct msr_data *msr;
while (true) {
msr = addr_gva2hva(vm, msr_gva);
vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
vcpu_set_hv_cpuid(vm, VCPU_ID);
* Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
* capability enabled and guest visible CPUID bit unset.
*/
- cap.cap = KVM_CAP_HYPERV_SYNIC2;
- cap.args[0] = 0;
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_SYNIC2, 0);
break;
case 22:
feat.eax |= HV_MSR_SYNIC_AVAILABLE;
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
vm_vaddr_t hcall_page, hcall_params;
struct hcall_data *hcall;
struct kvm_cpuid2 *best;
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
vcpu_set_hv_cpuid(vm, VCPU_ID);
int main(void)
{
- struct kvm_enable_cap cap = {0};
struct kvm_cpuid2 *best;
struct kvm_vm *vm;
vm = vm_create_default(VCPU_ID, 0, guest_main);
- cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
- cap.args[0] = 1;
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_enable_cap(vm, VCPU_ID, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
best = kvm_get_supported_cpuid();
clear_kvm_cpuid_features(best);
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
- struct kvm_enable_cap cap = { 0 };
int ret;
vm = vm_create(0);
ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID beyond KVM cap */
- cap.cap = KVM_CAP_MAX_VCPU_ID;
- cap.args[0] = ret + 1;
- ret = __vm_enable_cap(vm, &cap);
+ ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID beyond KVM cap should fail");
/* Set KVM_CAP_MAX_VCPU_ID */
- cap.cap = KVM_CAP_MAX_VCPU_ID;
- cap.args[0] = MAX_VCPU_ID;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID again */
- cap.args[0] = MAX_VCPU_ID + 1;
- ret = __vm_enable_cap(vm, &cap);
+ ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID multiple times should fail");
}
}
-static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
-{
- struct kvm_enable_cap cap = {};
-
- cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
- cap.flags = 0;
- cap.args[0] = (int)enable;
- vm_enable_cap(vm, &cap);
-}
-
static void test_msr_platform_info_enabled(struct kvm_vm *vm)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
- set_msr_platform_info_enabled(vm, true);
+ vm_enable_cap(vm, KVM_CAP_MSR_PLATFORM_INFO, true);
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- set_msr_platform_info_enabled(vm, false);
+ vm_enable_cap(vm, KVM_CAP_MSR_PLATFORM_INFO, false);
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
{
int r;
struct kvm_vm *vm;
- struct kvm_enable_cap cap = { 0 };
r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
if (!(r & KVM_PMU_CAP_DISABLE))
vm = vm_create_without_vcpus(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
- cap.cap = KVM_CAP_PMU_CAPABILITY;
- cap.args[0] = KVM_PMU_CAP_DISABLE;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
vm_vcpu_add_default(vm, VCPU_ID, guest_code);
vm_init_descriptor_tables(vm);
static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
- .args = { src->fd }
- };
-
- return __vm_enable_cap(dst, &cap);
+ return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
- .args = { src->fd }
- };
-
- return __vm_enable_cap(dst, &cap);
+ return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
vm_vaddr_t vmx_pages_gva;
struct ucall uc;
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_TRIPLE_FAULT_EVENT,
- .args = {1}
- };
-
if (!nested_vmx_supported()) {
print_skip("Nested VMX not supported");
exit(KSFT_SKIP);
}
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
run = vcpu_state(vm, VCPU_ID);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
process_ucall_done(vm);
}
-static void test_msr_filter_allow(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_filter_allow(void)
+{
struct kvm_vm *vm;
int rc;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
}
}
-static void test_msr_filter_deny(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_INVAL |
- KVM_MSR_EXIT_REASON_UNKNOWN |
- KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_filter_deny(void)
+{
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
+ KVM_MSR_EXIT_REASON_UNKNOWN |
+ KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
kvm_vm_free(vm);
}
-static void test_msr_permission_bitmap(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_permission_bitmap(void)
+{
struct kvm_vm *vm;
int rc;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");