ucall_init(vm, NULL);
test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
- if (gic_fd < 0) {
- print_skip("Failed to create vgic-v3");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
- if (test_args.migration_freq_ms && get_nprocs() < 2) {
- print_skip("At least two physical CPUs needed for vCPU migration");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
+ "At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
- if (debug_version(vcpu) < 6) {
- print_skip("Armv8 debug architecture not supported.");
- kvm_vm_free(vm);
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(debug_version(vcpu) >= 6,
+ "Armv8 debug architecture not supported.");
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_BRK_INS, guest_sw_bp_handler);
struct reg_sublist *s;
for_each_sublist(c, s) {
- if (s->capability && !kvm_has_cap(s->capability)) {
- fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
- exit(KSFT_SKIP);
- }
+ if (!s->capability)
+ continue;
+
+ __TEST_REQUIRE(kvm_has_cap(s->capability),
+ "%s: %s not available, skipping tests\n",
+ config_name(c), s->name);
}
}
int main(void)
{
- if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) {
- print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
host_test_cpu_on();
host_test_system_suspend();
struct kvm_vm *vm;
int ret;
- if (!kvm_has_cap(KVM_CAP_ARM_EL1_32BIT)) {
- print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
/* Get the preferred target type and copy that to init1 for later use */
vm = vm_create_barebones();
}
ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
- if (!ret) {
- pr_info("Running GIC_v2 tests.\n");
- run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
- return 0;
- }
- print_skip("No GICv2 nor GICv3 support");
- exit(KSFT_SKIP);
+ __TEST_REQUIRE(!ret, "No GICv2 nor GICv3 support");
+
+ pr_info("Running GIC_v2 tests.\n");
+ run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
return 0;
}
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
- if (gic_fd < 0) {
- print_skip("Failed to create vgic-v3, skipping");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
return 0;
pfn = entry & PAGEMAP_PFN_MASK;
- if (!pfn) {
- print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
return pfn;
}
}
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
- if (page_idle_fd < 0) {
- print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(page_idle_fd >= 0,
+ "CONFIG_IDLE_PAGE_TRACKING is not enabled");
close(page_idle_fd);
for_each_guest_mode(run_test, &params);
#endif
void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
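+
+/*
+ * Skip the calling test (exit with KSFT_SKIP) when the requirement @f
+ * does not hold: __TEST_REQUIRE() takes a printf-style message naming
+ * what is missing, while TEST_REQUIRE() reports the unmet expression
+ * itself.
+ */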
+#define __TEST_REQUIRE(f, fmt, ...) \
+do { \
+ if (!(f)) { \
+ print_skip(fmt, ##__VA_ARGS__); \
+ exit(KSFT_SKIP); \
+ } \
+} while (0)
+
+#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)
ssize_t test_write(int fd, const void *buf, size_t count);
ssize_t test_read(int fd, void *buf, size_t count);
}
/* Check the extension for binary stats */
- if (!kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) {
- print_skip("Binary form statistics interface is not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
/* Create VMs and VCPUs */
vms = malloc(sizeof(vms[0]) * max_vm);
rl.rlim_max = nr_fds_wanted;
int r = setrlimit(RLIMIT_NOFILE, &rl);
- if (r < 0) {
- printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+ __TEST_REQUIRE(r >= 0,
+ "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
old_rlim_max, nr_fds_wanted);
- exit(KSFT_SKIP);
- }
} else {
TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
}
int fd;
fd = open(path, flags);
- if (fd < 0) {
- print_skip("%s not available (errno: %d)", path, errno);
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
return fd;
}
{
vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
- if (!kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)) {
- print_skip("immediate_exit not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
kvm_fd = open_kvm_dev_path_or_exit();
rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
close(kvm_fd);
+
if (rc == -1 && (errno == ENXIO || errno == EINVAL))
exit(KSFT_SKIP);
TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
- if (!(bitmask & (1ULL << bit)))
- exit(KSFT_SKIP);
- if (!is_xfd_supported())
- exit(KSFT_SKIP);
+ TEST_REQUIRE(bitmask & (1ULL << bit));
+
+ TEST_REQUIRE(is_xfd_supported());
rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
void nested_svm_check_supported(void)
{
- if (!nested_svm_supported()) {
- print_skip("nested SVM not enabled");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(nested_svm_supported());
}
/*
void nested_vmx_check_supported(void)
{
- if (!nested_vmx_supported()) {
- print_skip("nested VMX not enabled");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(nested_vmx_supported());
}
static void nested_create_pte(struct kvm_vm *vm,
return NULL;
}
-static int calc_min_max_cpu(void)
+static void calc_min_max_cpu(void)
{
int i, cnt, nproc;
- if (CPU_COUNT(&possible_mask) < 2)
- return -EINVAL;
+ TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
/*
* CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
cnt++;
}
- return (cnt < 2) ? -EINVAL : 0;
+ __TEST_REQUIRE(cnt >= 2,
+ "Only one usable CPU, task migration not possible");
}
int main(int argc, char *argv[])
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno));
- if (calc_min_max_cpu()) {
- print_skip("Only one usable CPU, task migration not possible");
- exit(KSFT_SKIP);
- }
+ calc_min_max_cpu();
sys_rseq(0);
int main(int argc, char *argv[])
{
- int memop_cap, extension_cap, idx;
+ int extension_cap, idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
ksft_print_header();
- memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
- extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
- if (!memop_cap) {
- ksft_exit_skip("CAP_S390_MEM_OP not supported.\n");
- }
-
ksft_set_plan(ARRAY_SIZE(testlist));
+ extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (extension_cap >= testlist[idx].extension) {
testlist[idx].test();
struct kvm_vm *vm;
int idx;
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
+
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
ksft_print_header();
- if (!kvm_check_cap(KVM_CAP_SYNC_REGS))
- ksft_exit_skip("CAP_SYNC_REGS not supported");
-
ksft_set_plan(ARRAY_SIZE(testlist));
/* Create VM */
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
ucall_init(vm, NULL);
- if (!is_steal_time_supported(vcpus[0])) {
- print_skip("steal-time not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
/* Run test on each VCPU */
for (i = 0; i < NR_VCPUS; ++i) {
static void check_preconditions(struct kvm_vcpu *vcpu)
{
- if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET))
- return;
-
- print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
- exit(KSFT_SKIP);
+ __TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
+ KVM_VCPU_TSC_OFFSET),
+ "KVM_VCPU_TSC_OFFSET not supported; skipping test");
}
static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
{
struct kvm_cpuid_entry2 *entry;
struct kvm_regs regs1, regs2;
- bool amx_supported = false;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- print_skip("XSAVE feature not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
- if (kvm_get_cpuid_max_basic() >= 0xd) {
- entry = kvm_get_supported_cpuid_index(0xd, 0);
- amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE);
- if (!amx_supported) {
- print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax);
- exit(KSFT_SKIP);
- }
- /* Get xsave/restore max size */
- xsave_restore_size = entry->ecx;
- }
+ TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
+
+ entry = kvm_get_supported_cpuid_index(0xd, 0);
+ TEST_REQUIRE(entry->eax & XFEATURE_MASK_XTILE);
+
+ /* Get xsave/restore max size */
+ xsave_restore_size = entry->ecx;
run = vcpu->run;
vcpu_regs_get(vcpu, &regs1);
struct ucall uc;
entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- print_skip("XSAVE feature not supported");
- return 0;
- }
+ TEST_REQUIRE(entry->ecx & X86_FEATURE_XSAVE);
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
1, /* cli */
};
- if (!kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG)) {
- print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
- return 0;
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- if (!kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
- printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n");
- return 0;
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- if (!nested_vmx_supported() ||
- !kvm_has_cap(KVM_CAP_NESTED_STATE) ||
- !kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- print_skip("Enlightened VMCS is unsupported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(nested_vmx_supported());
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
vcpu_set_hv_cpuid(vcpu);
vcpu_enable_evmcs(vcpu);
int main(void)
{
- if (!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
- print_skip("KVM_X86_QUIRK_HYPERCALL_INSN not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
test_fix_hypercall();
test_fix_hypercall_disabled();
* will cover the "regular" list of MSRs, the coverage here is purely
* opportunistic and not interesting on its own.
*/
- if (!kvm_check_cap(KVM_CAP_GET_MSR_FEATURES)) {
- print_skip("KVM_CAP_GET_MSR_FEATURES not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES));
(void)kvm_get_msr_index_list();
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- if (!kvm_has_cap(KVM_CAP_HYPERV_CPUID)) {
- print_skip("KVM_CAP_HYPERV_CPUID not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
struct ucall uc;
int stage;
- if (!nested_svm_supported()) {
- print_skip("Nested SVM not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(nested_svm_supported());
+
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu);
int flags;
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
- if (!(flags & KVM_CLOCK_REALTIME)) {
- print_skip("KVM_CLOCK_REALTIME not supported; flags: %x",
- flags);
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
check_clocksource();
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- if (!kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
- print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
{
int warnings_before, warnings_after;
- if (!is_intel_cpu()) {
- print_skip("Must be run on an Intel CPU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(is_intel_cpu());
- if (vm_is_unrestricted_guest(NULL)) {
- print_skip("Unrestricted guest must be disabled");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
warnings_before = get_warnings_count();
}
}
- if (!do_gbpages && !do_maxphyaddr) {
- print_skip("No sub-tests selected");
- return 0;
- }
+ __TEST_REQUIRE(do_gbpages || do_maxphyaddr, "No sub-tests selected");
entry = kvm_get_supported_cpuid_entry(0x80000001);
- if (!(entry->edx & CPUID_GBPAGES)) {
- print_skip("1gb hugepages not supported");
- return 0;
- }
+ TEST_REQUIRE(entry->edx & CPUID_GBPAGES);
if (do_gbpages) {
pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- int rv;
uint64_t msr_platform_info;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
- if (!rv) {
- print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
int main(int argc, char *argv[])
{
- void (*guest_code)(void) = NULL;
+ void (*guest_code)(void);
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- int r;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- r = kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER);
- if (!r) {
- print_skip("KVM_CAP_PMU_EVENT_FILTER not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
- if (use_intel_pmu())
- guest_code = intel_guest_code;
- else if (use_amd_pmu())
- guest_code = amd_guest_code;
-
- if (!guest_code) {
- print_skip("Don't know how to test this guest PMU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
+ guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
- if (!sanity_check_pmu(vcpu)) {
- print_skip("Guest PMU is not functional");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(sanity_check_pmu(vcpu));
if (use_amd_pmu())
test_amd_deny_list(vcpu);
int main(int argc, char *argv[])
{
- if (!kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID)) {
- print_skip("set_boot_cpu_id not available");
- return 0;
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID));
run_vm_bsp(0);
run_vm_bsp(1);
{
struct kvm_cpuid_entry2 *cpuid;
- if (!kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) &&
- !kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
- print_skip("Capabilities not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) ||
+ kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
cpuid = kvm_get_supported_cpuid_entry(0x80000000);
- if (cpuid->eax < 0x8000001f) {
- print_skip("AMD memory encryption not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(cpuid->eax >= 0x8000001f);
+
cpuid = kvm_get_supported_cpuid_entry(0x8000001f);
- if (!(cpuid->eax & X86_FEATURE_SEV)) {
- print_skip("AMD SEV not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(cpuid->eax & X86_FEATURE_SEV);
+
have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES);
if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
setbuf(stdout, NULL);
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
- if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
- print_skip("KVM_CAP_SYNC_REGS not supported");
- exit(KSFT_SKIP);
- }
- if ((cap & INVALID_SYNC_FIELD) != 0) {
- print_skip("The \"invalid\" field is not invalid");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
+ TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_vaddr_t vmx_pages_gva;
struct ucall uc;
- if (!nested_vmx_supported()) {
- print_skip("Nested VMX not supported");
- exit(KSFT_SKIP);
- }
+ nested_vmx_check_supported();
- if (!kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT)) {
- print_skip("KVM_CAP_X86_TRIPLE_FAULT_EVENT not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
int main(int argc, char *argv[])
{
- if (!kvm_has_cap(KVM_CAP_VM_TSC_CONTROL)) {
- print_skip("KVM_CAP_VM_TSC_CONTROL not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
vm = vm_create(NR_TEST_VCPUS);
vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- if (!is_intel_cpu() || vm_is_unrestricted_guest(NULL)) {
- print_skip("Must be run with kvm_intel.unrestricted_guest=0");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(is_intel_cpu());
+ TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
get_set_sigalrm_vcpu(vcpu);
GUEST_DONE();
}
-static void tsc_scaling_check_supported(void)
-{
- if (!kvm_has_cap(KVM_CAP_TSC_CONTROL)) {
- print_skip("TSC scaling not supported by the HW");
- exit(KSFT_SKIP);
- }
-}
-
static void stable_tsc_check_supported(void)
{
FILE *fp;
uint64_t l2_tsc_freq = 0;
nested_vmx_check_supported();
- tsc_scaling_check_supported();
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
stable_tsc_check_supported();
/*
struct kvm_cpuid2 *cpuid;
struct kvm_cpuid_entry2 *entry_1_0;
struct kvm_cpuid_entry2 *entry_a_0;
- bool pdcm_supported = false;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
int ret;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
cpuid = kvm_get_supported_cpuid();
- if (kvm_get_cpuid_max_basic() >= 0xa) {
- entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
- entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
- pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM);
- eax.full = entry_a_0->eax;
- }
- if (!pdcm_supported) {
- print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
- if (!eax.split.version_id) {
- print_skip("PMU is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa);
+
+ entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
+ entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
+ TEST_REQUIRE(entry_1_0->ecx & X86_FEATURE_PDCM);
+
+ eax.full = entry_a_0->eax;
+ __TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU");
/* testcase 1, set capabilities when we have PDCM bit */
vcpu_set_cpuid(vcpu, cpuid);
*/
nested_vmx_check_supported();
- if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) {
- print_skip("KVM_CAP_NESTED_STATE not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
- if (!kvm_has_cap(KVM_CAP_NESTED_STATE)) {
- print_skip("KVM_CAP_NESTED_STATE not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/*
* AMD currently does not implement set_nested_state, so for now we
!strncmp(argv[1], "--verbose", 10));
int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
- if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) {
- print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
int main(int argc, char *argv[])
{
+ unsigned int xen_caps;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
- KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) {
- print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
- exit(KSFT_SKIP);
- }
+ xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+ TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu);
int main(int argc, char *argv[])
{
struct kvm_cpuid_entry2 *entry;
- bool xss_supported = false;
bool xss_in_msr_list;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
- if (kvm_get_cpuid_max_basic() >= 0xd) {
- entry = kvm_get_supported_cpuid_index(0xd, 1);
- xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
- }
- if (!xss_supported) {
- print_skip("IA32_XSS is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xd);
+
+ entry = kvm_get_supported_cpuid_index(0xd, 1);
+ TEST_REQUIRE(entry->eax & X86_FEATURE_XSAVES);
xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0,