Cache a vCPU's CPUID information in "struct kvm_vcpu" to allow fixing the
mess where tests, often unknowingly, modify the global/static "cpuid"
allocated by kvm_get_supported_cpuid().

Add vcpu_init_cpuid() to handle stuffing an entirely different CPUID
model, e.g. during vCPU creation or when switching to the Hyper-V enabled
CPUID model. Automatically refresh the cache on vcpu_set_cpuid() so that
any adjustments made by KVM are always reflected in the cache. Drop
vcpu_get_cpuid() entirely to force tests to use the cache, and to allow
adding e.g. vcpu_get_cpuid_entry() in the future without creating a
conflicting set of APIs where vcpu_get_cpuid() does KVM_GET_CPUID2, but
vcpu_get_cpuid_entry() does not.

Opportunistically convert the VMX nested state test and KVM PV test to
manipulating the vCPU's CPUID (because it's easy), but use
vcpu_init_cpuid() for the Hyper-V features test and "emulator error" test
to effectively retain their current behavior as they're less trivial to
convert.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20220614200707.3315957-19-seanjc@google.com
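
To illustrate the new flow (a sketch only, not part of the patch): a
converted test edits the vCPU-owned cache directly and then pushes it with
vcpu_set_cpuid(), which re-reads CPUID from KVM on success. The function
name example_hide_x2apic() is hypothetical; the leaf/bit it tweaks simply
mirrors the xAPIC hunk at the end of this patch.

static void example_hide_x2apic(struct kvm_vcpu *vcpu)
{
	/* The cache is owned by the vCPU; no local allocation needed. */
	struct kvm_cpuid2 *cpuid = vcpu->cpuid;
	int i;

	/* Locate the standard function 0x1 leaf in the cached copy. */
	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == 1)
			break;
	}
	TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");

	/* Hide x2APIC (CPUID.1:ECX bit 21) in the cached entry. */
	cpuid->entries[i].ecx &= ~BIT(21);

	/*
	 * Push the modified cache to KVM; vcpu_set_cpuid() refreshes the
	 * cache via KVM_GET_CPUID2 so any adjustments made by KVM are
	 * reflected for later lookups.
	 */
	vcpu_set_cpuid(vcpu);
}
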
int fd;
struct kvm_vm *vm;
struct kvm_run *run;
+#ifdef __x86_64__
+ struct kvm_cpuid2 *cpuid;
+#endif
struct kvm_dirty_gfn *dirty_gfns;
uint32_t fetch_index;
uint32_t dirty_gfns_count;
return vm_arch_vcpu_recreate(vm, vcpu_id);
}
+void vcpu_arch_free(struct kvm_vcpu *vcpu);
+
void virt_arch_pgd_alloc(struct kvm_vm *vm);
static inline void virt_pgd_alloc(struct kvm_vm *vm)
return cpuid;
}
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu);
+void vcpu_init_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid);
-static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid)
+static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
- return __vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
+ int r;
+
+ TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
+ r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
+ if (r)
+ return r;
+
+ /* On success, refresh the cache to pick up adjustments made by KVM. */
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
+ return 0;
}
-static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid)
+static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
- vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
+ TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
+ vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
+
+ /* Refresh the cache to pick up adjustments made by KVM. */
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}
struct kvm_cpuid_entry2 *
	return &region->region;
}
+__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
+{
+
+}
+
/*
* VM VCPU Remove
*
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
list_del(&vcpu->list);
+
+ vcpu_arch_free(vcpu);
free(vcpu);
}
DEFAULT_GUEST_STACK_VADDR_MIN);
vcpu = __vm_vcpu_add(vm, vcpu_id);
- vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
vcpu_setup(vm, vcpu);
/* Setup guest general purpose registers */
{
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
- vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
return vcpu;
}
+void vcpu_arch_free(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->cpuid)
+ free(vcpu->cpuid);
+}
+
/*
* KVM Supported CPUID Get
*
return buffer.entry.data;
}
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu)
+void vcpu_init_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid)
{
- struct kvm_cpuid2 *cpuid;
- int max_ent;
- int rc = -1;
-
- cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
- max_ent = cpuid->nent;
+ TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
- for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
- rc = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
- if (!rc)
- break;
-
- TEST_ASSERT(rc == -1 && errno == E2BIG,
- "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
- rc, errno);
+ /* Allow overriding the default CPUID. */
+ if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
+ free(vcpu->cpuid);
+ vcpu->cpuid = NULL;
}
- TEST_ASSERT(!rc, KVM_IOCTL_ERROR(KVM_GET_CPUID2, rc));
- return cpuid;
-}
-
+ if (!vcpu->cpuid)
+ vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
+ memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
+ vcpu_set_cpuid(vcpu);
+}
/*
* Locate a cpuid entry.
cpuid_full->nent = nent + cpuid_hv->nent;
}
- vcpu_set_cpuid(vcpu, cpuid_full);
+ vcpu_init_cpuid(vcpu, cpuid_full);
}
struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
return guest_cpuids;
}
-static void set_cpuid_after_run(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid)
+static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpuid2 *cpuid = vcpu->cpuid;
struct kvm_cpuid_entry2 *ent;
int rc;
u32 eax, ebx, x;
/* Setting unmodified CPUID is allowed */
- rc = __vcpu_set_cpuid(vcpu, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
/* Changing CPU features is forbidden */
ent = get_cpuid(cpuid, 0x7, 0);
ebx = ent->ebx;
ent->ebx--;
- rc = __vcpu_set_cpuid(vcpu, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing CPU features should fail");
ent->ebx = ebx;
eax = ent->eax;
x = eax & 0xff;
ent->eax = (eax & ~0xffu) | (x - 1);
- rc = __vcpu_set_cpuid(vcpu, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
ent->eax = eax;
}
int main(void)
{
- struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+ struct kvm_cpuid2 *supp_cpuid;
struct kvm_vcpu *vcpu;
vm_vaddr_t cpuid_gva;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
supp_cpuid = kvm_get_supported_cpuid();
- cpuid2 = vcpu_get_cpuid(vcpu);
- compare_cpuids(supp_cpuid, cpuid2);
+ compare_cpuids(supp_cpuid, vcpu->cpuid);
- vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
+ vcpu_alloc_cpuid(vm, &cpuid_gva, vcpu->cpuid);
vcpu_args_set(vcpu, 1, cpuid_gva);
for (stage = 0; stage < 3; stage++)
run_vcpu(vcpu, stage);
- set_cpuid_after_run(vcpu, cpuid2);
+ set_cpuid_after_run(vcpu);
kvm_vm_free(vm);
}
entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
set_cpuid(cpuid, entry);
- vcpu_set_cpuid(vcpu, cpuid);
+ vcpu_init_cpuid(vcpu, cpuid);
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
"failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
TEST_ASSERT(set_cpuid(cpuid, dbg),
"failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
- vcpu_set_cpuid(vcpu, cpuid);
+ vcpu_init_cpuid(vcpu, cpuid);
}
static void guest_test_msrs_access(void)
int main(void)
{
- struct kvm_cpuid2 *best;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
- best = kvm_get_supported_cpuid();
- clear_kvm_cpuid_features(best);
- vcpu_set_cpuid(vcpu, best);
+ clear_kvm_cpuid_features(vcpu->cpuid);
+ vcpu_set_cpuid(vcpu);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
set_cpuid(cpuid, entry);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_set_cpuid(vcpu, cpuid);
+ vcpu_set_cpuid(vcpu);
run = vcpu->run;
test_nested_state(vcpu, state);
/* Enable VMX in the guest CPUID. */
- vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_set_cpuid(vcpu);
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
void disable_vmx(struct kvm_vcpu *vcpu)
{
- struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
+ struct kvm_cpuid2 *cpuid = vcpu->cpuid;
int i;
for (i = 0; i < cpuid->nent; ++i)
TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
cpuid->entries[i].ecx &= ~CPUID_VMX;
- vcpu_set_cpuid(vcpu, cpuid);
+ vcpu_set_cpuid(vcpu);
cpuid->entries[i].ecx |= CPUID_VMX;
}
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false;
- cpuid = vcpu_get_cpuid(x.vcpu);
+ cpuid = x.vcpu->cpuid;
for (i = 0; i < cpuid->nent; i++) {
if (cpuid->entries[i].function == 1)
break;
}
cpuid->entries[i].ecx &= ~BIT(21);
- vcpu_set_cpuid(x.vcpu, cpuid);
+ vcpu_set_cpuid(x.vcpu);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
test_icr(&x);