#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct xarray vcpu_array;
/* Used to wait for completion of MMU notifiers. */
spinlock_t mn_invalidate_lock;
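/*
 * The fixed-size vcpus[KVM_MAX_VCPUS] pointer array is replaced by an
 * xarray keyed by vcpu_idx; entries are only populated as vCPUs are
 * created, so the per-VM footprint follows the number of vCPUs actually
 * in use rather than KVM_MAX_VCPUS.
 */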
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
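/*
 * Ordering note: xa_load() needs no extra locking here, but the smp_rmb()
 * above pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu() so that a
 * reader observing the incremented online_vcpus count also observes the
 * vcpu pointer that was inserted into the xarray before the increment.
 */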
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu);
- kvm->vcpus[i] = NULL;
+ xa_erase(&kvm->vcpu_array, i);
}
atomic_set(&kvm->online_vcpus, 0);
mutex_init(&kvm->slots_arch_lock);
spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
+ xa_init(&kvm->vcpu_array);
INIT_LIST_HEAD(&kvm->devices);
}
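/*
 * xa_init() merely sets up an empty structure and cannot fail, so
 * kvm_create_vm() needs no additional error handling or teardown
 * ordering for vcpu_array.
 */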
vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
- BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
+ r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
+ BUG_ON(r == -EBUSY);
+ if (r)
+ goto unlock_vcpu_destroy;
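/*
 * xa_insert() returns -EBUSY if the index is already occupied and -ENOMEM
 * if its internal node allocation (GFP_KERNEL_ACCOUNT here) fails. The
 * slot at vcpu_idx must be empty because vCPU creation is serialized and
 * online_vcpus has not been incremented yet, hence the BUG_ON(); any
 * other error simply unwinds through unlock_vcpu_destroy.
 */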
/* Fill the stats id string for the vcpu */
snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
	 task_pid_nr(current), id);
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0) {
+ xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
kvm_put_kvm_no_destroy(kvm);
goto unlock_vcpu_destroy;
}
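/*
 * On create_vcpu_fd() failure the just-inserted entry is erased again;
 * since online_vcpus was never incremented, no reader can have observed
 * it via kvm_get_vcpu() or kvm_for_each_vcpu().
 */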
- kvm->vcpus[vcpu->vcpu_idx] = vcpu;
-
/*
- * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
- * before kvm->online_vcpu's incremented value.
+ * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
+ * pointer before kvm->online_vcpu's incremented value.
*/
smp_wmb();
atomic_inc(&kvm->online_vcpus);
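/*
 * A minimal sketch of the xarray lifecycle this conversion relies on,
 * shown outside of KVM. All names below (my_obj, my_objs, my_add,
 * my_lookup, my_del) are invented for illustration; only the xa_*()
 * calls mirror what the patch does with kvm->vcpu_array.
 */
#include <linux/xarray.h>

struct my_obj {
	int id;
};

static DEFINE_XARRAY(my_objs);	/* static equivalent of xa_init() */

static int my_add(unsigned long idx, struct my_obj *obj)
{
	/* Fails with -EBUSY if idx is occupied, -ENOMEM if allocation fails. */
	return xa_insert(&my_objs, idx, obj, GFP_KERNEL);
}

static struct my_obj *my_lookup(unsigned long idx)
{
	/* Lockless lookup; the xarray takes the RCU read lock internally. */
	return xa_load(&my_objs, idx);
}

static void my_del(unsigned long idx)
{
	/* Drops the entry; freeing the object remains the caller's job. */
	xa_erase(&my_objs, idx);
}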