#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
+#include <linux/hashtable.h>
#include <linux/xarray.h>
#include <asm/signal.h>
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
struct kvm_memory_slot {
+ struct hlist_node id_node;
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
*/
struct kvm_memslots {
u64 generation;
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
+ /*
+ * The mapping table from slot id to the index in memslots[].
+ *
+ * 7-bit bucket count matches the size of the old id-to-index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
atomic_t last_used_slot;
int used_slots;
struct kvm_memory_slot memslots[];
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
- if (index < 0)
- return NULL;
-
- slot = &slots->memslots[index];
+ hash_for_each_possible(slots->id_hash, slot, id_node, id) {
+ if (slot->id == id)
+ return slot;
+ }
- WARN_ON(slot->id != id);
- return slot;
+ return NULL;
}
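+
+/*
+ * Illustrative caller sketch (assumed usage, not lifted from this patch):
+ *
+ *	slot = id_to_memslot(kvm_memslots(kvm), log->slot);
+ *	if (!slot || !slot->dirty_bitmap)
+ *		return -ENOENT;
+ *
+ * Distinct ids can hash to the same bucket, which is why the lookup
+ * still checks slot->id == id instead of returning the first chain entry.
+ */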
/*
static struct kvm_memslots *kvm_alloc_memslots(void)
{
- int i;
struct kvm_memslots *slots;
slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
if (!slots)
return NULL;
- for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[i] = -1;
+ hash_init(slots->id_hash);
return slots;
}
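+
+/*
+ * Note (observation on the change above): hash_init() only zeroes the
+ * 128 bucket heads, so the old loop seeding all KVM_MEM_SLOTS_NUM
+ * id_to_index entries with -1 is no longer needed.
+ */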
return 0;
}
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new)
+{
+ /*
+ * Remove the old memslot from the hash list; copying the node data
+ * would corrupt the list.
+ */
+ if (old) {
+ hash_del(&old->id_node);
+
+ if (!new)
+ return;
+
+ /* Copy the source *data*, not the pointer, to the destination. */
+ *new = *old;
+ }
+
+ /* (Re)Add the new memslot. */
+ hash_add(slots->id_hash, &new->id_node, new->id);
+}
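+
+/*
+ * Usage map (derived from the callers below, for illustration):
+ *
+ *	kvm_replace_memslot(slots, old, NULL)	unhash @old (deletion)
+ *	kvm_replace_memslot(slots, NULL, new)	hash @new (creation)
+ *	kvm_replace_memslot(slots, old, new)	unhash @old, copy its data
+ *						into @new, hash @new (move)
+ */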
+
+static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+
+ kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
+}
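+
+/*
+ * E.g. kvm_shift_memslot(slots, i, i + 1) copies memslots[i + 1] into
+ * memslots[i] and moves the hash linkage along with it, so
+ * id_to_memslot() keeps resolving the shifted slot at its new array
+ * position.
+ */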
+
/*
* Delete a memslot by decrementing the number of used slots and shifting all
* other entries in the array forward one spot.
+ * @memslot is a detached dummy struct with just .id and .as_id filled.
*/
static inline void kvm_memslot_delete(struct kvm_memslots *slots,
struct kvm_memory_slot *memslot)
{
struct kvm_memory_slot *mslots = slots->memslots;
+ struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
int i;
- if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+ if (WARN_ON(!oldslot))
return;
slots->used_slots--;
if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
atomic_set(&slots->last_used_slot, 0);
- for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
- mslots[i] = mslots[i + 1];
- slots->id_to_index[mslots[i].id] = i;
- }
+ /*
+ * Remove the to-be-deleted memslot from the list _before_ shifting
+ * the trailing memslots forward, as its data will be overwritten.
+ * Defer the (somewhat pointless) copying of the memslot until after
+ * the last slot has been shifted to avoid overwriting said last slot.
+ */
+ kvm_replace_memslot(slots, oldslot, NULL);
+
+ for (i = oldslot - mslots; i < slots->used_slots; i++)
+ kvm_shift_memslot(slots, i, i + 1);
mslots[i] = *memslot;
- slots->id_to_index[memslot->id] = -1;
}
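+
+/*
+ * Worked example (illustrative): with used_slots == 3 and the victim at
+ * index 1, used_slots drops to 2, the victim is unhashed, the loop
+ * shifts index 2 into index 1 (rehashing it), and the detached dummy is
+ * finally written to index 2, which now lies beyond used_slots and is
+ * therefore never added to the hash table.
+ */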
/*
* itself is not preserved in the array, i.e. not swapped at this time, only
* its new index into the array is tracked. Returns the changed memslot's
* current index into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the changed slot.
*/
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
struct kvm_memory_slot *memslot)
{
struct kvm_memory_slot *mslots = slots->memslots;
+ struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
int i;
- if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots)
+ if (!oldslot || !slots->used_slots)
return -1;
+ /*
+ * Delete the slot from the hash table before sorting the remaining
+ * slots, as the slot's data may be overwritten when copying slots as
+ * part of the sorting process. update_memslots() will unconditionally
+ * rewrite the entire slot and re-add it to the hash table.
+ */
+ kvm_replace_memslot(slots, oldslot, NULL);
+
/*
* Move the target memslot backward in the array by shifting existing
* memslots with a higher GFN (than the target memslot) towards the
* front of the array.
*/
- for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+ for (i = oldslot - mslots; i < slots->used_slots - 1; i++) {
if (memslot->base_gfn > mslots[i + 1].base_gfn)
break;
WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
- /* Shift the next memslot forward one and update its index. */
- mslots[i] = mslots[i + 1];
- slots->id_to_index[mslots[i].id] = i;
+ kvm_shift_memslot(slots, i, i + 1);
}
return i;
}
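+
+/*
+ * Sketch of the invariant (not new behaviour): the memslots array stays
+ * sorted by base_gfn in descending order, so every shifted neighbour has
+ * a higher base_gfn than @memslot; the returned index i is a hole whose
+ * array entry holds stale data and, thanks to the earlier unhashing, is
+ * no longer reachable via id_to_memslot().
+ */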
* is not preserved in the array, i.e. not swapped at this time, only its new
* index into the array is tracked. Returns the changed memslot's final index
* into the memslots array.
+ * The memslot at the returned index will not be in @slots->id_hash by then.
+ * @memslot is a detached struct with desired final data of the new or
+ * changed slot.
+ * Assumes that the memslot at @start index is not in @slots->id_hash.
*/
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
struct kvm_memory_slot *memslot,
WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
- /* Shift the next memslot back one and update its index. */
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
+ kvm_shift_memslot(slots, i, i - 1);
}
return i;
}
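+
+/*
+ * Mirror sketch of kvm_memslot_move_backward(): neighbours with a lower
+ * base_gfn are shifted towards the back via kvm_shift_memslot(slots, i,
+ * i - 1), moving the hole (and eventually @memslot) towards the front
+ * of the array.
+ */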
* most likely to be referenced, sorting it to the front of the array was
* advantageous. The current binary search starts from the middle of the array
* and uses an LRU pointer to improve performance for all memslots and GFNs.
+ *
+ * @memslot is a detached struct, not a part of the current or new memslot
+ * array.
*/
static void update_memslots(struct kvm_memslots *slots,
struct kvm_memory_slot *memslot,
* its index accordingly.
*/
slots->memslots[i] = *memslot;
- slots->id_to_index[memslot->id] = i;
+ kvm_replace_memslot(slots, NULL, &slots->memslots[i]);
}
}
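+
+/*
+ * Note (sketch of a subtle point): the detached @memslot is copied into
+ * the array first and only the in-array copy is hashed; hashing the
+ * caller's temporary struct instead would leave a dangling id_node once
+ * that struct goes out of scope.
+ */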
{
struct kvm_memslots *slots;
size_t new_size;
+ struct kvm_memory_slot *memslot;
if (change == KVM_MR_CREATE)
new_size = kvm_memslots_size(old->used_slots + 1);
new_size = kvm_memslots_size(old->used_slots);
slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
- if (likely(slots))
- memcpy(slots, old, kvm_memslots_size(old->used_slots));
+ if (unlikely(!slots))
+ return NULL;
+
+ memcpy(slots, old, kvm_memslots_size(old->used_slots));
+
+ hash_init(slots->id_hash);
+ kvm_for_each_memslot(memslot, slots)
+ hash_add(slots->id_hash, &memslot->id_node, memslot->id);
return slots;
}
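+
+/*
+ * Why the rebuild above (illustrative): memcpy() duplicates hlist_node
+ * pointers that still reference @old's buckets, so the copy starts from
+ * an empty table and re-adds every live slot. kvm_for_each_memslot()
+ * walks only the first used_slots entries, so unused tail entries never
+ * get hashed.
+ */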