[PATCH] tmp

From: Sean Christopherson
Date: Tue May 25 2021 - 18:14:05 EST


---
include/linux/kvm_host.h |  31 ++-
virt/kvm/kvm_main.c      | 427 ++++++++++++++++++---------------------
2 files changed, 204 insertions(+), 254 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 84fd72d8bb23..85ee81318362 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -508,7 +508,7 @@ struct kvm_memslots {
struct rb_root gfn_tree;
/* The mapping table from slot id to memslot. */
DECLARE_HASHTABLE(id_hash, 7);
- bool is_idx_0;
+ int node_idx;
};

struct kvm {
@@ -520,7 +520,7 @@ struct kvm {

struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots memslots_all[KVM_ADDRESS_SPACE_NUM][2];
+ struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

@@ -724,15 +724,9 @@ static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
return RB_EMPTY_ROOT(&slots->gfn_tree);
}

-static inline int kvm_memslots_idx(struct kvm_memslots *slots)
-{
- return slots->is_idx_0 ? 0 : 1;
-}
-
-#define kvm_for_each_memslot(memslot, ctr, slots) \
- hash_for_each(slots->id_hash, ctr, memslot, \
- id_node[kvm_memslots_idx(slots)]) \
- if (WARN_ON_ONCE(!memslot->npages)) { \
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
} else

#define kvm_for_each_hva_range_memslot(node, slots, start, last) \
@@ -743,10 +737,10 @@ static inline int kvm_memslots_idx(struct kvm_memslots *slots)
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int idxactive = kvm_memslots_idx(slots);
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;

- hash_for_each_possible(slots->id_hash, slot, id_node[idxactive], id) {
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
if (slot->id == id)
return slot;
}
@@ -1160,19 +1154,19 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
- int idxactive = kvm_memslots_idx(slots);
struct kvm_memory_slot *slot;
struct rb_node *prevnode, *node;
+ int idx = slots->node_idx;

slot = (struct kvm_memory_slot *)atomic_long_read(&slots->lru_slot);
if (slot &&
gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
return slot;

- for (prevnode = NULL, node = slots->gfn_tree.rb_node; node; ) {
+ prevnode = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
prevnode = node;
- slot = container_of(node, struct kvm_memory_slot,
- gfn_node[idxactive]);
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
if (gfn >= slot->base_gfn) {
if (gfn < slot->base_gfn + slot->npages) {
atomic_long_set(&slots->lru_slot,
@@ -1185,8 +1179,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
}

if (approx && prevnode)
- return container_of(prevnode, struct kvm_memory_slot,
- gfn_node[idxactive]);
+ return container_of(prevnode, struct kvm_memory_slot, gfn_node[idx]);

return NULL;
}
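
For readers skimming the header diff: search_memslots() is a plain range
lookup fronted by a last-used cache. A minimal userspace sketch of that
shape, with a sorted array standing in for the gfn tree (illustrative
names, not kernel code):

#include <stdio.h>

struct slot {
        unsigned long base_gfn;
        unsigned long npages;
};

static struct slot slots[] = {
        { 0x000, 0x100 }, { 0x200, 0x080 }, { 0x400, 0x040 },
};
static int lru;                         /* analogue of slots->lru_slot */

static struct slot *lookup(unsigned long gfn)
{
        int lo = 0, hi = (int)(sizeof(slots) / sizeof(slots[0])) - 1;
        struct slot *s = &slots[lru];

        /* Fast path: the cached slot still covers this gfn. */
        if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
                return s;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                s = &slots[mid];
                if (gfn >= s->base_gfn) {
                        if (gfn < s->base_gfn + s->npages) {
                                lru = mid;      /* refresh the cache */
                                return s;
                        }
                        lo = mid + 1;
                } else {
                        hi = mid - 1;
                }
        }
        return NULL;
}

int main(void)
{
        struct slot *s = lookup(0x210);

        if (s)
                printf("gfn 0x210 -> slot base 0x%lx\n", s->base_gfn);
        return 0;
}
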
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 189504b27ca6..0744b081b16b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -510,17 +510,15 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
}

for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- int idxactive;
struct interval_tree_node *node;

slots = __kvm_memslots(kvm, i);
- idxactive = kvm_memslots_idx(slots);
kvm_for_each_hva_range_memslot(node, slots,
range->start, range->end - 1) {
unsigned long hva_start, hva_end;

slot = container_of(node, struct kvm_memory_slot,
- hva_node[idxactive]);
+ hva_node[slots->node_idx]);
hva_start = max(range->start, slot->userspace_addr);
hva_end = min(range->end, slot->userspace_addr +
(slot->npages << PAGE_SHIFT));
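
The clamping above just intersects the notifier range with each slot's hva
span. A standalone sketch, assuming 4K pages and made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4K pages for the example */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Notifier range and one memslot's hva span (made-up numbers). */
        unsigned long start = 0x7f0000001000, end = 0x7f0000005000;
        unsigned long uaddr = 0x7f0000003000, npages = 4;

        unsigned long hva_start = max_ul(start, uaddr);
        unsigned long hva_end = min_ul(end, uaddr + (npages << PAGE_SHIFT));

        if (hva_start < hva_end)
                printf("handle hvas [%#lx, %#lx)\n", hva_start, hva_end);
        return 0;
}
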
@@ -787,14 +785,6 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

-static void kvm_init_memslots(struct kvm_memslots *slots)
-{
- atomic_long_set(&slots->lru_slot, (unsigned long)NULL);
- slots->hva_tree = RB_ROOT_CACHED;
- slots->gfn_tree = RB_ROOT;
- hash_init(slots->id_hash);
-}
-
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
if (!memslot->dirty_bitmap)
@@ -816,18 +806,18 @@ static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
- int ctr;
struct hlist_node *idnode;
struct kvm_memory_slot *memslot;
+ int bkt;

/*
- * Both active and inactive struct kvm_memslots should point to
- * the same set of memslots, so it's enough to free them once
+ * The same memslot objects are linked into both the active and the
+ * inactive set, so free them only once, arbitrarily via the set
+ * with node_idx '0'.
*/
- if (slots->is_idx_0)
+ if (slots->node_idx)
return;

- hash_for_each_safe(slots->id_hash, ctr, idnode, memslot, id_node[1])
+ hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[0])
kvm_free_memslot(kvm, memslot);
}
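
Why freeing via a single node_idx suffices: each memslot object embeds one
set of nodes per memslot set, so the very same object is linked into both
the active and the inactive set and must be freed exactly once. A toy
userspace sketch of that dual linking, with sorted lists standing in for
the trees (illustrative, not kernel code):

#include <stdio.h>

struct slot {
        unsigned long base_gfn;
        struct slot *next[2];   /* one link per set, like gfn_node[2] */
};

struct slots {
        struct slot *head;
        int node_idx;           /* which embedded link this set owns */
};

static void slots_insert(struct slots *s, struct slot *slot)
{
        struct slot **pp = &s->head;
        int idx = s->node_idx;

        /* A sorted singly-linked list stands in for the rb-trees. */
        while (*pp && (*pp)->base_gfn < slot->base_gfn)
                pp = &(*pp)->next[idx];
        slot->next[idx] = *pp;
        *pp = slot;
}

int main(void)
{
        struct slots sets[2] = { { .node_idx = 0 }, { .node_idx = 1 } };
        struct slot a = { .base_gfn = 0x100 }, b = { .base_gfn = 0x10 };
        int i;

        /* The same two objects get linked into both sets... */
        for (i = 0; i < 2; i++) {
                slots_insert(&sets[i], &a);
                slots_insert(&sets[i], &b);
        }

        /* ...so they must be freed exactly once, not once per set. */
        for (i = 0; i < 2; i++) {
                struct slot *p;

                for (p = sets[i].head; p; p = p->next[sets[i].node_idx])
                        printf("set %d: base_gfn 0x%lx\n", i, p->base_gfn);
        }
        return 0;
}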

@@ -900,8 +890,9 @@ void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
static struct kvm *kvm_create_vm(unsigned long type)
{
struct kvm *kvm = kvm_arch_alloc_vm();
+ struct kvm_memslots *slots;
int r = -ENOMEM;
- int i;
+ int i, j;

if (!kvm)
return ERR_PTR(-ENOMEM);
@@ -924,14 +915,20 @@ static struct kvm *kvm_create_vm(unsigned long type)

refcount_set(&kvm->users_count, 1);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- kvm_init_memslots(&kvm->memslots_all[i][0]);
- kvm_init_memslots(&kvm->memslots_all[i][1]);
- kvm->memslots_all[i][0].is_idx_0 = true;
- kvm->memslots_all[i][1].is_idx_0 = false;
+ for (j = 0; j < 2; j++) {
+ slots = &kvm->__memslots[i][j];

- /* Generations must be different for each address space. */
- kvm->memslots_all[i][0].generation = i;
- rcu_assign_pointer(kvm->memslots[i], &kvm->memslots_all[i][0]);
+ atomic_long_set(&slots->lru_slot, (unsigned long)NULL);
+ slots->hva_tree = RB_ROOT_CACHED;
+ slots->gfn_tree = RB_ROOT;
+ hash_init(slots->id_hash);
+ slots->node_idx = j;
+
+ /* Generations must be different for each address space. */
+ slots->generation = i;
+ }
+
+ rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
}

for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -1038,8 +1035,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- kvm_free_memslots(kvm, &kvm->memslots_all[i][0]);
- kvm_free_memslots(kvm, &kvm->memslots_all[i][1]);
+ kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
+ kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
}
cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu);
@@ -1114,13 +1111,12 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m
return 0;
}

-static void swap_memslots(struct kvm *kvm, int as_id)
+static void kvm_swap_active_memslots(struct kvm *kvm, int as_id,
+ struct kvm_memslots **active,
+ struct kvm_memslots **inactive)
{
- struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
- int idxactive = kvm_memslots_idx(old_memslots);
- int idxina = idxactive == 0 ? 1 : 0;
- struct kvm_memslots *slots = &kvm->memslots_all[as_id][idxina];
- u64 gen = old_memslots->generation;
+ struct kvm_memslots *slots = *inactive;
+ u64 gen = (*active)->generation;

WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
@@ -1148,37 +1144,55 @@ static void swap_memslots(struct kvm *kvm, int as_id)
kvm_arch_memslots_updated(kvm, gen);

slots->generation = gen;
+
+ swap(*active, *inactive);
}

-static void kvm_memslot_gfn_insert(struct rb_root *gfn_tree,
- struct kvm_memory_slot *slot,
- int which)
+static void kvm_memslot_gfn_insert(struct kvm_memslots *slots,
+ struct kvm_memory_slot *slot)
{
- struct rb_node **cur, *parent;
+ struct rb_root *gfn_tree = &slots->gfn_tree;
+ struct rb_node **node, *parent;
+ int idx = slots->node_idx;

- for (cur = &gfn_tree->rb_node, parent = NULL; *cur; ) {
- struct kvm_memory_slot *cslot;
+ parent = NULL;
+ for (node = &gfn_tree->rb_node; *node; ) {
+ struct kvm_memory_slot *tmp;

- cslot = container_of(*cur, typeof(*cslot), gfn_node[which]);
- parent = *cur;
- if (slot->base_gfn < cslot->base_gfn)
- cur = &(*cur)->rb_left;
- else if (slot->base_gfn > cslot->base_gfn)
- cur = &(*cur)->rb_right;
+ tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
+ parent = *node;
+ if (slot->base_gfn < tmp->base_gfn)
+ node = &(*node)->rb_left;
+ else if (slot->base_gfn > tmp->base_gfn)
+ node = &(*node)->rb_right;
else
BUG();
}

- rb_link_node(&slot->gfn_node[which], parent, cur);
- rb_insert_color(&slot->gfn_node[which], gfn_tree);
+ rb_link_node(&slot->gfn_node[idx], parent, node);
+ rb_insert_color(&slot->gfn_node[idx], gfn_tree);
+}
+
+static void kvm_memslot_gfn_erase(struct kvm_memslots *slots,
+ struct kvm_memory_slot *slot)
+{
+ rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
+}
+
+static void kvm_memslot_gfn_replace(struct kvm_memslots *slots,
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new)
+{
+ int idx = slots->node_idx;
+
+ WARN_ON_ONCE(old->base_gfn != new->base_gfn);
+
+ rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
+ &slots->gfn_tree);
}

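For reference, the descend-and-link shape of kvm_memslot_gfn_insert() in a
plain (unbalanced) BST, minus the rb-tree API; the assert() stands in for
the BUG() on overlapping base gfns (userspace sketch, illustrative names):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct gnode {
        unsigned long base_gfn;
        struct gnode *left, *right;
};

static void gfn_insert(struct gnode **root, struct gnode *slot)
{
        struct gnode **node = root;

        /* Descend while remembering the link that must be rewritten. */
        while (*node) {
                if (slot->base_gfn < (*node)->base_gfn)
                        node = &(*node)->left;
                else if (slot->base_gfn > (*node)->base_gfn)
                        node = &(*node)->right;
                else
                        assert(0);      /* duplicate base gfn, like BUG() */
        }
        slot->left = slot->right = NULL;
        *node = slot;                   /* rb_link/rb_insert_color stand-in */
}

int main(void)
{
        struct gnode a = { .base_gfn = 0x200 }, b = { .base_gfn = 0x100 };
        struct gnode *root = NULL;

        gfn_insert(&root, &a);
        gfn_insert(&root, &b);
        printf("root 0x%lx, left child 0x%lx\n",
               root->base_gfn, root->left->base_gfn);
        return 0;
}
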
-/*
- * Just copies the memslot data.
- * Does not copy or touch the embedded nodes, including the ranges at hva_nodes.
- */
static void kvm_copy_memslot(struct kvm_memory_slot *dest,
- struct kvm_memory_slot *src)
+ const struct kvm_memory_slot *src)
{
dest->base_gfn = src->base_gfn;
dest->npages = src->npages;
@@ -1188,89 +1202,72 @@ static void kvm_copy_memslot(struct kvm_memory_slot *dest,
dest->flags = src->flags;
dest->id = src->id;
dest->as_id = src->as_id;
-}

-/*
- * Initializes the ranges at both hva_nodes from the memslot userspace_addr
- * and npages fields.
- */
-static void kvm_init_memslot_hva_ranges(struct kvm_memory_slot *slot)
-{
- slot->hva_node[0].start = slot->hva_node[1].start =
- slot->userspace_addr;
- slot->hva_node[0].last = slot->hva_node[1].last =
- slot->userspace_addr + (slot->npages << PAGE_SHIFT) - 1;
+ dest->hva_node[0].start = dest->hva_node[1].start =
+ dest->userspace_addr;
+ dest->hva_node[0].last = dest->hva_node[1].last =
+ dest->userspace_addr + (dest->npages << PAGE_SHIFT) - 1;
}

/*
- * Replaces the @oldslot with @nslot in the memslot set indicated by
- * @slots_idx.
+ * Replace @old with @new in @slots.
*
- * With NULL @oldslot this simply adds the @nslot to the set.
- * With NULL @nslot this simply removes the @oldslot from the set.
+ * With NULL @old this simply adds the @new to @slots.
+ * With NULL @new this simply removes the @old from @slots.
*
- * If @nslot is non-NULL its hva_node[slots_idx] range has to be set
+ * If @new is non-NULL its hva_node[slots->node_idx] range has to be set
* appropriately.
*/
-static void kvm_replace_memslot(struct kvm *kvm,
- int as_id, int slots_idx,
- struct kvm_memory_slot *oldslot,
- struct kvm_memory_slot *nslot)
+static void kvm_replace_memslot(struct kvm_memslots *slots,
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new)
{
- struct kvm_memslots *slots = &kvm->memslots_all[as_id][slots_idx];
+ int idx = slots->node_idx;

- if (WARN_ON(!oldslot && !nslot))
- return;
-
- if (oldslot) {
- hash_del(&oldslot->id_node[slots_idx]);
- interval_tree_remove(&oldslot->hva_node[slots_idx],
- &slots->hva_tree);
+ if (old) {
+ hash_del(&old->id_node[idx]);
+ interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
atomic_long_cmpxchg(&slots->lru_slot,
- (unsigned long)oldslot,
- (unsigned long)nslot);
- if (!nslot) {
- rb_erase(&oldslot->gfn_node[slots_idx],
- &slots->gfn_tree);
+ (unsigned long)old, (unsigned long)new);
+ if (!new) {
+ kvm_memslot_gfn_erase(slots, old);
return;
}
}

- hash_add(slots->id_hash, &nslot->id_node[slots_idx],
- nslot->id);
WARN_ON(PAGE_SHIFT > 0 &&
- nslot->hva_node[slots_idx].start >=
- nslot->hva_node[slots_idx].last);
- interval_tree_insert(&nslot->hva_node[slots_idx],
- &slots->hva_tree);
+ new->hva_node[idx].start >= new->hva_node[idx].last);
+ hash_add(slots->id_hash, &new->id_node[idx], new->id);
+ interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

/* Shame there is no O(1) interval_tree_replace()... */
- if (oldslot && oldslot->base_gfn == nslot->base_gfn)
- rb_replace_node(&oldslot->gfn_node[slots_idx],
- &nslot->gfn_node[slots_idx],
- &slots->gfn_tree);
- else {
- if (oldslot)
- rb_erase(&oldslot->gfn_node[slots_idx],
- &slots->gfn_tree);
- kvm_memslot_gfn_insert(&slots->gfn_tree,
- nslot, slots_idx);
+ if (old && old->base_gfn == new->base_gfn) {
+ kvm_memslot_gfn_replace(slots, old, new);
+ } else {
+ if (old)
+ kvm_memslot_gfn_erase(slots, old);
+ kvm_memslot_gfn_insert(slots, new);
}
}

-/*
- * Copies the @oldslot data into @nslot and uses this slot to replace
- * @oldslot in the memslot set indicated by @slots_idx.
- */
-static void kvm_copy_replace_memslot(struct kvm *kvm,
- int as_id, int slots_idx,
- struct kvm_memory_slot *oldslot,
- struct kvm_memory_slot *nslot)
+static void kvm_activate_memslot(struct kvm *kvm, int as_id,
+ struct kvm_memslots **active,
+ struct kvm_memslots **inactive,
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new)
{
- kvm_copy_memslot(nslot, oldslot);
- kvm_init_memslot_hva_ranges(nslot);
+ /*
+ * Swap the active <-> inactive memslots. Note, this also swaps
+ * the active and inactive pointers themselves.
+ */
+ kvm_swap_active_memslots(kvm, as_id, active, inactive);

- kvm_replace_memslot(kvm, as_id, slots_idx, oldslot, nslot);
+ /* Propagate the new memslot to the now inactive memslots. */
+ kvm_replace_memslot(*inactive, old, new);
+
+ /* And free the old slot, kfree(NULL) is a nop. */
+ kfree(old);
}

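kvm_activate_memslot() is a stage/publish/back-fill pattern: mutate the
inactive copy, swap the roles, then apply the same mutation to the now
inactive copy so the two sets converge. Sketched with two plain arrays
(userspace, illustrative):

#include <stdio.h>

struct set {
        int vals[4];
};

static void swap_sets(struct set **active, struct set **inactive)
{
        struct set *tmp = *active;

        *active = *inactive;
        *inactive = tmp;
}

int main(void)
{
        struct set a = { { 1, 2, 3, 0 } }, b = { { 1, 2, 3, 0 } };
        struct set *active = &a, *inactive = &b;

        /* Stage the update in the inactive copy... */
        inactive->vals[3] = 4;

        /* ...publish it by swapping the active and inactive roles... */
        swap_sets(&active, &inactive);

        /* ...and back-fill the now inactive copy so both sets match. */
        inactive->vals[3] = 4;

        printf("active %d, inactive %d\n", active->vals[3], inactive->vals[3]);
        return 0;
}
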
static int kvm_set_memslot(struct kvm *kvm,
@@ -1279,37 +1276,43 @@ static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
{
- struct kvm_memslots *slotsact = __kvm_memslots(kvm, as_id);
- int idxact = kvm_memslots_idx(slotsact);
- int idxina = idxact == 0 ? 1 : 0;
- struct kvm_memslots *slotsina = &kvm->memslots_all[as_id][idxina];
- struct kvm_memory_slot *slotina, *slotact;
+ struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
+ struct kvm_memslots *inactive = &kvm->__memslots[as_id][active->node_idx ^ 1];
+ struct kvm_memory_slot *slot, *tmp;
int r;

- slotina = kzalloc(sizeof(*slotina), GFP_KERNEL_ACCOUNT);
- if (!slotina)
+ if (change != KVM_MR_CREATE) {
+ slot = id_to_memslot(active, old->id);
+ if (WARN_ON_ONCE(!slot))
+ return -EIO;
+ }
+
+ /*
+ * Modifications are done on a temporary, unreachable slot. The
+ * changes are then (eventually) propagated to both the active and
+ * inactive sets.
+ */
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL_ACCOUNT);
+ if (!tmp)
return -ENOMEM;

- if (change != KVM_MR_CREATE)
- slotact = id_to_memslot(slotsact, old->id);
-
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
/*
- * Replace the slot to be deleted or moved in the inactive
- * memslot set by its copy with KVM_MEMSLOT_INVALID flag set.
+ * Mark the current slot INVALID. This must be done on the tmp
+ * slot to avoid modifying the current slot in the active tree.
*/
- kvm_copy_replace_memslot(kvm, as_id, idxina, slotact, slotina);
- slotina->flags |= KVM_MEMSLOT_INVALID;
+ kvm_copy_memslot(tmp, slot);
+ tmp->flags |= KVM_MEMSLOT_INVALID;
+ kvm_replace_memslot(inactive, slot, tmp);

/*
- * Swap the active <-> inactive memslot set.
- * Now the active memslot set still contains the memslot to be
- * deleted or moved, but with the KVM_MEMSLOT_INVALID flag set.
- */
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
- swap(slotact, slotina);
+ * Activate the slot that is now marked INVALID, but don't
+ * propagate the slot to the now inactive slots. The slot is
+ * either going to be deleted or recreated as a new slot.
+ */
+ kvm_swap_active_memslots(kvm, as_id, &active, &inactive);
+
+ /* The temporary and current slot have swapped roles. */
+ swap(tmp, slot);

/*
* From this point no new shadow pages pointing to a deleted,
@@ -1319,139 +1322,93 @@ static int kvm_set_memslot(struct kvm *kvm,
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_root)
*/
- kvm_arch_flush_shadow_memslot(kvm, slotact);
+ kvm_arch_flush_shadow_memslot(kvm, slot);
}

r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
- if (r)
- goto out_slots;
+ if (r) {
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+ /*
+ * Revert the above INVALID change. The original slot
+ * was preserved in the inactive slots, so re-activating
+ * it over the temporary INVALID slot restores both sets
+ * (and frees the INVALID slot).
+ */
+ kvm_activate_memslot(kvm, as_id, &active, &inactive, slot, tmp);
+ } else {
+ kfree(tmp);
+ }
+ return r;
+ }

if (change == KVM_MR_MOVE) {
/*
- * Since we are going to be changing the memslot gfn we need to
- * remove it from the gfn tree so it can be re-added there with
- * the updated gfn.
+ * The memslot's gfn is changing; remove it from the inactive tree,
+ * it will be re-added with its updated gfn. Because its gfn range
+ * is changing, an in-place replace is not possible.
*/
- rb_erase(&slotina->gfn_node[idxina],
- &slotsina->gfn_tree);
+ kvm_memslot_gfn_erase(inactive, tmp);

- slotina->base_gfn = new->base_gfn;
- slotina->flags = new->flags;
- slotina->dirty_bitmap = new->dirty_bitmap;
+ tmp->base_gfn = new->base_gfn;
+ tmp->flags = new->flags;
+ tmp->dirty_bitmap = new->dirty_bitmap;
/* kvm_arch_prepare_memory_region() might have modified arch */
- slotina->arch = new->arch;
+ tmp->arch = new->arch;

/* Re-add to the gfn tree with the updated gfn */
- kvm_memslot_gfn_insert(&slotsina->gfn_tree,
- slotina, idxina);
+ kvm_memslot_gfn_insert(inactive, tmp);

- /*
- * Swap the active <-> inactive memslot set.
- * Now the active memslot set contains the new, final memslot.
- */
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
- swap(slotact, slotina);
-
- /*
- * Replace the temporary KVM_MEMSLOT_INVALID slot with the
- * new, final memslot in the inactive memslot set and
- * free the temporary memslot.
- */
- kvm_replace_memslot(kvm, as_id, idxina, slotina, slotact);
- kfree(slotina);
+ /* Replace the current INVALID slot with the updated memslot. */
+ kvm_activate_memslot(kvm, as_id, &active, &inactive, slot, tmp);
} else if (change == KVM_MR_FLAGS_ONLY) {
/*
- * Almost like the move case above, but we don't use a temporary
- * KVM_MEMSLOT_INVALID slot.
- * Instead, we simply replace the old memslot with a new, updated
- * copy in both memslot sets.
+ * Similar to the MOVE case, but the slot doesn't need to be
+ * zapped as an intermediate step. Instead, the old memslot is
+ * simply replaced with a new, updated copy in both memslot sets.
*
- * Since we aren't going to be changing the memslot gfn we can
- * simply use kvm_copy_replace_memslot(), which will use
- * rb_replace_node() to switch the memslot node in the gfn tree
- * instead of removing the old one and inserting the new one
- * as two separate operations.
- * It's a performance win since node replacement is a single
- * O(1) operation as opposed to two O(log(n)) operations for
- * slot removal and then re-insertion.
+ * Since the memslot gfn is unchanged, kvm_replace_memslot() can use
+ * kvm_memslot_gfn_replace() to switch the node in the gfn tree
+ * instead of removing the old one and inserting the new one as two
+ * separate operations. Replacement is a single O(1) operation versus
+ * two O(log(n)) operations for remove+insert.
*/
- kvm_copy_replace_memslot(kvm, as_id, idxina, slotact, slotina);
- slotina->flags = new->flags;
- slotina->dirty_bitmap = new->dirty_bitmap;
+ kvm_copy_memslot(tmp, slot);
+ tmp->flags = new->flags;
+ tmp->dirty_bitmap = new->dirty_bitmap;
/* kvm_arch_prepare_memory_region() might have modified arch */
- slotina->arch = new->arch;
+ tmp->arch = new->arch;
+ kvm_replace_memslot(inactive, slot, tmp);

- /* Swap the active <-> inactive memslot set. */
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
- swap(slotact, slotina);
-
- /*
- * Replace the old memslot in the other memslot set and
- * then finally free it.
- */
- kvm_replace_memslot(kvm, as_id, idxina, slotina, slotact);
- kfree(slotina);
+ kvm_activate_memslot(kvm, as_id, &active, &inactive, slot, tmp);
} else if (change == KVM_MR_CREATE) {
/*
- * Add the new memslot to the current inactive set as a copy
- * of the provided new memslot data.
+ * Add the new memslot to the inactive set as a copy of the
+ * new memslot data provided by userspace.
*/
- kvm_copy_memslot(slotina, new);
- kvm_init_memslot_hva_ranges(slotina);
+ kvm_copy_memslot(tmp, new);
+ kvm_replace_memslot(inactive, NULL, tmp);

- kvm_replace_memslot(kvm, as_id, idxina, NULL, slotina);
-
- /* Swap the active <-> inactive memslot set. */
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
-
- /* Now add it also to the other memslot set */
- kvm_replace_memslot(kvm, as_id, idxina, NULL, slotina);
+ kvm_activate_memslot(kvm, as_id, &active, &inactive, NULL, tmp);
} else if (change == KVM_MR_DELETE) {
/*
- * Remove the old memslot from the current inactive set
- * (the other, active set contains the temporary
- * KVM_MEMSLOT_INVALID slot)
- */
- kvm_replace_memslot(kvm, as_id, idxina, slotina, NULL);
-
- /* Swap the active <-> inactive memslot set. */
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
- swap(slotact, slotina);
-
- /* Remove the temporary KVM_MEMSLOT_INVALID slot and free it. */
- kvm_replace_memslot(kvm, as_id, idxina, slotina, NULL);
- kfree(slotina);
- /* slotact will be freed by kvm_free_memslot() */
- } else
+ * Remove the old memslot from the inactive set, then activate with
+ * a NULL replacement to propagate the removal to the active set.
+ */
+ kvm_replace_memslot(inactive, tmp, NULL);
+ kvm_activate_memslot(kvm, as_id, &active, &inactive, slot, NULL);
+ } else {
BUG();
+ }

kvm_arch_commit_memory_region(kvm, mem, old, new, change);

+ /*
+ * For DELETE, free the original memslot and its metadata. After the
+ * earlier pointer swap, tmp points at the original slot; slot (the
+ * INVALID copy) was already freed during activation.
+ */
if (change == KVM_MR_DELETE)
- kvm_free_memslot(kvm, slotact);
+ kvm_free_memslot(kvm, tmp);

return 0;
-
-out_slots:
- if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- swap_memslots(kvm, as_id);
- swap(idxact, idxina);
- swap(slotsina, slotsact);
- swap(slotact, slotina);
-
- kvm_replace_memslot(kvm, as_id, idxina, slotina, slotact);
- }
- kfree(slotina);
-
- return r;
}

static int kvm_delete_memslot(struct kvm *kvm,
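
For context, the userspace trigger for the KVM_MR_DELETE path above is
KVM_SET_USER_MEMORY_REGION with memory_size == 0 (error handling trimmed;
vm_fd comes from KVM_CREATE_VM):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Delete memslot 'slot_id': a zero memory_size is the delete request,
 * which lands in the KVM_MR_DELETE path of kvm_set_memslot() above. */
static int delete_memslot(int vm_fd, unsigned int slot_id)
{
        struct kvm_userspace_memory_region region;

        memset(&region, 0, sizeof(region));
        region.slot = slot_id;

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
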
--
2.32.0.rc0.204.g9fa02ecfa5-goog