[PATCH v3 09/15] KVM: Move memslot deletion to helper function

From: Sean Christopherson
Date: Thu Oct 24 2019 - 19:08:35 EST


Move memslot deletion into its own routine so that the success path for
other memslot updates does not need to use kvm_free_memslot(), i.e. can
explicitly destroy the dirty bitmap when necessary. This paves the way
for dropping @dont from kvm_free_memslot(), i.e. all callers now pass
NULL for @dont.
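
For reference, kvm_free_memslot()'s @dont handling is roughly the sketch
below (simplified, not taken verbatim from this patch): anything still
referenced by @dont is preserved, everything else is freed.

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	/* Keep the dirty bitmap if @dont still references it. */
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}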

Add a comment above the code that makes a copy of the existing memslot
prior to deletion, as it is not at all obvious that the pointer will
become stale during sorting and/or installation of new memslots.
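
Purely as an illustration of that hazard (sketch only, not code from this
patch), the copy-by-value behaves like:

	struct kvm_memory_slot *slot, old;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	old = *slot;	/* value copy, still valid after the re-sort */

	/*
	 * kvm_set_memslot() -> update_memslots() re-sorts the memslots
	 * array, so 'slot' may no longer point at the deleted slot; only
	 * 'old' can safely be used to free the slot's resources afterwards.
	 */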

Note, kvm_arch_commit_memory_region() allows an architecture to free
resources when moving a memslot or changing its flags, i.e. an
architecture can implement logic similar to the dirty bitmap handling if
such functionality is needed in the future.
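
Should an architecture ever need that, the hook already receives the old
and new slots plus the type of change; a hypothetical sketch (not part of
this patch) could look like:

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	if (change == KVM_MR_MOVE || change == KVM_MR_FLAGS_ONLY) {
		/* Free or rebuild per-slot metadata tied to the old layout. */
	}
}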

Acked-by: Christoffer Dall <christoffer.dall@xxxxxxx>
Tested-by: Christoffer Dall <christoffer.dall@xxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
virt/kvm/kvm_main.c | 73 +++++++++++++++++++++++++++------------------
1 file changed, 44 insertions(+), 29 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 860de4fadce6..2163a7157c63 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -980,6 +980,27 @@ static int kvm_set_memslot(struct kvm *kvm,
 	return r;
 }
 
+static int kvm_delete_memslot(struct kvm *kvm,
+			      const struct kvm_userspace_memory_region *mem,
+			      struct kvm_memory_slot *old, int as_id)
+{
+	struct kvm_memory_slot new;
+	int r;
+
+	if (!old->npages)
+		return -EINVAL;
+
+	memset(&new, 0, sizeof(new));
+	new.id = old->id;
+
+	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
+	if (r)
+		return r;
+
+	kvm_free_memslot(kvm, old, NULL);
+	return 0;
+}
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -1029,7 +1050,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (npages > KVM_MEM_MAX_NR_PAGES)
 		return -EINVAL;
 
-	new = old = *slot;
+	/*
+	 * Make a full copy of the old memslot, the pointer will become stale
+	 * when the memslots are re-sorted by update_memslots().
+	 */
+	old = *slot;
+	if (!mem->memory_size)
+		return kvm_delete_memslot(kvm, mem, &old, as_id);
+
+	new = old;
 
 	new.id = id;
 	new.base_gfn = base_gfn;
@@ -1037,29 +1066,20 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.flags = mem->flags;
 	new.userspace_addr = mem->userspace_addr;
 
-	if (npages) {
-		if (!old.npages)
-			change = KVM_MR_CREATE;
-		else { /* Modify an existing slot. */
-			if ((new.userspace_addr != old.userspace_addr) ||
-			    (npages != old.npages) ||
-			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
-				return -EINVAL;
-
-			if (base_gfn != old.base_gfn)
-				change = KVM_MR_MOVE;
-			else if (new.flags != old.flags)
-				change = KVM_MR_FLAGS_ONLY;
-			else /* Nothing to change. */
-				return 0;
-		}
-	} else {
-		if (!old.npages)
+	if (!old.npages) {
+		change = KVM_MR_CREATE;
+	} else { /* Modify an existing slot. */
+		if ((new.userspace_addr != old.userspace_addr) ||
+		    (npages != old.npages) ||
+		    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
 			return -EINVAL;
 
-		change = KVM_MR_DELETE;
-		new.base_gfn = 0;
-		new.flags = 0;
+		if (base_gfn != old.base_gfn)
+			change = KVM_MR_MOVE;
+		else if (new.flags != old.flags)
+			change = KVM_MR_FLAGS_ONLY;
+		else /* Nothing to change. */
+			return 0;
 	}
 
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1082,17 +1102,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			return r;
 	}
 
-	/* actual memory is freed via old in kvm_free_memslot below */
-	if (change == KVM_MR_DELETE) {
-		new.dirty_bitmap = NULL;
-		memset(&new.arch, 0, sizeof(new.arch));
-	}
-
 	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
 	if (r)
 		goto out_bitmap;
 
-	kvm_free_memslot(kvm, &old, &new);
+	if (old.dirty_bitmap && !new.dirty_bitmap)
+		kvm_destroy_dirty_bitmap(&old);
 	return 0;
 
 out_bitmap:
--
2.22.0