[RFC PATCH 4/9] kvm_main.c: split logic in kvm_set_memslot

From: Emanuele Giuseppe Esposito
Date: Fri Sep 09 2022 - 06:46:16 EST


Split kvm_set_memslot() into kvm_prepare_memslot() and
kvm_finish_memslot(). As in the previous patch, this is for now just a
split; a later patch will build on it to perform atomic memslot updates
(thus avoiding swapping the active memslot list on every update).

No functional change intended.
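
The resulting call structure, with the locking contract condensed from
the comments added in the diff (the summary comments in this snippet
are mine; the code itself matches the final hunk):

	static int kvm_set_memslot(struct kvm *kvm,
				   struct kvm_internal_memory_region_list *batch)
	{
		int r;

		/*
		 * Takes kvm->slots_arch_lock; on failure (invalid_slot
		 * allocation or kvm_prepare_memory_region()) the lock is
		 * released before returning the error.
		 */
		r = kvm_prepare_memslot(kvm, batch);
		if (r)
			return r;

		/* Entered with kvm->slots_arch_lock held; releases it. */
		kvm_finish_memslot(kvm, batch);

		return 0;
	}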

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@xxxxxxxxxx>
---
virt/kvm/kvm_main.c | 37 ++++++++++++++++++++++++++++++++-----
1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4fab15d0d4b..17f07546d591 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1790,12 +1790,15 @@ static void kvm_update_flags_memslot(struct kvm *kvm,
 	kvm_activate_memslot(kvm, old, new);
 }
 
-static int kvm_set_memslot(struct kvm *kvm,
-			   struct kvm_internal_memory_region_list *batch)
+/*
+ * Takes kvm->slots_arch_lock, and releases it only if
+ * invalid_slot allocation or kvm_prepare_memory_region failed.
+ */
+static int kvm_prepare_memslot(struct kvm *kvm,
+			       struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *invalid_slot;
 	struct kvm_memory_slot *old = batch->old;
-	struct kvm_memory_slot *new = batch->new;
 	enum kvm_mr_change change = batch->change;
 	int r;
 
@@ -1829,7 +1832,8 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 * invalidation needs to be reverted.
 	 */
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
+		invalid_slot = kzalloc(sizeof(*invalid_slot),
+				       GFP_KERNEL_ACCOUNT);
 		if (!invalid_slot) {
 			mutex_unlock(&kvm->slots_arch_lock);
 			return -ENOMEM;
@@ -1847,13 +1851,24 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * release slots_arch_lock.
 		 */
 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+			/* kvm_activate_memslot releases kvm->slots_arch_lock */
 			kvm_activate_memslot(kvm, invalid_slot, old);
 			kfree(invalid_slot);
 		} else {
 			mutex_unlock(&kvm->slots_arch_lock);
 		}
-		return r;
 	}
+	return r;
+}
+
+/* Must be called with kvm->slots_arch_lock held, but releases it. */
+static void kvm_finish_memslot(struct kvm *kvm,
+			       struct kvm_internal_memory_region_list *batch)
+{
+	struct kvm_memory_slot *invalid_slot = batch->invalid;
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 
 	/*
 	 * For DELETE and MOVE, the working slot is now active as the INVALID
@@ -1883,6 +1898,18 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 * responsible for knowing that new->arch may be stale.
 	 */
 	kvm_commit_memory_region(kvm, batch);
+}
+
+static int kvm_set_memslot(struct kvm *kvm,
+			   struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = kvm_prepare_memslot(kvm, batch);
+	if (r)
+		return r;
+
+	kvm_finish_memslot(kvm, batch);
 
 	return 0;
 }
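
For reviewers, a rough picture of where the series takes this (my
reading of the stated goal above; the loop shape below is an
assumption, the real mechanism is defined by the later patches):

	/*
	 * Sketch of the intended later use (assumption, not in this
	 * patch):
	 *
	 *	for each memslot update in the atomic batch:
	 *		kvm_prepare_memslot(kvm, batch[i]);
	 *	for each memslot update in the atomic batch:
	 *		kvm_finish_memslot(kvm, batch[i]);
	 *
	 * so the active memslot list is swapped once per batch rather
	 * than once per slot; how slots_arch_lock is managed across the
	 * batch is up to those later patches.
	 */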
--
2.31.1