[RFC PATCH 5/9] kvm_main.c: split __kvm_set_memory_region logic into kvm_check_mem and kvm_prepare_batch

From: Emanuele Giuseppe Esposito
Date: Fri Sep 09 2022 - 06:45:53 EST


Just a function split. No functional change intended, except that
kvm_prepare_batch() no longer calls kvm_set_memslot() itself when
batch->change is KVM_MR_DELETE, but leaves that to its caller
(__kvm_set_memory_region). kvm_prepare_batch() also returns 1 instead
of 0 when the request is an empty one (nothing to change);
kvm_set_memory_region() converts that back to 0 so its callers see no
difference.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@xxxxxxxxxx>
---
virt/kvm/kvm_main.c | 120 +++++++++++++++++++++++++++++---------------
1 file changed, 79 insertions(+), 41 deletions(-)
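
As a sketch of the resulting contract (a minimal standalone model in
plain C, not kernel code; the *_model names are invented for
illustration): kvm_prepare_batch() returns a negative errno on failure,
1 for an empty request, and 0 once batch->new/batch->change are filled
in; __kvm_set_memory_region() only commits via kvm_set_memslot() when
it sees 0, and kvm_set_memory_region() folds the 1 back into 0 for its
callers.

#include <errno.h>
#include <stdio.h>

/* Models kvm_prepare_batch(): <0 error, 1 empty request, 0 batch ready. */
static int prepare_batch_model(int change)
{
	if (change < 0)
		return -EINVAL;		/* a sanity check failed */
	if (change == 0)
		return 1;		/* nothing to change */
	return 0;			/* batch prepared, ready to commit */
}

/* Models __kvm_set_memory_region(): commit only when prepare returns 0. */
static int set_memory_region_model(int change)
{
	int r = prepare_batch_model(change);

	if (r)				/* covers both errors and the no-op */
		return r;
	return 0;			/* stands in for kvm_set_memslot() */
}

int main(void)
{
	for (int change = -1; change <= 1; change++) {
		int r = set_memory_region_model(change);

		/* Models kvm_set_memory_region(): 1 is success, not error. */
		if (r == 1)
			r = 0;
		printf("change=%2d -> r=%d\n", change, r);
	}
	return 0;
}

Returning 1 (rather than committing the no-op inside kvm_prepare_batch())
keeps the commit step in a single place, at the cost of a non-standard
positive return value that every caller of __kvm_set_memory_region() now
has to be aware of.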

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 17f07546d591..9d917af30593 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1927,19 +1927,9 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
 	return false;
 }
 
-/*
- * Allocate some memory and give it an address in the guest physical address
- * space.
- *
- * Discontiguous memory is allowed, mostly for framebuffers.
- * This function takes also care of initializing batch->new/old/invalid/change
- * fields.
- *
- * Must be called holding kvm->slots_lock for write.
- */
-int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem,
-			    struct kvm_internal_memory_region_list *batch)
+static int kvm_prepare_batch(struct kvm *kvm,
+			     const struct kvm_userspace_memory_region *mem,
+			     struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *old, *new;
 	struct kvm_memslots *slots;
@@ -1947,34 +1937,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	unsigned long npages;
 	gfn_t base_gfn;
 	int as_id, id;
-	int r;
-
-	r = check_memory_region_flags(mem);
-	if (r)
-		return r;
 
 	as_id = mem->slot >> 16;
 	id = (u16)mem->slot;
 
-	/* General sanity checks */
-	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
-	    (mem->memory_size != (unsigned long)mem->memory_size))
-		return -EINVAL;
-	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
-		return -EINVAL;
-	/* We can read the guest memory with __xxx_user() later on. */
-	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
-	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
-			mem->memory_size))
-		return -EINVAL;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
-		return -EINVAL;
-	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
-		return -EINVAL;
-	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
-		return -EINVAL;
-
 	slots = __kvm_memslots(kvm, as_id);
 
 	/*
@@ -1993,7 +1959,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 		batch->change = KVM_MR_DELETE;
 		batch->new = NULL;
-		return kvm_set_memslot(kvm, batch);
+		return 0;
 	}
 
 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
@@ -2020,7 +1986,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		else if (mem->flags != old->flags)
 			change = KVM_MR_FLAGS_ONLY;
 		else /* Nothing to change. */
-			return 0;
+			return 1;
 	}
 
 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
@@ -2041,12 +2007,81 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	batch->new = new;
 	batch->change = change;
+	return 0;
+}
+
+static int kvm_check_mem(const struct kvm_userspace_memory_region *mem)
+{
+	int as_id, id;
+
+	as_id = mem->slot >> 16;
+	id = (u16)mem->slot;
+
+	/* General sanity checks */
+	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+	    (mem->memory_size != (unsigned long)mem->memory_size))
+		return -EINVAL;
+	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
+		return -EINVAL;
+	/* We can read the guest memory with __xxx_user() later on. */
+	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
+	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+			mem->memory_size))
+		return -EINVAL;
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
+		return -EINVAL;
+	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
+		return -EINVAL;
+	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int kvm_check_memory_region(struct kvm *kvm,
+				   const struct kvm_userspace_memory_region *mem,
+				   struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = check_memory_region_flags(mem);
+	if (r)
+		return r;
 
-	r = kvm_set_memslot(kvm, batch);
+	r = kvm_check_mem(mem);
 	if (r)
-		kfree(new);
+		return r;
+
+	r = kvm_prepare_batch(kvm, mem, batch);
+	if (r && batch->new)
+		kfree(batch->new);
+
 	return r;
 }
+
+/*
+ * Allocate some memory and give it an address in the guest physical address
+ * space.
+ *
+ * Discontiguous memory is allowed, mostly for framebuffers.
+ * This function also takes care of initializing batch->new/old/invalid/change
+ * fields.
+ *
+ * Must be called holding kvm->slots_lock for write.
+ */
+int __kvm_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = kvm_check_memory_region(kvm, mem, batch);
+	if (r)
+		return r;
+
+	return kvm_set_memslot(kvm, batch);
+}
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 static int kvm_set_memory_region(struct kvm *kvm,
@@ -2061,6 +2096,9 @@ static int kvm_set_memory_region(struct kvm *kvm,
 	mutex_lock(&kvm->slots_lock);
 	r = __kvm_set_memory_region(kvm, mem, batch);
 	mutex_unlock(&kvm->slots_lock);
+	/* r == 1 means an empty request: nothing to do, but not an error */
+	if (r == 1)
+		r = 0;
 	return r;
 }

--
2.31.1