On 3/3/23 7:06 PM, Elliot Berman wrote:
+
+	mem_entries = kcalloc(mapping->npages, sizeof(*mem_entries), GFP_KERNEL);
+	if (!mem_entries) {
+		ret = -ENOMEM;
+		goto reclaim;
+	}
+
+	/* reduce number of entries by combining contiguous pages into single memory entry */
Are you sure you need to do this? I.e., does pin_user_pages_fast()
already take care of consolidating these pages?
+	prev_page = page_to_phys(mapping->pages[0]);
+	mem_entries[0].ipa_base = cpu_to_le64(prev_page);
+	entry_size = PAGE_SIZE;
+	for (i = 1, j = 0; i < mapping->npages; i++) {
+		curr_page = page_to_phys(mapping->pages[i]);
I think you can actually use the page frame numbers
here instead of the addresses. If they are consecutive,
they are contiguous. See pages_are_mergeable() for an
example of that. Using PFNs might simplify this code.
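Something like this, maybe (untested; reusing your variable names, but
tracking PFNs instead of physical addresses):

	unsigned long prev_pfn, curr_pfn;

	prev_pfn = page_to_pfn(mapping->pages[0]);
	mem_entries[0].ipa_base = cpu_to_le64(PFN_PHYS(prev_pfn));
	entry_size = PAGE_SIZE;
	for (i = 1, j = 0; i < mapping->npages; i++) {
		curr_pfn = page_to_pfn(mapping->pages[i]);
		/* consecutive PFNs mean physically contiguous pages */
		if (curr_pfn == prev_pfn + 1) {
			entry_size += PAGE_SIZE;
		} else {
			mem_entries[j].size = cpu_to_le64(entry_size);
			j++;
			mem_entries[j].ipa_base = cpu_to_le64(PFN_PHYS(curr_pfn));
			entry_size = PAGE_SIZE;
		}

		prev_pfn = curr_pfn;
	}
	mem_entries[j].size = cpu_to_le64(entry_size);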
+		if (curr_page - prev_page == PAGE_SIZE) {
+			entry_size += PAGE_SIZE;
+		} else {
+			mem_entries[j].size = cpu_to_le64(entry_size);
+			j++;
+			mem_entries[j].ipa_base = cpu_to_le64(curr_page);
+			entry_size = PAGE_SIZE;
+		}
+
+		prev_page = curr_page;
+	}
+	mem_entries[j].size = cpu_to_le64(entry_size);
It might be messier, but it seems like you could scan the pages first to
see how many entries you'll need (after combining), then allocate the
mem entries array based on that count. That is, do that rather than
allocating the worst case, filling, then duplicating and freeing.
	count = 1;
	curr_page = mapping->pages[0];
	for (i = 1; i < mapping->npages; i++) {
		next_page = mapping->pages[i];
		if (page_to_pfn(next_page) != page_to_pfn(curr_page) + 1)
			count++;
		curr_page = next_page;
	}

	parcel->n_mem_entries = count;
	parcel->mem_entries = kcalloc(count, sizeof(*parcel->mem_entries), GFP_KERNEL);
	/* Then fill them up */
(Not tested, but you get the idea.)
+
+	parcel->n_mem_entries = j + 1;
+	parcel->mem_entries = kmemdup(mem_entries, sizeof(*mem_entries) * parcel->n_mem_entries,
+				      GFP_KERNEL);
+	kfree(mem_entries);
+	if (!parcel->mem_entries) {
+		ret = -ENOMEM;
+		goto reclaim;
+	}
+
+	mutex_unlock(&ghvm->mm_lock);
+	return 0;
+reclaim:
+	gh_vm_mem_reclaim(ghvm, mapping);
+free_mapping:
+	kfree(mapping);
+	mutex_unlock(&ghvm->mm_lock);
+	return ret;
+}
+
+int gh_vm_mem_free(struct gh_vm *ghvm, u32 label)
+{
+	struct gh_vm_mem *mapping;
+	int ret;
+
+	ret = mutex_lock_interruptible(&ghvm->mm_lock);
+	if (ret)
+		return ret;
+
+	mapping = __gh_vm_mem_find_by_label(ghvm, label);
+	if (!mapping)
+		goto out;
+
+	gh_vm_mem_reclaim(ghvm, mapping);
+	kfree(mapping);
+out:
+	mutex_unlock(&ghvm->mm_lock);
+	return ret;
+}
diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
index 10ba32d2b0a6..a19207e3e065 100644
--- a/include/uapi/linux/gunyah.h
+++ b/include/uapi/linux/gunyah.h
@@ -20,4 +20,33 @@
*/
#define GH_CREATE_VM _IO(GH_IOCTL_TYPE, 0x0) /* Returns a Gunyah VM fd */
+/*
+ * ioctls for VM fds
+ */
+
I think you should define the following three values in an enum.
+#define GH_MEM_ALLOW_READ	(1UL << 0)
+#define GH_MEM_ALLOW_WRITE	(1UL << 1)
+#define GH_MEM_ALLOW_EXEC	(1UL << 2)
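I.e., something like this instead (the "gh_mem_flags" tag is just a
suggestion):

	enum gh_mem_flags {
		GH_MEM_ALLOW_READ	= 1UL << 0,
		GH_MEM_ALLOW_WRITE	= 1UL << 1,
		GH_MEM_ALLOW_EXEC	= 1UL << 2,
	};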
+
+/**
+ * struct gh_userspace_memory_region - Userspace memory description for GH_VM_SET_USER_MEM_REGION
+ * @label: Unique identifier for the region.
Unique with respect to what? I think it's unique among memory
regions defined within a VM. And I think it's arbitrary and
defined by the caller (right?).
+ * @flags: Flags for memory parcel behavior
+ * @guest_phys_addr: Location of the memory region in guest's memory space (page-aligned)
+ * @memory_size: Size of the region (page-aligned)
+ * @userspace_addr: Location of the memory region in caller (userspace)'s memory
+ *
+ * See Documentation/virt/gunyah/vm-manager.rst for further details.
+ */
+struct gh_userspace_memory_region {
+	__u32 label;
+	__u32 flags;
Add a comment to indicate what types of values "flags" can have.
Maybe "flags" should be called "perms" or something?
+	__u64 guest_phys_addr;
+	__u64 memory_size;
+	__u64 userspace_addr;
Why isn't userspace_addr just a (void *)? That would be a more natural
thing to pass to the kernel. Is it to avoid 32-bit/64-bit pointer
differences in the API?
+};
+
+#define GH_VM_SET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x1, \
+						struct gh_userspace_memory_region)
+
I think it's nicer to group the definitions of these IOCTL values.
Then in the struct definitions that follow, you can add a comment
indicating which IOCTL each struct is used with.
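Something like this, for example (sketching with just the two ioctls
defined so far):

	#define GH_CREATE_VM			_IO(GH_IOCTL_TYPE, 0x0) /* Returns a Gunyah VM fd */
	#define GH_VM_SET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x1, \
							struct gh_userspace_memory_region)

	/* Used with the GH_VM_SET_USER_MEM_REGION ioctl */
	struct gh_userspace_memory_region {
		__u32 label;
		__u32 flags;
		__u64 guest_phys_addr;
		__u64 memory_size;
		__u64 userspace_addr;
	};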
#endif