On 20/01/2023 22:46, Elliot Berman wrote:
Add remaining ioctls to support non-proxy VM boot:
- Gunyah Resource Manager uses the VM's devicetree to configure the
virtual machine. The location of the devicetree in the guest's
virtual memory can be declared via the SET_DTB_CONFIG ioctl.
- Trigger start of the virtual machine with VM_START ioctl.
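For context, the intended userspace flow for these two ioctls looks roughly like this
(sketch only; vm_fd, dtb_gpa and dtb_size are placeholders for values set up elsewhere,
e.g. after creating the VM and registering guest memory with GH_VM_SET_USER_MEM_REGION;
assumes the usual <sys/ioctl.h> and <err.h> includes):

        struct gh_vm_dtb_config dtb_config = {
                .gpa  = dtb_gpa,   /* guest address where the DTB was loaded */
                .size = dtb_size,  /* the driver page-aligns this value */
        };

        /* Tell the VM manager where the guest's devicetree lives... */
        if (ioctl(vm_fd, GH_VM_SET_DTB_CONFIG, &dtb_config) < 0)
                err(1, "GH_VM_SET_DTB_CONFIG");

        /* ...then configure, initialize and start the VM via the Resource Manager. */
        if (ioctl(vm_fd, GH_VM_START) < 0)
                err(1, "GH_VM_START");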
Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@xxxxxxxxxxx>
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@xxxxxxxxxxx>
Signed-off-by: Elliot Berman <quic_eberman@xxxxxxxxxxx>
---
drivers/virt/gunyah/vm_mgr.c | 110 ++++++++++++++++++++++++++++++++
drivers/virt/gunyah/vm_mgr.h | 9 +++
drivers/virt/gunyah/vm_mgr_mm.c | 24 +++++++
include/uapi/linux/gunyah.h | 8 +++
4 files changed, 151 insertions(+)
diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index b847fde63333..48bd3f06fb6c 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -9,6 +9,7 @@
#include <linux/file.h>
#include <linux/gunyah_rsc_mgr.h>
#include <linux/miscdevice.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <uapi/linux/gunyah.h>
@@ -37,10 +38,98 @@ static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gh_rm_rpc *rm)
mutex_init(&ghvm->mm_lock);
INIT_LIST_HEAD(&ghvm->memory_mappings);
+ init_rwsem(&ghvm->status_lock);
Using a read-write semaphore is really not going to make any difference in this particular case.
We have just one reader (gh_vm_ensure_started), and it mostly makes a synchronous call into the writer (vm_start).
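A plain mutex would be simpler here; roughly something like this (untested sketch, only to illustrate the suggestion):

        /* in struct gunyah_vm, instead of the rw_semaphore: */
        struct mutex status_lock;

        /* in gunyah_vm_alloc(): */
        mutex_init(&ghvm->status_lock);

        /* gh_vm_start()/gh_vm_stop() then just take the mutex: */
        static int gh_vm_start(struct gunyah_vm *ghvm)
        {
                int ret = 0;

                mutex_lock(&ghvm->status_lock);
                if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE)
                        goto out;

                /* ... lend/share parcels, configure, init, start as before ... */

        out:
                mutex_unlock(&ghvm->status_lock);
                return ret;
        }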
return ghvm;
}
+static int gh_vm_start(struct gunyah_vm *ghvm)
+{
+ struct gunyah_vm_memory_mapping *mapping;
+ u64 dtb_offset;
+ u32 mem_handle;
+ int ret;
+
+ down_write(&ghvm->status_lock);
+ if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
+ up_write(&ghvm->status_lock);
+ return 0;
+ }
+
+ list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
+ switch (mapping->share_type) {
+ case VM_MEM_LEND:
+ ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
+ break;
+ case VM_MEM_SHARE:
+ ret = gh_rm_mem_share(ghvm->rm, &mapping->parcel);
+ break;
+ }
+ if (ret > 0)
+ ret = -EINVAL;
Why are we converting the error codes here? AFAIU both gh_rm_mem_lend() and gh_rm_mem_share() already return valid error codes, so they could be propagated as-is (see the sketch after the loop below).
+ if (ret) {
Same feedback as other patches on setting error codes.
+ pr_warn("Failed to %s parcel %d: %d\n",
+ mapping->share_type == VM_MEM_LEND ? "lend" : "share",
+ mapping->parcel.label,
+ ret);
+ goto err;
+ }
+ }
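If both RM calls already return proper negative error codes, the conversion can simply be dropped and the code propagated unchanged, roughly (untested):

        list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
                switch (mapping->share_type) {
                case VM_MEM_LEND:
                        ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
                        break;
                case VM_MEM_SHARE:
                        ret = gh_rm_mem_share(ghvm->rm, &mapping->parcel);
                        break;
                }
                if (ret) {
                        pr_warn("Failed to %s parcel %d: %d\n",
                                mapping->share_type == VM_MEM_LEND ? "lend" : "share",
                                mapping->parcel.label, ret);
                        goto err;       /* propagate the RM error code as-is */
                }
        }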
+
+ mapping = gh_vm_mem_mapping_find_mapping(ghvm, ghvm->dtb_config.gpa, ghvm->dtb_config.size);
+ if (!mapping) {
+ pr_warn("Failed to find the memory_handle for DTB\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mem_handle = mapping->parcel.mem_handle;
+ dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
+
+ ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, mem_handle,
+ 0, 0, dtb_offset, ghvm->dtb_config.size);
+ if (ret) {
+ pr_warn("Failed to configure VM: %d\n", ret);
+ goto err;
+ }
+
+ ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid);
+ if (ret) {
+ pr_warn("Failed to initialize VM: %d\n", ret);
+ goto err;
+ }
+
+ ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
+ if (ret) {
+ pr_warn("Failed to start VM: %d\n", ret);
+ goto err;
+ }
+
+ ghvm->vm_status = GH_RM_VM_STATUS_READY;
+
+ up_write(&ghvm->status_lock);
+ return ret;
+err:
+ ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
+ up_write(&ghvm->status_lock);
+ return ret;
+}
+
+static void gh_vm_stop(struct gunyah_vm *ghvm)
+{
+ int ret;
+
+ down_write(&ghvm->status_lock);
+ if (ghvm->vm_status == GH_RM_VM_STATUS_READY) {
+ ret = gh_rm_vm_stop(ghvm->rm, ghvm->vmid);
+ if (ret)
+ pr_warn("Failed to stop VM: %d\n", ret);
+ }
+
+ ghvm->vm_status = GH_RM_VM_STATUS_EXITED;
+ up_write(&ghvm->status_lock);
+}
+
static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gunyah_vm *ghvm = filp->private_data;
@@ -84,6 +173,25 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case GH_VM_SET_DTB_CONFIG: {
+ struct gh_vm_dtb_config dtb_config;
+
+ r = -EFAULT;
+ if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
+ break;
+
+ dtb_config.size = PAGE_ALIGN(dtb_config.size);
+ ghvm->dtb_config = dtb_config;
+
+ r = 0;
+ break;
+ }
+ case GH_VM_START: {
+ r = gh_vm_start(ghvm);
+ if (r)
+ r = -EINVAL;
+ break;
+ }
default:
r = -ENOTTY;
break;
@@ -97,6 +205,8 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
struct gunyah_vm *ghvm = filp->private_data;
struct gunyah_vm_memory_mapping *mapping, *tmp;
+ gh_vm_stop(ghvm);
+
list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
gh_vm_mem_mapping_reclaim(ghvm, mapping);
kfree(mapping);
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index 6b38bf780f76..5c02fb305893 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <uapi/linux/gunyah.h>
@@ -34,6 +35,12 @@ struct gunyah_vm {
u16 vmid;
struct gh_rm *rm;
+ enum gh_rm_vm_auth_mechanism auth;
+ struct gh_vm_dtb_config dtb_config;
+
+ enum gh_rm_vm_status vm_status;
+ struct rw_semaphore status_lock;
+
struct mutex mm_lock;
struct list_head memory_mappings;
};
@@ -42,5 +49,7 @@ struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_alloc(struct gunyah_vm *ghvm,
struct gh_userspace_memory_region *region);
void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_mapping *mapping);
struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label);
+struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
+ u64 gpa, u32 size);
#endif
diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c
index f2dbdb4ee8ab..7fcb9f8a29bf 100644
--- a/drivers/virt/gunyah/vm_mgr_mm.c
+++ b/drivers/virt/gunyah/vm_mgr_mm.c
@@ -53,6 +53,30 @@ void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_m
mutex_unlock(&ghvm->mm_lock);
}
+struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
+ u64 gpa, u32 size)
+{
+ struct gunyah_vm_memory_mapping *mapping = NULL;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ghvm->mm_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
+ if (gpa >= mapping->guest_phys_addr &&
+ (gpa + size <= mapping->guest_phys_addr +
+ (mapping->npages << PAGE_SHIFT))) {
+ goto unlock;
+ }
+ }
+
+ mapping = NULL;
+unlock:
+ mutex_unlock(&ghvm->mm_lock);
+ return mapping;
+}
+
struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label)
{
struct gunyah_vm_memory_mapping *mapping;
diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
index 574f33b198d0..36359ad2175e 100644
--- a/include/uapi/linux/gunyah.h
+++ b/include/uapi/linux/gunyah.h
@@ -42,4 +42,12 @@ struct gh_userspace_memory_region {
#define GH_VM_SET_USER_MEM_REGION _IOW(GH_IOCTL_TYPE, 0x1, \
struct gh_userspace_memory_region)
+struct gh_vm_dtb_config {
+ __u64 gpa;
need kernel-doc, what is gpa? (a sketch follows below the struct)
+ __u64 size;
+};
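Something along these lines, for example (sketch; the field descriptions are a guess based on how gh_vm_start() uses them):

        /**
         * struct gh_vm_dtb_config - Set the location of the VM's devicetree blob
         * @gpa: Guest physical address where the devicetree has been placed
         * @size: Size of the devicetree region in bytes; page-aligned by the driver
         */
        struct gh_vm_dtb_config {
                __u64 gpa;
                __u64 size;
        };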
+#define GH_VM_SET_DTB_CONFIG _IOW(GH_IOCTL_TYPE, 0x2, struct gh_vm_dtb_config)
+
+#define GH_VM_START _IO(GH_IOCTL_TYPE, 0x3)
+
#endif