[PATCH v4 09/11] misc: fastrpc: Fix remote heap alloc and free user request

From: Ekansh Gupta
Date: Thu Jun 06 2024 - 13:03:18 EST


A static PD daemon process can request a remote heap allocation,
which allocates memory and reassigns its ownership when VMID
information is defined. These allocations are currently added to
the fl mmaps list, so they can get freed whenever the daemon is
killed, and there is no way for the user to request that this
memory be freed. Maintain remote heap allocations in the static
PD specific structure instead, and add a method to free the
memory on user request.

Fixes: 532ad70c6d44 ("misc: fastrpc: Add mmap request assigning for static PD pool")
Cc: stable <stable@xxxxxxxxxx>
Signed-off-by: Ekansh Gupta <quic_ekangupt@xxxxxxxxxxx>
---
drivers/misc/fastrpc.c | 129 +++++++++++++++++++++++++++++++----------
1 file changed, 98 insertions(+), 31 deletions(-)
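
For reference, the user request that exercises the new free path is
the existing FASTRPC_IOCTL_MUNMAP ioctl, called with the DSP address
and size returned by the earlier FASTRPC_IOCTL_MMAP (issued with the
ADSP_MMAP_REMOTE_HEAP_ADDR flag). A rough userspace sketch, with the
device node name and helper being illustrative placeholders:

#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/fastrpc.h>	/* uapi: struct fastrpc_req_munmap, FASTRPC_IOCTL_MUNMAP */

/* dev_fd: an open fastrpc device node (e.g. /dev/fastrpc-adsp);
 * remote_addr and alloc_size come from the FASTRPC_IOCTL_MMAP reply
 * (req.vaddrout and the requested size).
 */
static int free_remote_heap(int dev_fd, __u64 remote_addr, __u64 alloc_size)
{
	struct fastrpc_req_munmap req = {
		.vaddrout = remote_addr,
		.size = alloc_size,
	};

	if (ioctl(dev_fd, FASTRPC_IOCTL_MUNMAP, &req)) {
		perror("FASTRPC_IOCTL_MUNMAP");
		return -1;
	}
	return 0;
}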

diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 68c1595446d5..32f2e6f625ed 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -210,6 +210,7 @@ struct fastrpc_buf {
struct dma_buf *dmabuf;
struct device *dev;
void *virt;
+ u32 flag;
u64 phys;
u64 size;
/* Lock for dma buf attachments */
@@ -1924,29 +1925,54 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
return 0;
}

-static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+static int fastrpc_req_munmap_dsp(struct fastrpc_user *fl, uintptr_t raddr, u64 size)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_munmap_req_msg req_msg;
- struct device *dev = fl->sctx->dev;
int err;
u32 sc;

req_msg.pgid = fl->tgid;
- req_msg.size = buf->size;
- req_msg.vaddr = buf->raddr;
+ req_msg.size = size;
+ req_msg.vaddr = raddr;

args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
-
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
+
+ return err;
+}
+
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+{
+ struct device *dev = fl->sctx->dev;
+ int err;
+
+ err = fastrpc_req_munmap_dsp(fl, buf->raddr, buf->size);
if (!err) {
+ if (buf->flag == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ if (fl->cctx->vmcount) {
+ u64 src_perms = 0;
+ struct qcom_scm_vmperm dst_perms;
+ u32 i;
+
+ for (i = 0; i < fl->cctx->vmcount; i++)
+ src_perms |= BIT(fl->cctx->vmperms[i].vmid);
+
+ dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms.perm = QCOM_SCM_PERM_RWX;
+ err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
+ &src_perms, &dst_perms, 1);
+ if (err) {
+ dev_err(dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
+ buf->phys, buf->size, err);
+ return err;
+ }
+ }
+ }
dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
- spin_lock(&fl->lock);
- list_del(&buf->node);
- spin_unlock(&fl->lock);
fastrpc_buf_free(buf);
} else {
dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
@@ -1960,6 +1986,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_req_munmap req;
struct device *dev = fl->sctx->dev;
+ int err = 0;

if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -1968,18 +1995,45 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
buf = iter;
+ list_del(&buf->node);
break;
}
}
spin_unlock(&fl->lock);

- if (!buf) {
- dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
- req.vaddrout, req.size);
- return -EINVAL;
+ if (buf) {
+ err = fastrpc_req_munmap_impl(fl, buf);
+ if (err) {
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+ }
+ return err;
}

- return fastrpc_req_munmap_impl(fl, buf);
+ spin_lock(&fl->lock);
+ if (fl->spd) {
+ list_for_each_entry_safe(iter, b, &fl->spd->rmaps, node) {
+ if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
+ buf = iter;
+ list_del(&buf->node);
+ break;
+ }
+ }
+ }
+ spin_unlock(&fl->lock);
+ if (buf) {
+ err = fastrpc_req_munmap_impl(fl, buf);
+ if (err) {
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->spd->rmaps);
+ spin_unlock(&fl->lock);
+ }
+ return err;
+ }
+ dev_err(dev, "buffer not found addr 0x%09lx, len 0x%08llx\n",
+ req.vaddrout, req.size);
+ return -EINVAL;
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
@@ -2008,15 +2062,34 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return -EINVAL;
}

- if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ if (!fl->spd || !fl->spd->ispdup) {
+ dev_err(dev, "remote heap request supported only for active static PD\n");
+ return -EINVAL;
+ }
err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
- else
+ } else {
err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
+ }

if (err) {
dev_err(dev, "failed to allocate buffer\n");
return err;
}
+ buf->flag = req.flags;
+
+ /* Add memory to static PD pool, protection through hypervisor */
+ if ((req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) && fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
+ &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ if (err) {
+ dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
+ buf->phys, buf->size, err);
+ goto err_invoke;
+ }
+ }

req_msg.pgid = fl->tgid;
req_msg.flags = req.flags;
@@ -2049,26 +2122,16 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
/* let the client know the address to use */
req.vaddrout = rsp_msg.vaddr;

- /* Add memory to static PD pool, protection thru hypervisor */
- if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
- u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
-
- err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
- &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
- if (err) {
- dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
- buf->phys, buf->size, err);
- goto err_assign;
- }
- }
-
spin_lock(&fl->lock);
- list_add_tail(&buf->node, &fl->mmaps);
+ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ list_add_tail(&buf->node, &fl->spd->rmaps);
+ else
+ list_add_tail(&buf->node, &fl->mmaps);
spin_unlock(&fl->lock);

if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
err = -EFAULT;
- goto err_assign;
+ goto err_copy;
}

dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
@@ -2076,8 +2139,12 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)

return 0;

-err_assign:
+err_copy:
+ spin_lock(&fl->lock);
+ list_del(&buf->node);
+ spin_unlock(&fl->lock);
fastrpc_req_munmap_impl(fl, buf);
+ buf = NULL;
err_invoke:
fastrpc_buf_free(buf);

--
2.43.0