Sounds great.
Thank you for your recommendation.
The patch code that you recommend is clear and simple.
Please patch this.
Signed-off-by: Sangsup Lee <k1rh4.lee@xxxxxxxxx>
---
drivers/misc/fastrpc.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..aa1cf0e9f4ed 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1901,7 +1901,9 @@ static long fastrpc_device_ioctl(struct file
*file, unsigned int cmd,
err = fastrpc_req_mmap(fl, argp);
break;
case FASTRPC_IOCTL_MUNMAP:
+ mutex_lock(&fl->mutex);
err = fastrpc_req_munmap(fl, argp);
+ mutex_unlock(&fl->mutex);
break;
case FASTRPC_IOCTL_MEM_MAP:
err = fastrpc_req_mem_map(fl, argp);
--
2.25.1
2023년 3월 21일 (화) 오후 6:27, Srinivas Kandagatla
<srinivas.kandagatla@xxxxxxxxxx>님이 작성:
Thanks Sangsup for reporting the issue and sharing the patch,
Sorry, for some reason I missed this patch.
On 16/02/2023 01:41, Sangsup Lee wrote:
This patch adds mutex_lock for fixing a use-after-free bug. Commit log can be improved here to something like:
fastrpc_req_munmap_impl can be called concurrently in multi-threaded environments.
The buf which is allocated by list_for_each_safe can be used after another thread frees it.
fastrpc_munmap takes two steps to unmap the memory, first to find a
matching fastrpc buf in the list and second is to send request to DSP to
unmap it.
There is a potential race window between these two operations,
which can lead to use-after-free.
Fix this by adding locking around these two operations.
Signed-off-by: Sangsup Lee <k1rh4.lee@xxxxxxxxx>
---
drivers/misc/fastrpc.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5310606113fe..c4b5fa4a50a6 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1806,10 +1806,12 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_req_munmap req;
struct device *dev = fl->sctx->dev;
+ int err;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
+ mutex_lock(&fl->mutex);
spin_lock(&fl->lock);
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
@@ -1822,10 +1824,13 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
if (!buf) {
dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
req.vaddrout, req.size);
+ mutex_unlock(&fl->mutex);
return -EINVAL;
}
- return fastrpc_req_munmap_impl(fl, buf);
+ err = fastrpc_req_munmap_impl(fl, buf);
+ mutex_unlock(&fl->mutex);
+ return err;
How about moving the locking to ioctl:
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index a701132638cf..2f217071a6c3 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2087,7 +2087,9 @@ static long fastrpc_device_ioctl(struct file
*file, unsigned int cmd,
err = fastrpc_req_mmap(fl, argp);
break;
case FASTRPC_IOCTL_MUNMAP:
+ mutex_lock(&fl->mutex);
err = fastrpc_req_munmap(fl, argp);
+ mutex_unlock(&fl->mutex);
break;
case FASTRPC_IOCTL_MEM_MAP:
err = fastrpc_req_mem_map(fl, argp);
thanks,
srini
}
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)