[PATCH v2 1/4] io_uring: fix {SQ,IO}POLL with unsupported opcodes

From: Pavel Begunkov
Date: Wed Jun 03 2020 - 09:31:06 EST


IORING_SETUP_IOPOLL is only supported for read/write requests; all other
opcodes must be disallowed, otherwise we can hit the oops below. Also
refuse open/close with SQPOLL, as the polling thread wouldn't know which
file table to use.

RIP: 0010:io_iopoll_getevents+0x111/0x5a0
Call Trace:
? _raw_spin_unlock_irqrestore+0x24/0x40
? do_send_sig_info+0x64/0x90
io_iopoll_reap_events.part.0+0x5e/0xa0
io_ring_ctx_wait_and_kill+0x132/0x1c0
io_uring_release+0x20/0x30
__fput+0xcd/0x230
____fput+0xe/0x10
task_work_run+0x67/0xa0
do_exit+0x353/0xb10
? handle_mm_fault+0xd4/0x200
? syscall_trace_enter+0x18c/0x2c0
do_group_exit+0x43/0xa0
__x64_sys_exit_group+0x18/0x20
do_syscall_64+0x60/0x1e0
entry_SYSCALL_64_after_hwframe+0x44/0xa9
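
For illustration, a minimal userspace sketch (not part of this patch,
liburing-based, file name and sizes arbitrary) of how the new checks are
expected to surface to applications: a non-pollable opcode submitted on an
IORING_SETUP_IOPOLL ring is rejected at prep time and completes with
cqe->res == -EINVAL instead of ever reaching the iopoll list.

/*
 * Hypothetical regression check, not part of this patch. Assumes liburing
 * is installed. With this patch applied, the unsupported opcode should
 * complete with -EINVAL; without it, ring teardown at exit can hit the
 * oops above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	fd = open("iopoll-testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* IORING_OP_FALLOCATE is not a pollable read/write request */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fallocate(sqe, fd, 0, 0, 4096);

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("cqe->res = %d (expect -EINVAL with this patch)\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}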

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
fs/io_uring.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 732ec73ec3c0..2463aaca3172 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2765,6 +2765,8 @@ static int __io_splice_prep(struct io_kiocb *req,

if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

sp->file_in = NULL;
sp->len = READ_ONCE(sqe->len);
@@ -2965,6 +2967,8 @@ static int io_fallocate_prep(struct io_kiocb *req,
{
if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->addr);
@@ -2990,6 +2994,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
const char __user *fname;
int ret;

+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
@@ -3023,6 +3029,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
size_t len;
int ret;

+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
@@ -3107,6 +3115,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,

if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

tmp = READ_ONCE(sqe->fd);
if (!tmp || tmp > USHRT_MAX)
@@ -3174,6 +3184,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
struct io_provide_buf *p = &req->pbuf;
u64 tmp;

+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
if (sqe->ioprio || sqe->rw_flags)
return -EINVAL;

@@ -3262,6 +3274,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#if defined(CONFIG_EPOLL)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

req->epoll.epfd = READ_ONCE(sqe->fd);
req->epoll.op = READ_ONCE(sqe->len);
@@ -3306,6 +3320,8 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
if (sqe->ioprio || sqe->buf_index || sqe->off)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

req->madvise.addr = READ_ONCE(sqe->addr);
req->madvise.len = READ_ONCE(sqe->len);
@@ -3340,6 +3356,8 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (sqe->ioprio || sqe->buf_index || sqe->addr)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

req->fadvise.offset = READ_ONCE(sqe->off);
req->fadvise.len = READ_ONCE(sqe->len);
@@ -3373,6 +3391,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
@@ -3417,6 +3437,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
*/
req->work.flags |= IO_WQ_WORK_NO_CANCEL;

+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
sqe->rw_flags || sqe->buf_index)
return -EINVAL;
@@ -4906,6 +4928,8 @@ static int io_files_update_prep(struct io_kiocb *req,
{
if (sqe->flags || sqe->ioprio || sqe->rw_flags)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;

req->files_update.offset = READ_ONCE(sqe->off);
req->files_update.nr_args = READ_ONCE(sqe->len);
--
2.24.0