[PATCH 5.8 03/70] io_uring: allow timeout/poll/files killing to take task into account

From: Greg Kroah-Hartman
Date: Sat Oct 31 2020 - 07:40:49 EST


From: Jens Axboe <axboe@xxxxxxxxx>

commit f3606e3a92ddd36299642c78592fc87609abb1f6 upstream.

We currently cancel pending timeout and poll requests when the ring
exits, and we cancel all of them, regardless of the task that submitted
them. This is in preparation for killing only the ones associated with a
given task: the cancelation helpers now take a task_struct, with NULL
preserving the old cancel-everything behavior.
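
For illustration only (not part of the patch itself): a minimal
userspace sketch of the matching rule the new io_task_match() helper
implements, using stand-in types (struct task, struct ring_ctx and
struct kiocb below are hypothetical substitutes for the real kernel
structures). A NULL task acts as a wildcard matching every request, and
under IORING_SETUP_SQPOLL all requests are issued by the sq poll
thread, so they match no matter which task asks:

  #include <stdbool.h>
  #include <stdio.h>

  #define SETUP_SQPOLL (1U << 1)  /* stand-in for IORING_SETUP_SQPOLL */

  struct task { int pid; };       /* stand-in for task_struct */
  struct ring_ctx { unsigned flags; struct task *sqo_thread; };
  struct kiocb { struct ring_ctx *ctx; struct task *task; };

  /* Mirrors io_task_match(): NULL is a wildcard, and with SQPOLL
   * every request belongs to the sq poll thread. */
  static bool task_match(struct kiocb *req, struct task *tsk)
  {
      struct ring_ctx *ctx = req->ctx;

      if (!tsk || req->task == tsk)
          return true;
      if ((ctx->flags & SETUP_SQPOLL) && req->task == ctx->sqo_thread)
          return true;
      return false;
  }

  int main(void)
  {
      struct task sqo = { 1 }, other = { 2 };
      struct ring_ctx ctx = { SETUP_SQPOLL, &sqo };
      struct kiocb req = { &ctx, &sqo };

      printf("wildcard: %d\n", task_match(&req, NULL));   /* 1: ring exit cancels all */
      printf("sqpoll:   %d\n", task_match(&req, &other)); /* 1: sq thread owns it */
      return 0;
  }

The NULL case is what io_ring_ctx_wait_and_kill() relies on below to
keep the existing cancel-everything behavior at ring teardown.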

Reviewed-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
fs/io_uring.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1141,13 +1141,25 @@ static void io_kill_timeout(struct io_ki
 	}
 }
 
-static void io_kill_timeouts(struct io_ring_ctx *ctx)
+static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!tsk || req->task == tsk)
+		return true;
+	if ((ctx->flags & IORING_SETUP_SQPOLL) && req->task == ctx->sqo_thread)
+		return true;
+	return false;
+}
+
+static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
-		io_kill_timeout(req);
+		if (io_task_match(req, tsk))
+			io_kill_timeout(req);
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -4641,7 +4653,7 @@ static bool io_poll_remove_one(struct io
 	return do_complete;
 }
 
-static void io_poll_remove_all(struct io_ring_ctx *ctx)
+static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -4652,8 +4664,10 @@ static void io_poll_remove_all(struct io
 		struct hlist_head *list;
 
 		list = &ctx->cancel_hash[i];
-		hlist_for_each_entry_safe(req, tmp, list, hash_node)
-			posted += io_poll_remove_one(req);
+		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+			if (io_task_match(req, tsk))
+				posted += io_poll_remove_one(req);
+		}
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -7556,8 +7570,8 @@ static void io_ring_ctx_wait_and_kill(st
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
-	io_kill_timeouts(ctx);
-	io_poll_remove_all(ctx);
+	io_kill_timeouts(ctx, NULL);
+	io_poll_remove_all(ctx, NULL);
 
 	if (ctx->io_wq)
 		io_wq_cancel_all(ctx->io_wq);
@@ -7809,7 +7823,7 @@ static bool io_cancel_task_cb(struct io_
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct task_struct *task = data;
 
-	return req->task == task;
+	return io_task_match(req, task);
 }
 
 static int io_uring_flush(struct file *file, void *data)