[PATCH v2 2/4] io_uring: make submission ref putting consistent

From: Pavel Begunkov
Date: Mon Mar 02 2020 - 15:46:24 EST


The rule is simple: any async handler gets a submission ref and should
put it at the end. Make them all follow it, and so become more consistent.

This is a preparation patch; as io_wq_assign_next() currently won't ever
take effect, it doesn't bother to use io_put_req_find_next() instead of
io_put_req().
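
To illustrate, every *_finish() io-wq handler converges on the same
shape after this patch (a simplified sketch, not code from the patch;
"foo" is a placeholder opcode, the helpers are the real ones used below):

	static void io_foo_finish(struct io_wq_work **workptr)
	{
		struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
		struct io_kiocb *nxt = NULL;

		/* the cancellation path drops both refs via io_double_put_req() */
		if (io_req_cancelled(req))
			return;
		__io_foo(req, &nxt);	/* do the work, post the CQE */
		io_put_req(req);	/* drop submission reference */
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}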

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
fs/io_uring.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
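
A note on the one exception below: io_close() may punt to async while
still returning 0, so the submitter drops its submission ref as if the
request completed inline; since io_close_finish() now also puts a
submission ref, an extra reference has to be taken before punting.
Schematically (the flush condition and work assignment here are
illustrative, not the exact source):

	if (needs_flush && force_nonblock) {
		req->work.func = io_close_finish;
		io_queue_async_work(req);
		/* submission ref will be dropped, take it for async */
		refcount_inc_not_zero(&req->refs);
		return 0;	/* not -EAGAIN: submitter puts its ref as usual */
	}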

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ff6cc05b86c7..ad8046a9bc0f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2550,7 +2550,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	if (req->work.flags & IO_WQ_WORK_CANCEL) {
		req_set_fail_links(req);
		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
+		io_double_put_req(req);
		return true;
	}

@@ -2600,6 +2600,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
		return;
	__io_fsync(req, &nxt);
+	io_put_req(req); /* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }
@@ -2609,7 +2610,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
 {
	/* fsync always requires a blocking context */
	if (force_nonblock) {
-		io_put_req(req);
		req->work.func = io_fsync_finish;
		return -EAGAIN;
	}
@@ -2621,9 +2621,6 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
 {
	int ret;

-	if (io_req_cancelled(req))
-		return;
-
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
@@ -2637,7 +2634,10 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

+	if (io_req_cancelled(req))
+		return;
	__io_fallocate(req, &nxt);
+	io_put_req(req); /* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }
@@ -2659,7 +2659,6 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
 {
	/* fallocate always requiring blocking context */
	if (force_nonblock) {
-		io_put_req(req);
		req->work.func = io_fallocate_finish;
		return -EAGAIN;
	}
@@ -3015,6 +3014,7 @@ static void io_close_finish(struct io_wq_work **workptr)

	/* not cancellable, don't do io_req_cancelled() */
	__io_close_finish(req, &nxt);
+	io_put_req(req); /* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }
@@ -3038,6 +3038,8 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 	 * the file again and cause a double CQE entry for this request
	 */
	io_queue_async_work(req);
+	/* submission ref will be dropped, take it for async */
+	refcount_inc_not_zero(&req->refs);
	return 0;
 }

@@ -3088,6 +3090,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
		return;
	__io_sync_file_range(req, &nxt);
+	io_put_req(req); /* put submission ref */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }
@@ -3097,7 +3100,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
 {
	/* sync_file_range always requires a blocking context */
	if (force_nonblock) {
-		io_put_req(req);
		req->work.func = io_sync_file_range_finish;
		return -EAGAIN;
	}
@@ -3464,11 +3466,10 @@ static void io_accept_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

-	io_put_req(req);
-
	if (io_req_cancelled(req))
		return;
	__io_accept(req, &nxt, false);
+	io_put_req(req); /* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }
@@ -4733,17 +4734,14 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		} while (1);
	}

-	/* drop submission reference */
-	io_put_req(req);
-
	if (ret) {
		req_set_fail_links(req);
		io_cqring_add_event(req, ret);
		io_put_req(req);
	}

-	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt)
+	io_put_req(req); /* drop submission reference */
+	if (nxt)
		io_wq_assign_next(workptr, nxt);
 }

--
2.24.0