Currently, when IORING_SETUP_DEFER_TASKRUN is not used and the io worker
frees work, it needs to add a task work for each freed request. This
creates contention on tctx->task_list. With this commit, the io worker
queues freed work on a local list and batches multiple frees into a
single flush once the number of freed requests on the local list reaches
IO_REQ_ALLOC_BATCH.
Signed-off-by: Bui Quang Minh <minhquangbui99@xxxxxxxxx>...
---
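Note for reviewers: below is a minimal, standalone userspace sketch of the
batching pattern this patch applies (collect freed items on a local list,
hand them off in one flush once the count reaches the batch size, and flush
any leftovers at the end). The names node, batch_list, flush_batch and the
BATCH value are illustrative only and do not correspond to the kernel code.

/*
 * Standalone sketch of the free-work batching idea (not kernel code).
 */
#include <stdio.h>
#include <stddef.h>

#define BATCH 8	/* stand-in for IO_REQ_ALLOC_BATCH */

struct node {
	int id;
	struct node *next;
};

struct batch_list {
	struct node *first;	/* head of the local free list */
	struct node *tail;	/* last node, so a flush can splice in O(1) */
	int count;		/* nodes currently on the list */
};

/* Hand the whole list off in a single operation (here: just print it). */
static void flush_batch(struct batch_list *bl)
{
	struct node *n;

	printf("flushing %d node(s):", bl->count);
	for (n = bl->first; n; n = n->next)
		printf(" %d", n->id);
	printf("\n");

	bl->first = bl->tail = NULL;
	bl->count = 0;
}

/* Queue one freed node locally; flush only when the batch is full. */
static void queue_free(struct batch_list *bl, struct node *n)
{
	n->next = NULL;
	if (bl->tail)
		bl->tail->next = n;
	else
		bl->first = n;
	bl->tail = n;

	if (++bl->count == BATCH)
		flush_batch(bl);
}

int main(void)
{
	struct node nodes[20];
	struct batch_list bl = { 0 };
	int i;

	for (i = 0; i < 20; i++) {
		nodes[i].id = i;
		queue_free(&bl, &nodes[i]);
	}

	/* Flush whatever is left, mirroring the final flush in the patch. */
	if (bl.first)
		flush_batch(&bl);
	return 0;
}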
io_uring/io-wq.c | 62 +++++++++++++++++++++++++++++++++++++++++++--
io_uring/io-wq.h | 4 ++-
io_uring/io_uring.c | 23 ++++++++++++++---
io_uring/io_uring.h | 6 ++++-
4 files changed, 87 insertions(+), 8 deletions(-)
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 5d0928f37471..096711707db9 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -601,7 +622,41 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
wq->do_work(work);
io_assign_current_work(worker, NULL);
- linked = wq->free_work(work);
+ /*
+ * All requests in free list must have the same
+ * io_ring_ctx.
+ */
+ if (last_added_ctx && last_added_ctx != req->ctx) {
+ flush_req_free_list(&free_list, tail);
+ tail = NULL;
+ last_added_ctx = NULL;
+ free_req = 0;
+ }
+
+ /*
+ * Try to batch free work when
+ * !IORING_SETUP_DEFER_TASKRUN to reduce contention
+ * on tctx->task_list.
+ */
+ if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ linked = wq->free_work(work, NULL, NULL);
+ else
+ linked = wq->free_work(work, &free_list, &did_free);
+
+ if (did_free) {
+ if (!tail)
+ tail = free_list.first;
+
+ last_added_ctx = req->ctx;
+ free_req++;
+ if (free_req == IO_REQ_ALLOC_BATCH) {
+ flush_req_free_list(&free_list, tail);
+ tail = NULL;
+ last_added_ctx = NULL;
+ free_req = 0;
+ }
+ }
+
work = next_hashed;
if (!work && linked && !io_wq_is_hashed(linked)) {
work = linked;
@@ -626,6 +681,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
break;
raw_spin_lock(&acct->lock);
} while (1);
+
+ if (free_list.first)
+ flush_req_free_list(&free_list, tail);
}