[PATCH RFC v2 3/3] io_uring: batch get(ctx->ref) across submits

From: Pavel Begunkov
Date: Sat Dec 21 2019 - 11:16:08 EST


Double account ctx->refs by keeping the number of taken refs in the ctx.
As io_uring takes a per-request ctx->ref during submission, and does so
while holding ctx->uring_lock, refs can be grabbed in batches of
IORING_REFS_THRESHOLD, bypassing percpu_ref_get*() and its overhead most
of the time.
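
For reference, the idea reduces to trading one atomic operation for a
batch of cheap gets under the lock that already serializes submission. A
minimal userspace sketch of that trade, assuming a plain C11 atomic in
place of percpu_ref and a simplified refill-on-empty policy (the patch
instead tops the cache up whenever it dips below IORING_REFS_THRESHOLD
before a submission loop); all names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define REFS_THRESHOLD 32768	/* mirrors IORING_REFS_THRESHOLD */

struct ctx {
	atomic_long refs;		/* stands in for percpu_ref */
	unsigned long taken_refs;	/* cache, protected by the submission lock */
};

/* Take one ref for a request; caller holds the submission lock. */
static void get_request_ref(struct ctx *ctx)
{
	if (ctx->taken_refs == 0) {
		/* Slow path: one atomic op buys REFS_THRESHOLD cheap gets. */
		atomic_fetch_add(&ctx->refs, REFS_THRESHOLD);
		ctx->taken_refs = REFS_THRESHOLD;
	}
	/* Fast path: plain arithmetic, no atomics or percpu machinery. */
	ctx->taken_refs--;
}

/* Drop unused cached refs, e.g. before killing the ctx; lock held. */
static void free_taken_refs(struct ctx *ctx)
{
	if (ctx->taken_refs)
		atomic_fetch_sub(&ctx->refs, ctx->taken_refs);
	ctx->taken_refs = 0;
}

int main(void)
{
	struct ctx ctx = { .taken_refs = 0 };

	atomic_init(&ctx.refs, 0);
	for (int i = 0; i < 5; i++)
		get_request_ref(&ctx);	/* one atomic op total, five refs out */
	free_taken_refs(&ctx);
	/* Five refs remain held, one per "in-flight request". */
	printf("refs held: %ld\n", atomic_load(&ctx.refs));
	return 0;
}

In the patch itself, refs handed to submitted requests are dropped by
the requests on completion, so io_submit_sqes() only subtracts the
number of submitted requests from the cache; the teardown paths call
io_free_taken_refs() under uring_lock before percpu_ref_kill() so that
cached refs cannot keep the refcount from reaching zero.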

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
fs/io_uring.c | 32 +++++++++++++++++++++++++-------
1 file changed, 25 insertions(+), 7 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5392134f042f..eef09de94609 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -84,6 +84,9 @@
#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)

+/* Not less than IORING_MAX_ENTRIES, so refs can be taken once per submission loop */
+#define IORING_REFS_THRESHOLD IORING_MAX_ENTRIES
+
/*
* Shift of 9 is 512 entries, or exactly one page on 64-bit archs
*/
@@ -197,6 +200,7 @@ struct fixed_file_data {
struct io_ring_ctx {
struct {
struct percpu_ref refs;
+ unsigned long taken_refs; /* used under @uring_lock */
} ____cacheline_aligned_in_smp;

struct {
@@ -690,6 +694,13 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
complete(&ctx->completions[0]);
}

+static void io_free_taken_refs(struct io_ring_ctx *ctx)
+{
+ if (ctx->taken_refs)
+ percpu_ref_put_many(&ctx->refs, ctx->taken_refs);
+ ctx->taken_refs = 0;
+}
+
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
@@ -4388,7 +4399,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
int i, submitted = 0;
- unsigned int extra_refs;
bool mm_fault = false;

/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -4398,9 +4408,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
return -EBUSY;
}

- if (!percpu_ref_tryget_many(&ctx->refs, nr))
- return -EAGAIN;
- extra_refs = nr;
+ if (ctx->taken_refs < IORING_REFS_THRESHOLD) {
+ if (unlikely(percpu_ref_is_dying(&ctx->refs))) {
+ io_free_taken_refs(ctx);
+ return -ENXIO;
+ }
+ if (!percpu_ref_tryget_many(&ctx->refs, IORING_REFS_THRESHOLD))
+ return -EAGAIN;
+ ctx->taken_refs += IORING_REFS_THRESHOLD;
+ }

if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, nr);
@@ -4417,8 +4433,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
submitted = -EAGAIN;
break;
}
- --extra_refs;
if (!io_get_sqring(ctx, req, &sqe)) {
+ /* not submitted, but a ref is freed */
+ ctx->taken_refs--;
__io_free_req(req);
break;
}
@@ -4454,8 +4471,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
io_queue_link_head(link);
if (statep)
io_submit_state_end(&state);
- if (extra_refs)
- percpu_ref_put_many(&ctx->refs, extra_refs);
+ ctx->taken_refs -= submitted;

/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);
@@ -5731,6 +5747,7 @@ static int io_uring_fasync(int fd, struct file *file, int on)
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
{
mutex_lock(&ctx->uring_lock);
+ io_free_taken_refs(ctx);
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);

@@ -6196,6 +6213,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,

if (opcode != IORING_UNREGISTER_FILES &&
opcode != IORING_REGISTER_FILES_UPDATE) {
+ io_free_taken_refs(ctx);
percpu_ref_kill(&ctx->refs);

/*
--
2.24.0