[PATCH net-next v5 17/27] io_uring: add rsrc referencing for notifiers

From: Pavel Begunkov
Date: Tue Jul 12 2022 - 16:55:15 EST


In preparation for zerocopy sends with fixed buffers, make notifiers
reference the rsrc node to protect the fixed buffers they use. We can't
just grab the node for the send request itself, as a notifier can
outlive the request that used it.
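
As a rough picture of the refcounting scheme this buys us, below is a
minimal userspace sketch (plain C11 atomics, made-up RSRC_REF_BATCH
constant; none of this is the kernel implementation): the ring context
pre-charges a batch of references against the current rsrc node, a
notifier consumes one cached reference when it is allocated, and drops
it in its completion path, which may run long after the originating
request has been freed.

/*
 * Standalone illustration only -- not kernel code. Models how a
 * context's cached refs pin an rsrc node for a notifier that can
 * outlive its request. RSRC_REF_BATCH and the C11 atomics are
 * assumptions for the sketch.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define RSRC_REF_BATCH 32	/* assumed batch size */

struct rsrc_node {
	atomic_int refs;
};

struct ring_ctx {
	struct rsrc_node *rsrc_node;	/* current node */
	int rsrc_cached_refs;		/* refs taken but not yet handed out */
};

/* Take a whole batch at once so the common path is a plain decrement. */
static void rsrc_refs_refill(struct ring_ctx *ctx)
{
	ctx->rsrc_cached_refs += RSRC_REF_BATCH;
	atomic_fetch_add(&ctx->rsrc_node->refs, RSRC_REF_BATCH);
}

/* Analogue of io_charge_rsrc_node(): hand one cached ref to a user. */
static void charge_rsrc_node(struct ring_ctx *ctx)
{
	if (--ctx->rsrc_cached_refs < 0)
		rsrc_refs_refill(ctx);
}

/* Analogue of io_rsrc_put_node(): the user returns its ref directly. */
static void rsrc_put_node(struct rsrc_node *node, int nr)
{
	atomic_fetch_sub(&node->refs, nr);
}

int main(void)
{
	struct rsrc_node node = { .refs = 0 };
	struct ring_ctx ctx = { .rsrc_node = &node, .rsrc_cached_refs = 0 };

	/* io_alloc_notif(): the notifier pins the current rsrc node. */
	charge_rsrc_node(&ctx);
	assert(atomic_load(&node.refs) > 0);

	/* __io_notif_complete_tw(): drop the ref, possibly long after the
	 * request that created the notifier has been freed. */
	rsrc_put_node(&node, 1);

	printf("refs=%d cached=%d\n", atomic_load(&node.refs),
	       ctx.rsrc_cached_refs);
	return 0;
}

The point of the cache is that the allocation-time path, which runs
under ->uring_lock, is a plain decrement; the shared counter is only
touched once per batch.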

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 io_uring/notif.c |  5 +++++
 io_uring/notif.h |  1 +
 io_uring/rsrc.h  | 12 +++++++++---
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/io_uring/notif.c b/io_uring/notif.c
index aec74f88fc33..0a2e98bd74f6 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -7,10 +7,12 @@

#include "io_uring.h"
#include "notif.h"
+#include "rsrc.h"

 static void __io_notif_complete_tw(struct callback_head *cb)
 {
 	struct io_notif *notif = container_of(cb, struct io_notif, task_work);
+	struct io_rsrc_node *rsrc_node = notif->rsrc_node;
 	struct io_ring_ctx *ctx = notif->ctx;
 
 	if (likely(notif->task)) {
@@ -25,6 +27,7 @@ static void __io_notif_complete_tw(struct callback_head *cb)
 	ctx->notif_locked_nr++;
 	io_cq_unlock_post(ctx);
 
+	io_rsrc_put_node(rsrc_node, 1);
 	percpu_ref_put(&ctx->refs);
 }

@@ -119,6 +122,8 @@ struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
 	/* master ref owned by io_notif_slot, will be dropped on flush */
 	refcount_set(&notif->uarg.refcnt, 1);
 	percpu_ref_get(&ctx->refs);
+	notif->rsrc_node = ctx->rsrc_node;
+	io_charge_rsrc_node(ctx);
 	return notif;
 }

diff --git a/io_uring/notif.h b/io_uring/notif.h
index 23ca7620fff9..1dd48efb7744 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -10,6 +10,7 @@
 struct io_notif {
 	struct ubuf_info uarg;
 	struct io_ring_ctx *ctx;
+	struct io_rsrc_node *rsrc_node;
 
 	/* complete via tw if ->task is non-NULL, fallback to wq otherwise */
 	struct task_struct *task;
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 87f58315b247..af342fd239d0 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -135,6 +135,13 @@ static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
 	}
 }
 
+static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx)
+{
+	ctx->rsrc_cached_refs--;
+	if (unlikely(ctx->rsrc_cached_refs < 0))
+		io_rsrc_refs_refill(ctx);
+}
+
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 					struct io_ring_ctx *ctx,
 					unsigned int issue_flags)
@@ -144,9 +151,8 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,

 		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
 			lockdep_assert_held(&ctx->uring_lock);
-			ctx->rsrc_cached_refs--;
-			if (unlikely(ctx->rsrc_cached_refs < 0))
-				io_rsrc_refs_refill(ctx);
+
+			io_charge_rsrc_node(ctx);
 		} else {
 			percpu_ref_get(&req->rsrc_node->refs);
 		}
--
2.37.0