[PATCH 1/2] io_uring/bpf_filter: move filter size and populate helper into struct

From: Jens Axboe

Date: Wed Feb 11 2026 - 10:07:16 EST


Rather than open-code this logic in io_uring_populate_bpf_ctx() with
a switch, move it to the issue-side definitions. Besides making this
easier to extend in the future, it's also a prep patch for using the
pdu size of a given opcode filter elsewhere.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
io_uring/bpf_filter.c | 17 ++++++-----------
io_uring/opdef.c | 6 ++++++
io_uring/opdef.h | 6 ++++++
3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/io_uring/bpf_filter.c b/io_uring/bpf_filter.c
index 3816883a45ed..8ac7d06de122 100644
--- a/io_uring/bpf_filter.c
+++ b/io_uring/bpf_filter.c
@@ -26,6 +26,8 @@ static const struct io_bpf_filter dummy_filter;
static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
struct io_kiocb *req)
{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+
bctx->opcode = req->opcode;
bctx->sqe_flags = (__force int) req->flags & SQE_VALID_FLAGS;
bctx->user_data = req->cqe.user_data;
@@ -34,19 +36,12 @@ static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
sizeof(*bctx) - offsetof(struct io_uring_bpf_ctx, pdu_size));

/*
- * Opcodes can provide a handler fo populating more data into bctx,
+ * Opcodes can provide a handler for populating more data into bctx,
* for filters to use.
*/
- switch (req->opcode) {
- case IORING_OP_SOCKET:
- bctx->pdu_size = sizeof(bctx->socket);
- io_socket_bpf_populate(bctx, req);
- break;
- case IORING_OP_OPENAT:
- case IORING_OP_OPENAT2:
- bctx->pdu_size = sizeof(bctx->open);
- io_openat_bpf_populate(bctx, req);
- break;
+ if (def->filter_pdu_size) {
+ bctx->pdu_size = def->filter_pdu_size;
+ def->filter_populate(bctx, req);
}
}

diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index df52d760240e..91a23baf415e 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -221,8 +221,10 @@ const struct io_issue_def io_issue_defs[] = {
.issue = io_fallocate,
},
[IORING_OP_OPENAT] = {
+ .filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
.prep = io_openat_prep,
.issue = io_openat,
+ .filter_populate = io_openat_bpf_populate,
},
[IORING_OP_CLOSE] = {
.prep = io_close_prep,
@@ -309,8 +311,10 @@ const struct io_issue_def io_issue_defs[] = {
#endif
},
[IORING_OP_OPENAT2] = {
+ .filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
.prep = io_openat2_prep,
.issue = io_openat2,
+ .filter_populate = io_openat_bpf_populate,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
@@ -406,8 +410,10 @@ const struct io_issue_def io_issue_defs[] = {
[IORING_OP_SOCKET] = {
.audit_skip = 1,
#if defined(CONFIG_NET)
+ .filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, socket),
.prep = io_socket_prep,
.issue = io_socket,
+ .filter_populate = io_socket_bpf_populate,
#else
.prep = io_eopnotsupp_prep,
#endif
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index aa37846880ff..faf3955dce8b 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -2,6 +2,8 @@
#ifndef IOU_OP_DEF_H
#define IOU_OP_DEF_H

+struct io_uring_bpf_ctx;
+
struct io_issue_def {
/* needs req->file assigned */
unsigned needs_file : 1;
@@ -33,8 +35,12 @@ struct io_issue_def {
/* size of async data needed, if any */
unsigned short async_size;

+ /* bpf filter pdu size, if any */
+ unsigned short filter_pdu_size;
+
int (*issue)(struct io_kiocb *, unsigned int);
int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
+ void (*filter_populate)(struct io_uring_bpf_ctx *, struct io_kiocb *);
};

struct io_cold_def {
--
2.51.0