[PATCH bpf v5 1/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock

From: Werner Kasselman

Date: Mon Apr 20 2026 - 19:01:22 EST


sock_ops_convert_ctx_access() emits the rtt_min load without the is_locked_tcp_sock guard that protects every other tcp_sock field access. On request_sock-backed sock_ops callbacks, sk points at a tcp_request_sock, which is considerably smaller than tcp_sock, so the converted load reads past the end of the allocation.
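
In C terms the unguarded conversion amounts to the following (an illustrative sketch of the failure mode, not the emitted BPF):

	struct sock *sk = skops->sk;	/* may be a tcp_request_sock */
	struct tcp_sock *tp = (struct tcp_sock *)sk;	/* bogus cast for a request_sock */
	u32 v = tp->rtt_min.s[0].v;	/* lands beyond sizeof(struct tcp_request_sock) */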

Extract the guarded tcp_sock field load sequence into SOCK_OPS_LOAD_TCP_SOCK_FIELD(), which takes an explicit load size and offset, and use it for the rtt_min access with the sub-field offset computed via offsetof(struct minmax_sample, v). SOCK_OPS_GET_FIELD() becomes a thin wrapper that keeps its BUILD_BUG_ON() size check. Reusing the shared helper keeps rtt_min aligned with the other guarded tcp_sock field loads and preserves the dst_reg == src_reg handling, which spills a scratch register to temp and zeros the destination register when the guard fails.
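
The computed offset is byte-for-byte what the old open-coded sequence read, since v immediately follows t in struct minmax_sample (layout from include/linux/win_minmax.h):

	/* struct minmax_sample { u32 t; u32 v; }; */
	off = offsetof(struct tcp_sock, rtt_min)     /* struct minmax, s[0] at offset 0 */
	    + offsetof(struct minmax_sample, v);     /* == sizeof_field(struct minmax_sample, t) */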

Found via AST-based call-graph analysis using sqry.

Fixes: 44f0e43037d3 ("bpf: Add support for reading sk_state and more")
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Werner Kasselman <werner@xxxxxxxxxxx>
---
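Not part of the patch: a minimal, untested sockops sketch for reviewers who want to exercise the converted rtt_min load (program and section names are arbitrary):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("sockops")
	int rtt_min_probe(struct bpf_sock_ops *skops)
	{
		/* Any callback that reads skops->rtt_min goes through the
		 * converted load this patch guards.
		 */
		if (skops->op == BPF_SOCK_OPS_RTT_CB)
			bpf_printk("rtt_min=%u", skops->rtt_min);
		return 1;
	}

	char _license[] SEC("license") = "GPL";
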
net/core/filter.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index fcfcb72663ca..2e7c33d00749 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10535,12 +10535,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn = insn_buf;
int off;

-/* Helper macro for adding read access to tcp_sock or sock fields. */
-#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+/* Helper macro for adding guarded read access to tcp_sock fields. */
+#define SOCK_OPS_LOAD_TCP_SOCK_FIELD(FIELD_SIZE, FIELD_OFFSET) \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
- BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
- sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
@@ -10548,7 +10546,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
offsetof(struct bpf_sock_ops_kern, \
- temp)); \
+ temp)); \
fullsock_reg = reg; \
jmp += 2; \
} \
@@ -10562,24 +10560,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
if (si->dst_reg == si->src_reg) \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
- temp)); \
+ temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, sk));\
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
- OBJ_FIELD), \
+ *insn++ = BPF_LDX_MEM(FIELD_SIZE, \
si->dst_reg, si->dst_reg, \
- offsetof(OBJ, OBJ_FIELD)); \
+ FIELD_OFFSET); \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(2); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
- temp)); \
+ temp)); \
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0); \
} \
} while (0)

+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
+ sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
+ SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),\
+ offsetof(OBJ, OBJ_FIELD)); \
+ } while (0)
+
#define SOCK_OPS_GET_SK() \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
@@ -10822,14 +10827,9 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
sizeof(struct minmax));
BUILD_BUG_ON(sizeof(struct minmax) <
sizeof(struct minmax_sample));
-
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct bpf_sock_ops_kern, sk),
- si->dst_reg, si->src_reg,
- offsetof(struct bpf_sock_ops_kern, sk));
- *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
- offsetof(struct tcp_sock, rtt_min) +
- sizeof_field(struct minmax_sample, t));
+ off = offsetof(struct tcp_sock, rtt_min) +
+ offsetof(struct minmax_sample, v);
+ SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_W, off);
break;

case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
--
2.43.0