[PATCH bpf v5 2/2] selftests/bpf: cover same-reg sock_ops rtt_min request_sock access
From: Werner Kasselman
Date: Mon Apr 20 2026 - 19:02:31 EST
Add a tcpbpf sock_ops selftest that forces a same-register (dst_reg == src_reg) ctx->rtt_min read on request_sock-backed callbacks (BPF_SOCK_OPS_RWND_INIT and BPF_SOCK_OPS_NEEDS_ECN) and verifies that the observed value is zero.
This exercises the dst_reg == src_reg ctx-access rewrite path that the previous ctx_rewrite-only test did not cover.
Signed-off-by: Werner Kasselman <werner@xxxxxxxxxxx>
---
.../testing/selftests/bpf/prog_tests/tcpbpf_user.c | 4 ++++
.../testing/selftests/bpf/progs/test_tcpbpf_kern.c | 14 ++++++++++++++
tools/testing/selftests/bpf/test_tcpbpf.h | 2 ++
3 files changed, 20 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
index 7e8fe1bad03f..1b08e49327d0 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
@@ -42,6 +42,10 @@ static void verify_result(struct tcpbpf_globals *result)
/* check getsockopt for window_clamp */
ASSERT_EQ(result->window_clamp_client, 9216, "window_clamp_client");
ASSERT_EQ(result->window_clamp_server, 9216, "window_clamp_server");
+
+ /* check same-reg rtt_min read on request_sock-backed callbacks */
+ ASSERT_NEQ(result->rtt_min_req_seen, 0, "rtt_min_req_seen");
+ ASSERT_EQ(result->rtt_min_req_nonzero, 0, "rtt_min_req_nonzero");
}
static void run_test(struct tcpbpf_globals *result)
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 6935f32eeb8f..a488b282b5dd 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -33,6 +33,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
+ long rtt_min = (long)skops;
struct tcphdr *thdr;
int window_clamp = 9216;
int save_syn = 1;
@@ -84,6 +85,19 @@ int bpf_testcb(struct bpf_sock_ops *skops)
global.event_map |= (1 << op);
+ if (!skops->is_fullsock &&
+ (op == BPF_SOCK_OPS_RWND_INIT || op == BPF_SOCK_OPS_NEEDS_ECN)) {
+ asm volatile (
+ "%[rtt_min] = *(u32 *)(%[rtt_min] + %[rtt_min_off]);\n"
+ : [rtt_min] "+r"(rtt_min)
+ : [rtt_min_off] "i"(offsetof(struct bpf_sock_ops, rtt_min))
+ :);
+
+ global.rtt_min_req_seen = 1;
+ if (rtt_min)
+ global.rtt_min_req_nonzero = 1;
+ }
+
switch (op) {
case BPF_SOCK_OPS_TCP_CONNECT_CB:
rv = bpf_setsockopt(skops, SOL_TCP, TCP_WINDOW_CLAMP,
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
index 9dd9b5590f9d..e9806215cbc0 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf.h
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -18,5 +18,7 @@ struct tcpbpf_globals {
__u32 tcp_saved_syn;
__u32 window_clamp_client;
__u32 window_clamp_server;
+ __u32 rtt_min_req_seen;
+ __u32 rtt_min_req_nonzero;
};
#endif
--
2.43.0