[PATCH bpf-next v3 6/6] selftests/bpf: tc_tunnel validate decap GSO state

From: Nick Hudson

Date: Tue Apr 07 2026 - 06:53:17 EST


Require the BPF_F_ADJ_ROOM_DECAP_L4_UDP and BPF_F_ADJ_ROOM_DECAP_L4_GRE
enum values via CO-RE enum existence checks (resolved at program load
time) so that missing kernel support makes the program fail fast with
TC_ACT_SHOT instead of silently proceeding.

After bpf_skb_adjust_room() decapsulation, inspect the skb_shared_info
and sk_buff state of GSO packets and assert that the tunnel GSO type
bits corresponding to the decap flags are cleared, and that
skb->encapsulation is consistent with whether any tunnel GSO bits
remain set.

Signed-off-by: Nick Hudson <nhudson@xxxxxxxxxx>
---
.../selftests/bpf/progs/test_tc_tunnel.c | 58 +++++++++++++++++++
1 file changed, 58 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index 7376df405a6b..74dfb694a210 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -6,6 +6,7 @@

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
#include "bpf_tracing_net.h"
#include "bpf_compiler.h"

@@ -37,6 +38,23 @@ struct vxlanhdr___local {

#define EXTPROTO_VXLAN 0x1

+#define SKB_GSO_UDP_TUNNEL_MASK (SKB_GSO_UDP_TUNNEL | \
+ SKB_GSO_UDP_TUNNEL_CSUM | \
+ SKB_GSO_TUNNEL_REMCSUM)
+
+#define SKB_GSO_TUNNEL_MASK (SKB_GSO_UDP_TUNNEL_MASK | \
+ SKB_GSO_GRE | \
+ SKB_GSO_GRE_CSUM | \
+ SKB_GSO_IPXIP4 | \
+ SKB_GSO_IPXIP6 | \
+ SKB_GSO_ESP)
+
+#define BPF_F_ADJ_ROOM_DECAP_L4_MASK (BPF_F_ADJ_ROOM_DECAP_L4_UDP | \
+ BPF_F_ADJ_ROOM_DECAP_L4_GRE)
+
+#define BPF_F_ADJ_ROOM_DECAP_IPXIP_MASK (BPF_F_ADJ_ROOM_DECAP_IPXIP4 | \
+ BPF_F_ADJ_ROOM_DECAP_IPXIP6)
+
#define VXLAN_FLAGS bpf_htonl(1<<27)
#define VNI_ID 1
#define VXLAN_VNI bpf_htonl(VNI_ID << 8)
@@ -592,6 +610,8 @@ int __encap_ip6vxlan_eth(struct __sk_buff *skb)
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO;
+ struct sk_buff *kskb;
+ struct skb_shared_info *shinfo;
struct ipv6_opt_hdr ip6_opt_hdr;
struct gre_hdr greh;
struct udphdr udph;
@@ -621,6 +641,11 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
break;
case IPPROTO_GRE:
olen += sizeof(struct gre_hdr);
+ if (!bpf_core_enum_value_exists(enum bpf_adj_room_flags,
+ BPF_F_ADJ_ROOM_DECAP_L4_GRE))
+ return TC_ACT_SHOT;
+ flags |= BPF_F_ADJ_ROOM_DECAP_L4_GRE;
+
if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
return TC_ACT_OK;
switch (bpf_ntohs(greh.protocol)) {
@@ -634,6 +659,10 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
break;
case IPPROTO_UDP:
olen += sizeof(struct udphdr);
+ if (!bpf_core_enum_value_exists(enum bpf_adj_room_flags,
+ BPF_F_ADJ_ROOM_DECAP_L4_UDP))
+ return TC_ACT_SHOT;
+ flags |= BPF_F_ADJ_ROOM_DECAP_L4_UDP;
if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
return TC_ACT_OK;
switch (bpf_ntohs(udph.dest)) {
@@ -655,6 +684,35 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;

+ kskb = bpf_cast_to_kern_ctx(skb);
+ shinfo = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info);
+ if (!shinfo->gso_size)
+ return TC_ACT_OK;
+
+ if ((flags & BPF_F_ADJ_ROOM_DECAP_L4_UDP) &&
+ (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_MASK))
+ return TC_ACT_SHOT;
+
+ if ((flags & BPF_F_ADJ_ROOM_DECAP_L4_GRE) &&
+ (shinfo->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM)))
+ return TC_ACT_SHOT;
+
+ if ((flags & BPF_F_ADJ_ROOM_DECAP_IPXIP4) &&
+ (shinfo->gso_type & SKB_GSO_IPXIP4))
+ return TC_ACT_SHOT;
+
+ if ((flags & BPF_F_ADJ_ROOM_DECAP_IPXIP6) &&
+ (shinfo->gso_type & SKB_GSO_IPXIP6))
+ return TC_ACT_SHOT;
+
+ if (flags & (BPF_F_ADJ_ROOM_DECAP_L4_MASK |
+ BPF_F_ADJ_ROOM_DECAP_IPXIP_MASK)) {
+ if ((shinfo->gso_type & SKB_GSO_TUNNEL_MASK) && !kskb->encapsulation)
+ return TC_ACT_SHOT;
+ if (!(shinfo->gso_type & SKB_GSO_TUNNEL_MASK) && kskb->encapsulation)
+ return TC_ACT_SHOT;
+ }
+
return TC_ACT_OK;
}

--
2.34.1