Re: [PATCH v2 3/5] bpf: add helper masks for ADJ_ROOM flags and encap validation
From: Martin KaFai Lau
Date: Fri Mar 27 2026 - 15:09:02 EST
On 3/27/26 3:55 AM, Hudson, Nick wrote:
On Mar 26, 2026, at 5:49 PM, Martin KaFai Lau <martin.lau@xxxxxxxxx> wrote:
!-------------------------------------------------------------------|
This Message Is From an External Sender
This message came from outside your organization.
|-------------------------------------------------------------------!
On 3/26/26 10:02 AM, Hudson, Nick wrote:
If a user supplies +ve len_diff and attempts to pass a DECAP flag.

static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,

Under which case will this new check be hit?
u64 flags)
@@ -3502,6 +3513,11 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
unsigned int gso_type = SKB_GSO_DODGY;
int ret;
+ if (unlikely(flags & ~(BPF_F_ADJ_ROOM_ENCAP_MASK |
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET |
+ BPF_F_ADJ_ROOM_FIXED_GSO)))
The commit message had
Add flag validation to bpf_skb_net_grow() to reject invalid encap
flags early.
There is DECAP_MASK check in bpf_skb_adjust_room() and then !shrink is rejected. What am I missing?
Duh, right.
Do you prefer to do all the flag checking in bpf_skb_adjust_room, or keep the encap/decap split?
The new decap flag checks added in patch 4? I do not have a strong preference on whether they should be moved to shrink() or remain in bpf_skb_adjust_room().
If the decap flag checks move into shrink(), it would also make sense to move the corresponding length checks into grow() and shrink(). This check refactoring probably needs another patch, after the macro refactoring patch mentioned earlier.
Likely still broken, but it may still be worth sharing compiler-tested code for this. It is based on patch 3 (with the new flags check in grow). The error return value can differ for some invalid shrink cases because of the check ordering, but I do not think that matters.
diff --git a/net/core/filter.c b/net/core/filter.c
index de61503f06f6..895e94865a9f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3510,6 +3510,8 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
BPF_F_ADJ_ROOM_DECAP_MASK | \
BPF_F_ADJ_ROOM_NO_CSUM_RESET)
+#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
+
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{
@@ -3518,6 +3520,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
u16 mac_len = 0, inner_net = 0, inner_trans = 0;
const u8 meta_len = skb_metadata_len(skb);
unsigned int gso_type = SKB_GSO_DODGY;
+ u32 len_max = BPF_SKB_MAX_LEN;
int ret;
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_ENCAP_MASK |
@@ -3525,6 +3528,9 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
BPF_F_ADJ_ROOM_FIXED_GSO)))
return -EINVAL;
+ if (skb->len + len_diff > len_max && !skb_is_gso(skb))
+ return -ENOTSUPP;
+
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
/* udp gso_size delineates datagrams, only allow if fixed */
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
@@ -3632,6 +3638,8 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{
+ u32 len_cur = skb->len - skb_network_offset(skb);
+ u32 len_min = bpf_skb_net_base_len(skb);
int ret;
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_DECAP_MASK |
@@ -3639,6 +3647,22 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
return -EINVAL;
+ if (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
+ switch (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
+ case BPF_F_ADJ_ROOM_DECAP_L3_IPV4:
+ len_min = sizeof(struct iphdr);
+ break;
+ case BPF_F_ADJ_ROOM_DECAP_L3_IPV6:
+ len_min = sizeof(struct ipv6hdr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (len_diff >= len_cur || len_cur - len_diff < len_min)
+ return -ENOTSUPP;
+
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
/* udp gso_size delineates datagrams, only allow if fixed */
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
@@ -3677,8 +3701,6 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
return 0;
}
-#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
-
BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
@@ -3723,9 +3745,7 @@ static const struct bpf_func_proto sk_skb_adjust_room_proto = {
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
- u32 len_cur, len_diff_abs = abs(len_diff);
- u32 len_min = bpf_skb_net_base_len(skb);
- u32 len_max = BPF_SKB_MAX_LEN;
+ u32 len_diff_abs = abs(len_diff);
__be16 proto = skb->protocol;
bool shrink = len_diff < 0;
u32 off;
@@ -3750,29 +3770,6 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
return -ENOTSUPP;
}
- if (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
- if (!shrink)
- return -EINVAL;
-
- switch (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
- case BPF_F_ADJ_ROOM_DECAP_L3_IPV4:
- len_min = sizeof(struct iphdr);
- break;
- case BPF_F_ADJ_ROOM_DECAP_L3_IPV6:
- len_min = sizeof(struct ipv6hdr);
- break;
- default:
- return -EINVAL;
- }
- }
-
- len_cur = skb->len - skb_network_offset(skb);
- if ((shrink && (len_diff_abs >= len_cur ||
- len_cur - len_diff_abs < len_min)) ||
- (!shrink && (skb->len + len_diff_abs > len_max &&
- !skb_is_gso(skb))))
- return -ENOTSUPP;
-
ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
bpf_skb_net_grow(skb, off, len_diff_abs, flags);
if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))