[PATCH bpf-next v2 4/4] bpf: more lenient bpf_skb_net_shrink() with BPF_F_ADJ_ROOM_FIXED_GSO

From: Maciej Żenczykowski
Date: Wed Jun 16 2021 - 20:10:22 EST


From: Maciej Żenczykowski <maze@xxxxxxxxxx>

This is to more closely match the behaviour of bpf_skb_change_proto(),
which now does not adjust gso_size, and thus theoretically supports
all gso types, and does not need to set SKB_GSO_DODGY nor reset
gso_segs to zero.
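
With BPF_F_ADJ_ROOM_FIXED_GSO set, a tc program should now (in theory)
be able to shrink headers on any GSO skb, not just TCP or UDP_L4 ones.
A minimal, untested sketch of such a caller (the 8-byte header size,
section and program names are made up for illustration):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int strip_outer_hdr(struct __sk_buff *skb)
{
	/* hypothetical 8-byte outer header; a negative len_diff takes
	 * the bpf_skb_net_shrink() path in the kernel
	 */
	if (bpf_skb_adjust_room(skb, -8, BPF_ADJ_ROOM_NET,
				BPF_F_ADJ_ROOM_FIXED_GSO))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";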

Something similar should probably be done with bpf_skb_net_grow(),
but that code scares me.

Cc: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Cc: Willem de Bruijn <willemb@xxxxxxxxxx>
Signed-off-by: Maciej Żenczykowski <maze@xxxxxxxxxx>
---
net/core/filter.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index 8f05498f497e..faf2bae0309b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3506,11 +3506,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
 			       BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
 		return -EINVAL;
 
-	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
-		/* udp gso_size delineates datagrams, only allow if fixed */
-		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
-		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
-			return -ENOTSUPP;
+	if (skb_is_gso(skb) &&
+	    !skb_is_gso_tcp(skb) &&
+	    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+		return -ENOTSUPP;
 	}
 
 	ret = skb_unclone(skb, GFP_ATOMIC);
@@ -3521,12 +3520,11 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
 	if (unlikely(ret < 0))
 		return ret;
 
-	if (skb_is_gso(skb)) {
+	if (skb_is_gso(skb) && !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
 		/* Due to header shrink, MSS can be upgraded. */
-		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
-			skb_increase_gso_size(shinfo, len_diff);
+		skb_increase_gso_size(shinfo, len_diff);
 
 		/* Header must be checked, and gso_segs recomputed. */
 		shinfo->gso_type |= SKB_GSO_DODGY;
--
2.32.0.272.g935e593368-goog