[PATCH] net: linearize skb when downgrading gso_size

From: Fred Li
Date: Mon Jul 08 2024 - 10:32:11 EST


Here is a patch that linearizes the skb when the gso_size is downgraded
and scatter-gather (SG) must be disabled. If there are no issues,
I will submit a formal patch shortly.

Signed-off-by: Fred Li <dracodingfly@xxxxxxxxx>
---
include/linux/skbuff.h | 22 ++++++++++++++++++++++
net/core/filter.c | 16 ++++++++++++----
net/core/skbuff.c | 19 ++-----------------
3 files changed, 36 insertions(+), 21 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5f11f9873341..99b7fc1e826a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2400,6 +2400,28 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}

+static inline bool skb_is_nonsg(const struct sk_buff *skb)
+{
+	struct sk_buff *check_skb;
+
+	for (check_skb = skb_shinfo(skb)->frag_list; check_skb;
+	     check_skb = check_skb->next) {
+		if (skb_headlen(check_skb) && !check_skb->head_frag) {
+			/* gso_size is untrusted, and we have a frag_list with
+			 * a linear non head_frag item.
+			 *
+			 * The frag_list members then need not terminate on
+			 * exact gso_size boundaries, so skb_frag_t page
+			 * sharing is not possible and segmentation must fall
+			 * back to copying the frag_list skbs (SG disabled).
+			 */
+			return true;
+		}
+	}
+
+	return false;
+}
+
static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
diff --git a/net/core/filter.c b/net/core/filter.c
index df4578219e82..c0e6e7f28635 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3525,13 +3525,21 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);

- /* Due to header grow, MSS needs to be downgraded. */
- if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
- skb_decrease_gso_size(shinfo, len_diff);
-
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
+
+	/* Due to header grow, MSS needs to be downgraded.
+	 * There is a BUG_ON() when segmenting a frag_list
+	 * with head_frag true, so linearize the skb after
+	 * downgrading the MSS.
+	 */
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+ skb_decrease_gso_size(shinfo, len_diff);
+ if (skb_is_nonsg(skb))
+ return skb_linearize(skb) ? : 0;
+ }
+
}

return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b1dab1b071fc..81e018185527 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4458,23 +4458,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,

if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
- struct sk_buff *check_skb;
-
- for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
- if (skb_headlen(check_skb) && !check_skb->head_frag) {
- /* gso_size is untrusted, and we have a frag_list with
- * a linear non head_frag item.
- *
- * If head_skb's headlen does not fit requested gso_size,
- * it means that the frag_list members do NOT terminate
- * on exact gso_size boundaries. Hence we cannot perform
- * skb_frag_t page sharing. Therefore we must fallback to
- * copying the frag_list skbs; we do so by disabling SG.
- */
- features &= ~NETIF_F_SG;
- break;
- }
- }
+ if (skb_is_nonsg(head_skb))
+ features &= ~NETIF_F_SG;
}

__skb_push(head_skb, doffset);
--
2.33.0