[PATCH 12/14] skbuff: optimise alloc_skb_with_frags()

From: Pavel Begunkov
Date: Mon Jan 10 2022 - 20:25:28 EST


Many users of alloc_skb_with_frags() pass zero datalen, e.g. all
callers of sock_alloc_send_skb(), including udp. Extract the linear skb
allocation into an inline helper, so that such callers avoid an
out-of-line call into the frag allocation part, now split off into
alloc_skb_frags(). While at it, do a minor cleanup: don't set errcode
in advance, as the compiler can't optimise the extra stores away; set
it only on the failure paths, keeping the error codes as before
(-EMSGSIZE for an oversized request, -ENOBUFS for a failed allocation).
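For illustration, a zero datalen caller then boils down to a plain
alloc_skb() with no out-of-line call. Sketch only, not part of the
patch; do_alloc() and header_len are made-up names:

	static int do_alloc(unsigned int header_len)
	{
		struct sk_buff *skb;
		int err;

		/* data_len == 0: the inline wrapper returns right after
		 * alloc_skb() and never calls alloc_skb_frags()
		 */
		skb = alloc_skb_with_frags(header_len, 0, 0, &err, GFP_KERNEL);
		if (!skb)
			return err;	/* -ENOBUFS */
		consume_skb(skb);
		return 0;
	}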

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 include/linux/skbuff.h | 41 ++++++++++++++++++++++++++++++++++++-----
 net/core/skbuff.c      | 35 ++++++++++++++++-------------------
 2 files changed, 52 insertions(+), 24 deletions(-)
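
For reference, external behaviour of the paged path stays the same; a
caller can still tell the two failure modes apart (usage sketch,
illustration only, not part of the patch):

	struct sk_buff *skb;
	int err;

	/* 128 byte linear part plus three pages worth of frags */
	skb = alloc_skb_with_frags(128, 3 * PAGE_SIZE, 0, &err, GFP_KERNEL);
	if (!skb) {
		/* err is -EMSGSIZE if the request needs more than
		 * MAX_SKB_FRAGS pages, -ENOBUFS if an allocation failed
		 */
		return err;
	}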

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7fd2b44aada0..8ea145101b56 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1130,11 +1130,42 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 }
 
-struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
-				     unsigned long data_len,
-				     int max_page_order,
-				     int *errcode,
-				     gfp_t gfp_mask);
+struct sk_buff *alloc_skb_frags(struct sk_buff *skb,
+				unsigned long data_len,
+				int max_page_order,
+				int *errcode,
+				gfp_t gfp_mask);
+
+/**
+ * alloc_skb_with_frags - allocate skb with page frags
+ *
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
+ *
+ * This can be used to allocate a paged skb, given a maximal order for frags.
+ */
+static inline struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+						   unsigned long data_len,
+						   int max_page_order,
+						   int *errcode,
+						   gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(header_len, gfp_mask);
+	if (unlikely(!skb)) {
+		*errcode = -ENOBUFS;
+		return NULL;
+	}
+
+	if (!data_len)
+		return skb;
+	return alloc_skb_frags(skb, data_len, max_page_order, errcode, gfp_mask);
+}
+
 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
 
 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a9b8ac38dc1a..7811dde22f26 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5922,40 +5922,36 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
 
 /**
- * alloc_skb_with_frags - allocate skb with page frags
+ * alloc_skb_frags - allocate page frags for skb
  *
- * @header_len: size of linear part
+ * @skb: buffer to add frags to
  * @data_len: needed length in frags
  * @max_page_order: max page order desired.
  * @errcode: pointer to error code if any
  * @gfp_mask: allocation mask
  *
- * This can be used to allocate a paged skb, given a maximal order for frags.
+ * This can be used to allocate pages for @skb, given a maximal order for
+ * frags. Note that @skb is freed on failure.
  */
-struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
-				     unsigned long data_len,
-				     int max_page_order,
-				     int *errcode,
-				     gfp_t gfp_mask)
+struct sk_buff *alloc_skb_frags(struct sk_buff *skb,
+				unsigned long data_len,
+				int max_page_order,
+				int *errcode,
+				gfp_t gfp_mask)
 {
 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	unsigned long chunk;
-	struct sk_buff *skb;
 	struct page *page;
 	int i;
 
-	*errcode = -EMSGSIZE;
 	/* Note this test could be relaxed, if we succeed to allocate
 	 * high order pages...
 	 */
-	if (npages > MAX_SKB_FRAGS)
-		return NULL;
-
-	*errcode = -ENOBUFS;
-	skb = alloc_skb(header_len, gfp_mask);
-	if (!skb)
-		return NULL;
-
+	if (unlikely(npages > MAX_SKB_FRAGS)) {
+		*errcode = -EMSGSIZE;
+		kfree_skb(skb);
+		return NULL;
+	}
 	skb->truesize += npages << PAGE_SHIFT;
 
 	for (i = 0; npages > 0; i++) {
@@ -5989,9 +5985,10 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
 
 failure:
 	kfree_skb(skb);
+	*errcode = -ENOBUFS;
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_skb_with_frags);
+EXPORT_SYMBOL(alloc_skb_frags);
 
 /* carve out the first off bytes from skb when off < headlen */
 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
--
2.34.1