[v3 net-next 08/10] skbuff: reuse NAPI skb cache on allocation path (__build_skb())

From: Alexander Lobakin
Date: Tue Feb 09 2021 - 19:34:11 EST


Instead of just bulk-flushing skbuff_heads queued up through
napi_consume_skb() or __kfree_skb_defer(), try to reuse them
on the allocation path.
If the cache is empty on allocation, bulk-allocate the first
half, which is more efficient than per-skb allocation.
If the cache is full on freeing, bulk-wipe the second half.
This also includes custom KASAN poisoning/unpoisoning to make
doubly sure there are no use-after-free cases.
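
For illustration, with NAPI_SKB_CACHE_SIZE == 64 and
NAPI_SKB_CACHE_HALF == 32 (defined below), the two slow paths
behave as follows (an illustrative sketch, not patch code):

	/* Allocation with an empty cache: one bulk call replaces
	 * up to 32 per-skb kmem_cache_alloc() calls.
	 *
	 *   skb_count == 0  ->  kmem_cache_alloc_bulk(.., 32, ..)
	 *                   ->  skb_count == 32, one head handed out
	 *
	 * Freeing into a full cache: only the upper half is flushed,
	 * so 32 still-warm heads stay behind for later allocations.
	 *
	 *   skb_count == 64 ->  kmem_cache_free_bulk(.., 32, &skb_cache[32])
	 *                   ->  skb_count == 32
	 */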

Functions that got the cache fastpath:
- {,__}build_skb();
- {,__}netdev_alloc_skb();
- {,__}napi_alloc_skb().

Note on the "napi_safe" argument:
the NAPI cache should be accessed only from BH-disabled or (better)
NAPI context. To make sure the access is safe, an
in_serving_softirq() check is used.
However, there are plenty of cases when we know for sure that
we're in such a context. This includes: build_skb() (called only
from NIC drivers in NAPI Rx context) and {,__}napi_alloc_skb()
(called from the same place or from kernel network softirq
functions).
We can use that knowledge to avoid unnecessary checks.
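
For example, in a hypothetical driver (all foo_* names below are
made up for illustration), both the freeing and the reallocation
happen under the same NAPI poller, so the cache fastpath applies
on both sides:

	/* Sketch of a NAPI poll routine: everything here runs in
	 * BH-disabled (NAPI) context. Heads recycled on the Tx side
	 * via napi_consume_skb() land in the per-CPU cache and are
	 * handed right back out by build_skb() on the Rx side.
	 */
	static int foo_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		/* Tx completion; calls napi_consume_skb(skb, budget) */
		foo_clean_tx_ring(napi);

		while (done < budget) {
			void *data = foo_rx_next_buffer(napi);
			struct sk_buff *skb;

			if (!data)
				break;

			/* frag_size > 0: @data comes from a page frag */
			skb = build_skb(data, foo_rx_truesize());
			if (unlikely(!skb)) {
				skb_free_frag(data);
				break;
			}

			/* skb_put(), eth_type_trans() etc. elided */
			napi_gro_receive(napi, skb);
			done++;
		}

		if (done < budget)
			napi_complete_done(napi, done);

		return done;
	}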

Suggested-by: Edward Cree <ecree.xilinx@xxxxxxxxx> # Unified cache part
Suggested-by: Eric Dumazet <edumazet@xxxxxxxxxx> # KASAN poisoning
Suggested-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx> # Help with KASAN
Signed-off-by: Alexander Lobakin <alobakin@xxxxx>
---
 include/linux/skbuff.h   |  2 +-
 net/core/skbuff.c        | 61 ++++++++++++++++++++++++++++------------
 net/netlink/af_netlink.c |  2 +-
 3 files changed, 45 insertions(+), 20 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e0707296098..5bb443d37bf4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1082,7 +1082,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
-struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size, bool napi_safe);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 860a9d4f752f..8747566a8136 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -120,6 +120,7 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 }
 
 #define NAPI_SKB_CACHE_SIZE	64
+#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
 
 struct napi_alloc_cache {
	struct page_frag_cache page;
@@ -164,6 +165,30 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 }
 EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
+static struct sk_buff *napi_skb_cache_get(bool napi_safe)
+{
+	struct napi_alloc_cache *nc;
+	struct sk_buff *skb;
+
+	if (!napi_safe && unlikely(!in_serving_softirq()))
+		return kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+
+	nc = this_cpu_ptr(&napi_alloc_cache);
+
+	if (unlikely(!nc->skb_count))
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+						      GFP_ATOMIC,
+						      NAPI_SKB_CACHE_HALF,
+						      nc->skb_cache);
+	if (unlikely(!nc->skb_count))
+		return NULL;
+
+	skb = nc->skb_cache[--nc->skb_count];
+	kasan_unpoison_object_data(skbuff_head_cache, skb);
+
+	return skb;
+}
+
 /* Caller must provide SKB that is memset cleared */
 static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
@@ -210,11 +235,11 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
  * before giving packet to stack.
  * RX rings only contains data buffers, not full skbs.
  */
-struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size, bool napi_safe)
 {
	struct sk_buff *skb;
 
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = napi_skb_cache_get(napi_safe);
	if (unlikely(!skb))
		return NULL;
 
@@ -231,7 +256,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
  */
 struct sk_buff *build_skb(void *data, unsigned int frag_size)
 {
-	struct sk_buff *skb = __build_skb(data, frag_size);
+	struct sk_buff *skb = __build_skb(data, frag_size, true);
 
	if (skb && frag_size) {
		skb->head_frag = 1;
@@ -443,7 +468,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
	if (unlikely(!data))
		return NULL;
 
-	skb = __build_skb(data, len);
+	skb = __build_skb(data, len, false);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
@@ -507,7 +532,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
	if (unlikely(!data))
		return NULL;
 
-	skb = __build_skb(data, len);
+	skb = __build_skb(data, len, true);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
@@ -838,31 +863,31 @@ void __consume_stateless_skb(struct sk_buff *skb)
	kfree_skbmem(skb);
 }
 
-static inline void _kfree_skb_defer(struct sk_buff *skb)
+static void napi_skb_cache_put(struct sk_buff *skb)
 {
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	u32 i;
 
	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);
 
-	/* record skb to CPU local list */
+	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;
 
-#ifdef CONFIG_SLUB
-	/* SLUB writes into objects when freeing */
-	prefetchw(skb);
-#endif
-
-	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
-		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
+			kasan_unpoison_object_data(skbuff_head_cache,
+						   nc->skb_cache[i]);
+
+		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
+		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
 }
+
 void __kfree_skb_defer(struct sk_buff *skb)
 {
-	_kfree_skb_defer(skb);
+	napi_skb_cache_put(skb);
 }
 
 void napi_consume_skb(struct sk_buff *skb, int budget)
@@ -887,7 +912,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
		return;
	}
 
-	_kfree_skb_defer(skb);
+	napi_skb_cache_put(skb);
 }
 EXPORT_SYMBOL(napi_consume_skb);
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dd488938447f..afba4e11a526 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1190,7 +1190,7 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
	if (data == NULL)
		return NULL;
 
-	skb = __build_skb(data, size);
+	skb = __build_skb(data, size, false);
	if (skb == NULL)
		vfree(data);
	else
--
2.30.0