[PATCH mel-git 2/3] net: page_pool: use alloc_pages_bulk in refill code path
From: Jesper Dangaard Brouer
Date: Wed Mar 24 2021 - 17:35:59 EST
There are cases where the page_pool needs to refill with pages from the
page allocator. Some workloads cause the page_pool to release pages
instead of recycling them.
For these workloads it can improve performance to bulk-allocate pages
from the page allocator to refill the alloc cache.
One such workload is XDP-redirect with the 100G mlx5 driver (which uses
page_pool), redirecting xdp_frame packets into a veth. The veth does
XDP_PASS and creates an SKB from the xdp_frame, and that SKB cannot
return the page to the page_pool.
Performance results are available under the GitHub xdp-project[1]:
[1] https://github.com/xdp-project/xdp-project/blob/master/areas/mem/page_pool06_alloc_pages_bulk.org
Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
---
net/core/page_pool.c | 72 ++++++++++++++++++++++++++++++++------------------
1 file changed, 46 insertions(+), 26 deletions(-)
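
A note on the cache-has-room assumption in the new slow path:
__page_pool_alloc_pages_slow() is only entered once the pool's alloc
cache (including the ptr_ring refill attempt in
__page_pool_get_cached()) has come up empty, so alloc.cache[] always
has room for a full PP_ALLOC_CACHE_REFILL worth of pages. Below is an
abridged sketch of the existing caller in net/core/page_pool.c,
simplified for illustration and not part of this patch:

/* Abridged from existing net/core/page_pool.c, for illustration only */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: get a page from the alloc cache or ptr_ring */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: caches empty, do real (now bulk) allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
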
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 40e1b2beaa6c..3bf6e7f5fc89 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -203,38 +203,17 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
return true;
}
-/* slow path */
-noinline
-static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
- gfp_t _gfp)
+static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
+ gfp_t gfp)
{
- unsigned int pp_flags = pool->p.flags;
struct page *page;
- gfp_t gfp = _gfp;
-
- /* We could always set __GFP_COMP, and avoid this branch, as
- * prep_new_page() can handle order-0 with __GFP_COMP.
- */
- if (pool->p.order)
- gfp |= __GFP_COMP;
-
- /* FUTURE development:
- *
- * Current slow-path essentially falls back to single page
- * allocations, which doesn't improve performance. This code
- * need bulk allocation support from the page allocator code.
- */
- /* Cache was empty, do real allocation */
-#ifdef CONFIG_NUMA
+ gfp |= __GFP_COMP;
page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
-#else
- page = alloc_pages(gfp, pool->p.order);
-#endif
- if (!page)
+ if (unlikely(!page))
return NULL;
- if ((pp_flags & PP_FLAG_DMA_MAP) &&
+ if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);
return NULL;
@@ -243,6 +222,47 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+ return page;
+}
+
+/* slow path */
+noinline
+static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+ gfp_t gfp)
+{
+ const int bulk = PP_ALLOC_CACHE_REFILL;
+ unsigned int pp_flags = pool->p.flags;
+ unsigned int pp_order = pool->p.order;
+ struct page *page, *next;
+ LIST_HEAD(page_list);
+
+ /* Don't support bulk alloc for high-order pages */
+ if (unlikely(pp_order))
+ return __page_pool_alloc_page_order(pool, gfp);
+
+ if (unlikely(!alloc_pages_bulk_list(gfp, bulk, &page_list)))
+ return NULL;
+
+ list_for_each_entry_safe(page, next, &page_list, lru) {
+ list_del(&page->lru);
+ if ((pp_flags & PP_FLAG_DMA_MAP) &&
+ unlikely(!page_pool_dma_map(pool, page))) {
+ put_page(page);
+ continue;
+ }
+ /* Alloc cache has room as it is empty on function call */
+ pool->alloc.cache[pool->alloc.count++] = page;
+ /* Track how many pages are held 'in-flight' */
+ pool->pages_state_hold_cnt++;
+ trace_page_pool_state_hold(pool, page,
+ pool->pages_state_hold_cnt);
+ }
+
+ /* Return last page */
+ if (likely(pool->alloc.count > 0))
+ page = pool->alloc.cache[--pool->alloc.count];
+ else
+ page = NULL;
/* When page just alloc'ed is should/must have refcnt 1. */
return page;
}