[PATCH RFCv5 7/8] page_pool: batch refilling pages to reduce atomic operations

From: Yunsheng Lin
Date: Fri Dec 13 2024 - 07:37:06 EST


Add a refill field to the alloc cache to hold batched refilled
pages, so that the atomic operation does not need to be done for
each page.

Testing shows about a 10ns performance improvement for the
'time_bench_page_pool02_ptr_ring' test case.
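
To illustrate the idea outside the kernel, here is a minimal,
hypothetical C11 sketch of batched refilling; the names ring_list,
ring_count, local_refill and refill_batch are invented for this
example and do not correspond to the page_pool code. The shared list
is grabbed with a single exchange and the shared counter is updated
once per batch rather than once per page:

/* Hypothetical userspace sketch of batched refilling (C11 atomics). */
#include <stdatomic.h>
#include <stddef.h>

struct item {
	struct item *next;
};

static _Atomic(struct item *) ring_list;	/* shared free list */
static atomic_int ring_count;			/* shared item counter */
static struct item *local_refill;		/* consumer-local batch */

/* Take up to 'budget' items, touching the shared counter only once. */
static int refill_batch(struct item **out, int budget)
{
	int taken = 0;

	/* Grab the whole shared list in one shot instead of per item. */
	if (!local_refill)
		local_refill = atomic_exchange(&ring_list, NULL);

	while (local_refill && taken < budget) {
		out[taken++] = local_refill;
		local_refill = local_refill->next;
	}

	/* One atomic update for the whole batch, not one per item. */
	if (taken)
		atomic_fetch_sub(&ring_count, taken);

	return taken;
}

The patch applies the same pattern to pool->alloc.refill and
pool->ring.count, deferring the count update to a single atomic_sub()
of pool->alloc.count.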

CC: Robin Murphy <robin.murphy@xxxxxxx>
CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
CC: IOMMU <iommu@xxxxxxxxxxxxxxx>
Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
---
include/net/page_pool/types.h | 5 +++++
net/core/page_pool.c | 25 +++++++++++++++++++++----
2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 0a7309d9ff1a..be50a25c4aa0 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -51,6 +51,11 @@
struct pp_alloc_cache {
u32 count;
netmem_ref cache[PP_ALLOC_CACHE_SIZE];
+
+ /* Keep batched refilled pages here to avoid doing the atomic operation
+ * for each page.
+ */
+ struct page_pool_item *refill;
};

/**
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index dcda837392bf..ab832bfa004b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -673,11 +673,13 @@ static void __page_pool_return_page(struct page_pool *pool, netmem_ref netmem,

static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
+ struct page_pool_item *refill;
netmem_ref netmem;
int pref_nid; /* preferred NUMA node */

/* Quicker fallback, avoid locks when ring is empty */
- if (unlikely(!READ_ONCE(pool->ring.list))) {
+ refill = pool->alloc.refill;
+ if (unlikely(!refill && !READ_ONCE(pool->ring.list))) {
alloc_stat_inc(pool, empty);
return 0;
}
@@ -694,10 +696,14 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)

/* Refill alloc array, but only if NUMA match */
do {
- netmem = page_pool_consume_ring(pool);
- if (unlikely(!netmem))
- break;
+ if (unlikely(!refill)) {
+ refill = xchg(&pool->ring.list, NULL);
+ if (!refill)
+ break;
+ }

+ netmem = refill->pp_netmem;
+ refill = page_pool_item_get_next(refill);
if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
pool->alloc.cache[pool->alloc.count++] = netmem;
} else {
@@ -707,14 +713,18 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
* This limit stress on page buddy alloactor.
*/
__page_pool_return_page(pool, netmem, false);
+ atomic_dec(&pool->ring.count);
alloc_stat_inc(pool, waive);
netmem = 0;
break;
}
} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

+ pool->alloc.refill = refill;
+
/* Return last page */
if (likely(pool->alloc.count > 0)) {
+ atomic_sub(pool->alloc.count, &pool->ring.count);
netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, refill);
}
@@ -1371,6 +1381,7 @@ static void __page_pool_destroy(struct page_pool *pool)

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
+ struct page_pool_item *refill;
netmem_ref netmem;

if (pool->destroy_cnt)
@@ -1384,6 +1395,12 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
netmem = pool->alloc.cache[--pool->alloc.count];
page_pool_return_page(pool, netmem);
}
+
+ while ((refill = pool->alloc.refill)) {
+ pool->alloc.refill = page_pool_item_get_next(refill);
+ page_pool_return_page(pool, refill->pp_netmem);
+ atomic_dec(&pool->ring.count);
+ }
}

static void page_pool_scrub(struct page_pool *pool)
--
2.33.0