[PATCH net-next v7 5/8] page_pool: skip dma sync operation for inflight pages

From: Yunsheng Lin
Date: Fri Jan 10 2025 - 08:15:53 EST


Skip the dma sync operation for inflight pages before
page_pool_destroy() returns to the driver, as the DMA API
expects to be called with a valid device bound to a
driver, as mentioned in [1].

After page_pool_destroy() is called, a page is not
expected to be recycled back to the pool->alloc cache,
and the dma sync operation is not needed when the page
is not recyclable or pool->ring is full, so only skip
the dma sync operation for the inflight pages by
clearing pool->dma_sync. The synchronize_rcu() in
page_pool_destroy() is paired with the rcu lock in
page_pool_recycle_in_ring() to ensure that no dma sync
operation is called after page_pool_destroy() returns.
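
In short, the pairing works as in the simplified sketch
below (illustrative only, not verbatim from the diff):

  /* recycle path (reader side) */
  rcu_read_lock();
  if (pool->dma_sync)	/* recheck under the rcu lock */
          __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
  rcu_read_unlock();

  /* destroy path (writer side) */
  pool->dma_sync = false;
  synchronize_rcu();	/* wait for readers already syncing */
  /* no new dma sync can start for this pool from here on */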

1. https://lore.kernel.org/all/caf31b5e-0e8f-4844-b7ba-ef59ed13b74e@xxxxxxx/
CC: Robin Murphy <robin.murphy@xxxxxxx>
CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
CC: IOMMU <iommu@xxxxxxxxxxxxxxx>
Fixes: f71fec47c2df ("page_pool: make sure struct device is stable")
Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
---
net/core/page_pool.c | 57 ++++++++++++++++++++++++++++++++------------
1 file changed, 42 insertions(+), 15 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f65d946e964b..232ab56f7fac 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -280,9 +280,6 @@ static int page_pool_init(struct page_pool *pool,
/* Driver calling page_pool_create() also call page_pool_destroy() */
refcount_set(&pool->user_cnt, 1);

- if (pool->dma_map)
- get_device(pool->p.dev);
-
if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
/* We rely on rtnl_lock()ing to make sure netdev_rx_queue
* configuration doesn't change while we're initializing
@@ -323,9 +320,6 @@ static void page_pool_uninit(struct page_pool *pool)
{
ptr_ring_cleanup(&pool->ring, NULL);

- if (pool->dma_map)
- put_device(pool->p.dev);
-
#ifdef CONFIG_PAGE_POOL_STATS
if (!pool->system)
free_percpu(pool->recycle_stats);
@@ -755,6 +749,25 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
}

+static __always_inline void
+page_pool_dma_sync_for_device_rcu(const struct page_pool *pool,
+ netmem_ref netmem,
+ u32 dma_sync_size)
+{
+ if (!pool->dma_sync)
+ return;
+
+ rcu_read_lock();
+
+ /* Recheck pool->dma_sync under the rcu read lock to pair with the
+ * synchronize_rcu() in page_pool_destroy().
+ */
+ if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+ __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
+
+ rcu_read_unlock();
+}
+
static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
{
struct page_pool_item *item;
@@ -1016,7 +1029,8 @@ static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
rcu_read_unlock();
}

-static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size)
{
int ret;
/* BH protection not needed if current is softirq */
@@ -1025,12 +1039,12 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem)
else
ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);

- if (!ret) {
+ if (likely(!ret)) {
+ page_pool_dma_sync_for_device_rcu(pool, netmem, dma_sync_size);
recycle_stat_inc(pool, ring);
- return true;
}

- return false;
+ return !ret;
}

/* Only allow direct recycling in special circumstances, into the
@@ -1083,10 +1097,10 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
if (likely(__page_pool_page_can_be_recycled(netmem))) {
/* Read barrier done in page_ref_count / READ_ONCE */

- page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
-
- if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
+ if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) {
+ page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
return 0;
+ }

/* Page found as candidate for recycling */
return netmem;
@@ -1149,7 +1163,7 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,

netmem =
__page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
- if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
+ if (netmem && !page_pool_recycle_in_ring(pool, netmem, dma_sync_size)) {
/* Cache full, fallback to free pages */
recycle_stat_inc(pool, ring_full);
page_pool_return_page(pool, netmem);
@@ -1175,14 +1189,17 @@ static void page_pool_recycle_ring_bulk(struct page_pool *pool,
/* Bulk produce into ptr_ring page_pool cache */
in_softirq = page_pool_producer_lock(pool);

+ rcu_read_lock();
for (i = 0; i < bulk_len; i++) {
if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {
/* ring full */
recycle_stat_inc(pool, ring_full);
break;
}
+ page_pool_dma_sync_for_device(pool, (__force netmem_ref)bulk[i],
+ -1);
}
-
+ rcu_read_unlock();
page_pool_producer_unlock(pool, in_softirq);
recycle_stat_add(pool, ring, i);

@@ -1489,6 +1506,16 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_release(pool))
return;

+ /* After page_pool_destroy() is called, the page is not expected to be
+ * recycled back to the pool->alloc cache, and the dma sync operation is
+ * not needed when the page is not recyclable or pool->ring is full, so
+ * only skip the dma sync operation for the inflight pages by clearing
+ * pool->dma_sync. The synchronize_rcu() here is paired with the rcu
+ * lock in page_pool_recycle_in_ring() to ensure that no dma sync
+ * operation is called after page_pool_destroy() returns.
+ */
+ pool->dma_sync = false;
+
/* Paired with rcu lock in page_pool_napi_local() to enable clearing
* of pool->p.napi in page_pool_disable_direct_recycling() is seen
* before returning to driver to free the napi instance.
--
2.33.0