Re: [PATCH net,v2] net: mana: Switch to page pool for jumbo frames
From: Shradha Gupta
Date: Thu Mar 27 2025 - 01:01:56 EST
On Tue, Mar 25, 2025 at 09:32:37AM -0700, Haiyang Zhang wrote:
> Frag allocators, such as netdev_alloc_frag(), were not designed to
> work for fragsz > PAGE_SIZE.
>
> So, switch to page pool for jumbo frames instead of using page frag
> allocators. This driver already uses page pool for smaller MTUs.
>
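(Some context for readers hitting this for the first time: the frag cache
refill path falls back to a single order-0 page when its high-order
allocation fails. A rough sketch of __page_frag_cache_refill(), not
verbatim, and details vary by kernel version:

	page = alloc_pages_node(NUMA_NO_NODE,
				gfp | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY,
				PAGE_FRAG_CACHE_MAX_ORDER);
	if (unlikely(!page))	/* fall back to one order-0 page */
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

After that fallback, a request for fragsz > PAGE_SIZE can no longer be
satisfied from one contiguous buffer, which is why the driver had to
re-check compound_order() after every netdev_alloc_frag() call below.)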
> Cc: stable@xxxxxxxxxxxxxxx
> Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> Signed-off-by: Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>
> ---
> v2: updated the commit msg as suggested by Jakub Kicinski.
>
> ---
> drivers/net/ethernet/microsoft/mana/mana_en.c | 46 ++++---------------
> 1 file changed, 9 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
> index 9a8171f099b6..4d41f4cca3d8 100644
> --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> @@ -661,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
>  	mpc->rxbpre_total = 0;
>
>  	for (i = 0; i < num_rxb; i++) {
> -		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
> -			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
> -			if (!va)
> -				goto error;
> -
> -			page = virt_to_head_page(va);
> -			/* Check if the frag falls back to single page */
> -			if (compound_order(page) <
> -			    get_order(mpc->rxbpre_alloc_size)) {
> -				put_page(page);
> -				goto error;
> -			}
> -		} else {
> -			page = dev_alloc_page();
> -			if (!page)
> -				goto error;
> +		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
> +		if (!page)
> +			goto error;
>
> -			va = page_to_virt(page);
> -		}
> +		va = page_to_virt(page);
>
>  		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
>  				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
>  		if (dma_mapping_error(dev, da)) {
> -			put_page(virt_to_head_page(va));
> +			put_page(page);
>  			goto error;
>  		}
>
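Not an objection, just a note for readers: dev_alloc_pages() takes a page
order, so get_order() rounds the request up to the next power-of-two
number of pages. On a 4K-page system, for example:

	get_order(4096);	/* 0: one 4K page */
	get_order(8192);	/* 1: two pages, 8K */
	get_order(12288);	/* 2: four pages, 16K (rounded up) */

So a pre-allocated jumbo buffer can occupy somewhat more memory than
rxbpre_alloc_size itself, matching what the order-configured page pool
below will do.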
> @@ -1672,7 +1658,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool,
>  }
>
>  static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
> -			     dma_addr_t *da, bool *from_pool, bool is_napi)
> +			     dma_addr_t *da, bool *from_pool)
>  {
>  	struct page *page;
>  	void *va;
> @@ -1683,21 +1669,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
>  	if (rxq->xdp_save_va) {
>  		va = rxq->xdp_save_va;
>  		rxq->xdp_save_va = NULL;
> -	} else if (rxq->alloc_size > PAGE_SIZE) {
> -		if (is_napi)
> -			va = napi_alloc_frag(rxq->alloc_size);
> -		else
> -			va = netdev_alloc_frag(rxq->alloc_size);
> -
> -		if (!va)
> -			return NULL;
> -
> -		page = virt_to_head_page(va);
> -		/* Check if the frag falls back to single page */
> -		if (compound_order(page) < get_order(rxq->alloc_size)) {
> -			put_page(page);
> -			return NULL;
> -		}
>  	} else {
>  		page = page_pool_dev_alloc_pages(rxq->page_pool);
>  		if (!page)
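With the frag path gone, every buffer here comes from the pool, which is
sized for rxq->alloc_size by the pprm.order change below, so the
compound_order() re-check is no longer needed. The release side is
unchanged: callers still track from_pool so pooled pages get recycled
instead of freed, along the lines of (a sketch; the driver's existing
cleanup paths may use different helpers):

	if (from_pool)
		page_pool_put_full_page(rxq->page_pool, page, true);
	else
		put_page(page);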
> @@ -1730,7 +1701,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
>  	dma_addr_t da;
>  	void *va;
>
> -	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
> +	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
>  	if (!va)
>  		return;
>
> @@ -2172,7 +2143,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
>  	if (mpc->rxbufs_pre)
>  		va = mana_get_rxbuf_pre(rxq, &da);
>  	else
> -		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
> +		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
>
>  	if (!va)
>  		return -ENOMEM;
> @@ -2258,6 +2229,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
>  	pprm.nid = gc->numa_node;
>  	pprm.napi = &rxq->rx_cq.napi;
>  	pprm.netdev = rxq->ndev;
> +	pprm.order = get_order(rxq->alloc_size);
>
>  	rxq->page_pool = page_pool_create(&pprm);
>
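For anyone unfamiliar with the knob: page_pool_params.order makes the pool
hand out compound pages of (1 << order) pages each, so this one line lets
the existing pool serve the jumbo-sized buffers the frag allocators used
to. A minimal setup sketch (hypothetical values, not the driver's actual
configuration):

	struct page_pool_params pprm = {
		.pool_size = 256,		/* hypothetical ring depth */
		.order = get_order(9216),	/* ~9K jumbo buffer: order 2, 16K */
		.nid = NUMA_NO_NODE,
	};
	struct page_pool *pool = page_pool_create(&pprm);
	struct page *page = page_pool_dev_alloc_pages(pool);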
> --
> 2.34.1

Reviewed-by: Shradha Gupta <shradhagupta@xxxxxxxxxxxxxxxxxxx>