Re: [PATCH net-next 5/5] net/mlx5e: XDP, Use page fragments for linear data in multibuf-mode
From: Jakub Kicinski
Date: Mon Mar 23 2026 - 22:45:11 EST
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
net/mlx5e: XDP, Use page fragments for linear data in multibuf-mode
This commit optimizes XDP multi-buffer mode by splitting pages into fixed-
size fragments instead of allocating whole pages for the linear part of XDP
buffers. This reduces allocations significantly, especially on systems with
larger page sizes, improving performance by 15.80% on 4K pages and 31.62%
on 64K pages for XDP_DROP tests.
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
> index 592234780f2b..2270e2e550dd 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
[ ... ]
> @@ -1081,6 +1085,8 @@ bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
> bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
> bool dim_enabled, bool keep_dim_state);
>
> +void mlx5e_mpwqe_dealloc_linear_page(struct mlx5e_rq *rq);
> +
> struct mlx5e_sq_param;
> int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
> struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
[ ... ]
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> index 8b3c82f6f038..b376abc561fd 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
[ ... ]
> @@ -385,10 +385,22 @@ static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
> if (!li)
> return -ENOMEM;
>
> + linear_frag_count =
> + BIT(rq->mpwqe.page_shift - MLX5E_XDP_LOG_MAX_LINEAR_SZ);
> + if (linear_frag_count > U16_MAX) {
> + netdev_warn(rq->netdev,
> + "rq %d: linear_frag_count (%u) larger than expected (%u), page_shift: %u, log_max_linear_sz: %u\n",
> + rq->ix, linear_frag_count, U16_MAX,
> + rq->mpwqe.page_shift, MLX5E_XDP_LOG_MAX_LINEAR_SZ);
> + kvfree(li);
> + return -EINVAL;
> + }
> +
> + li->max_frags = linear_frag_count;
> rq->mpwqe.linear_info = li;
>
> - /* additional page per packet for the linear part */
> - *pool_size *= 2;
> + /* Set to max to force allocation on first run. */
> + li->frag_page.frags = li->max_frags;
>
> return 0;
> }
[ ... ]
> @@ -1347,6 +1358,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
> mlx5_wq_ll_pop(wq, wqe_ix_be,
> &wqe->next.next_wqe_index);
> }
> +
> + mlx5e_mpwqe_dealloc_linear_page(rq);
> } else {
> struct mlx5_wq_cyc *wq = &rq->wqe.wq;
> u16 missing = mlx5_wq_cyc_missing(wq);
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
> index feb042d84b8e..2ac38536afe9 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
[ ... ]
> @@ -300,6 +300,35 @@ static void mlx5e_page_release_fragmented(struct page_pool *pp,
> page_pool_put_unrefed_netmem(pp, netmem, -1, true);
> }
>
> +static int mlx5e_mpwqe_linear_page_refill(struct mlx5e_rq *rq)
> +{
> + struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
> +
> + if (likely(li->frag_page.frags < li->max_frags))
> + return 0;
> +
> + if (likely(li->frag_page.netmem)) {
> + mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
> + li->frag_page.netmem = 0;
> + }
> +
> + return mlx5e_page_alloc_fragmented(rq->page_pool, &li->frag_page);
> +}
> +
> +static void *mlx5e_mpwqe_get_linear_page_frag(struct mlx5e_rq *rq)
> +{
> + struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
> + u32 frag_offset;
> +
> + if (unlikely(mlx5e_mpwqe_linear_page_refill(rq)))
> + return NULL;
> +
> + frag_offset = li->frag_page.frags << MLX5E_XDP_LOG_MAX_LINEAR_SZ;
> + WARN_ON(frag_offset >= BIT(rq->mpwqe.page_shift));
> +
> + return netmem_address(li->frag_page.netmem) + frag_offset;
> +}
> +
[ ... ]
> @@ -702,6 +731,16 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
> bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
> }
>
> +void mlx5e_mpwqe_dealloc_linear_page(struct mlx5e_rq *rq)
> +{
> + struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
> +
> + if (!li || !li->frag_page.netmem)
> + return;
> +
> + mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
> +}
Does this function need to reset li->frag_page.netmem to 0 and
li->frag_page.frags to li->max_frags after releasing the page?
When an RQ is flushed and then reactivated (for example, in error recovery
via mlx5e_rx_reporter_err_rq_cqe_recover()), the following sequence occurs:
mlx5e_rx_reporter_err_rq_cqe_recover()
mlx5e_flush_rq()
mlx5e_free_rx_descs()
mlx5e_mpwqe_dealloc_linear_page()
mlx5e_page_release_fragmented() [releases the page]
[but doesn't reset netmem or frags]
mlx5e_activate_rq()
[RQ becomes active again]
When the next XDP multi-buffer packet arrives, mlx5e_mpwqe_get_linear_page_frag()
is called:
mlx5e_skb_from_cqe_mpwrq_nonlinear()
mlx5e_mpwqe_get_linear_page_frag()
mlx5e_mpwqe_linear_page_refill()
if (likely(li->frag_page.frags < li->max_frags))
return 0; [skips allocation if frags < max_frags]
netmem_address(li->frag_page.netmem) + frag_offset [use-after-free (UAF)]
If li->frag_page.frags was not reset to li->max_frags in
mlx5e_mpwqe_dealloc_linear_page(), the refill function will see frags <
max_frags and skip the re-allocation, and the caller will then compute an
address from the stale netmem pointer — a page that was already returned
to the page pool.
Shouldn't this match the initialization in mlx5e_rq_alloc_mpwqe_linear_info()
which sets li->frag_page.frags = li->max_frags?
--
pw-bot: cr