[PATCH net-next 03/15] net/mlx5e: Extract max_xsk_wqebbs into its own function

From: Tariq Toukan

Date: Mon Feb 23 2026 - 15:45:29 EST


From: Dragos Tatulea <dtatulea@xxxxxxxxxx>

The calculation of max_xsk_wqebbs is large enough to deserve its own
function. It will make upcoming changes easier.

This patch has no functional changes.

Signed-off-by: Dragos Tatulea <dtatulea@xxxxxxxxxx>
Reviewed-by: Cosmin Ratiu <cratiu@xxxxxxxxxx>
Signed-off-by: Tariq Toukan <tariqt@xxxxxxxxxx>
---
.../ethernet/mellanox/mlx5/core/en/params.c | 94 ++++++++++---------
1 file changed, 52 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 07d75a85ee7f..be1aa37531de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1116,18 +1116,15 @@ static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param)
+static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
- u32 wqebbs, total_pages, useful_space;
-
- /* MLX5_WQ_TYPE_CYCLIC */
- if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ struct mlx5e_xsk_param xsk = {0};
+ u32 max_xsk_wqebbs = 0;
+ u8 frame_shift;

- /* UMR WQEs for the regular RQ. */
- wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
+ if (!params->xdp_prog)
+ return 0;

/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
@@ -1139,41 +1136,54 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
* from capabilities. Hence, we have to try all valid values of XSK
* frame size (and page_shift) to find the maximum.
*/
- if (params->xdp_prog) {
- u32 max_xsk_wqebbs = 0;
- u8 frame_shift;
-
- for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
- frame_shift <= PAGE_SHIFT; frame_shift++) {
- /* The headroom doesn't affect the calculation. */
- struct mlx5e_xsk_param xsk = {
- .chunk_size = 1 << frame_shift,
- .unaligned = false,
- };
-
- /* XSK aligned mode. */
- max_xsk_wqebbs = max(max_xsk_wqebbs,
- mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
- /* XSK unaligned mode, frame size is a power of two. */
- xsk.unaligned = true;
- max_xsk_wqebbs = max(max_xsk_wqebbs,
- mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
- /* XSK unaligned mode, frame size is not equal to stride size. */
- xsk.chunk_size -= 1;
- max_xsk_wqebbs = max(max_xsk_wqebbs,
- mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
-
- /* XSK unaligned mode, frame size is a triple power of two. */
- xsk.chunk_size = (1 << frame_shift) / 4 * 3;
- max_xsk_wqebbs = max(max_xsk_wqebbs,
- mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
- }
+ for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
+ frame_shift <= PAGE_SHIFT; frame_shift++) {
+ u32 total_wqebbs;

- wqebbs += max_xsk_wqebbs;
+ /* The headroom doesn't affect the calculations below. */
+
+ /* XSK aligned mode. */
+ xsk.chunk_size = 1 << frame_shift;
+ xsk.unaligned = false;
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+ /* XSK unaligned mode, frame size is a power of two. */
+ xsk.unaligned = true;
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+ /* XSK unaligned mode, frame size is not equal to stride
+ * size.
+ */
+ xsk.chunk_size -= 1;
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
+
+ /* XSK unaligned mode, frame size is a triple power of two. */
+ xsk.chunk_size = (1 << frame_shift) / 4 * 3;
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
}

+ return max_xsk_wqebbs;
+}
+
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ u32 wqebbs, total_pages, useful_space;
+
+ /* MLX5_WQ_TYPE_CYCLIC */
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+
+ /* UMR WQEs for the regular RQ. */
+ wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
+
+ wqebbs += mlx5e_max_xsk_wqebbs(mdev, params);
+
/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
* This padding is always smaller than the max WQE size. That gives us
* at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
--
2.44.0