[PATCH net-next 13/15] net/mlx5e: Pass netdev queue config to param calculations
From: Tariq Toukan
Date: Mon Feb 23 2026 - 15:51:13 EST
From: Dragos Tatulea <dtatulea@xxxxxxxxxx>
If set, take rx_page_size into consideration when calculating
the page shift in Multi Packet WQE mode.
The queue config is saved in the mlx5e_rq_opt_param struct which is
added to the mlx5e_channel_param struct. Now the configuration can be
read from the struct instead of adding it as an argument to all call
sites. For consistency, the queue config is assigned in
mlx5e_build_channel_param().
The queue configuration is read only from queue management ops
as that's the only place where it is currently useful. Furthermore,
netdev_queue_config() expects netdev->queue_mgmt_ops to be
set which is not always the case (representor netdevs).
Signed-off-by: Dragos Tatulea <dtatulea@xxxxxxxxxx>
Reviewed-by: Cosmin Ratiu <cratiu@xxxxxxxxxx>
Signed-off-by: Tariq Toukan <tariqt@xxxxxxxxxx>
---
.../ethernet/mellanox/mlx5/core/en/params.c | 14 ++++++++++++--
.../ethernet/mellanox/mlx5/core/en/params.h | 2 ++
.../net/ethernet/mellanox/mlx5/core/en_main.c | 19 ++++++++++++-------
3 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 97f5d1c2adea..304b46ecc8df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,7 @@
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#include <net/netdev_queues.h>
#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
@@ -24,11 +25,17 @@ static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
struct mlx5e_rq_opt_param *rqo)
{
+ struct netdev_queue_config *qcfg = rqo ? rqo->qcfg : NULL;
struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
u8 req_page_shift;
- req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ if (xsk)
+ req_page_shift = order_base_2(xsk->chunk_size);
+ else if (qcfg && qcfg->rx_page_size)
+ req_page_shift = order_base_2(qcfg->rx_page_size);
+ else
+ req_page_shift = PAGE_SHIFT;
/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
@@ -1283,12 +1290,15 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
+ cparam->rq_opt.qcfg = qcfg;
+
+ err = mlx5e_build_rq_param(mdev, params, &cparam->rq_opt, &cparam->rq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 4bce769d48ed..5b6d528bce9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -14,6 +14,7 @@ struct mlx5e_xsk_param {
struct mlx5e_rq_opt_param {
struct mlx5e_xsk_param *xsk;
+ struct netdev_queue_config *qcfg;
};
struct mlx5e_cq_param {
@@ -143,6 +144,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct mlx5e_channel_param *cparam);
void mlx5e_build_xsk_channel_param(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 336e384c143a..59e38e7e067e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2524,8 +2524,10 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param)
+static int mlx5e_open_rxq_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct mlx5e_rq_opt_param *rqo)
{
u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
@@ -2534,7 +2536,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, cpu_to_node(c->cpu),
+ return mlx5e_open_rq(params, rq_param, rqo, cpu_to_node(c->cpu),
q_counter, &c->rq);
}
@@ -2638,7 +2640,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq;
- err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq, &cparam->rq_opt);
if (err)
goto err_close_sqs;
@@ -2783,6 +2785,7 @@ static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
@@ -2816,7 +2819,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
goto err_free;
}
- err = mlx5e_build_channel_param(mdev, params, cparam);
+ err = mlx5e_build_channel_param(mdev, params, qcfg, cparam);
if (err)
goto err_free;
@@ -2941,7 +2944,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, NULL,
+ xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -5619,7 +5623,8 @@ static int mlx5e_queue_mem_alloc(struct net_device *dev,
goto unlock;
}
- err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+ err = mlx5e_open_channel(priv, queue_index, &params, qcfg, NULL,
+ &new->c);
unlock:
mutex_unlock(&priv->state_lock);
return err;
--
2.44.0