[PATCH net-next 06/15] net/mlx5e: Move xsk param into new option container struct

From: Tariq Toukan

Date: Mon Feb 23 2026 - 15:47:03 EST


From: Dragos Tatulea <dtatulea@xxxxxxxxxx>

The xsk parameter configuration (struct mlx5e_xsk_param) is passed
around to many places during parameter calculation. It is used to
contain channel-specific information (as opposed to the global info
from struct mlx5e_params).

Upcoming changes will need to pass similar channel-specific rq
configuration. Instead of adding one more parameter to all these
functions, create a new container structure that holds optional rq
specific parameters. The xsk parameter will be the first of its kind.

The new container struct is itself optional. That means that before
checking its members, it has to be checked itself for validity.

This patch has no functional changes.

Signed-off-by: Dragos Tatulea <dtatulea@xxxxxxxxxx>
Reviewed-by: Cosmin Ratiu <cratiu@xxxxxxxxxx>
Signed-off-by: Tariq Toukan <tariqt@xxxxxxxxxx>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +-
.../ethernet/mellanox/mlx5/core/en/params.c | 192 ++++++++++--------
.../ethernet/mellanox/mlx5/core/en/params.h | 38 ++--
.../net/ethernet/mellanox/mlx5/core/en/xdp.c | 5 +-
.../net/ethernet/mellanox/mlx5/core/en/xdp.h | 3 +-
.../ethernet/mellanox/mlx5/core/en/xsk/pool.c | 6 +-
.../mellanox/mlx5/core/en/xsk/setup.c | 31 +--
.../mellanox/mlx5/core/en/xsk/setup.h | 2 +-
.../net/ethernet/mellanox/mlx5/core/en_main.c | 33 +--
9 files changed, 185 insertions(+), 128 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 550426979627..5181d6ab39ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1060,8 +1060,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_xsk_param;

struct mlx5e_rq_param;
+struct mlx5e_rq_opt_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
- struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
+ struct mlx5e_rq_opt_param *rqo, int node, u16 q_counter,
struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index ef88097c1d4d..97f5d1c2adea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -21,10 +21,14 @@ static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
return min_page_shift ? : 12;
}

-u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo)
{
- u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+ u8 req_page_shift;
+
+ req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;

/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
@@ -34,7 +38,8 @@ u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xs
}

enum mlx5e_mpwrq_umr_mode
-mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo)
{
/* Different memory management schemes use different mechanisms to map
* user-mode memory. The stricter guarantees we have, the faster
@@ -45,7 +50,8 @@ mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
* 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
* mappings can have different sizes.
*/
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
bool unaligned = xsk ? xsk->unaligned : false;
bool oversized = false;

@@ -225,12 +231,12 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
u16 headroom;

- if (xsk)
- return xsk->headroom;
+ if (mlx5e_rqo_xsk_param(rqo))
+ return rqo->xsk->headroom;

headroom = NET_IP_ALIGN;
if (params->xdp_prog)
@@ -263,19 +269,23 @@ static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
bool mpwqe)
{
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
bool no_head_tail_room;
u32 sz;

/* XSK frames are mapped as individual pages, because frames may come in
* an arbitrary order from random locations in the UMEM.
*/
- if (xsk)
- return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
+ if (xsk) {
+ return mpwqe ?
+ BIT(mlx5e_mpwrq_page_shift(mdev, rqo)) : PAGE_SIZE;
+ }

- no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+ no_head_tail_room = params->xdp_prog && mpwqe &&
+ !mlx5e_rx_is_linear_skb(mdev, params, rqo);

/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
* no_head_tail_room should be set in the case of XDP with Striding RQ
@@ -291,11 +301,12 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u32 linear_stride_sz =
+ mlx5e_rx_get_linear_stride_sz(mdev, params, rqo, true);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);

return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
order_base_2(linear_stride_sz);
@@ -303,8 +314,10 @@ static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;

@@ -315,7 +328,7 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
- if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
+ if (mlx5e_rx_get_linear_sz_skb(params, !!xsk) > PAGE_SIZE)
return false;

/* XSK frames must be big enough to hold the packet data. */
@@ -349,12 +362,14 @@ static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,

bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
- u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 log_wqe_num_of_strides =
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo);
+ u8 log_wqe_stride_size =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);

return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
log_wqe_num_of_strides,
@@ -363,18 +378,20 @@ bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u32 linear_stride_sz =
+ mlx5e_rx_get_linear_stride_sz(mdev, params, rqo, true);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_num_strides;
u8 log_stride_sz;
u8 log_wqe_sz;

- if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
+ if (!mlx5e_rx_is_linear_skb(mdev, params, rqo))
return false;

- log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ log_stride_sz = order_base_2(linear_stride_sz);
log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);

if (log_wqe_sz < log_stride_sz)
@@ -389,13 +406,13 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
u8 log_pkts_per_wqe, page_shift, max_log_rq_size;

- log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
- page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, rqo);
+ page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);

/* Numbers are unsigned, don't subtract to avoid underflow. */
@@ -423,10 +440,11 @@ static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
- return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo))
+ return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params,
+ rqo, true));

/* XDP in mlx5e doesn't support multiple packets per page. */
if (params->xdp_prog)
@@ -437,17 +455,18 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_wqe_size, log_stride_size;

log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
- log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
WARN(log_wqe_size < log_stride_size,
"Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
- log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+ log_wqe_size, log_stride_size, page_shift, umr_mode,
+ rqo && rqo->xsk);
return log_wqe_size - log_stride_size;
}

@@ -459,14 +478,14 @@ u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, rqo);

if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
return linear_headroom;

- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo))
return linear_headroom;

if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
@@ -535,10 +554,11 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u16 max_mtu_pkts;

if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
@@ -547,7 +567,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
return -EOPNOTSUPP;
}

- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo)) {
mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
return -EINVAL;
}
@@ -559,7 +579,8 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
if (params->log_rq_mtu_frames > max_mtu_pkts) {
mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
- 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+ 1 << params->log_rq_mtu_frames,
+ xsk->chunk_size);
return -EINVAL;
}

@@ -672,7 +693,7 @@ static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_frags_info *info,
u32 *xdp_frag_size)
{
@@ -684,10 +705,11 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;

- if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+ if (mlx5e_rx_is_linear_skb(mdev, params, rqo)) {
int frag_stride;

- frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
+ frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, rqo,
+ false);

info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
@@ -703,7 +725,7 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
goto out;
}

- headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ headroom = mlx5e_get_linear_rq_headroom(params, rqo);
first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
@@ -819,12 +841,13 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params,
+ rqo));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
- int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo));
int wqe_size = BIT(log_stride_sz) * num_strides;
int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;

@@ -836,7 +859,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_cq_param *param)
{
bool hw_stridx = false;
@@ -847,10 +870,13 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
- log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
+ log_cq_size =
+ mlx5e_shampo_get_log_cq_size(mdev, params, rqo);
else
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ log_cq_size =
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params,
+ rqo);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -882,22 +908,22 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param

static int mlx5e_mpwqe_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *rq_param)
{
- u8 log_rq_sz = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_rq_sz = mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 log_wqe_num_of_strides, log_wqe_stride_size;
enum mlx5e_mpwrq_umr_mode umr_mode;
void *rqc = rq_param->rqc;
u32 lro_timeout;
void *wq;

- log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params,
- xsk);
- log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params,
- xsk);
- umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ log_wqe_num_of_strides =
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo);
+ log_wqe_stride_size =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
+ umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);

wq = MLX5_ADDR_OF(rqc, rqc, wq);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
@@ -940,7 +966,7 @@ static int mlx5e_mpwqe_build_rq_param(struct mlx5_core_dev *mdev,

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *rq_param)
{
void *rqc = rq_param->rqc;
@@ -952,13 +978,13 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,

switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- err = mlx5e_mpwqe_build_rq_param(mdev, params, xsk, rq_param);
+ err = mlx5e_mpwqe_build_rq_param(mdev, params, rqo, rq_param);
if (err)
return err;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- err = mlx5e_build_rq_frags_info(mdev, params, xsk,
+ err = mlx5e_build_rq_frags_info(mdev, params, rqo,
&rq_param->frags_info,
&rq_param->xdp_frag_size);
if (err)
@@ -975,7 +1001,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(mdev, params, xsk, &rq_param->cqp);
+ mlx5e_build_rx_cq_param(mdev, params, rqo, &rq_param->cqp);

return 0;
}
@@ -1105,20 +1131,22 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)

static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
u8 umr_wqebbs;

umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);

- return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+ return umr_wqebbs *
+ (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo));
}

static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
+ struct mlx5e_rq_opt_param rqo = {0};
struct mlx5e_xsk_param xsk = {0};
u32 max_xsk_wqebbs = 0;
u8 frame_shift;
@@ -1126,6 +1154,8 @@ static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
if (!params->xdp_prog)
return 0;

+ rqo.xsk = &xsk;
+
/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
* both regular RQ and XSK RQ.
@@ -1145,24 +1175,24 @@ static u32 mlx5e_max_xsk_wqebbs(struct mlx5_core_dev *mdev,
/* XSK aligned mode. */
xsk.chunk_size = 1 << frame_shift;
xsk.unaligned = false;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);

/* XSK unaligned mode, frame size is a power of two. */
xsk.unaligned = true;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);

/* XSK unaligned mode, frame size is not equal to stride
* size.
*/
xsk.chunk_size -= 1;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);

/* XSK unaligned mode, frame size is a triple power of two. */
xsk.chunk_size = (1 << frame_shift) / 4 * 3;
- total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk);
+ total_wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &rqo);
max_xsk_wqebbs = max(max_xsk_wqebbs, total_wqebbs);
}

@@ -1278,7 +1308,7 @@ void mlx5e_build_xsk_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
struct mlx5e_channel_param *cparam)
{
- cparam->xsk = xsk;
- mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
+ cparam->rq_opt.xsk = xsk;
+ mlx5e_build_rq_param(mdev, params, &cparam->rq_opt, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index c132649dd9f2..4bce769d48ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -12,6 +12,10 @@ struct mlx5e_xsk_param {
bool unaligned;
};

+struct mlx5e_rq_opt_param {
+ struct mlx5e_xsk_param *xsk;
+};
+
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -38,11 +42,11 @@ struct mlx5e_sq_param {

struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
+ struct mlx5e_rq_opt_param rq_opt;
struct mlx5e_sq_param txq_sq;
struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_sq_param async_icosq;
- struct mlx5e_xsk_param *xsk;
};

struct mlx5e_create_sq_param {
@@ -57,9 +61,11 @@ struct mlx5e_create_sq_param {

/* Striding RQ dynamic parameters */

-u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo);
enum mlx5e_mpwrq_umr_mode
-mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
@@ -81,22 +87,22 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
@@ -106,21 +112,21 @@ u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_rq_param *param);
@@ -148,7 +154,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
+ struct mlx5e_rq_opt_param *rqo);

static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
@@ -164,4 +170,10 @@ static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
"enhanced" : "basic");
};

+static inline struct mlx5e_xsk_param *
+mlx5e_rqo_xsk_param(struct mlx5e_rq_opt_param *rqo)
+{
+ return rqo ? rqo->xsk : NULL;
+}
+
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 80f9fc10877a..04e1b5fa4825 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -37,9 +37,10 @@
#include <linux/bitfield.h>
#include <net/page_pool/helpers.h>

-int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params,
+ struct mlx5e_rq_opt_param *rqo)
{
- int hr = mlx5e_get_linear_rq_headroom(params, xsk);
+ int hr = mlx5e_get_linear_rq_headroom(params, rqo);

/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
* The condition checked in mlx5e_rx_is_linear_skb is:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 46ab0a9e8cdd..3c54f8962664 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -96,7 +96,8 @@ union mlx5e_xdp_info {
};

struct mlx5e_xsk_param;
-int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params,
+ struct mlx5e_rq_opt_param *rqo);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 92bcf16a2019..565e5c4ddcce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -80,6 +80,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_channel_param *cparam;
+ enum mlx5e_mpwrq_umr_mode umr_mode;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
int err;
@@ -105,8 +106,9 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
mlx5e_build_xsk_param(pool, &xsk);
mlx5e_build_xsk_channel_param(priv->mdev, params, &xsk, cparam);

+ umr_mode = mlx5e_mpwrq_umr_mode(priv->mdev, &cparam->rq_opt);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
+ umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
const char *recommendation = is_power_of_2(xsk.chunk_size) ?
"Upgrade firmware" : "Disable striding RQ";

@@ -163,7 +165,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
/* Check the configuration in advance, rather than fail at a later stage
* (in mlx5e_xdp_set or on open) and end up with no channels.
*/
- if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
+ if (!mlx5e_validate_xsk_param(params, &cparam->rq_opt, priv->mdev)) {
err = -EINVAL;
goto err_remove_pool;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 03f1be361701..11500fd213a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -9,9 +9,9 @@

static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_rq_opt_param *rqo)
{
- if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+ if (!mlx5e_rx_is_linear_skb(mdev, params, rqo)) {
mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
return -EINVAL;
}
@@ -25,9 +25,14 @@ static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5_core_dev *mdev)
{
+ struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
+
+ if (WARN_ON(!xsk))
+ return false;
+
/* AF_XDP doesn't support frames larger than PAGE_SIZE,
* and xsk->chunk_size is limited to 65535 bytes.
*/
@@ -42,9 +47,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
*/
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
+ return !mlx5e_mpwrq_validate_xsk(mdev, params, rqo);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
+ return !mlx5e_legacy_rq_validate_xsk(mdev, params, rqo);
}
}

@@ -83,19 +88,20 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param,
- struct xsk_buff_pool *pool,
- struct mlx5e_xsk_param *xsk)
+ struct mlx5e_channel_param *cparam,
+ struct xsk_buff_pool *pool)
{
+ struct mlx5e_rq_param *rq_param = &cparam->rq;
+ struct mlx5e_rq_opt_param *rqo = &cparam->rq_opt;
u16 q_counter = c->priv->q_counter[c->sd_ix];
struct mlx5e_rq *xskrq = &c->xskrq;
int err;

- err = mlx5e_init_xsk_rq(c, params, pool, xsk, xskrq);
+ err = mlx5e_init_xsk_rq(c, params, pool, rqo->xsk, xskrq);
if (err)
return err;

- err = mlx5e_open_rq(params, rq_param, xsk, cpu_to_node(c->cpu),
+ err = mlx5e_open_rq(params, rq_param, rqo, cpu_to_node(c->cpu),
q_counter, xskrq);
if (err)
return err;
@@ -109,13 +115,12 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct xsk_buff_pool *pool,
struct mlx5e_channel *c)
{
- struct mlx5e_xsk_param *xsk = cparam->xsk;
struct mlx5e_create_cq_param ccp;
int err;

mlx5e_build_create_cq_param(&ccp, c);

- if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
+ if (!mlx5e_validate_xsk_param(params, &cparam->rq_opt, priv->mdev))
return -EINVAL;

err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
@@ -123,7 +128,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
return err;

- err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
+ err = mlx5e_open_xsk_rq(c, params, cparam, pool);
if (unlikely(err))
goto err_close_rx_cq;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index fc86d19ea2b3..664ec78192c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -9,7 +9,7 @@
struct mlx5e_xsk_param;

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_opt_param *rqo,
struct mlx5_core_dev *mdev);
struct mlx5e_channel_param;
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 35b767105492..9e406275e243 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -851,8 +851,8 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
}

static int mlx5e_alloc_rq(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rq_param,
+ struct mlx5e_rq_opt_param *rqo,
int node, struct mlx5e_rq *rq)
{
void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
@@ -871,7 +871,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);

rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, rqo);
pool_size = 1 << params->log_rq_mtu_frames;

rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
@@ -891,8 +891,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,

wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

- rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
- rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
+ rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
rq->mpwqe.pages_per_wqe =
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode);
@@ -904,14 +904,17 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->mpwqe.umr_mode);

pool_size = rq->mpwqe.pages_per_wqe <<
- mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);

- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
+ params->xdp_prog)
pool_size *= 2; /* additional page per packet for the linear part */

- rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ rq->mpwqe.log_stride_sz =
+ mlx5e_mpwqe_get_log_stride_size(mdev, params,
+ rqo);
rq->mpwqe.num_strides =
- BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo));
rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);

rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
@@ -947,7 +950,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
goto err_rq_wq_destroy;
}

- if (xsk) {
+ if (mlx5e_rqo_xsk_param(rqo)) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
if (err)
@@ -1324,7 +1327,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}

int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
- struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
+ struct mlx5e_rq_opt_param *rqo, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1333,7 +1336,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);

- err = mlx5e_alloc_rq(params, xsk, rq_param, node, rq);
+ err = mlx5e_alloc_rq(params, rq_param, rqo, node, rq);
if (err)
return err;

@@ -4587,6 +4590,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
for (ix = 0; ix < chs->params.num_channels; ix++) {
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
+ struct mlx5e_rq_opt_param rqo = {0};
struct mlx5e_xsk_param xsk;
int max_xdp_mtu;

@@ -4594,12 +4598,13 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
continue;

mlx5e_build_xsk_param(xsk_pool, &xsk);
- max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
+ rqo.xsk = &xsk;
+ max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &rqo);

/* Validate XSK params and XDP MTU in advance */
- if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+ if (!mlx5e_validate_xsk_param(new_params, &rqo, mdev) ||
new_params->sw_mtu > max_xdp_mtu) {
- u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
+ u32 hr = mlx5e_get_linear_rq_headroom(new_params, &rqo);
int max_mtu_frame, max_mtu_page, max_mtu;

/* Two criteria must be met:
--
2.44.0