[PATCH net-next 1/7] net/mlx5: Lag: refactor representor reload handling

From: Tariq Toukan

Date: Thu Apr 09 2026 - 08:03:01 EST


From: Mark Bloch <mbloch@xxxxxxxxxx>

Representor reload during LAG/MPESW transitions has to be repeated in
several flows, and each open-coded loop was easy to get out of sync
when adding new flags or tweaking error handling. Move the sequencing
into a single helper so that all call sites share the same ordering
and checks.

Signed-off-by: Mark Bloch <mbloch@xxxxxxxxxx>
Reviewed-by: Shay Drori <shayd@xxxxxxxxxx>
Signed-off-by: Tariq Toukan <tariqt@xxxxxxxxxx>
---
.../net/ethernet/mellanox/mlx5/core/lag/lag.c | 44 +++++++++++--------
.../net/ethernet/mellanox/mlx5/core/lag/lag.h | 1 +
.../ethernet/mellanox/mlx5/core/lag/mpesw.c | 12 ++---
3 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 449e4bd86c06..c402a8463081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1093,6 +1093,27 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}

+int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags)
+{
+ struct lag_func *pf;
+ int ret;
+ int i;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ pf = mlx5_lag_pf(ldev, i);
+ if (!(pf->dev->priv.flags & flags)) {
+ struct mlx5_eswitch *esw;
+
+ esw = pf->dev->priv.eswitch;
+ ret = mlx5_eswitch_reload_ib_reps(esw);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
@@ -1130,9 +1151,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);

if (shared_fdb)
- mlx5_ldev_for_each(i, 0, ldev)
- if (!(mlx5_lag_pf(ldev, i)->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV);
}

bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -1388,10 +1407,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
- if (shared_fdb) {
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- }
+ if (shared_fdb)
+ mlx5_lag_reload_ib_reps(ldev, 0);

return;
}
@@ -1409,24 +1426,15 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_nic_vport_enable_roce(dev);
}
} else if (shared_fdb) {
- int i;
-
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
-
- mlx5_ldev_for_each(i, 0, ldev) {
- err = mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- if (err)
- break;
- }
-
+ err = mlx5_lag_reload_ib_reps(ldev, 0);
if (err) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, 0);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 6c911374f409..db561e306fc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -199,4 +199,5 @@ int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
int mlx5_lag_num_devs(struct mlx5_lag *ldev);
int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
+int mlx5_lag_reload_ib_reps(struct mlx5_lag *ldev, u32 flags);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 5eea12a6887a..4d68e3092a56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -70,7 +70,6 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
int err;
- int i;

if (ldev->mode == MLX5_LAG_MODE_MPESW)
return 0;
@@ -103,11 +102,9 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)

dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- mlx5_ldev_for_each(i, 0, ldev) {
- err = mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
- if (err)
- goto err_rescan_drivers;
- }
+ err = mlx5_lag_reload_ib_reps(ldev, 0);
+ if (err)
+ goto err_rescan_drivers;

mlx5_lag_set_vports_agg_speed(ldev);

@@ -119,8 +116,7 @@ static int mlx5_lag_enable_mpesw(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_ldev_for_each(i, 0, ldev)
- mlx5_eswitch_reload_ib_reps(mlx5_lag_pf(ldev, i)->dev->priv.eswitch);
+ mlx5_lag_reload_ib_reps(ldev, 0);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
--
2.44.0