net/mlx5: LAG, generalize handling of shared FDB
Shared FDB handling currently assumes that a shared FDB can only be
created from two devices.
In order to support a shared FDB spanning more than two devices, iterate
over all LAG ports instead of hard-coding only the first two LAG ports
whenever handling shared FDB.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Shay Drory authored and Saeed Mahameed committed Jun 7, 2023
1 parent 4c103ae commit 86a1212
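
In essence, the change replaces pairwise dev0/dev1 calls with loops bounded by the runtime port count. Here is a minimal standalone sketch of that pattern in plain C; the struct lag/struct port types, the reload_reps() helper, and the MAX_PORTS cap are made up for illustration and are not the driver's own definitions:

#include <stdio.h>

#define MAX_PORTS 4	/* hypothetical cap, standing in for the LAG port limit */

struct port {
	int id;
};

struct lag {
	struct port ports[MAX_PORTS];
	int nports;	/* runtime number of LAG ports, like ldev->ports */
};

static void reload_reps(struct port *p)
{
	printf("reload reps on port %d\n", p->id);
}

int main(void)
{
	struct lag lag = { .ports = { { 0 }, { 1 }, { 2 } }, .nports = 3 };
	int i;

	/*
	 * Before: handling was hard-coded to exactly two ports:
	 *	reload_reps(&lag.ports[0]);
	 *	reload_reps(&lag.ports[1]);
	 *
	 * After: iterate over however many ports the LAG actually has.
	 */
	for (i = 0; i < lag.nports; i++)
		reload_reps(&lag.ports[i]);

	return 0;
}

The driver-side equivalents are ldev->ports for the runtime count and ldev->pf[i].dev for the per-port device, as seen throughout the diff below.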
Showing 1 changed file with 38 additions and 28 deletions.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
 		return;
 
 	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
-	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
+		if (ldev->ports > 2)
+			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
 		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+	}
 }
 
 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
@@ -782,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
 {
 	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	bool roce_lag;
 	int err;
 	int i;
@@ -807,30 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
 	if (shared_fdb || roce_lag)
 		mlx5_lag_add_devices(ldev);
 
-	if (shared_fdb) {
-		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
-			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
-			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
-	}
+	if (shared_fdb)
+		for (i = 0; i < ldev->ports; i++)
+			if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+				mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
 }
 
 bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+	struct mlx5_core_dev *dev;
+	int i;
 
-	if (is_mdev_switchdev_mode(dev0) &&
-	    is_mdev_switchdev_mode(dev1) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
-	    mlx5_devcom_comp_is_ready(dev0->priv.devcom,
-				      MLX5_DEVCOM_ESW_OFFLOADS) &&
-	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
-	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
-	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl) &&
-	    mlx5_eswitch_get_npeers(dev0->priv.eswitch) == MLX5_CAP_GEN(dev0, num_lag_ports) - 1 &&
-	    mlx5_eswitch_get_npeers(dev1->priv.eswitch) == MLX5_CAP_GEN(dev1, num_lag_ports) - 1)
+	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+		dev = ldev->pf[i].dev;
+		if (is_mdev_switchdev_mode(dev) &&
+		    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+		    MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
+		    MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
+		    mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
+		    MLX5_CAP_GEN(dev, num_lag_ports) - 1)
+			continue;
+		return false;
+	}
+
+	dev = ldev->pf[MLX5_LAG_P1].dev;
+	if (is_mdev_switchdev_mode(dev) &&
+	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+	    mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
+	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
 		return true;
 
 	return false;
@@ -867,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
 static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	struct lag_tracker tracker = { };
 	bool do_bond, roce_lag;
 	int err;
@@ -908,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 			for (i = 1; i < ldev->ports; i++)
 				mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
 		} else if (shared_fdb) {
+			int i;
+
 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 			mlx5_rescan_drivers_locked(dev0);
 
-			err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-			if (!err)
-				err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+			for (i = 0; i < ldev->ports; i++) {
+				err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+				if (err)
+					break;
+			}
 
 			if (err) {
 				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 				mlx5_rescan_drivers_locked(dev0);
 				mlx5_deactivate_lag(ldev);
 				mlx5_lag_add_devices(ldev);
-				mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-				mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+				for (i = 0; i < ldev->ports; i++)
+					mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
 				mlx5_core_err(dev0, "Failed to enable lag\n");
 				return;
 			}
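
The reload path in mlx5_do_bond() also changes shape: attempt the per-port operation on every port, stop at the first failure, and on failure reload reps on all ports once more after unwinding. A standalone sketch of that error-handling pattern, with a hypothetical reload_port() standing in for mlx5_eswitch_reload_reps() and the driver's unwinding steps reduced to a comment:

#include <stdio.h>

#define NPORTS 3	/* hypothetical port count */

/* Hypothetical per-port operation that can fail; returns 0 on success. */
static int reload_port(int i)
{
	return i == 2 ? -1 : 0;	/* simulate a failure on the last port */
}

int main(void)
{
	int err = 0;
	int i;

	/* Try each port in turn, stopping at the first failure. */
	for (i = 0; i < NPORTS; i++) {
		err = reload_port(i);
		if (err)
			break;
	}

	if (err) {
		/*
		 * Unwind. In the driver this deactivates the LAG, re-adds
		 * the devices, and then reloads reps on every port again,
		 * best-effort, before reporting the error.
		 */
		for (i = 0; i < NPORTS; i++)
			reload_port(i);
		fprintf(stderr, "Failed to enable lag\n");
		return 1;
	}

	return 0;
}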