diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index dd8a19d856178..00773aab9d20a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
 		return;
 
 	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
-	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
+		if (ldev->ports > 2)
+			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
 		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+	}
 }
 
 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
@@ -782,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
 {
 	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	bool roce_lag;
 	int err;
 	int i;
@@ -807,30 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
 	if (shared_fdb || roce_lag)
 		mlx5_lag_add_devices(ldev);
 
-	if (shared_fdb) {
-		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
-			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
-			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
-	}
+	if (shared_fdb)
+		for (i = 0; i < ldev->ports; i++)
+			if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+				mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
 }
 
 bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
 {
-	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
-
-	if (is_mdev_switchdev_mode(dev0) &&
-	    is_mdev_switchdev_mode(dev1) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
-	    mlx5_devcom_comp_is_ready(dev0->priv.devcom,
-				      MLX5_DEVCOM_ESW_OFFLOADS) &&
-	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
-	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
-	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl) &&
-	    mlx5_eswitch_get_npeers(dev0->priv.eswitch) == MLX5_CAP_GEN(dev0, num_lag_ports) - 1 &&
-	    mlx5_eswitch_get_npeers(dev1->priv.eswitch) == MLX5_CAP_GEN(dev1, num_lag_ports) - 1)
+	struct mlx5_core_dev *dev;
+	int i;
+
+	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+		dev = ldev->pf[i].dev;
+		if (is_mdev_switchdev_mode(dev) &&
+		    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+		    MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
+		    MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
+		    mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
+		    MLX5_CAP_GEN(dev, num_lag_ports) - 1)
+			continue;
+		return false;
+	}
+
+	dev = ldev->pf[MLX5_LAG_P1].dev;
+	if (is_mdev_switchdev_mode(dev) &&
+	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+	    mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
+	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
 		return true;
 
 	return false;
@@ -867,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
 static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	struct lag_tracker tracker = { };
 	bool do_bond, roce_lag;
 	int err;
@@ -908,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 		for (i = 1; i < ldev->ports; i++)
 			mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
 	} else if (shared_fdb) {
+		int i;
+
 		dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 		mlx5_rescan_drivers_locked(dev0);
 
-		err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-		if (!err)
-			err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+		for (i = 0; i < ldev->ports; i++) {
+			err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+			if (err)
+				break;
+		}
 
 		if (err) {
 			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 			mlx5_rescan_drivers_locked(dev0);
 			mlx5_deactivate_lag(ldev);
 			mlx5_lag_add_devices(ldev);
-			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
-			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+			for (i = 0; i < ldev->ports; i++)
+				mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
 			mlx5_core_err(dev0, "Failed to enable lag\n");
 			return;
 		}
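
For context, a minimal standalone sketch of the refactoring pattern the patch applies, assuming nothing from the kernel tree: back-to-back hardcoded dev0/dev1 calls become loops over ldev->ports, so the same code covers 2-, 3-, and 4-port bonds. All names below (fake_lag, reload_reps, and so on) are hypothetical stand-ins for illustration, not mlx5 APIs.

#include <stdio.h>

#define MAX_PORTS 4

struct fake_dev {
	int id;
};

struct fake_lag {
	int ports;			/* actual number of bonded ports */
	struct fake_dev pf[MAX_PORTS];
};

/* Hypothetical stand-in for mlx5_eswitch_reload_reps(); fails on fail_id. */
static int reload_reps(struct fake_dev *dev, int fail_id)
{
	printf("reload reps on port %d\n", dev->id);
	return dev->id == fail_id ? -1 : 0;
}

int main(void)
{
	struct fake_lag ldev = { .ports = 4, .pf = { {0}, {1}, {2}, {3} } };
	int err = 0;
	int i;

	/* Generalized form: one loop instead of paired dev0/dev1 calls,
	 * stopping at the first failure, as in the mlx5_do_bond() hunk. */
	for (i = 0; i < ldev.ports; i++) {
		err = reload_reps(&ldev.pf[i], 2 /* simulate a failure on port 2 */);
		if (err)
			break;
	}

	if (err) {
		/* Error path mirrors the patch: give every port a chance
		 * to reload during rollback, ignoring individual errors. */
		for (i = 0; i < ldev.ports; i++)
			reload_reps(&ldev.pf[i], -1);
		fprintf(stderr, "Failed to enable lag\n");
		return 1;
	}

	return 0;
}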