diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 87ce5a208cb52..d7fc665deab2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -244,6 +244,8 @@ struct mlx5_esw_offload {
 
 	struct mlx5_flow_table *ft_offloads;
 	struct mlx5_flow_group *vport_rx_group;
+	struct mlx5_flow_group *vport_rx_drop_group;
+	struct mlx5_flow_handle *vport_rx_drop_rule;
 	struct xarray vport_reps;
 	struct list_head peer_flows;
 	struct mutex peer_mutex;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aae..c2b1b2ff68468 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -70,6 +70,8 @@
 #define MLX5_ESW_VPORT_TBL_SIZE 128
 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
 
+#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+
 static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
 	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -1930,7 +1932,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	atomic64_set(&esw->user_count, 0);
 }
 
-static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
 {
 	int nvports;
 
@@ -1955,7 +1957,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 		return -EOPNOTSUPP;
 	}
 
-	ft_attr.max_fte = esw_get_offloads_ft_size(esw);
+	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
+			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
 	ft_attr.prio = 1;
 
 	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1984,7 +1987,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
 	int nvports;
 	int err = 0;
 
-	nvports = esw_get_offloads_ft_size(esw);
+	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
@@ -2014,6 +2017,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
 	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
 }
 
+static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
+{
+	/* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+	 * for the drop rule, which is placed at the end of the table.
+	 * So return the total number of vports and int_ports as the rule index.
+	 */
+	return esw_get_nr_ft_offloads_steering_src_ports(esw);
+}
+
+static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *g;
+	u32 *flow_group_in;
+	int flow_index;
+	int err = 0;
+
+	flow_index = esw_create_vport_rx_drop_rule_index(esw);
+
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
+		goto out;
+	}
+
+	esw->offloads.vport_rx_drop_group = g;
+out:
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+	if (esw->offloads.vport_rx_drop_group)
+		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
+}
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
 				  struct mlx5_flow_destination *dest)
@@ -2062,6 +2111,32 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
 	return flow_rule;
 }
 
+static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *flow_rule;
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
+					&flow_act, NULL, 0);
+	if (IS_ERR(flow_rule)) {
+		esw_warn(esw->dev,
+			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
+			 PTR_ERR(flow_rule));
+		return PTR_ERR(flow_rule);
+	}
+
+	esw->offloads.vport_rx_drop_rule = flow_rule;
+
+	return 0;
+}
+
+static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+	if (esw->offloads.vport_rx_drop_rule)
+		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
+}
+
 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
 {
 	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
@@ -3062,8 +3137,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	if (err)
 		goto create_fg_err;
 
+	err = esw_create_vport_rx_drop_group(esw);
+	if (err)
+		goto create_rx_drop_fg_err;
+
+	err = esw_create_vport_rx_drop_rule(esw);
+	if (err)
+		goto create_rx_drop_rule_err;
+
 	return 0;
 
+create_rx_drop_rule_err:
+	esw_destroy_vport_rx_drop_group(esw);
+create_rx_drop_fg_err:
+	esw_destroy_vport_rx_group(esw);
 create_fg_err:
 	esw_destroy_offloads_fdb_tables(esw);
 create_fdb_err:
@@ -3081,6 +3168,8 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
+	esw_destroy_vport_rx_drop_rule(esw);
+	esw_destroy_vport_rx_drop_group(esw);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_fdb_tables(esw);
 	esw_destroy_restore_table(esw);
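
Note on the layout this patch creates: ft_offloads now holds one steering entry per traffic source (vports plus internal ports) in vport_rx_group, followed by a single trailing entry in vport_rx_drop_group that carries a catch-all drop rule (mlx5_add_flow_rules() is called with a NULL spec, i.e. an empty match, so it catches any packet that missed the per-source rules). The sketch below is a minimal, self-contained model of that sizing and index math, not driver code; names such as ft_layout, compute_layout and steering_src_ports are hypothetical stand-ins for the real flow-table objects and esw_get_nr_ft_offloads_steering_src_ports().

/* Standalone model of the ft_offloads sizing introduced above.
 * Illustrative only; none of these types exist in the driver.
 */
#include <stdio.h>

#define FT_OFFLOADS_DROP_RULE 1	/* mirrors MLX5_ESW_FT_OFFLOADS_DROP_RULE */

struct ft_layout {
	int max_fte;		/* total flow-table size */
	int rx_group_first;	/* vport_rx_group: per-source steering rules */
	int rx_group_last;
	int drop_index;		/* single-entry vport_rx_drop_group */
};

static struct ft_layout compute_layout(int steering_src_ports)
{
	struct ft_layout l;

	/* The table is enlarged by exactly one FTE so the catch-all drop
	 * rule can live in its own group after all steering entries,
	 * mirroring esw_create_vport_rx_drop_rule_index().
	 */
	l.max_fte = steering_src_ports + FT_OFFLOADS_DROP_RULE;
	l.rx_group_first = 0;
	l.rx_group_last = steering_src_ports - 1;
	l.drop_index = steering_src_ports;	/* last index in the table */
	return l;
}

int main(void)
{
	/* 130 sources is a hypothetical count of vports plus int_ports */
	struct ft_layout l = compute_layout(130);

	printf("max_fte=%d rx_group=[%d..%d] drop_group=[%d..%d]\n",
	       l.max_fte, l.rx_group_first, l.rx_group_last,
	       l.drop_index, l.drop_index);
	return 0;
}

With 130 sources this prints max_fte=131 rx_group=[0..129] drop_group=[130..130]: the drop group occupies exactly the one index returned by esw_create_vport_rx_drop_rule_index(), which is why the patch sets the group's start_flow_index and end_flow_index to the same value.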