Merge tag 'mlx5-updates-2023-05-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-05-19

mlx5 misc changes and code cleanup:

The following series contains general changes for improving
E-Switch driver behavior.

1) Improving condition checking
2) Code cleanup
3) Using metadata matching on send-to-vport rules
4) Using RoCE v2 instead of v1 for loopback rules

* tag 'mlx5-updates-2023-05-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: E-Switch, Initialize E-Switch for eswitch manager
  net/mlx5: devlink, Only show PF related devlink warning when needed
  net/mlx5: E-Switch, Use metadata matching for RoCE loopback rule
  net/mlx5: E-Switch, Use RoCE version 2 for loopback traffic
  net/mlx5e: E-Switch, Add a check that log_max_l2_table is valid
  net/mlx5e: E-Switch: move debug print of adding mac to correct place
  net/mlx5e: E-Switch, Check device is PF when stopping esw offloads
  net/mlx5: Remove redundant vport_group_manager cap check
  net/mlx5e: E-Switch, Use metadata for vport matching in send-to-vport rules
  net/mlx5e: E-Switch, Allow get vport api if esw exists
  net/mlx5e: E-Switch, Update when to set other vport context
  net/mlx5e: Remove redundant __func__ arg from fs_err() calls
  net/mlx5e: E-Switch, Remove flow_source check for metadata matching
  net/mlx5: E-Switch, Remove redundant check
  net/mlx5: Remove redundant esw multiport validate function
====================

Acked-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/20230519175557.15683-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
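
Item 3 above, metadata matching on send-to-vport rules, is the main functional change in the eswitch_offloads.c hunks further down: when vport metadata matching is enabled, a send-to-vport rule now identifies the source vport through the metadata register reg_c_0 instead of matching misc_parameters.source_port (plus the owner vhca_id on merged-eswitch devices). The standalone C sketch below models only that selection logic; the struct and helper names are hypothetical stand-ins, not mlx5 driver API.

/* Illustrative model of the send-to-vport match selection, not mlx5 code.
 * Real rules are built with MLX5_SET() on fte_match_param; this sketch only
 * records which fields a rule would match on.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct match_spec {
	bool	 use_metadata;	/* match misc_parameters_2.metadata_reg_c_0 */
	uint32_t metadata_value;
	uint32_t metadata_mask;
	uint16_t source_port;	/* legacy: match misc_parameters.source_port */
	bool	 match_vhca_id;	/* only for merged-eswitch devices */
};

/* Hypothetical stand-ins for mlx5_eswitch_get_vport_metadata_for_match()
 * and mlx5_eswitch_get_vport_metadata_mask(); the real encoding differs.
 */
static uint32_t vport_metadata_for_match(uint16_t vport) { return 0x8000u | vport; }
static uint32_t vport_metadata_mask(void)                { return 0xffffu; }

static struct match_spec build_send_to_vport_match(bool metadata_enabled,
						    bool merged_eswitch,
						    uint16_t manager_vport)
{
	struct match_spec spec = { 0 };

	if (metadata_enabled) {
		/* New behavior: the source vport is carried in reg_c_0. */
		spec.use_metadata = true;
		spec.metadata_value = vport_metadata_for_match(manager_vport);
		spec.metadata_mask = vport_metadata_mask();
	} else {
		/* Fallback: match source_port, and the owner's vhca_id when
		 * two functions share a merged eswitch.
		 */
		spec.source_port = manager_vport;
		spec.match_vhca_id = merged_eswitch;
	}
	return spec;
}

int main(void)
{
	struct match_spec s = build_send_to_vport_match(true, false, 0);

	printf("metadata match: %s (value 0x%x, mask 0x%x)\n",
	       s.use_metadata ? "yes" : "no",
	       (unsigned int)s.metadata_value, (unsigned int)s.metadata_mask);
	return 0;
}
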
Jakub Kicinski committed May 23, 2023
2 parents de5c9bf + f5d87b4 commit 62a41dc
Showing 11 changed files with 108 additions and 104 deletions.
26 changes: 2 additions & 24 deletions drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -162,9 +162,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}

if (pci_num_vf(pdev)) {
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}

switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -464,27 +463,6 @@ static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
ctx->val.vbool = mlx5_lag_is_mpesw(dev);
return 0;
}

static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);

if (!MLX5_ESWITCH_MANAGER(dev)) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
return -EOPNOTSUPP;
}

if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"E-Switch must be in switchdev mode");
return -EBUSY;
}

return 0;
}

#endif

static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
@@ -563,7 +541,7 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_esw_multiport_get,
mlx5_devlink_esw_multiport_set,
mlx5_devlink_esw_multiport_validate),
NULL),
#endif
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
12 changes: 5 additions & 7 deletions drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -283,7 +283,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
fs_err(fs, "%s: add rule failed\n", __func__);
fs_err(fs, "add rule failed\n");
}

return err;
@@ -395,8 +395,7 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
fs->vlan->trap_rule = NULL;
fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
fs_err(fs, "add VLAN trap rule failed, err %d\n", err);
return err;
}
fs->vlan->trap_rule = rule;
@@ -421,8 +420,7 @@ int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
fs->l2.trap_rule = NULL;
fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
__func__, err);
fs_err(fs, "add MAC trap rule failed, err %d\n", err);
return err;
}
fs->l2.trap_rule = rule;
@@ -763,7 +761,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
fs_err(fs, "add promiscuous rule failed\n");
}
kvfree(spec);
return err;
@@ -995,7 +993,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,

ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
3 changes: 2 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -35,7 +35,8 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
}

ft_attr.max_fte = size;
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
if (vport_num || mlx5_core_is_ecpf(esw->dev))
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
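
The esw_acl_table_create() hunk above and the arm_vport_context_events_cmd() hunk in eswitch.c below apply the same guard: the "other vport" indication (MLX5_FLOW_TABLE_OTHER_VPORT, or the other_vport command field) is set only when the target is not the function's own vport 0, or when the driver runs on the embedded CPU PF, which issues such commands on behalf of the external host. A minimal, self-contained sketch of that decision, with hypothetical names:

/* Illustrative only: models when an mlx5 vport command would carry the
 * "other vport" indication. Not driver code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_other_vport(uint16_t vport_num, bool is_ecpf)
{
	/* vport 0 is the function's own vport, so no indication is needed
	 * there - unless we are the ECPF, which manages vports that belong
	 * to the external host function.
	 */
	return vport_num != 0 || is_ecpf;
}

int main(void)
{
	printf("PF,   vport 0: other_vport=%d\n", needs_other_vport(0, false));
	printf("PF,   vport 3: other_vport=%d\n", needs_other_vport(3, false));
	printf("ECPF, vport 0: other_vport=%d\n", needs_other_vport(0, true));
	return 0;
}
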
22 changes: 14 additions & 8 deletions drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -92,7 +92,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;

if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
if (!esw)
return ERR_PTR(-EPERM);

vport = xa_load(&esw->vports, vport_num);
@@ -113,7 +113,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
if (vport || mlx5_core_is_ecpf(dev))
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);

@@ -309,11 +310,12 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)

fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
vport, mac, vaddr->flow_rule);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
vport, mac, vaddr->flow_rule);
}

return 0;
}
@@ -710,6 +712,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];

if (!MLX5_CAP_GEN(dev, log_max_l2_table))
return;

mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
@@ -946,7 +951,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
vport->enabled = false;

/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
@@ -1616,7 +1622,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw;
int err;

if (!MLX5_VPORT_MANAGER(dev))
if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
return 0;

esw = kzalloc(sizeof(*esw), GFP_KERNEL);
@@ -1686,7 +1692,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
if (!esw)
return;

esw_info(esw->dev, "cleanup\n");
8 changes: 8 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -683,6 +683,14 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

103 changes: 67 additions & 36 deletions drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -838,6 +838,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
u16 vport;

spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -847,20 +848,43 @@

misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
/* source vport is the esw manager */
MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(from_esw->dev, vhca_id));

misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);

spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

/* source vport is the esw manager */
vport = from_esw->manager_vport;

if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));

misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());

spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);

if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(from_esw->dev, vhca_id));

misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);

spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}

dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = rep->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
@@ -1269,8 +1293,10 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in)
void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int match_params)
{
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in,
@@ -1279,15 +1305,15 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
MLX5_MATCH_MISC_PARAMETERS_2 | match_params);

MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
MLX5_MATCH_MISC_PARAMETERS | match_params);

MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
@@ -1463,14 +1489,13 @@ esw_create_send_to_vport_group(struct mlx5_eswitch *esw,

memset(flow_group_in, 0, inlen);

MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);

match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {

if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in,
@@ -1558,7 +1583,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,

memset(flow_group_in, 0, inlen);

esw_set_flow_group_source_port(esw, flow_group_in);
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
@@ -1845,7 +1870,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
return -ENOMEM;

/* create vport rx group */
esw_set_flow_group_source_port(esw, flow_group_in);
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
@@ -1915,21 +1940,13 @@ static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;

spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}

if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
@@ -1949,6 +1966,23 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,

spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
}
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;

spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}

mlx5_esw_set_spec_source_port(esw, vport, spec);

flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
@@ -2827,9 +2861,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
MLX5_FDB_TO_VPORT_REG_C_0))
return false;

if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;

return true;
}

@@ -3280,7 +3311,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
*/
if (!mlx5_sriov_is_enabled(esw->dev))
if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
return 0;

err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
(Diffs for the remaining changed files are not shown.)
