net/mlx5e: Branch according to classified tunnel type
Currently the tunnel offloading encap/decap methods assume that VXLAN
is the sole tunneling protocol. Lay the infrastructure for supporting
multiple tunneling protocols by branching according to the tunnel
net device kind.

An encap filter's tunnel type is determined according to the egress
(mirred) net device. Decap filters classify the tunnel type according
to the filter's ingress net device kind.

Distinguish between the tunnel type as defined by the SW model and
the FW reformat type that specifies the HW operation to be performed.

Signed-off-by: Oz Shlomo <ozsh@mellanox.com>
Reviewed-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Oz Shlomo authored and Saeed Mahameed committed Dec 10, 2018
1 parent 4d70564 commit 54c177c
Showing 2 changed files with 114 additions and 48 deletions.
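
For orientation before the diff: the classification introduced here is split between the decap path, which looks at the filter's ingress device, and the encap path, which looks at the egress/mirred device. The following is a condensed, hedged sketch of that flow, with names taken from the hunks below; the *_sketch functions and their reduced signatures are illustrative, not the actual driver code, and error reporting and match-buffer setup are omitted.

/* SW-model tunnel types introduced by this commit. */
enum {
	MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
	MLX5E_TC_TUNNEL_TYPE_VXLAN
};

/* Both paths classify by the kind of the relevant net device. */
static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return MLX5E_TC_TUNNEL_TYPE_VXLAN;
	return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

/* Decap (sketch): the filter's ingress device selects the tunnel parser. */
static int parse_tunnel_attr_sketch(struct mlx5e_priv *priv,
				    struct mlx5_flow_spec *spec,
				    struct tc_cls_flower_offload *f,
				    struct net_device *filter_dev,
				    void *headers_c, void *headers_v)
{
	if (mlx5e_get_tunnel_type(filter_dev) != MLX5E_TC_TUNNEL_TYPE_VXLAN)
		return -EOPNOTSUPP;	/* unknown kinds are rejected, not assumed to be VXLAN */

	return parse_tunnel_vxlan_attr(priv, spec, f, headers_c, headers_v);
}

/* Encap (sketch): the egress/mirred device selects the SW tunnel type,
 * which is then mapped to the FW reformat type and tunnel header length.
 */
static int mlx5e_init_tunnel_attr_sketch(struct net_device *mirred_dev,
					 struct mlx5e_encap_entry *e)
{
	e->tunnel_type = mlx5e_get_tunnel_type(mirred_dev);
	if (e->tunnel_type != MLX5E_TC_TUNNEL_TYPE_VXLAN)
		return -EOPNOTSUPP;

	e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;	/* HW operation */
	e->tunnel_hlen = VXLAN_HLEN;				/* SW header size */
	return 0;
}
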
2 changes: 2 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -149,6 +149,8 @@ struct mlx5e_encap_entry {

struct net_device *out_dev;
int tunnel_type;
int tunnel_hlen;
int reformat_type;
u8 flags;
char *encap_header;
int encap_size;
160 changes: 112 additions & 48 deletions drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -79,6 +79,13 @@ enum {

#define MLX5E_TC_MAX_SPLITS 1

enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN
};

static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev);

struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
@@ -677,6 +684,14 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
}
}

static const char *mlx5e_netdev_kind(struct net_device *dev)
{
if (dev->rtnl_link_ops)
return dev->rtnl_link_ops->kind;
else
return "";
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -1037,7 +1052,8 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err;

err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
err = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
e->encap_size, e->encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
@@ -1265,7 +1281,8 @@ static int parse_tunnel_vxlan_attr(struct mlx5e_priv *priv,

static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
struct tc_cls_flower_offload *f,
struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1277,20 +1294,28 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
f->key);
int tunnel_type;
int err = 0;

if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
int err = 0;

tunnel_type = mlx5e_get_tunnel_type(filter_dev);
if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
err = parse_tunnel_vxlan_attr(priv, spec, f,
headers_c, headers_v);
if (err)
goto vxlan_match_offload_err;
} else { /* udp dst port must be given */
vxlan_match_offload_err:
} else {
NL_SET_ERR_MSG_MOD(extack,
"IP tunnel decap offload supported only for vxlan, must set UDP dport");
"decapsulation offload is not supported");
netdev_warn(priv->netdev,
"IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
"decapsulation offload is not supported for %s net device (%d)\n",
mlx5e_netdev_kind(filter_dev), tunnel_type);
return -EOPNOTSUPP;
}

if (err) {
NL_SET_ERR_MSG_MOD(extack,
"failed to parse tunnel attributes");
netdev_warn(priv->netdev,
"failed to parse %s tunnel attributes (%d)\n",
mlx5e_netdev_kind(filter_dev), tunnel_type);
return -EOPNOTSUPP;
}

@@ -1395,6 +1420,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
struct net_device *filter_dev,
u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -1446,7 +1472,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
if (parse_tunnel_attr(priv, spec, f))
if (parse_tunnel_attr(priv, spec, f, filter_dev))
return -EOPNOTSUPP;
break;
default:
@@ -1788,7 +1814,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
struct tc_cls_flower_offload *f,
struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
@@ -1798,7 +1825,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
u8 match_level;
int err;

err = __parse_cls_flower(priv, spec, f, &match_level);
err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);

if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
@@ -2443,10 +2470,21 @@ static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
}

static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
struct ip_tunnel_key *tun_key)
struct mlx5e_encap_entry *e)
{
*ip_proto = IPPROTO_UDP;
return mlx5e_gen_vxlan_header(buf, tun_key);
int err = 0;
struct ip_tunnel_key *key = &e->tun_info.key;

if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
*ip_proto = IPPROTO_UDP;
err = mlx5e_gen_vxlan_header(buf, key);
} else {
pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
, e->tunnel_type);
err = -EOPNOTSUPP;
}

return err;
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -2456,7 +2494,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv4_encap_size = ETH_HLEN +
sizeof(struct iphdr) +
VXLAN_HLEN;
e->tunnel_hlen;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
struct net_device *out_dev;
struct neighbour *n = NULL;
@@ -2527,7 +2565,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,

/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
&ip->protocol, tun_key);
&ip->protocol, e);
if (err)
goto destroy_neigh_entry;

@@ -2540,7 +2578,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
goto out;
}

err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
err = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv4_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
@@ -2569,7 +2608,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN +
sizeof(struct ipv6hdr) +
VXLAN_HLEN;
e->tunnel_hlen;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
struct net_device *out_dev;
struct neighbour *n = NULL;
@@ -2639,7 +2678,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,

/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
&ip6h->nexthdr, tun_key);
&ip6h->nexthdr, e);
if (err)
goto destroy_neigh_entry;

@@ -2652,7 +2691,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto out;
}

err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
err = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
ipv6_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
@@ -2674,14 +2714,52 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
return err;
}

static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev)
{
if (netif_is_vxlan(tunnel_dev))
return MLX5E_TC_TUNNEL_TYPE_VXLAN;
else
return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
{
if (netif_is_vxlan(netdev) &&
int tunnel_type = mlx5e_get_tunnel_type(netdev);

if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
return true;
else
return false;
}

return false;
static int mlx5e_init_tunnel_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
e->tunnel_type = mlx5e_get_tunnel_type(tunnel_dev);

if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
NL_SET_ERR_MSG_MOD(extack,
"vxlan udp dport was not registered with the HW");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n",
dst_port);
return -EOPNOTSUPP;
}
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
e->tunnel_hlen = VXLAN_HLEN;
} else {
e->reformat_type = -1;
e->tunnel_hlen = -1;
return -EOPNOTSUPP;
}
return 0;
}

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
@@ -2696,28 +2774,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;

if (!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
NL_SET_ERR_MSG_MOD(extack,
"vxlan HW offloading is not supported");
netdev_warn(priv->netdev,
"vxlan HW offloading is not supported\n");
return -EOPNOTSUPP;
}

if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
} else {
NL_SET_ERR_MSG_MOD(extack,
"port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
}
int err = 0;

hash_key = hash_encap_info(key);

@@ -2738,7 +2797,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
return -ENOMEM;

e->tun_info = *tun_info;
e->tunnel_type = tunnel_type;
err = mlx5e_init_tunnel_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err;

INIT_LIST_HEAD(&e->flows);

if (family == AF_INET)
@@ -3063,7 +3125,8 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
flow->esw_attr->parse_attr = parse_attr;
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;

@@ -3116,7 +3179,8 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto out;

parse_attr->filter_dev = filter_dev;
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;

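
After the diff, it may help to see where the two new per-entry fields end up on the encap build path. Below is a hedged sketch of the IPv4 case, based on the hunks above; the function name and reduced signature are illustrative only, and route/neighbour handling and error unwinding are omitted.

/* Sketch: tunnel_hlen sizes the encap header, tunnel_type drives header
 * generation, and reformat_type (not the SW tunnel type) is handed to FW.
 */
static int mlx5e_build_ipv4_encap_sketch(struct mlx5e_priv *priv,
					 struct mlx5e_encap_entry *e,
					 char *encap_header)
{
	/* header size now depends on the classified tunnel, not on VXLAN_HLEN */
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + e->tunnel_hlen;
	struct iphdr *ip = (struct iphdr *)(encap_header + ETH_HLEN);
	int err;

	/* per-tunnel-type header generation (only VXLAN is implemented so far) */
	err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
					 &ip->protocol, e);
	if (err)
		return err;

	/* the FW reformat type describes the HW operation to be performed */
	return mlx5_packet_reformat_alloc(priv->mdev, e->reformat_type,
					  ipv4_encap_size, encap_header,
					  MLX5_FLOW_NAMESPACE_FDB,
					  &e->encap_id);
}
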
