Skip to content

Commit

Permalink
Merge tag 'mlx5-updates-2019-03-20' of git://git.kernel.org/pub/scm/l…
Browse files Browse the repository at this point in the history
…inux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-03-20

This series includes updates to mlx5 driver,

1) Compiler warnings cleanup from Saeed Mahameed
2) Parav Pandit simplifies sriov enable/disables
3) Gustavo A. R. Silva, Removes a redundant assignment
4) Moshe Shemesh, Adds Geneve tunnel stateless offload support
5) Eli Britstein, Adds the Support for VLAN modify action and
   Replaces TC VLAN pop and push actions with VLAN modify

Note: This series includes two simple non-mlx5 patches,

1) Declare IANA_VXLAN_UDP_PORT definition in include/net/vxlan.h,
and use it in some drivers.
2) Declare GENEVE_UDP_PORT definition in include/net/geneve.h,
and use it in mlx5 and nfp drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
David S. Miller committed Mar 24, 2019
2 parents 071d08a + 76b496b commit d64fee0
Show file tree
Hide file tree
Showing 22 changed files with 243 additions and 114 deletions.
4 changes: 2 additions & 2 deletions drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
Original file line number Diff line number Diff line change
Expand Up @@ -826,12 +826,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;

l4.hdr = skb_transport_header(skb);

if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
if (!(!skb->encapsulation &&
l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
return false;

skb_checksum_help(skb);
Expand Down
47 changes: 47 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en.h
Original file line number Diff line number Diff line change
Expand Up @@ -884,6 +884,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

/* Protocol layout of a packet as seen by the software parser (SWP);
 * consumed by mlx5e_set_eseg_swp() to fill the SWP fields of a Tx WQE
 * ethernet segment.
 */
struct mlx5e_swp_spec {
	__be16 l3_proto;	/* outer L3 ethertype, network byte order */
	u8 l4_proto;		/* outer L4 IP protocol number (0 = none) */
	u8 is_tun;		/* non-zero when the packet is tunneled */
	__be16 tun_l3_proto;	/* inner L3 ethertype, network byte order */
	u8 tun_l4_proto;	/* inner L4 IP protocol number */
};

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
{
/* SWP offsets are in 2-bytes words */
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
if (swp_spec->l4_proto) {
eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
if (swp_spec->l4_proto == IPPROTO_UDP)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
}

if (swp_spec->is_tun) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
}
switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
/* fall through */
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
}
}

static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
Expand Down
30 changes: 15 additions & 15 deletions drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,23 +165,23 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}

/**
* update_buffer_lossy()
* mtu: device's MTU
* pfc_en: <input> current pfc configuration
* buffer: <input> current prio to buffer mapping
* xoff: <input> xoff value
* port_buffer: <output> port receive buffer configuration
* change: <output>
* update_buffer_lossy - Update buffer configuration based on pfc
* @mtu: device's MTU
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
* @xoff: <input> xoff value
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
Update buffer configuration based on pfc configuration and priority
to buffer mapping.
* Buffer's lossy bit is changed to:
* lossless if there is at least one PFC enabled priority mapped to this buffer
* lossy if all priorities mapped to this buffer are PFC disabled
Update buffer configuration based on pfc configuration and
priority to buffer mapping.
* Buffer's lossy bit is changed to:
* lossless if there is at least one PFC enabled priority
* mapped to this buffer lossy if all priorities mapped to
* this buffer are PFC disabled
*
* Return:
* Return 0 if no error.
* Set change to true if buffer configuration is modified.
* @return: 0 if no error,
* sets change to true if buffer configuration was modified.
*/
static int update_buffer_lossy(unsigned int mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
Expand Down
51 changes: 51 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,57 @@
#include "en_accel/tls_rxtx.h"
#include "en.h"

#if IS_ENABLED(CONFIG_GENEVE)
/* Geneve Tx offload rides on the device's software-parser (SWP) support */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}

/* Set up the SWP fields of @eseg for a Geneve-encapsulated packet.
 * Only UDP traffic destined to the default Geneve port is recognized;
 * any other packet leaves @eseg untouched.
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int hdr_offset = 0;
	__be16 outer_l3;
	u8 outer_l4;

	outer_l3 = vlan_get_protocol(skb);
	if (outer_l3 == htons(ETH_P_IP))
		outer_l4 = ip_hdr(skb)->protocol;
	else if (outer_l3 == htons(ETH_P_IPV6))
		outer_l4 = ipv6_find_hdr(skb, &hdr_offset, -1, NULL, NULL);
	else
		return;

	if (outer_l4 != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;

	swp_spec.l3_proto = outer_l3;
	swp_spec.l4_proto = outer_l4;
	swp_spec.is_tun = true;
	/* Inner protocol is deduced from the encapsulated IP header version */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}

#else
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}

#endif /* CONFIG_GENEVE */

static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
Expand Down
36 changes: 12 additions & 24 deletions drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u8 mode,
struct xfrm_offload *xo)
{
u8 proto;
struct mlx5e_swp_spec swp_spec = {};

/* Tunnel Mode:
* SWP: OutL3 InL3 InL4
Expand All @@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* SWP: OutL3 InL4
* InL3
* Pkt: MAC IP ESP L4
*
* Offsets are in 2-byte words, counting from start of frame
*/
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
if (skb->protocol == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
swp_spec.l3_proto = skb->protocol;
swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
if (swp_spec.is_tun) {
if (xo->proto == IPPROTO_IPV6) {
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
proto = inner_ipv6_hdr(skb)->nexthdr;
swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
} else {
proto = inner_ip_hdr(skb)->protocol;
swp_spec.tun_l3_proto = htons(ETH_P_IP);
swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
}
} else {
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
if (skb->protocol == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
proto = xo->proto;
}
switch (proto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
/* Fall through */
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
swp_spec.tun_l3_proto = skb->protocol;
swp_spec.tun_l4_proto = xo->proto;
}

mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
Expand Down
18 changes: 15 additions & 3 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
Expand All @@ -43,6 +44,7 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
Expand Down Expand Up @@ -2173,10 +2175,13 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;

allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
!!MLX5_IPSEC_DEV(priv->mdev);
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
Expand Down Expand Up @@ -4103,6 +4108,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;

#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
return features;
#endif
}

out:
Expand Down Expand Up @@ -4674,15 +4685,16 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}

if (mlx5_vxlan_allowed(mdev->vxlan)) {
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
Expand Down
65 changes: 62 additions & 3 deletions drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1827,6 +1827,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,

struct pedit_headers {
struct ethhdr eth;
struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
Expand Down Expand Up @@ -1884,6 +1885,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0),

OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
OFFLOAD(SIPV4, 4, ip4.saddr, 0),
Expand Down Expand Up @@ -2247,6 +2249,35 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}

/* Translate a TC VLAN modify action into a pedit (header mangle) rewrite
 * of the VLAN TCI field, and flag the flow for mod-header offload.
 *
 * Only the VLAN id may be rewritten; a request carrying a non-zero
 * priority is rejected with -EOPNOTSUPP.
 *
 * Returns 0 on success or a negative errno from the pedit parser.
 */
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
				   const struct flow_action_entry *act,
				   struct mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	/* Synthetic mangle action targeting the TCI offset within the
	 * VLAN ethernet header.
	 * NOTE(review): the (__be16 *) cast + be16_to_cpu reinterprets the
	 * host-order value as big-endian (a byte swap on little-endian) —
	 * presumably to match the network-order layout parse_tc_pedit_action
	 * expects for mangle mask/val; confirm against that parser.
	 */
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	int err;

	if (act->vlan.prio) {
		NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
				    hdrs, NULL);
	/* MOD_HDR is set even when err != 0; harmless since callers bail
	 * out on error before using *action
	 */
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
Expand Down Expand Up @@ -2282,6 +2313,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_KERNEL,
act, parse_attr, hdrs,
&action, extack);
if (err)
return err;

break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
act->csum_flags,
Expand Down Expand Up @@ -2490,8 +2530,7 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
}
break;
default:
/* action is FLOW_ACT_VLAN_MANGLE */
return -EOPNOTSUPP;
return -EINVAL;
}

attr->total_vlan = vlan_idx + 1;
Expand Down Expand Up @@ -2625,7 +2664,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
err = parse_tc_vlan_action(priv, act, attr, &action);
if (act->id == FLOW_ACTION_VLAN_PUSH &&
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
/* Replace vlan pop+push with vlan modify */
action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
&action, extack);
} else {
err = parse_tc_vlan_action(priv, act, attr, &action);
}
if (err)
return err;

attr->split_count = attr->out_count;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
&action, extack);
if (err)
return err;

Expand Down
Loading

0 comments on commit d64fee0

Please sign in to comment.