i40e/i40evf: Add support for IPv4 encapsulated in IPv6
This patch fixes two issues.  First, ip_hdr(skb)->protocol was being used
to test for the outer transport protocol, which completely breaks IPv6
support.  Second, we cleared the IPv4 flag when going from an outer v4
header to an inner v6 header, but we didn't update tx_flags in the other
direction.  As such we would have the v6 flag still set even if the inner
header was v4.

Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
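
Editorial note: the sketch below is a minimal, hypothetical illustration of the two fixes described in the commit message; it is not the driver code. The helper classify_tunnel_headers() is invented, and the I40E_TX_FLAGS_* values are placeholders standing in for the driver's definitions in i40e_txrx.h.

#include <linux/bits.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Illustrative placeholder values; the real flags live in i40e_txrx.h. */
#define I40E_TX_FLAGS_IPV4	BIT(0)
#define I40E_TX_FLAGS_IPV6	BIT(1)

/* Hypothetical helper, not part of the driver. */
static void classify_tunnel_headers(struct sk_buff *skb, u32 *tx_flags,
				    u8 *outer_l4_proto)
{
	/* Fix 1: pick the outer L4 protocol based on the outer network
	 * header type already recorded in *tx_flags, rather than always
	 * reading ip_hdr(skb)->protocol, which is only valid when the
	 * outer header is IPv4.
	 */
	if (*tx_flags & I40E_TX_FLAGS_IPV4)
		*outer_l4_proto = ip_hdr(skb)->protocol;
	else if (*tx_flags & I40E_TX_FLAGS_IPV6)
		*outer_l4_proto = ipv6_hdr(skb)->nexthdr;

	/* Fix 2: clear both flags before re-deriving them from the inner
	 * header, so an IPv6-outer/IPv4-inner packet does not keep a
	 * stale IPv6 flag (and vice versa).
	 */
	*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
	if (inner_ip_hdr(skb)->version == 4)
		*tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (inner_ipv6_hdr(skb)->version == 6)
		*tx_flags |= I40E_TX_FLAGS_IPV6;
}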
Alexander Duyck authored and Jeff Kirsher committed Feb 18, 2016
1 parent b96b78f commit a006472
Showing 2 changed files with 49 additions and 30 deletions.
38 changes: 23 additions & 15 deletions drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2409,13 +2409,28 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 	l4.hdr = skb_transport_header(skb);
 
 	if (skb->encapsulation) {
-		switch (ip_hdr(skb)->protocol) {
+		/* define outer network header type */
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
+				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+			else
+				*cd_tunneling |=
+					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+			l4_proto = ip.v4->protocol;
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			l4_proto = ip.v6->nexthdr;
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
 			break;
 		case IPPROTO_GRE:
 			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
 			break;
 		default:
 			return;
@@ -2424,17 +2439,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		/* switch L4 header pointer from outer to inner */
 		ip.hdr = skb_inner_network_header(skb);
 		l4.hdr = skb_inner_transport_header(skb);
-
-		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-			if (*tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-			} else {
-				*cd_tunneling |=
-					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
-		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-		}
+		l4_proto = 0;
 
 		/* Now set the ctx descriptor fields */
 		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
@@ -2443,10 +2448,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				   ((skb_inner_network_offset(skb) -
 				     skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-		if (ip.v6->version == 6) {
-			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+
+		/* reset type as we transition from outer to inner headers */
+		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			*tx_flags |= I40E_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
-		}
 	}
 
 	/* Enable IP checksum offloads */
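Editorial note on the hunk above (not part of the commit): the new code reads ip.v4->version and ip.v6->version through the driver's local union of header pointers. This works because struct iphdr and struct ipv6hdr both carry their 4-bit version field in the same nibble of the first header byte, so either union member can be used to classify an inner header whose type is not yet known. A minimal sketch, with the union and helper names invented for illustration:

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's local union of header pointers. */
union network_hdr {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

/* Returns 4 or 6 for a well-formed IP header of either family. */
static u8 ip_version_of(unsigned char *network_hdr)
{
	union network_hdr ip;

	ip.hdr = network_hdr;

	/* ip.v4->version and ip.v6->version alias the same bits, so
	 * either read reports the IP version of the header.
	 */
	return ip.v4->version;
}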
41 changes: 26 additions & 15 deletions drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1626,29 +1626,37 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 	l4.hdr = skb_transport_header(skb);
 
 	if (skb->encapsulation) {
-		switch (ip_hdr(skb)->protocol) {
+		/* define outer network header type */
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
+				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+			else
+				*cd_tunneling |=
+					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+			l4_proto = ip.v4->protocol;
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			l4_proto = ip.v6->nexthdr;
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
+		case IPPROTO_GRE:
+			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			break;
 		default:
 			return;
 		}
 
 		/* switch L4 header pointer from outer to inner */
 		ip.hdr = skb_inner_network_header(skb);
 		l4.hdr = skb_inner_transport_header(skb);
-
-		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-			if (*tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-			} else {
-				*cd_tunneling |=
-					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
-		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-		}
+		l4_proto = 0;
 
 		/* Now set the ctx descriptor fields */
 		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
@@ -1657,10 +1665,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				   ((skb_inner_network_offset(skb) -
 				     skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-		if (ip.v6->version == 6) {
-			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+
+		/* reset type as we transition from outer to inner headers */
+		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			*tx_flags |= I40E_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
-		}
 	}
 
 	/* Enable IP checksum offloads */
