i40e: add support for XDP_TX action

This patch adds proper XDP_TX action support. For each Tx ring, an
additional XDP Tx ring is allocated and set up. This version does the
DMA mapping in the fast path, which will penalize performance on
IOMMU-enabled systems. Further, debugfs support is not wired up for
the XDP Tx rings. (A rough, hypothetical sketch of this fast path is
included below, after the commit metadata.)

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Björn Töpel authored and Jeff Kirsher committed Jun 21, 2017
1 parent 0c8493d commit 74608d1
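
The hunks rendered below cover only the ring bookkeeping in i40e.h and
i40e_ethtool.c; the hot-path changes the commit message refers to are in
files that did not render on this page. As a rough illustration of what the
message describes (an XDP_TX verdict served on the queue pair's dedicated
XDP Tx ring, with the buffer DMA-mapped in the fast path), something along
these lines can be imagined. Every i40e-internal name in this sketch is an
assumption for illustration, not a quote from the patch.

/* Hypothetical sketch only, not taken from this commit. Each Rx/Tx queue
 * pair owns one XDP Tx ring, so an XDP_TX verdict can be served without
 * locking; the per-packet dma_map_single() in the hot loop is the cost the
 * commit message calls out for IOMMU-enabled systems.
 */
static int i40e_xdp_tx_sketch(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
        /* assumed lookup: the XDP Tx ring paired with this Rx queue */
        struct i40e_ring *xdp_ring =
                rx_ring->vsi->xdp_rings[rx_ring->queue_index];
        u32 size = xdp->data_end - xdp->data;
        dma_addr_t dma;

        /* mapping done per packet in the fast path; with an IOMMU enabled,
         * every map/unmap walks the IOMMU tables, hence the penalty noted
         * in the commit message
         */
        dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(xdp_ring->dev, dma))
                return -ENOMEM;

        /* fill a Tx descriptor with (dma, size) and bump the tail (omitted) */
        return 0;
}
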
Showing 5 changed files with 384 additions and 87 deletions.
1 change: 1 addition & 0 deletions drivers/net/ethernet/intel/i40e/i40e.h
@@ -629,6 +629,7 @@ struct i40e_vsi {
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
struct i40e_ring **xdp_rings; /* XDP Tx rings */

u32 active_filters;
u32 promisc_threshold;
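
The new xdp_rings container parallels the existing tx_rings and rx_rings
pointer arrays; the code that actually allocates it is presumably in one of
the files not rendered on this page. A hedged sketch of how such an
allocation could look, with the function name and the 3-versus-2 multiplier
as assumptions rather than quotes from the patch:

/* Hypothetical sketch, not a quote from this commit: the per-VSI ring
 * pointer containers are allocated at run time, and an XDP-enabled VSI
 * needs a third block of Tx ring pointers carved from the same allocation.
 */
static int i40e_vsi_alloc_ring_pointers_sketch(struct i40e_vsi *vsi)
{
        int size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
                   (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);

        vsi->tx_rings = kzalloc(size, GFP_KERNEL);
        if (!vsi->tx_rings)
                return -ENOMEM;
        /* rx_rings and xdp_rings point into the same allocation */
        vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
        if (i40e_enabled_xdp_vsi(vsi))
                vsi->xdp_rings = &vsi->rx_rings[vsi->alloc_queue_pairs];

        return 0;
}
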
42 changes: 33 additions & 9 deletions drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1299,6 +1299,17 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}

static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
{
if (i40e_enabled_xdp_vsi(vsi)) {
return index < vsi->num_queue_pairs ||
(index >= vsi->alloc_queue_pairs &&
index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
}

return index < vsi->num_queue_pairs;
}

static int i40e_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
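
The i40e_active_tx_ring_index() helper added above encodes the layout used
for the cloned ring array in i40e_set_ringparam(): regular Tx rings occupy
indices 0 .. alloc_queue_pairs - 1 and, when XDP is enabled, the XDP Tx
rings follow at alloc_queue_pairs .. 2 * alloc_queue_pairs - 1, with only
the first num_queue_pairs entries of each half actually in use. A small
stand-alone mirror of the predicate, using assumed example values that are
not taken from the patch, makes the active indices explicit:

#include <stdbool.h>
#include <stdio.h>

/* user-space mirror of i40e_active_tx_ring_index(); the values below are
 * assumptions for illustration only
 */
struct vsi_layout {
        unsigned int alloc_queue_pairs;
        unsigned int num_queue_pairs;
        bool xdp_enabled;
};

static bool active_tx_ring_index(const struct vsi_layout *v, unsigned int index)
{
        if (v->xdp_enabled)
                return index < v->num_queue_pairs ||
                       (index >= v->alloc_queue_pairs &&
                        index < v->alloc_queue_pairs + v->num_queue_pairs);

        return index < v->num_queue_pairs;
}

int main(void)
{
        /* assumed: 8 queue pairs allocated, 4 in use, XDP enabled */
        struct vsi_layout v = { 8, 4, true };
        unsigned int total = v.alloc_queue_pairs * (v.xdp_enabled ? 2 : 1);
        unsigned int i;

        for (i = 0; i < total; i++)
                if (active_tx_ring_index(&v, i))
                        printf("index %2u: active %s ring\n", i,
                               i < v.alloc_queue_pairs ? "Tx" : "XDP Tx");
        return 0; /* prints indices 0-3 as Tx and 8-11 as XDP Tx */
}

Only those active indices are cloned, resized, and later freed in the loops
below; the unused slots between num_queue_pairs and alloc_queue_pairs are
skipped via the continue statements.
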
@@ -1308,6 +1319,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;

@@ -1345,6 +1357,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
vsi->tx_rings[i]->count = new_tx_count;
vsi->rx_rings[i]->count = new_rx_count;
if (i40e_enabled_xdp_vsi(vsi))
vsi->xdp_rings[i]->count = new_tx_count;
}
goto done;
}
@@ -1354,20 +1368,24 @@ static int i40e_set_ringparam(struct net_device *netdev,
* to the Tx and Rx ring structs.
*/

/* alloc updated Tx resources */
/* alloc updated Tx and XDP Tx resources */
tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
(i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
tx_rings = kcalloc(tx_alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}

for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
for (i = 0; i < tx_alloc_queue_pairs; i++) {
if (!i40e_active_tx_ring_index(vsi, i))
continue;

tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
/* the desc and bi pointers will be reallocated in the
@@ -1379,6 +1397,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (err) {
while (i) {
i--;
if (!i40e_active_tx_ring_index(vsi, i))
continue;
i40e_free_tx_resources(&tx_rings[i]);
}
kfree(tx_rings);
@@ -1446,9 +1466,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
i40e_down(vsi);

if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_tx_resources(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
for (i = 0; i < tx_alloc_queue_pairs; i++) {
if (i40e_active_tx_ring_index(vsi, i)) {
i40e_free_tx_resources(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
}
kfree(tx_rings);
tx_rings = NULL;
@@ -1479,8 +1501,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++)
i40e_free_tx_resources(&tx_rings[i]);
for (i = 0; i < tx_alloc_queue_pairs; i++) {
if (i40e_active_tx_ring_index(vsi, i))
i40e_free_tx_resources(vsi->tx_rings[i]);
}
kfree(tx_rings);
tx_rings = NULL;
}
(Diffs for the remaining three changed files did not render and are not shown here.)
