i40e: Move rings from pointer to array to array of pointers
Allocate the queue pairs individually instead of as a group.  This
allows for much easier queue management as it is possible to dynamically
resize the queues without having to free and allocate the entire block.

Ease statistic collection by treating Tx/Rx queue pairs as a single
unit.  Each pair is allocated together and starts with a Tx queue and
ends with an Rx queue.  By ordering them this way it is possible to know
the Rx offset based on a pointer to the Tx queue.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Alexander Duyck authored and Jeff Kirsher committed Oct 10, 2013
1 parent cd0b6fa commit 9f65e15
Showing 6 changed files with 204 additions and 185 deletions.
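
The allocation change itself lands in i40e_main.c, whose hunk is not shown in this view. As a minimal sketch of the scheme the commit message describes (control flow and error handling here are illustrative, not lifted from the patch), each queue pair becomes one two-ring allocation, Tx first, Rx immediately after:

/* Illustrative sketch only -- the real allocation hunk is in
 * i40e_main.c and is not reproduced in this view. One allocation
 * per queue pair: the Tx ring, immediately followed by its Rx ring.
 */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
	struct i40e_ring *tx_ring;

	tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
	if (!tx_ring)
		return -ENOMEM;

	vsi->tx_rings[i] = tx_ring;	/* pair starts with the Tx queue */
	vsi->rx_rings[i] = &tx_ring[1];	/* and ends with the Rx queue */
}

Because each pair is its own allocation, one pair can be freed or rebuilt without touching the rest (the dynamic resize the message mentions), and a Tx ring pointer always locates its Rx partner at &tx_ring[1] (the fixed Rx offset the message mentions).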
6 changes: 3 additions & 3 deletions drivers/net/ethernet/intel/i40e/i40e.h
@@ -347,9 +347,9 @@ struct i40e_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;

- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;

u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
195 changes: 100 additions & 95 deletions drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,

for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
p += len;
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
p += len;
}

@@ -484,99 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].stats.packets,
- vsi->rx_rings[i].stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_rx_page_failed,
+ rx_ring->rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
+ }
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].stats.packets,
- vsi->tx_rings[i].stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
+ }
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
@@ -782,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
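A consequence visible in the debugfs hunks above: now that each ring is an individually allocated object whose pointer can be replaced at runtime, the dump loops snapshot the pointer under rcu_read_lock() and skip NULL slots rather than indexing into a flat array. Distilled from the diff, the access pattern is:

rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
	/* snapshot the pointer once, then read only via the local copy */
	struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);

	if (!rx_ring)
		continue;
	/* ... dev_info() dumps of rx_ring fields ... */
}
rcu_read_unlock();
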
40 changes: 20 additions & 20 deletions drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);

/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;

while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/

/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,

for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
}

/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,

for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
@@ -517,17 +517,17 @@ static int i40e_set_ringparam(struct net_device *netdev,

if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
}

if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
@@ -588,10 +588,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
- data[i] = vsi->tx_rings[j].stats.packets;
- data[i + 1] = vsi->tx_rings[j].stats.bytes;
- data[i + 2] = vsi->rx_rings[j].stats.packets;
- data[i + 3] = vsi->rx_rings[j].stats.bytes;
+ data[i] = vsi->tx_rings[j]->stats.packets;
+ data[i + 1] = vsi->tx_rings[j]->stats.bytes;
+ data[i + 2] = vsi->rx_rings[j]->stats.packets;
+ data[i + 3] = vsi->rx_rings[j]->stats.bytes;
}
if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
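
The i40e_set_ringparam() hunks above show the payoff of the new layout: a descriptor-count change stages fresh descriptor memory in a temporary array of ring structs, then copies each staged struct over the live ring, so the ring objects (and every pointer the rest of the driver holds to them) stay put. Condensed from the diff, with error unwinding and the symmetric Rx side omitted:

/* Stage clones of the live Tx rings with the new descriptor count. */
tx_rings = kcalloc(vsi->alloc_queue_pairs, sizeof(struct i40e_ring),
		   GFP_KERNEL);
if (!tx_rings)
	return -ENOMEM;
for (i = 0; i < vsi->num_queue_pairs; i++) {
	tx_rings[i] = *vsi->tx_rings[i];	/* clone the live ring */
	tx_rings[i].count = new_tx_count;
	err = i40e_setup_tx_descriptors(&tx_rings[i]);
	/* on error: free what was set up so far, then bail (omitted) */
}

/* With the interface down: drop the old memory, adopt the clone. */
for (i = 0; i < vsi->num_queue_pairs; i++) {
	i40e_free_tx_resources(vsi->tx_rings[i]);
	*vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);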
(Diffs for the remaining three changed files are not shown here.)
