ixgbe: combine some stats into a union to allow for Tx/Rx stats overlap
This change moved some of the RX and TX stats into separate structures and
then placed those structures in a union in order to help reduce the size of
the ring structure.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Alexander Duyck authored and Jeff Kirsher committed Nov 17, 2010
1 parent b6ec895 commit 5b7da51
Showing 2 changed files with 63 additions and 29 deletions.
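
For context, the hunks below apply a simple space-saving pattern: a ring is only ever a Tx ring or an Rx ring, so its Tx-only and Rx-only counters never need to exist at the same time and can share storage through an anonymous union. The following standalone C sketch illustrates the idea under simplified names (struct ring, tx_queue_stats and rx_queue_stats are illustrative stand-ins, not the driver's actual definitions):

/* Standalone sketch of the Tx/Rx stats overlap -- illustrative names only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tx_queue_stats {
        uint64_t restart_queue;
        uint64_t tx_busy;
};

struct rx_queue_stats {
        uint64_t rsc_count;
        uint64_t rsc_flush;
        uint64_t non_eop_descs;
        uint64_t alloc_rx_page_failed;
        uint64_t alloc_rx_buff_failed;
};

struct ring {
        /* ... descriptor memory, DMA handles, queue index, ... */
        union {                         /* anonymous union (C11 / GNU C) */
                struct tx_queue_stats tx_stats;
                struct rx_queue_stats rx_stats;
        };
};

int main(void)
{
        struct ring r;

        memset(&r, 0, sizeof(r));

        /* Members are reached directly through the ring; a Tx ring only
         * ever touches tx_stats, an Rx ring only rx_stats. */
        r.tx_stats.restart_queue++;

        /* The ring pays for the larger counter set, not for both. */
        printf("ring stats take %zu bytes instead of %zu\n",
               sizeof(r),
               sizeof(struct tx_queue_stats) + sizeof(struct rx_queue_stats));
        return 0;
}

Because the counters now live on each ring rather than on the adapter, ixgbe_update_stats has to walk the rings and sum the tx_stats/rx_stats fields into the adapter-level and netdev-level totals, which is what the larger ixgbe_main.c hunk below does.
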
24 changes: 18 additions & 6 deletions drivers/net/ixgbe/ixgbe.h
@@ -146,6 +146,19 @@ struct ixgbe_queue_stats {
         u64 bytes;
 };

+struct ixgbe_tx_queue_stats {
+        u64 restart_queue;
+        u64 tx_busy;
+};
+
+struct ixgbe_rx_queue_stats {
+        u64 rsc_count;
+        u64 rsc_flush;
+        u64 non_eop_descs;
+        u64 alloc_rx_page_failed;
+        u64 alloc_rx_buff_failed;
+};
+
 struct ixgbe_ring {
         void *desc;                     /* descriptor ring memory */
         struct device *dev;             /* device for DMA mapping */
@@ -183,13 +196,12 @@ struct ixgbe_ring {

         struct ixgbe_queue_stats stats;
         struct u64_stats_sync syncp;
-        int numa_node;
+        union {
+                struct ixgbe_tx_queue_stats tx_stats;
+                struct ixgbe_rx_queue_stats rx_stats;
+        };
         unsigned long reinit_state;
-        u64 rsc_count;                  /* stat for coalesced packets */
-        u64 rsc_flush;                  /* stats for flushed packets */
-        u32 restart_queue;              /* track tx queue restarts */
-        u32 non_eop_descs;              /* track hardware descriptor chaining */
-
+        int numa_node;
         unsigned int size;              /* length in bytes */
         dma_addr_t dma;                 /* phys. address of descriptor ring */
         struct rcu_head rcu;
68 changes: 45 additions & 23 deletions drivers/net/ixgbe/ixgbe_main.c
Expand Up @@ -783,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
++tx_ring->restart_queue;
++tx_ring->tx_stats.restart_queue;
}
}

@@ -1024,7 +1024,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                         skb = netdev_alloc_skb_ip_align(adapter->netdev,
                                                         rx_ring->rx_buf_len);
                         if (!skb) {
-                                adapter->alloc_rx_buff_failed++;
+                                rx_ring->rx_stats.alloc_rx_buff_failed++;
                                 goto no_buffers;
                         }
                         /* initialize queue mapping */
@@ -1038,7 +1038,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                          rx_ring->rx_buf_len,
                                          DMA_FROM_DEVICE);
                         if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                                adapter->alloc_rx_buff_failed++;
+                                rx_ring->rx_stats.alloc_rx_buff_failed++;
                                 bi->dma = 0;
                                 goto no_buffers;
                         }
@@ -1048,7 +1048,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                 if (!bi->page) {
                         bi->page = netdev_alloc_page(adapter->netdev);
                         if (!bi->page) {
-                                adapter->alloc_rx_page_failed++;
+                                rx_ring->rx_stats.alloc_rx_page_failed++;
                                 goto no_buffers;
                         }
                 }
@@ -1063,7 +1063,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                                             DMA_FROM_DEVICE);
                         if (dma_mapping_error(rx_ring->dev,
                                               bi->page_dma)) {
-                                adapter->alloc_rx_page_failed++;
+                                rx_ring->rx_stats.alloc_rx_page_failed++;
                                 bi->page_dma = 0;
                                 goto no_buffers;
                         }
@@ -1258,7 +1258,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 if (staterr & IXGBE_RXD_STAT_EOP) {
                         if (skb->prev)
                                 skb = ixgbe_transform_rsc_queue(skb,
-                                                &(rx_ring->rsc_count));
+                                                &(rx_ring->rx_stats.rsc_count));
                         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                                 if (IXGBE_RSC_CB(skb)->delay_unmap) {
                                         dma_unmap_single(rx_ring->dev,
@@ -1269,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                         IXGBE_RSC_CB(skb)->delay_unmap = false;
                                 }
                                 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-                                        rx_ring->rsc_count +=
-                                                skb_shinfo(skb)->nr_frags;
+                                        rx_ring->rx_stats.rsc_count +=
+                                                skb_shinfo(skb)->nr_frags;
                                 else
-                                        rx_ring->rsc_count++;
-                                rx_ring->rsc_flush++;
+                                        rx_ring->rx_stats.rsc_count++;
+                                rx_ring->rx_stats.rsc_flush++;
                         }
                         u64_stats_update_begin(&rx_ring->syncp);
                         rx_ring->stats.packets++;
@@ -1289,7 +1289,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                         skb->next = next_buffer->skb;
                         skb->next->prev = skb;
                 }
-                rx_ring->non_eop_descs++;
+                rx_ring->rx_stats.non_eop_descs++;
                 goto next_desc;
         }

@@ -5406,10 +5406,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
         struct ixgbe_hw *hw = &adapter->hw;
+        struct ixgbe_hw_stats *hwstats = &adapter->stats;
         u64 total_mpc = 0;
         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-        u64 non_eop_descs = 0, restart_queue = 0;
-        struct ixgbe_hw_stats *hwstats = &adapter->stats;
+        u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+        u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+        u64 bytes = 0, packets = 0;

         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
             test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5422,21 +5424,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                         adapter->hw_rx_no_dma_resources +=
                                 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                 for (i = 0; i < adapter->num_rx_queues; i++) {
-                        rsc_count += adapter->rx_ring[i]->rsc_count;
-                        rsc_flush += adapter->rx_ring[i]->rsc_flush;
+                        rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+                        rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
                 }
                 adapter->rsc_total_count = rsc_count;
                 adapter->rsc_total_flush = rsc_flush;
         }

+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+                non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+                alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+                alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+                bytes += rx_ring->stats.bytes;
+                packets += rx_ring->stats.packets;
+        }
+        adapter->non_eop_descs = non_eop_descs;
+        adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+        adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+        netdev->stats.rx_bytes = bytes;
+        netdev->stats.rx_packets = packets;
+
+        bytes = 0;
+        packets = 0;
         /* gather some stats to the adapter struct that are per queue */
-        for (i = 0; i < adapter->num_tx_queues; i++)
-                restart_queue += adapter->tx_ring[i]->restart_queue;
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+                restart_queue += tx_ring->tx_stats.restart_queue;
+                tx_busy += tx_ring->tx_stats.tx_busy;
+                bytes += tx_ring->stats.bytes;
+                packets += tx_ring->stats.packets;
+        }
         adapter->restart_queue = restart_queue;
-
-        for (i = 0; i < adapter->num_rx_queues; i++)
-                non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-        adapter->non_eop_descs = non_eop_descs;
+        adapter->tx_busy = tx_busy;
+        netdev->stats.tx_bytes = bytes;
+        netdev->stats.tx_packets = packets;

         hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
         for (i = 0; i < 8; i++) {
@@ -6223,7 +6245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,

         /* A reprieve! - use start_queue because it doesn't call schedule */
         netif_start_subqueue(netdev, tx_ring->queue_index);
-        ++tx_ring->restart_queue;
+        ++tx_ring->tx_stats.restart_queue;
         return 0;
 }

@@ -6339,7 +6361,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

         if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-                adapter->tx_busy++;
+                tx_ring->tx_stats.tx_busy++;
                 return NETDEV_TX_BUSY;
         }

