igb: flush when in xmit_more mode and under descriptor pressure
Mirror the changes made to ixgbe in commit 2367a17
("ixgbe: flush when in xmit_more mode and under descriptor pressure")

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Aug 28, 2014
1 parent 2367a17 commit 6f19e12
Showing 1 changed file with 39 additions and 39 deletions.
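For orientation before reading the diff: with skb->xmit_more set, the driver may skip the tail (doorbell) write and let a later frame in the batch ring it once for everything queued so far. The case this commit handles is descriptor pressure: igb_maybe_stop_tx() now runs inside igb_tx_map(), and once it stops the queue the stack will not call ndo_start_xmit() again for this ring, so a deferred doorbell would never be rung. The stopped-queue check therefore forces the write immediately. A sketch of that rule, pulled out of the diff for readability (the wrapper name example_tx_kick() is invented here; in the real driver this logic sits inline at the end of igb_tx_map()):

	/* Sketch only: the flush-on-pressure rule added by this commit. */
	static void example_tx_kick(struct igb_ring *tx_ring,
				    struct sk_buff *skb, u16 next_to_use)
	{
		/* Stop the subqueue now if a worst-case next frame no longer fits. */
		igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

		/* Defer the doorbell only while the queue is still running: a
		 * stopped queue gets no further ndo_start_xmit() calls, so the
		 * queued descriptors must be flushed right away.
		 */
		if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
			writel(next_to_use, tx_ring->tail);
	}
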
drivers/net/ethernet/intel/igb/igb_main.c
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	struct net_device *netdev = tx_ring->netdev;
+
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (igb_desc_unused(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
+	return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	if (igb_desc_unused(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(tx_ring, size);
+}
+
 static void igb_tx_map(struct igb_ring *tx_ring,
 		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
@@ -4915,7 +4950,10 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
-	if (!skb->xmit_more) {
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
 
 		/* we need this if more than one processor can write to our tail
@@ -4942,41 +4980,6 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 	tx_ring->next_to_use = i;
 }
 
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	struct net_device *netdev = tx_ring->netdev;
-
-	netif_stop_subqueue(netdev, tx_ring->queue_index);
-
-	/* Herbert's original patch had:
-	 *  smp_mb__after_netif_stop_queue();
-	 * but since that doesn't exist yet, just open code it.
-	 */
-	smp_mb();
-
-	/* We need to check again in a case another CPU has just
-	 * made room available.
-	 */
-	if (igb_desc_unused(tx_ring) < size)
-		return -EBUSY;
-
-	/* A reprieve! */
-	netif_wake_subqueue(netdev, tx_ring->queue_index);
-
-	u64_stats_update_begin(&tx_ring->tx_syncp2);
-	tx_ring->tx_stats.restart_queue2++;
-	u64_stats_update_end(&tx_ring->tx_syncp2);
-
-	return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	if (igb_desc_unused(tx_ring) >= size)
-		return 0;
-	return __igb_maybe_stop_tx(tx_ring, size);
-}
-
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
@@ -5047,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
 	igb_tx_map(tx_ring, first, hdr_len);
 
-	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop:
