fm10k: Add skb->xmit_more support
This change adds support for skb->xmit_more, based on the changes that were
made to igb to support the feature. The main change is moving the
maybe_stop_tx check up so that we can use netif_xmit_stopped to determine
whether we must write the tail because no further buffers can be added.

Acked-by: Matthew Vick <matthew.vick@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexander Duyck authored and David S. Miller committed Oct 14, 2014
1 parent 5bc2672 commit 2c2b2f0
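
As context for the diff below, a minimal sketch of the deferred-doorbell
pattern the commit adopts. The example_* names are hypothetical stand-ins
for the fm10k equivalents shown in the diff; netif_xmit_stopped(),
netdev_get_tx_queue(), writel(), and skb->xmit_more are the real 2014-era
kernel interfaces.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

#define EXAMPLE_DESC_NEEDED	8	/* hypothetical worst-case descriptor count */

struct example_ring {			/* hypothetical, mirrors struct fm10k_ring */
	struct net_device *netdev;
	u8 __iomem *tail;		/* MMIO doorbell register */
	u16 queue_index;
	u16 next_to_use;
};

static int example_maybe_stop_tx(struct example_ring *ring, u16 size);

static void example_tx_map(struct example_ring *ring, struct sk_buff *skb, u16 i)
{
	/* ... Tx descriptors for skb have already been filled here ... */
	ring->next_to_use = i;

	/* Stop the queue *before* deciding whether to write the tail, so
	 * that netif_xmit_stopped() below reflects up-to-date queue state.
	 */
	example_maybe_stop_tx(ring, EXAMPLE_DESC_NEEDED);

	/* Ring the doorbell only when the stack has no more packets for us
	 * (!skb->xmit_more), or when the queue was just stopped and no
	 * later transmit call will arrive to flush the batch.
	 */
	if (netif_xmit_stopped(netdev_get_tx_queue(ring->netdev,
						   ring->queue_index)) ||
	    !skb->xmit_more)
		writel(i, ring->tail);
}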
Showing 1 changed file with 34 additions and 31 deletions:
drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -929,6 +929,30 @@ static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
 	return i == tx_ring->count;
 }
 
+static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(fm10k_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
+{
+	if (likely(fm10k_desc_unused(tx_ring) >= size))
+		return 0;
+	return __fm10k_maybe_stop_tx(tx_ring, size);
+}
+
 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 			 struct fm10k_tx_buffer *first)
 {
@@ -1022,13 +1046,18 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
+	/* Make sure there is space in the ring for the next send. */
+	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+		writel(i, tx_ring->tail);
 
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
+	}
 
 	return;
 dma_error:
@@ -1048,30 +1077,6 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 	tx_ring->next_to_use = i;
 }
 
-static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-	smp_mb();
-
-	/* We need to check again in a case another CPU has just
-	 * made room available. */
-	if (likely(fm10k_desc_unused(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
-{
-	if (likely(fm10k_desc_unused(tx_ring) >= size))
-		return 0;
-	return __fm10k_maybe_stop_tx(tx_ring, size);
-}
-
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 				  struct fm10k_ring *tx_ring)
 {
@@ -1116,8 +1121,6 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 
 	fm10k_tx_map(tx_ring, first);
 
-	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop:

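One note on the stop/wake idiom in __fm10k_maybe_stop_tx above: its smp_mb()
pairs with a barrier on the Tx cleanup side, which rechecks a stopped queue
after freeing descriptors and wakes it once room is available again. Below is
a hedged sketch of that consumer side, modeled on the common Intel-driver
cleanup pattern; fm10k's actual cleanup routine is not part of this diff, and
TX_WAKE_THRESHOLD plus the surrounding variable names are assumptions drawn
from that pattern.

	/* Tx cleanup side (sketch): pairs with __fm10k_maybe_stop_tx().
	 * Modeled on the usual Intel-driver pattern; TX_WAKE_THRESHOLD and
	 * the surrounding names are assumptions, not quoted fm10k code.
	 */
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Ensure a producer stopping the queue after this point
		 * observes the descriptors we just freed.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}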