Commit 99a9e8b

---
r: 352483
b: refs/heads/master
c: 21ba6fe
h: refs/heads/master
i:
  352481: ef9fc7e
  352479: 1d77c0d
v: v3

Alexander Duyck authored and Jeff Kirsher committed Feb 15, 2013
1 parent 9640ae0 commit 99a9e8b
Showing 3 changed files with 29 additions and 16 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2c7d7724bc5b12a8bc038880d2dfe8ea496c618d
+refs/heads/master: 21ba6fe19370f8008d1edd9aedd6dadd7e3fa8f8
trunk/drivers/net/ethernet/intel/igb/igb.h (13 changes: 11 additions & 2 deletions)
@@ -139,8 +139,6 @@ struct vf_data_storage {
 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256
 #define IGB_RX_BUFSZ IGB_RXBUFFER_2048

-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
 

@@ -169,6 +167,17 @@ enum igb_tx_flags {
 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT 16

+/*
+ * The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct igb_tx_buffer {
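For orientation, the new macros encode two facts: a single Tx descriptor can carry at most 32K of data (the largest programmable size is 65535, limited down to a power of two), and a worst-case skb needs one descriptor per fragment plus four more. The standalone C sketch below reproduces that arithmetic outside the kernel tree; DIV_ROUND_UP is reimplemented here and MAX_SKB_FRAGS is assumed to be 17 (a typical value with 4K pages), so the printed numbers are illustrative only.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_SKB_FRAGS		17	/* assumed; defined by kernel headers in-tree */

#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)	/* 32768 bytes */

#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)

int main(void)
{
	/* A 60000-byte buffer does not fit one 32K descriptor: it needs 2. */
	printf("TXD_USE_COUNT(60000) = %lu\n",
	       (unsigned long)TXD_USE_COUNT(60000UL));
	/* Worst case per skb: one descriptor per fragment, plus the head,
	 * a context descriptor, and a 2-descriptor gap (17 + 4 = 21 here). */
	printf("DESC_NEEDED = %d\n", DESC_NEEDED);
	return 0;
}

With these assumed values DESC_NEEDED works out to 21, which is why the old fixed wake threshold of 16 descriptors (removed above) could be smaller than a single worst-case send.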
trunk/drivers/net/ethernet/intel/igb/igb_main.c (30 changes: 17 additions & 13 deletions)
@@ -4434,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }

-/*
- * The largest size we can write to the descriptor is 65535.  In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
 static void igb_tx_map(struct igb_ring *tx_ring,
 		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
@@ -4609,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	struct igb_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	u8 hdr_len = 0;

-	/* need: 1 descriptor per page,
+	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
 	 *       + 2 desc gap to keep tail from touching head,
-	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
-	 * otherwise try next time */
-	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	 * otherwise try next time
+	 */
+	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+		unsigned short f;
+		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+	} else {
+		count += skb_shinfo(skb)->nr_frags;
+	}
+
+	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
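The rewritten check sizes the stop decision to the actual skb instead of assuming one descriptor per fragment: when a fragment can exceed what one descriptor carries (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD), each fragment is converted to a descriptor count individually, and count + 3 then adds the context descriptor plus the two-descriptor gap. Below is a minimal userspace sketch of the same logic, with the skb reduced to a head length and an array of fragment sizes; would_stop_tx() and its parameters are hypothetical names for illustration, not kernel API.

#include <stdbool.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define IGB_MAX_DATA_PER_TXD	(1 << 15)	/* 32K per descriptor */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)

/* true means "stop the queue and return NETDEV_TX_BUSY" */
bool would_stop_tx(unsigned int head_len,
		   const unsigned int *frag_size, size_t nr_frags,
		   unsigned int unused_descs)
{
	unsigned int count = TXD_USE_COUNT(head_len);
	size_t f;

	/* Worst case: each fragment may span multiple 32K descriptors. */
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_size[f]);

	/* +3 covers the context descriptor and the two-descriptor gap
	 * that keeps tail from touching head. */
	return unused_descs < count + 3;
}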
@@ -4659,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	igb_tx_map(tx_ring, first, hdr_len);

 	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

 	return NETDEV_TX_OK;

@@ -6063,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		}
 	}

+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(total_packets &&
 		     netif_carrier_ok(tx_ring->netdev) &&
-		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
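This last hunk replaces the fixed IGB_TX_QUEUE_WAKE of 16 descriptors, less than the 21-descriptor worst case, with TX_WAKE_THRESHOLD = DESC_NEEDED * 2, so a stopped queue wakes only once cleanup has freed room for two worst-case sends. A sketch of the resulting stop/wake hysteresis, again assuming MAX_SKB_FRAGS is 17; queue_stopped, after_xmit(), and after_clean() are illustrative stand-ins for the netif queue state and the igb transmit and clean paths:

#define MAX_SKB_FRAGS		17	/* assumed, as above */
#define DESC_NEEDED		(MAX_SKB_FRAGS + 4)	/* 21 */
#define TX_WAKE_THRESHOLD	(DESC_NEEDED * 2)	/* 42 */

int queue_stopped;	/* stand-in for the netif subqueue state bit */

/* Transmit path: stop when a worst-case skb might no longer fit. */
void after_xmit(unsigned int unused_descs)
{
	if (unused_descs < DESC_NEEDED)
		queue_stopped = 1;	/* netif_stop_subqueue() in igb */
}

/* Clean path: wake only after twice the worst case is free, so a few
 * reclaimed descriptors cannot bounce the queue between states. */
void after_clean(unsigned int unused_descs)
{
	if (queue_stopped && unused_descs >= TX_WAKE_THRESHOLD)
		queue_stopped = 0;	/* netif_wake_subqueue() in igb */
}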
