---
yaml
---
r: 266087
b: refs/heads/master
c: 44390ca
h: refs/heads/master
i:
  266085: 954245f
  266083: 25c5ced
  266079: b712b77
v: v3
Alexander Duyck authored and Jeff Kirsher committed Sep 20, 2011
1 parent b15e0f4 commit fffc187
Showing 4 changed files with 29 additions and 87 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 153285f9ce88b2d37e29a351dfcf7601ff274c69
+refs/heads/master: 44390ca6cb3d4d3c7c4078bafde11073b5268150
7 changes: 2 additions & 5 deletions trunk/drivers/net/ethernet/intel/igb/igb.h
@@ -111,11 +111,9 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522

 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_64    64    /* Used for packet split */
-#define IGB_RXBUFFER_128   128   /* Used for packet split */
-#define IGB_RXBUFFER_1024  1024
-#define IGB_RXBUFFER_2048  2048
+#define IGB_RXBUFFER_512   512
 #define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN     IGB_RXBUFFER_512

 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE 16
@@ -221,7 +219,6 @@ struct igb_ring {
         struct {
             struct igb_rx_queue_stats rx_stats;
             struct u64_stats_sync rx_syncp;
-            u32 rx_buffer_len;
         };
     };
 };
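Net effect of the igb.h hunks above: the per-ring, runtime-selected rx_buffer_len is gone, and receive buffering is fixed at compile time, with a 512-byte header buffer per descriptor and packet data spilling into a half-page buffer. A minimal sketch of the resulting sizing, assuming only the constants from this patch; IGB_RX_PKT_LEN is a hypothetical name used here purely for illustration:

    /* Buffer sizing after this patch (sketch, not driver code). */
    #define IGB_RXBUFFER_512   512               /* from the hunk above */
    #define IGB_RXBUFFER_16384 16384             /* hardware maximum    */
    #define IGB_RX_HDR_LEN     IGB_RXBUFFER_512  /* fixed header buffer */

    #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
    #define IGB_RX_PKT_LEN IGB_RXBUFFER_16384    /* hypothetical: cap at HW max */
    #else
    #define IGB_RX_PKT_LEN (PAGE_SIZE / 2)       /* hypothetical: half a page   */
    #endif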
5 changes: 2 additions & 3 deletions trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1368,7 +1368,6 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
     rx_ring->count = IGB_DEFAULT_RXD;
     rx_ring->dev = &adapter->pdev->dev;
     rx_ring->netdev = adapter->netdev;
-    rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
     rx_ring->reg_idx = adapter->vfs_allocated_count;

     if (igb_setup_rx_resources(rx_ring)) {
@@ -1597,7 +1596,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
         /* unmap rx buffer, will be remapped by alloc_rx_buffers */
         dma_unmap_single(rx_ring->dev,
                          buffer_info->dma,
-                         rx_ring->rx_buffer_len,
+                         IGB_RX_HDR_LEN,
                          DMA_FROM_DEVICE);
         buffer_info->dma = 0;

@@ -1635,7 +1634,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
     struct igb_ring *tx_ring = &adapter->test_tx_ring;
     struct igb_ring *rx_ring = &adapter->test_rx_ring;
     int i, j, lc, good_cnt, ret_val = 0;
-    unsigned int size = 1024;
+    unsigned int size = IGB_RX_HDR_LEN;
     netdev_tx_t tx_ret_val;
     struct sk_buff *skb;

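The ethtool hunks all follow from one rule of the streaming DMA API: dma_unmap_single() must be given the same size that was passed to dma_map_single(). The header buffer is now always mapped with IGB_RX_HDR_LEN in igb_alloc_rx_buffers_adv() (see igb_main.c below), so the test-ring teardown has to unmap with the same constant. A minimal sketch of that pairing, with hypothetical helper names:

    /* Sketch only: example_map()/example_unmap() are illustrative names. */
    static dma_addr_t example_map(struct device *dev, void *data)
    {
        return dma_map_single(dev, data, IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
    }

    static void example_unmap(struct device *dev, dma_addr_t dma)
    {
        /* Unmap size must match the map size; with rx_buffer_len removed,
         * IGB_RX_HDR_LEN is the only correct value here. */
        dma_unmap_single(dev, dma, IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
    }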
102 changes: 24 additions & 78 deletions trunk/drivers/net/ethernet/intel/igb/igb_main.c
@@ -517,16 +517,14 @@ static void igb_dump(struct igb_adapter *adapter)
                     DUMP_PREFIX_ADDRESS,
                     16, 1,
                     phys_to_virt(buffer_info->dma),
-                    rx_ring->rx_buffer_len, true);
-                if (rx_ring->rx_buffer_len
-                    < IGB_RXBUFFER_1024)
-                    print_hex_dump(KERN_INFO, "",
-                        DUMP_PREFIX_ADDRESS,
-                        16, 1,
-                        phys_to_virt(
-                            buffer_info->page_dma +
-                            buffer_info->page_offset),
-                        PAGE_SIZE/2, true);
+                    IGB_RX_HDR_LEN, true);
+                print_hex_dump(KERN_INFO, "",
+                    DUMP_PREFIX_ADDRESS,
+                    16, 1,
+                    phys_to_virt(
+                        buffer_info->page_dma +
+                        buffer_info->page_offset),
+                    PAGE_SIZE/2, true);
             }
         }

@@ -707,7 +705,6 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
         ring->queue_index = i;
         ring->dev = &adapter->pdev->dev;
         ring->netdev = adapter->netdev;
-        ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
         ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
         /* set flag indicating ring supports SCTP checksum offload */
         if (adapter->hw.mac.type >= e1000_82576)
@@ -3049,22 +3046,13 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
     writel(0, ring->tail);

     /* set descriptor configuration */
-    if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
-        srrctl = ALIGN(ring->rx_buffer_len, 64) <<
-                 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+    srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-        srrctl |= IGB_RXBUFFER_16384 >>
-                  E1000_SRRCTL_BSIZEPKT_SHIFT;
+    srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 #else
-        srrctl |= (PAGE_SIZE / 2) >>
-                  E1000_SRRCTL_BSIZEPKT_SHIFT;
+    srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 #endif
-        srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-    } else {
-        srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
-                 E1000_SRRCTL_BSIZEPKT_SHIFT;
-        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-    }
+    srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
     if (hw->mac.type == e1000_82580)
         srrctl |= E1000_SRRCTL_TIMESTAMP;
     /* Only set Drop Enable if we are supporting multiple queues */
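Worked example of the SRRCTL field math above, assuming 4KB pages and the shift definitions from the igb headers (where E1000_SRRCTL_BSIZEHDRSIZE_SHIFT is a left shift of 2 and E1000_SRRCTL_BSIZEPKT_SHIFT a right shift of 10): 512 << 2 = 0x800, which is 512/64 = 8 placed in the 64-byte-unit header-size field at bit 8, and (4096/2) >> 10 = 2, a 2KB packet buffer expressed in 1KB units. A sketch of the value the simplified, branch-free path would program on such a system:

    /* Sketch, assuming PAGE_SIZE == 4096; mirrors the hunk above. */
    u32 srrctl = (512 << 2)                              /* 512B header buffer */
               | (2048 >> 10)                            /* 2KB packet buffer  */
               | E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; /* always split       */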
@@ -3268,7 +3256,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
         if (buffer_info->dma) {
             dma_unmap_single(rx_ring->dev,
                              buffer_info->dma,
-                             rx_ring->rx_buffer_len,
+                             IGB_RX_HDR_LEN,
                              DMA_FROM_DEVICE);
             buffer_info->dma = 0;
         }
@@ -4466,7 +4454,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
     struct igb_adapter *adapter = netdev_priv(netdev);
     struct pci_dev *pdev = adapter->pdev;
     int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-    u32 rx_buffer_len, i;

     if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
         dev_err(&pdev->dev, "Invalid MTU setting\n");
@@ -4485,40 +4472,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
     /* igb_down has a dependency on max_frame_size */
     adapter->max_frame_size = max_frame;

-    /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
-     * means we reserve 2 more, this pushes us to allocate from the next
-     * larger slab size.
-     * i.e. RXBUFFER_2048 --> size-4096 slab
-     */
-
-    if (adapter->hw.mac.type == e1000_82580)
-        max_frame += IGB_TS_HDR_LEN;
-
-    if (max_frame <= IGB_RXBUFFER_1024)
-        rx_buffer_len = IGB_RXBUFFER_1024;
-    else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
-        rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-    else
-        rx_buffer_len = IGB_RXBUFFER_128;
-
-    if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
-        (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
-        rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
-
-    if ((adapter->hw.mac.type == e1000_82580) &&
-        (rx_buffer_len == IGB_RXBUFFER_128))
-        rx_buffer_len += IGB_RXBUFFER_64;
-
     if (netif_running(netdev))
         igb_down(adapter);

     dev_info(&pdev->dev, "changing MTU from %d to %d\n",
              netdev->mtu, new_mtu);
     netdev->mtu = new_mtu;

-    for (i = 0; i < adapter->num_rx_queues; i++)
-        adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
-
     if (netif_running(netdev))
         igb_up(adapter);
     else
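One worked example of the max_frame computation this function keeps: at the standard 1500-byte MTU, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522, which is exactly MAXIMUM_ETHERNET_VLAN_SIZE from igb.h. With a fixed IGB_RX_HDR_LEN, an MTU change no longer selects a per-ring buffer length, which is why the whole rx_buffer_len ladder above could be deleted.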
@@ -5781,17 +5741,16 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,

     igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
-static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
-                               union e1000_adv_rx_desc *rx_desc)
+static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
 {
     /* HW will not DMA in data larger than the given buffer, even if it
      * parses the (NFS, of course) header to be larger.  In that case, it
      * fills the header buffer and spills the rest into the page.
      */
     u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
                E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-    if (hlen > rx_ring->rx_buffer_len)
-        hlen = rx_ring->rx_buffer_len;
+    if (hlen > IGB_RX_HDR_LEN)
+        hlen = IGB_RX_HDR_LEN;
     return hlen;
 }
@@ -5841,14 +5800,10 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,

         if (buffer_info->dma) {
             dma_unmap_single(dev, buffer_info->dma,
-                             rx_ring->rx_buffer_len,
+                             IGB_RX_HDR_LEN,
                              DMA_FROM_DEVICE);
             buffer_info->dma = 0;
-            if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
-                skb_put(skb, length);
-                goto send_up;
-            }
-            skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
+            skb_put(skb, igb_get_hlen(rx_desc));
         }

         if (length) {
@@ -5879,7 +5834,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
             next_buffer->dma = 0;
             goto next_desc;
         }
-send_up:
+
         if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
             dev_kfree_skb_irq(skb);
             goto next_desc;
@@ -5943,17 +5898,14 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
     struct igb_buffer *buffer_info;
     struct sk_buff *skb;
     unsigned int i;
-    int bufsz;

     i = rx_ring->next_to_use;
     buffer_info = &rx_ring->buffer_info[i];

-    bufsz = rx_ring->rx_buffer_len;
-
     while (cleaned_count--) {
         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

-        if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
+        if (!buffer_info->page_dma) {
             if (!buffer_info->page) {
                 buffer_info->page = netdev_alloc_page(netdev);
                 if (unlikely(!buffer_info->page)) {
@@ -5983,7 +5935,7 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)

         skb = buffer_info->skb;
         if (!skb) {
-            skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+            skb = netdev_alloc_skb_ip_align(netdev, IGB_RX_HDR_LEN);
             if (unlikely(!skb)) {
                 u64_stats_update_begin(&rx_ring->rx_syncp);
                 rx_ring->rx_stats.alloc_failed++;
@@ -5996,7 +5948,7 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
         if (!buffer_info->dma) {
             buffer_info->dma = dma_map_single(rx_ring->dev,
                                               skb->data,
-                                              bufsz,
+                                              IGB_RX_HDR_LEN,
                                               DMA_FROM_DEVICE);
             if (dma_mapping_error(rx_ring->dev,
                                   buffer_info->dma)) {
@@ -6009,14 +5961,8 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
         }
         /* Refresh the desc even if buffer_addrs didn't change because
          * each write-back erases this info. */
-        if (bufsz < IGB_RXBUFFER_1024) {
-            rx_desc->read.pkt_addr =
-                cpu_to_le64(buffer_info->page_dma);
-            rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
-        } else {
-            rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
-            rx_desc->read.hdr_addr = 0;
-        }
+        rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma);
+        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);

         i++;
         if (i == rx_ring->count)
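Taken together, the igb_main.c hunks leave a single, uniform per-descriptor receive flow. A condensed sketch, paraphrased from the hunks above rather than quoted verbatim:

    /* 1. Refill: every descriptor is programmed for header split. */
    rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);      /* 512B header buffer */
    rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); /* half-page data buffer */

    /* 2. Clean: the header bytes land in the skb's linear area... */
    dma_unmap_single(dev, buffer_info->dma, IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
    skb_put(skb, igb_get_hlen(rx_desc));

    /* 3. ...and any spill-over (length != 0) is attached from the page buffer. */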
