---
r: 362625
b: refs/heads/master
c: f9d40f6
h: refs/heads/master
i:
  362623: 1185774
v: v3
Alexander Duyck authored and David S. Miller committed Apr 18, 2013
1 parent 972b2b1 commit c80a5b9
Showing 3 changed files with 5 additions and 115 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0fecd7504614b0bc98b79e31daffec040ec4e49f
+refs/heads/master: f9d40f6a9921cc7d9385f64362314054e22152bd
8 changes: 0 additions & 8 deletions trunk/drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
 	IGB_RING_FLAG_TX_CTX_IDX,
 	IGB_RING_FLAG_TX_DETECT_HANG
 };
 
-#define ring_uses_build_skb(ring) \
-	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD	(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)	\
110 changes: 4 additions & 106 deletions trunk/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-				  struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-	(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-	 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-	/* set build_skb flag */
-	if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-		set_ring_build_skb_enabled(rx_ring);
-	else
-		clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = adapter->rx_ring[i];
-		igb_set_rx_buffer_len(adapter, rx_ring);
-		igb_configure_rx_ring(adapter, rx_ring);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-					   union e1000_adv_rx_desc *rx_desc)
-{
-	struct igb_rx_buffer *rx_buffer;
-	struct sk_buff *skb;
-	struct page *page;
-	void *page_addr;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = IGB_RX_BUFSZ;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(NET_SKB_PAD +
-					       NET_IP_ALIGN +
-					       size);
-#endif
-
-	/* If we spanned a buffer we have a huge mess so test for it */
-	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-	page = rx_buffer->page;
-	prefetchw(page);
-
-	page_addr = page_address(page) + rx_buffer->page_offset;
-
-	/* prefetch first cache line of first page */
-	prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-	prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-	/* build an skb to around the page buffer */
-	skb = build_skb(page_addr, truesize);
-	if (unlikely(!skb)) {
-		rx_ring->rx_stats.alloc_failed++;
-		return NULL;
-	}
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IGB_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
-	/* update pointers within the skb to store the data */
-	skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-	__skb_put(skb, size);
-
-	/* pull timestamp out of packet data */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-		__skb_pull(skb, IGB_TS_HDR_LEN);
-	}
-
-	if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-		/* hand second half of page back to the ring */
-		igb_reuse_rx_page(rx_ring, rx_buffer);
-	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
-	/* clear contents of buffer_info */
-	rx_buffer->dma = 0;
-	rx_buffer->page = NULL;
-
-	return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 					   union e1000_adv_rx_desc *rx_desc,
 					   struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		rmb();
 
 		/* retrieve a buffer from the ring */
-		if (ring_uses_build_skb(rx_ring))
-			skb = igb_build_rx_buffer(rx_ring, rx_desc);
-		else
-			skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-	if (ring_uses_build_skb(rx_ring))
-		return NET_SKB_PAD + NET_IP_ALIGN;
-	else
-		return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-						     bi->page_offset +
-						     igb_rx_offset(rx_ring));
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
 		rx_desc++;
 		bi++;
