Commit d388408
---
r: 327561
b: refs/heads/master
c: 18806c9
h: refs/heads/master
i:
  327559: f77b815
v: v3
Alexander Duyck authored and Peter P Waskiewicz Jr committed Aug 16, 2012
1 parent f3a0c78 commit d388408
Showing 2 changed files with 90 additions and 78 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cf3fe7aca03e14db107d649c5699a48bec054583
+refs/heads/master: 18806c9ea28320d5368fd43318506a7d33cc952c
166 changes: 89 additions & 77 deletions trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1693,6 +1693,89 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	return true;
 }
 
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+					     union ixgbe_adv_rx_desc *rx_desc)
+{
+	struct ixgbe_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	skb = rx_buffer->skb;
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) +
+				  rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						IXGBE_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return NULL;
+		}
+
+		/*
+		 * we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+
+		/*
+		 * Delay unmapping of the first packet. It carries the
+		 * header information, HW may still access the header
+		 * after the writeback. Only unmap it when EOP is
+		 * reached
+		 */
+		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+			goto dma_sync;
+
+		IXGBE_CB(skb)->dma = rx_buffer->dma;
+	} else {
+		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+			ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+		/* we are reusing so sync this buffer for CPU use */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+
+	/* pull page into skb */
+	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+		/* the page has been released from the ring */
+		IXGBE_CB(skb)->page_released = true;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+			       ixgbe_rx_pg_size(rx_ring),
+			       DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->skb = NULL;
+	rx_buffer->dma = 0;
+	rx_buffer->page = NULL;
+
+	return skb;
+}
+
 /**
  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @q_vector: structure containing interrupt and ring information
@@ -1718,21 +1801,16 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	do {
-		struct ixgbe_rx_buffer *rx_buffer;
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct sk_buff *skb;
-		struct page *page;
-		u16 ntc;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
 			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
-		ntc = rx_ring->next_to_clean;
-		rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-		rx_buffer = &rx_ring->rx_buffer_info[ntc];
+		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
 		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 			break;
@@ -1744,78 +1822,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		rmb();
 
-		page = rx_buffer->page;
-		prefetchw(page);
-
-		skb = rx_buffer->skb;
-
-		if (likely(!skb)) {
-			void *page_addr = page_address(page) +
-					  rx_buffer->page_offset;
-
-			/* prefetch first cache line of first page */
-			prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-			prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
-			/* allocate a skb to store the frags */
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							IXGBE_RX_HDR_SIZE);
-			if (unlikely(!skb)) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				break;
-			}
-
-			/*
-			 * we will be copying header into skb->data in
-			 * pskb_may_pull so it is in our interest to prefetch
-			 * it now to avoid a possible cache miss
-			 */
-			prefetchw(skb->data);
-
-			/*
-			 * Delay unmapping of the first packet. It carries the
-			 * header information, HW may still access the header
-			 * after the writeback. Only unmap it when EOP is
-			 * reached
-			 */
-			if (likely(ixgbe_test_staterr(rx_desc,
-						      IXGBE_RXD_STAT_EOP)))
-				goto dma_sync;
+		/* retrieve a buffer from the ring */
+		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-			IXGBE_CB(skb)->dma = rx_buffer->dma;
-		} else {
-			if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
-				ixgbe_dma_sync_frag(rx_ring, skb);
-
-dma_sync:
-			/* we are reusing so sync this buffer for CPU use */
-			dma_sync_single_range_for_cpu(rx_ring->dev,
-						      rx_buffer->dma,
-						      rx_buffer->page_offset,
-						      ixgbe_rx_bufsz(rx_ring),
-						      DMA_FROM_DEVICE);
-		}
-
-		/* pull page into skb */
-		if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
-			/* hand second half of page back to the ring */
-			ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-		} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-			/* the page has been released from the ring */
-			IXGBE_CB(skb)->page_released = true;
-		} else {
-			/* we are not reusing the buffer so unmap it */
-			dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-				       ixgbe_rx_pg_size(rx_ring),
-				       DMA_FROM_DEVICE);
-		}
-
-		/* clear contents of buffer_info */
-		rx_buffer->skb = NULL;
-		rx_buffer->dma = 0;
-		rx_buffer->page = NULL;
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
 
 		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
 
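Taken together, the two ixgbe_clean_rx_irq() hunks above reduce the loop body to the outline sketched below. This is a condensed reader's sketch of the post-commit loop assembled from the diff, not the full driver source: statistics handling and the rest of the loop are elided, and more_work is a placeholder name standing in for the real termination test, which is outside this diff.

	do {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* refill the ring in batches rather than one buffer at a time */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* read no other descriptor fields until DD has been observed */
		rmb();

		/*
		 * All per-buffer work (prefetching, skb allocation, DMA
		 * sync/unmap, page reuse and buffer_info cleanup) is now
		 * inside ixgbe_fetch_rx_buffer(); NULL means the skb
		 * allocation failed, so the loop stops and the ring is
		 * retried on the next poll.
		 */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
		if (!skb)
			break;

		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
		/* ... remainder of ixgbe_clean_rx_irq() is unchanged ... */
	} while (more_work);	/* placeholder: the real loop condition is not part of this diff */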
