Commit

---
r: 290079
b: refs/heads/master
c: f990b79
h: refs/heads/master
i:
  290077: ff99e30
  290075: 83ac1e6
  290071: 4b68522
  290063: 834aea6
  290047: 25ab84a
v: v3
Alexander Duyck authored and Jeff Kirsher committed Feb 10, 2012
1 parent 3a1c8a1 commit 5d7b575
Showing 2 changed files with 94 additions and 63 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1d2024f61ec14bdb0c57a97a3fe73685abc2d198
+refs/heads/master: f990b79bc80ca7a23b8a6c33241c439072d0b85b
155 changes: 93 additions & 62 deletions trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1101,91 +1101,124 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
         writel(val, rx_ring->tail);
 }
 
+static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
+                                   struct ixgbe_rx_buffer *bi)
+{
+        struct sk_buff *skb = bi->skb;
+        dma_addr_t dma = bi->dma;
+
+        if (dma)
+                return true;
+
+        if (likely(!skb)) {
+                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                rx_ring->rx_buf_len);
+                bi->skb = skb;
+                if (!skb) {
+                        rx_ring->rx_stats.alloc_rx_buff_failed++;
+                        return false;
+                }
+
+                /* initialize skb for ring */
+                skb_record_rx_queue(skb, rx_ring->queue_index);
+        }
+
+        dma = dma_map_single(rx_ring->dev, skb->data,
+                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+
+        if (dma_mapping_error(rx_ring->dev, dma)) {
+                rx_ring->rx_stats.alloc_rx_buff_failed++;
+                return false;
+        }
+
+        bi->dma = dma;
+        return true;
+}
+
+static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
+                                    struct ixgbe_rx_buffer *bi)
+{
+        struct page *page = bi->page;
+        dma_addr_t page_dma = bi->page_dma;
+        unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+        if (page_dma)
+                return true;
+
+        if (!page) {
+                page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+                bi->page = page;
+                if (unlikely(!page)) {
+                        rx_ring->rx_stats.alloc_rx_page_failed++;
+                        return false;
+                }
+        }
+
+        page_dma = dma_map_page(rx_ring->dev, page,
+                                page_offset, PAGE_SIZE / 2,
+                                DMA_FROM_DEVICE);
+
+        if (dma_mapping_error(rx_ring->dev, page_dma)) {
+                rx_ring->rx_stats.alloc_rx_page_failed++;
+                return false;
+        }
+
+        bi->page_dma = page_dma;
+        bi->page_offset = page_offset;
+        return true;
+}
+
 /**
- * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
         union ixgbe_adv_rx_desc *rx_desc;
         struct ixgbe_rx_buffer *bi;
-        struct sk_buff *skb;
         u16 i = rx_ring->next_to_use;
 
-        /* do nothing if no valid netdev defined */
-        if (!rx_ring->netdev)
+        /* nothing to do or no valid netdev defined */
+        if (!cleaned_count || !rx_ring->netdev)
                 return;
 
-        while (cleaned_count--) {
-                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
-                bi = &rx_ring->rx_buffer_info[i];
-                skb = bi->skb;
-
-                if (!skb) {
-                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                        rx_ring->rx_buf_len);
-                        if (!skb) {
-                                rx_ring->rx_stats.alloc_rx_buff_failed++;
-                                goto no_buffers;
-                        }
-                        /* initialize queue mapping */
-                        skb_record_rx_queue(skb, rx_ring->queue_index);
-                        bi->skb = skb;
-                }
+        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+        bi = &rx_ring->rx_buffer_info[i];
+        i -= rx_ring->count;
 
-                if (!bi->dma) {
-                        bi->dma = dma_map_single(rx_ring->dev,
-                                                 skb->data,
-                                                 rx_ring->rx_buf_len,
-                                                 DMA_FROM_DEVICE);
-                        if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                                rx_ring->rx_stats.alloc_rx_buff_failed++;
-                                bi->dma = 0;
-                                goto no_buffers;
-                        }
-                }
+        while (cleaned_count--) {
+                if (!ixgbe_alloc_mapped_skb(rx_ring, bi))
+                        break;
 
-                /* Refresh the desc even if buffer_addrs didn't change
-                 * because each write-back erases this info. */
                 if (ring_is_ps_enabled(rx_ring)) {
-                        if (!bi->page) {
-                                bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-                                if (!bi->page) {
-                                        rx_ring->rx_stats.alloc_rx_page_failed++;
-                                        goto no_buffers;
-                                }
-                        }
+                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 
-                        if (!bi->page_dma) {
-                                /* use a half page if we're re-using */
-                                bi->page_offset ^= PAGE_SIZE / 2;
-                                bi->page_dma = dma_map_page(rx_ring->dev,
-                                                            bi->page,
-                                                            bi->page_offset,
-                                                            PAGE_SIZE / 2,
-                                                            DMA_FROM_DEVICE);
-                                if (dma_mapping_error(rx_ring->dev,
-                                                      bi->page_dma)) {
-                                        rx_ring->rx_stats.alloc_rx_page_failed++;
-                                        bi->page_dma = 0;
-                                        goto no_buffers;
-                                }
-                        }
+                        if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+                                break;
 
+                        /* Refresh the desc even if buffer_addrs didn't change
+                         * because each write-back erases this info. */
                         rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                 } else {
                         rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-                        rx_desc->read.hdr_addr = 0;
                 }
 
+                rx_desc++;
+                bi++;
                 i++;
-                if (i == rx_ring->count)
-                        i = 0;
+                if (unlikely(!i)) {
+                        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, 0);
+                        bi = rx_ring->rx_buffer_info;
+                        i -= rx_ring->count;
+                }
+
+                /* clear the hdr_addr for the next_to_use descriptor */
+                rx_desc->read.hdr_addr = 0;
         }
 
-no_buffers:
+        i += rx_ring->count;
+
         if (rx_ring->next_to_use != i) {
                 rx_ring->next_to_use = i;
                 ixgbe_release_rx_desc(rx_ring, i);
@@ -1593,8 +1626,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                 budget--;
 next_desc:
-                rx_desc->wb.upper.status_error = 0;
-
                 if (!budget)
                         break;
 
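Two ideas in the rewritten loop are easy to miss when read as a diff: the new allocator helpers return a bool so the refill loop can simply break when an skb allocation or DMA mapping fails (replacing the old goto no_buffers unwinding), and the index i is biased by subtracting rx_ring->count so that a plain !i test detects the wrap back to descriptor zero, with the count added back at the end to recover the real next_to_use value. The standalone sketch below illustrates only that control flow; demo_ring, demo_alloc_buffer and demo_refill are invented names for this example and are not part of the driver, so treat it as a reading aid for the patch rather than ixgbe code.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8

struct demo_ring {
        unsigned short next_to_use;
        unsigned short count;
        int buffers[DEMO_RING_SIZE];        /* stands in for rx_buffer_info */
};

/* Pretend allocator: succeeds until *budget runs out, then fails the way a
 * failed skb allocation or DMA mapping would in the real helpers. */
static bool demo_alloc_buffer(int *slot, int *budget)
{
        if (*budget <= 0)
                return false;
        (*budget)--;
        *slot = 1;        /* mark the slot as filled */
        return true;
}

static void demo_refill(struct demo_ring *ring, unsigned short cleaned_count,
                        int *budget)
{
        int *slot;
        unsigned short i = ring->next_to_use;

        if (!cleaned_count)
                return;

        slot = &ring->buffers[i];
        i -= ring->count;        /* bias i so it reaches zero exactly at the wrap */

        while (cleaned_count--) {
                /* on failure just stop; the update below still publishes
                 * everything filled so far */
                if (!demo_alloc_buffer(slot, budget))
                        break;

                slot++;
                i++;
                if (!i) {        /* walked past the last slot in the ring */
                        slot = ring->buffers;
                        i -= ring->count;
                }
        }

        i += ring->count;        /* undo the bias: the real next-to-use index */

        if (ring->next_to_use != i) {
                ring->next_to_use = i;
                /* the driver would write the new index to the tail register here */
        }
}

int main(void)
{
        struct demo_ring ring = { .next_to_use = 6, .count = DEMO_RING_SIZE };
        int budget = 4;        /* allow four allocations, then fail */

        demo_refill(&ring, 6, &budget);
        printf("next_to_use = %u\n", (unsigned)ring.next_to_use);
        return 0;
}

Run as-is the sketch prints next_to_use = 2: four of the six requested slots are filled starting at index 6, wrapping through 7, 0 and 1 before the simulated allocation failure breaks out of the loop, and the final index still covers everything that was filled.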
