ixgbe: Have the CPU take ownership of the buffers sooner
This patch makes it so that we will always have ownership of the buffers by
the time we get to ixgbe_add_rx_frag. This is necessary as I am planning to
add a copy-break to ixgbe_add_rx_frag, and in order for that to function
correctly we need the CPU to have ownership of the buffer.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Alexander Duyck authored and Peter P Waskiewicz Jr committed Aug 16, 2012
1 parent 09816fb commit 42073d9
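
The "ownership" the commit message refers to is the streaming DMA mapping
contract: while an Rx buffer is mapped for the device, the NIC owns it and
the CPU must not read its contents; dma_sync_single_range_for_cpu() hands a
mapped range to the CPU, and dma_sync_single_range_for_device() hands it
back before the hardware may DMA into it again. Below is a minimal sketch of
that handoff, using a hypothetical my_rx_buffer structure and helper names
rather than the actual ixgbe ring types:

#include <linux/dma-mapping.h>

/* illustrative only -- not an ixgbe structure */
struct my_rx_buffer {
	struct page *page;
	dma_addr_t dma;		/* streaming mapping of the page */
	unsigned int page_offset;
};

/* CPU takes ownership: must precede any read of the buffer contents */
static void my_rx_take_ownership(struct device *dev,
				 struct my_rx_buffer *rxb,
				 unsigned int bufsz)
{
	dma_sync_single_range_for_cpu(dev, rxb->dma, rxb->page_offset,
				      bufsz, DMA_FROM_DEVICE);
}

/* device takes ownership back: must precede reposting the buffer */
static void my_rx_give_back(struct device *dev,
			    struct my_rx_buffer *rxb,
			    unsigned int bufsz)
{
	dma_sync_single_range_for_device(dev, rxb->dma, rxb->page_offset,
					 bufsz, DMA_FROM_DEVICE);
}

The patch below moves the _for_cpu step out of ixgbe_cleanup_headers() and
into the main receive path, so ownership is already established by the time
ixgbe_add_rx_frag() runs.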
Showing 1 changed file with 38 additions and 14 deletions.
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (38 additions, 14 deletions)
@@ -1457,6 +1457,36 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 	return true;
 }
 
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	/* if the page was released unmap it, else just sync our portion */
+	if (unlikely(IXGBE_CB(skb)->page_released)) {
+		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		IXGBE_CB(skb)->page_released = false;
+	} else {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      frag->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+	IXGBE_CB(skb)->dma = 0;
+}
+
 /**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1484,20 +1514,6 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	unsigned char *va;
 	unsigned int pull_len;
 
-	/* if the page was released unmap it, else just sync our portion */
-	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		IXGBE_CB(skb)->page_released = false;
-	} else {
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      IXGBE_CB(skb)->dma,
-					      frag->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
-					      DMA_FROM_DEVICE);
-	}
-	IXGBE_CB(skb)->dma = 0;
-
 	/* verify that the packet does not have any known errors */
 	if (unlikely(ixgbe_test_staterr(rx_desc,
 					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
@@ -1742,8 +1758,16 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 * after the writeback.  Only unmap it when EOP is
 		 * reached
 		 */
+		if (likely(ixgbe_test_staterr(rx_desc,
+					      IXGBE_RXD_STAT_EOP)))
+			goto dma_sync;
+
 		IXGBE_CB(skb)->dma = rx_buffer->dma;
 	} else {
+		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+			ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
 		/* we are reusing so sync this buffer for CPU use */
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_buffer->dma,
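
For context, a copy-break is the follow-up optimization the commit message
mentions: frames below some size threshold are copied into the skb so the
page-backed buffer can be recycled immediately instead of being attached as
a fragment. The copy itself is why CPU ownership must come first -- memcpy()
reads the DMA buffer. A hypothetical sketch follows; the threshold, helper
name, and placement are illustrative only, and the actual copy-break patch
is not part of this commit:

#include <linux/skbuff.h>

#define MY_RX_COPYBREAK	256	/* illustrative threshold, in bytes */

/*
 * Returns true if the frame was small enough to copy into the skb, in
 * which case the page can be reused for a future receive.  Safe only
 * after dma_sync_single_range_for_cpu() has run on this range.
 */
static bool my_rx_copy_break(struct sk_buff *skb, struct page *page,
			     unsigned int offset, unsigned int size)
{
	if (size > MY_RX_COPYBREAK)
		return false;	/* large frame: attach the page as a frag */

	/* small frame: copy it out so the page stays with the ring */
	memcpy(__skb_put(skb, size), page_address(page) + offset, size);
	return true;
}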
