iavf: allow null RX descriptors
In some circumstances, the hardware can hand us a null receive
descriptor, with no data attached but otherwise valid. Unfortunately,
the driver was ill-equipped to handle such an event, and would stop
processing packets at that point.

To fix this, use the Descriptor Done bit instead of the size to
determine whether or not a descriptor is ready to be processed. Add some
checks to allow for unused buffers.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
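
A minimal user-space sketch of the control flow this patch establishes, for illustration only; all names here (rx_desc, rx_buffer, RXD_DD, LEN_PBUF_MASK, get_rx_buffer, clean_one_descriptor) are simplified stand-ins, not the kernel's definitions. The point it demonstrates: readiness is decided by the DD (Descriptor Done) bit rather than the packet length, so a zero-length descriptor is consumed and the ring keeps moving.

#include <stdbool.h>
#include <stdio.h>

struct rx_desc { unsigned long long qword1; };   /* stand-in descriptor */
struct rx_buffer { int pagecnt_bias; };          /* stand-in buffer state */

#define RXD_DD         (1ULL << 0)               /* assumed DD status bit */
#define LEN_PBUF_MASK  (0x3FFFULL << 38)         /* assumed length field */
#define LEN_PBUF_SHIFT 38

static struct rx_buffer ring_buf;

/* Like iavf_get_rx_buffer() after the patch: no data, no buffer. */
static struct rx_buffer *get_rx_buffer(unsigned int size)
{
	return size ? &ring_buf : NULL;
}

static bool clean_one_descriptor(const struct rx_desc *desc)
{
	/* Readiness now keys off DD, not off a nonzero length. */
	if (!(desc->qword1 & RXD_DD))
		return false;            /* not written back yet; stop */

	unsigned int size = (desc->qword1 & LEN_PBUF_MASK) >> LEN_PBUF_SHIFT;
	struct rx_buffer *buf = get_rx_buffer(size);

	if (!buf)
		printf("null RX descriptor: no data, ring keeps moving\n");
	/* ...skb build and buffer put are NULL-tolerant downstream... */
	return true;
}

int main(void)
{
	struct rx_desc null_desc = { .qword1 = RXD_DD }; /* DD set, size 0 */
	clean_one_descriptor(&null_desc);
	return 0;
}

Built with any C99 compiler (e.g. cc -std=c99 sketch.c), the null descriptor is consumed rather than halting the loop, which is the behavior the patch restores in the driver.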
Mitch Williams authored and Jeff Kirsher committed Jun 17, 2019
1 parent 68dfe63 commit efa14c3
Showing 1 changed file with 18 additions and 3 deletions.
drivers/net/ethernet/intel/iavf/iavf_txrx.c: 18 additions & 3 deletions
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
 	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
 #endif
 
+	if (!size)
+		return;
+
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
 			rx_buffer->page_offset, size, truesize);
 
@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
 	struct iavf_rx_buffer *rx_buffer;
 
+	if (!size)
+		return NULL;
+
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	prefetchw(rx_buffer->page);
 
@@ -1299,6 +1305,8 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
 	unsigned int headlen;
 	struct sk_buff *skb;
 
+	if (!rx_buffer)
+		return NULL;
 	/* prefetch first cache line of first page */
 	prefetch(va);
 #if L1_CACHE_BYTES < 128
@@ -1363,6 +1371,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 #endif
 	struct sk_buff *skb;
 
+	if (!rx_buffer)
+		return NULL;
 	/* prefetch first cache line of first page */
 	prefetch(va);
 #if L1_CACHE_BYTES < 128
@@ -1398,6 +1408,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
 			       struct iavf_rx_buffer *rx_buffer)
 {
+	if (!rx_buffer)
+		return;
+
 	if (iavf_can_reuse_rx_page(rx_buffer)) {
 		/* hand second half of page back to the ring */
 		iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1509,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+			break;
 
 		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
 		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
-		if (!size)
-			break;
 
 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
 		rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1516,7 +1530,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buff_failed++;
-			rx_buffer->pagecnt_bias++;
+			if (rx_buffer)
+				rx_buffer->pagecnt_bias++;
 			break;
 		}
 