Merge branch 'sfc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc

Ben Hutchings says:

====================
Some fixes that should go into 3.9:

1. Fix packet corruption when using non-coherent RX DMA buffers.
2. Fix occasional watchdog misfiring when changing MTU or ring size.

These are longstanding bugs and should be fixed in stable as well, but
I'd like to review other recent fixes first and send a separate request
for stable inclusion.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Feb 26, 2013
2 parents 1cef935 + 29c69a4 commit 3647d34
Showing 3 changed files with 30 additions and 15 deletions.
16 changes: 12 additions & 4 deletions drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 			  tx_queue->txd.entries);
 	}
 
+	efx_device_detach_sync(efx);
 	efx_stop_all(efx);
 	efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 
 	efx_start_interrupts(efx, true);
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
 	efx_flush_all(efx);
 
-	/* Stop the kernel transmit interface late, so the watchdog
-	 * timer isn't ticking over the flush */
+	/* Stop the kernel transmit interface. This is only valid if
+	 * the device is stopped or detached; otherwise the watchdog
+	 * may fire immediately.
+	 */
+	WARN_ON(netif_running(efx->net_dev) &&
+		netif_device_present(efx->net_dev));
 	netif_tx_disable(efx->net_dev);
 
 	efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
 
-	efx_stop_all(efx);
-
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+
 	mutex_lock(&efx->mac_lock);
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return 0;
 }
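The watchdog half of the fix is about ordering: detach the net device first, so that netif_device_present() reads false and the TX watchdog (which only fires for a device that is present and running) stays quiet while the queues are torn down, then reattach only after the datapath is restarted. efx_device_detach_sync() is an sfc helper defined in net_driver.h; as a hedged sketch, an equivalent built from only standard netdev APIs (the helper name here is illustrative, not the driver's) could look like:

/* Sketch: detach a net device so the TX watchdog ignores its
 * stopped queues during reconfiguration.
 */
static inline void my_device_detach_sync(struct net_device *dev)
{
	/* Freeze TX so nothing is mid-flight in ndo_start_xmit()
	 * once netif_device_present() becomes false; after this,
	 * the stack neither transmits nor arms the TX watchdog.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

This is also why efx_stop_all() can now WARN_ON a device that is still running and present: calling it without detaching first is exactly the case where the watchdog could fire mid-reconfiguration.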
4 changes: 3 additions & 1 deletion drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *	Will be %NULL if the buffer slot is currently free.
  * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
  *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
 		struct sk_buff *skb;
 		struct page *page;
 	} u;
-	unsigned int len;
+	u16 page_offset;
+	u16 len;
 	u16 flags;
 };
 #define EFX_RX_BUF_PAGE	0x0001
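Putting the hunks together: the RX buffer descriptor now carries an explicit page offset instead of deriving it from the low bits of the DMA address, and using two u16 fields in place of one unsigned int means the struct does not grow. The leading dma_addr member below is an assumption taken from the surrounding source; everything else reads straight off the diff:

struct efx_rx_buffer {
	dma_addr_t dma_addr;		/* assumed first member */
	union {
		struct sk_buff *skb;	/* valid iff !(flags & EFX_RX_BUF_PAGE) */
		struct page *page;	/* valid iff flags & EFX_RX_BUF_PAGE */
	} u;
	u16 page_offset;		/* offset within u.page */
	u16 len;			/* buffer length, in bytes */
	u16 flags;			/* EFX_RX_BUF_* state bits */
};

A u16 offset is sufficient because the offset stays within a single page, so it never exceeds PAGE_SIZE - 1.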
25 changes: 15 additions & 10 deletions drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
 					     struct efx_rx_buffer *buf)
 {
-	/* Offset is always within one page, so we don't need to consider
-	 * the page order.
-	 */
-	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-	       efx->type->rx_buffer_hash_size;
+	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
+	unsigned int page_offset;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
 
 	split:
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
+		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		/* Use the second half of the page */
 		get_page(page);
 		dma_addr += (PAGE_SIZE >> 1);
+		page_offset += (PAGE_SIZE >> 1);
 		++count;
 		goto split;
 	}
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.
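The corruption fix follows the streaming-DMA ownership rule: after the device writes into a mapped buffer, the CPU must not read it until the buffer is either unmapped or handed back with dma_sync_single_for_cpu(); on non-cache-coherent systems, or when swiotlb bounce-buffering is in effect, skipping that step leaves the CPU reading stale bytes. Here the page must stay mapped while its other half is still owned by the NIC, so only the used length is synced. A self-contained sketch of the pattern (the device, mapping, and function names are illustrative, not sfc code):

#include <linux/dma-mapping.h>

/* Sketch: hand a received buffer back to the CPU without
 * unmapping it, so the rest of the mapping can stay with the
 * device (e.g. the other half of a split page).
 */
static void rx_consume(struct device *dev, dma_addr_t dma_addr,
		       void *va, unsigned int used_len)
{
	/* Transfer ownership of the first used_len bytes to the CPU;
	 * on a bounced or non-coherent mapping this copies/invalidates
	 * so the CPU sees what the device actually wrote.
	 */
	dma_sync_single_for_cpu(dev, dma_addr, used_len, DMA_FROM_DEVICE);

	/* va[0..used_len) is now safe to read from the CPU. */
}

Passing used_len == 0, as efx_fini_rx_buffer() does above, skips the sync entirely, which is fine because no CPU read of the packet data follows on that path.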
