e1000: Redo netpoll fix to address community concerns
The original suggested fix for netpoll was found to be racy on SMP
kernels. While it is highly unlikely that this race would ever be seen
in the real world under current netpoll usage models, we implemented
this updated fix to address those concerns.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
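
[Editor's note] The heart of the fix is in the second hunk of the diff
below: e1000_clean() now always serializes Tx cleanup with a trylock
instead of branching on the queue count. A minimal standalone sketch of
that pattern follows; try_clean_tx() is a hypothetical wrapper, but the
fields and calls mirror the driver's own.

/* Serialize cleaning of tx_ring[0] across CPUs. spin_trylock()
 * returns nonzero only if the lock was taken; on failure, another
 * CPU already holds it and is cleaning the ring, so skipping the
 * clean here is safe (and cheaper than spinning). */
static int try_clean_tx(struct e1000_adapter *adapter)
{
	int tx_cleaned = 0;

	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter,
						&adapter->tx_ring[0]);
		spin_unlock(&adapter->tx_queue_lock);
	}
	return tx_cleaned;
}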
Auke Kok authored and committed on Jul 14, 2006
1 parent 22e1170 · commit d3d9e48
Showing 1 changed file with 15 additions and 22 deletions.
drivers/net/e1000/e1000_main.c
@@ -3387,8 +3387,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-		__netif_rx_schedule(&adapter->polling_netdev[0]);
+	if (likely(netif_rx_schedule_prep(netdev)))
+		__netif_rx_schedule(netdev);
 	else
 		e1000_irq_enable(adapter);
 #else
@@ -3431,42 +3431,34 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
 	struct e1000_adapter *adapter;
 	int work_to_do = min(*budget, poll_dev->quota);
-	int tx_cleaned = 0, i = 0, work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
 	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(adapter->netdev))
+	if (!netif_carrier_ok(poll_dev))
 		goto quit_polling;
 
-	while (poll_dev != &adapter->polling_netdev[i]) {
-		i++;
-		BUG_ON(i == adapter->num_rx_queues);
+	/* e1000_clean is called per-cpu. This lock protects
+	 * tx_ring[0] from being cleaned by multiple cpus
+	 * simultaneously. A failure obtaining the lock means
+	 * tx_ring[0] is currently being cleaned anyway. */
+	if (spin_trylock(&adapter->tx_queue_lock)) {
+		tx_cleaned = e1000_clean_tx_irq(adapter,
+						&adapter->tx_ring[0]);
+		spin_unlock(&adapter->tx_queue_lock);
 	}
 
-	if (likely(adapter->num_tx_queues == 1)) {
-		/* e1000_clean is called per-cpu. This lock protects
-		 * tx_ring[0] from being cleaned by multiple cpus
-		 * simultaneously. A failure obtaining the lock means
-		 * tx_ring[0] is currently being cleaned anyway. */
-		if (spin_trylock(&adapter->tx_queue_lock)) {
-			tx_cleaned = e1000_clean_tx_irq(adapter,
-							&adapter->tx_ring[0]);
-			spin_unlock(&adapter->tx_queue_lock);
-		}
-	} else
-		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-	adapter->clean_rx(adapter, &adapter->rx_ring[i],
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
 			  &work_done, work_to_do);
 
 	*budget -= work_done;
 	poll_dev->quota -= work_done;
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
 	if ((!tx_cleaned && (work_done == 0)) ||
-	   !netif_running(adapter->netdev)) {
+	   !netif_running(poll_dev)) {
 quit_polling:
 		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
@@ -4752,6 +4744,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
 	e1000_clean_tx_irq(adapter, adapter->tx_ring);
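
[Editor's note] For context, a hedged skeleton of the 2.6.x NAPI
->poll() contract the hunks above rely on; my_poll(), my_clean_rx(),
and my_irq_enable() are hypothetical names, and the budget/quota
bookkeeping mirrors e1000_clean() itself.

/* The interrupt handler masks the device, then calls
 * netif_rx_schedule_prep()/__netif_rx_schedule() to enter polling
 * mode; ->poll() later runs in softirq context, possibly on several
 * CPUs at once for different devices. */
static int my_poll(struct net_device *poll_dev, int *budget)
{
	/* Never process more packets than either limit allows. */
	int work_to_do = min(*budget, poll_dev->quota);
	int work_done = 0;

	my_clean_rx(poll_dev, &work_done, work_to_do);

	/* Charge the work against both the global and per-device quota. */
	*budget -= work_done;
	poll_dev->quota -= work_done;

	if (work_done == 0) {
		/* No work left: leave polling mode, then unmask the IRQ. */
		netif_rx_complete(poll_dev);
		my_irq_enable(poll_dev);
		return 0;	/* done; NAPI removes us from the poll list */
	}
	return 1;		/* more work pending; poll again */
}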
