forcedeth: napi - handle all processing
The napi poll routine has been modified to handle all interrupt events
and process them accordingly. Therefore, the ISR will now only schedule
the napi poll and disable all interrupts, instead of just disabling the
rx interrupt.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ayaz Abdulla authored and David S. Miller committed Mar 10, 2009
1 parent 33912e7 commit f27e6f3
Showing 1 changed file with 60 additions and 38 deletions.
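
For orientation before the diff: a condensed sketch of the interrupt
handler's control flow after this patch, under CONFIG_FORCEDETH_NAPI. This
is a paraphrase, not the verbatim driver code — the function name is
hypothetical, and np->events is assumed to have been latched from the
status register already, as in the full handler; np, base, get_hwbase()
and the register names are the driver's own, taken from the diff below.

	/* hypothetical condensed form of the post-patch ISR */
	static irqreturn_t nv_nic_irq_sketch(int irq, void *data)
	{
		struct net_device *dev = data;
		struct fe_priv *np = netdev_priv(dev);
		u8 __iomem *base = get_hwbase(dev);

		if (!(np->events & np->irqmask))
			return IRQ_NONE;	/* not our interrupt */

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		napi_schedule(&np->napi);	/* defer tx/rx/link/error work */

		/* mask ALL interrupt sources, not just NVREG_IRQ_RX_ALL as
		 * before; msix is not enabled together with napi, so one
		 * register write covers every source */
		writel(0, base + NvRegIrqMask);
		spin_unlock(&np->lock);

		return IRQ_HANDLED;
	}
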
drivers/net/forcedeth.c
@@ -3440,25 +3440,22 @@ static irqreturn_t nv_nic_irq(int foo, void *data)

nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
nv_tx_done(dev, np->tx_ring_size);
spin_unlock(&np->lock);
napi_schedule(&np->napi);

#ifdef CONFIG_FORCEDETH_NAPI
if (np->events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur irq's
(msix not enabled with napi) */
writel(0, base + NvRegIrqMask);

/* Disable furthur receive irq's */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
spin_unlock(&np->lock);

if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
spin_unlock(&np->lock);
}
return IRQ_HANDLED;
#else
spin_lock(&np->lock);
nv_tx_done(dev, np->tx_ring_size);
spin_unlock(&np->lock);

if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx(dev))) {
spin_lock(&np->lock);
@@ -3467,7 +3464,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
spin_unlock(&np->lock);
}
}
#endif

if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3513,7 +3510,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}

#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

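The change above, reduced to the interrupt-mask handshake. This is a
paraphrase of the removed and added lines, with the locking and the
MSI-X leg of the removed if/else elided (the non-MSI-X path is shown);
nv_nic_irq_optimized below receives the identical treatment.

	/* before: on an rx event, mask only the rx interrupt sources */
	if (np->events & NVREG_IRQ_RX_ALL) {
		np->irqmask &= ~NVREG_IRQ_RX_ALL;
		writel(np->irqmask, base + NvRegIrqMask);
	}

	/* after: mask every source unconditionally; the napi poll routine
	 * now owns all event processing and the re-enable */
	writel(0, base + NvRegIrqMask);
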
@@ -3548,25 +3545,22 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)

nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
spin_unlock(&np->lock);
napi_schedule(&np->napi);

#ifdef CONFIG_FORCEDETH_NAPI
if (np->events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur irq's
(msix not enabled with napi) */
writel(0, base + NvRegIrqMask);

/* Disable furthur receive irq's */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
spin_unlock(&np->lock);

if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
spin_unlock(&np->lock);
}
return IRQ_HANDLED;
#else
spin_lock(&np->lock);
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
spin_unlock(&np->lock);

if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx_optimized(dev))) {
spin_lock(&np->lock);
@@ -3575,7 +3569,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
spin_unlock(&np->lock);
}
}
#endif

if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3622,7 +3616,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}

#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

@@ -3682,9 +3676,17 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
int pkts, retcode;

if (!nv_optimized(np)) {
spin_lock_irqsave(&np->lock, flags);
nv_tx_done(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);

pkts = nv_rx_process(dev, budget);
retcode = nv_alloc_rx(dev);
} else {
spin_lock_irqsave(&np->lock, flags);
nv_tx_done_optimized(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);

pkts = nv_rx_process_optimized(dev, budget);
retcode = nv_alloc_rx_optimized(dev);
}
@@ -3696,17 +3698,37 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&np->lock, flags);
}

if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
spin_unlock_irqrestore(&np->lock, flags);
}
if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown) {
np->nic_poll_irq = np->irqmask;
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
__napi_complete(napi);
return pkts;
}

if (pkts < budget) {
/* re-enable receive interrupts */
/* re-enable interrupts
(msix not enabled in napi) */
spin_lock_irqsave(&np->lock, flags);

__napi_complete(napi);

np->irqmask |= NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
writel(np->irqmask, base + NvRegIrqMask);

spin_unlock_irqrestore(&np->lock, flags);
}

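To summarize the nv_napi_poll hunks: tx completion, link-change and
recovery handling all move into the poll routine, which re-arms the full
interrupt mask once it finishes under budget. A condensed sketch of the
non-optimized path, paraphrased from the hunks above — the function name
is hypothetical, the oom/link/recovery branches are elided, and the
helpers and fields are the driver's own:

	static int nv_napi_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct fe_priv *np = container_of(napi, struct fe_priv, napi);
		struct net_device *dev = np->dev;
		u8 __iomem *base = get_hwbase(dev);
		unsigned long flags;
		int pkts;

		/* tx completion, formerly done unconditionally in the ISR */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		/* rx work is bounded by the napi budget */
		pkts = nv_rx_process(dev, budget);

		/* link-change and recovery events, formerly ISR work, are
		 * checked here as well (elided; see the hunks above) */

		if (pkts < budget) {
			/* budget not exhausted: complete and re-arm. Both steps
			 * sit under np->lock so a freshly arrived interrupt
			 * cannot race between __napi_complete() and the
			 * unconditional mask write that replaces the old
			 * msix if/else */
			spin_lock_irqsave(&np->lock, flags);
			__napi_complete(napi);
			writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		return pkts;
	}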