Commit bd092ad
Merge branch 'remove-__napi_complete_done'
Eric Dumazet says:

====================
net: get rid of __napi_complete()

This patch series removes __napi_complete() calls, in an effort
to make the NAPI API simpler and to generalize GRO and napi_complete_done().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Feb 5, 2017
2 parents 3976001 + 02c1602 commit bd092ad
Showing 13 changed files with 127 additions and 222 deletions.
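
All of the driver conversions below follow the same shape. As a rough illustration of that pattern (a sketch only, not code from this series: the foo_* names, the foo_priv layout and the register helpers are invented stand-ins), a poll routine stops calling __napi_complete() under its own lock and instead reports its work count to napi_complete_done(), re-enabling the device interrupt only when that call returns true, i.e. only when the core really takes the NAPI instance off the poll list:

/* Hypothetical driver sketching the before/after pattern of this series. */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	struct napi_struct napi;
	spinlock_t lock;
};

/* Invented hardware helpers, stand-ins for the per-driver register writes. */
static void foo_enable_rx_irq(struct foo_priv *fp) { }
static int foo_rx(struct foo_priv *fp, int budget) { return 0; }

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(fp, budget);

	/* Old style removed by this series:
	 *
	 *	if (work_done < budget) {
	 *		spin_lock_irqsave(&fp->lock, flags);
	 *		__napi_complete(napi);
	 *		foo_enable_rx_irq(fp);
	 *		spin_unlock_irqrestore(&fp->lock, flags);
	 *	}
	 *
	 * New style: hand the work count to the core and only re-enable the
	 * device interrupt if polling really stops (napi_complete_done() may
	 * return false, e.g. when a busy-polling user owns this NAPI instance).
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&fp->lock, flags);
		foo_enable_rx_irq(fp);
		spin_unlock_irqrestore(&fp->lock, flags);
	}
	return work_done;
}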
2 changes: 1 addition & 1 deletion drivers/net/ethernet/aeroflex/greth.c
@@ -1008,7 +1008,7 @@ static int greth_poll(struct napi_struct *napi, int budget)
 			spin_unlock_irqrestore(&greth->devlock, flags);
 			goto restart_txrx_poll;
 		} else {
-			__napi_complete(napi);
+			napi_complete_done(napi, work_done);
 			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
164 changes: 72 additions & 92 deletions drivers/net/ethernet/amd/amd8111e.c
@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	void __iomem *mmio = lp->mmio;
 	struct sk_buff *skb,*new_skb;
 	int min_pkt_len, status;
-	unsigned int intr0;
 	int num_rx_pkt = 0;
 	short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
 	short vtag;
 #endif
-	int rx_pkt_limit = budget;
-	unsigned long flags;
 
-	if (rx_pkt_limit <= 0)
-		goto rx_not_empty;
+	while (num_rx_pkt < budget) {
+		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+		if (status & OWN_BIT)
+			break;
 
-	do{
-		/* process receive packets until we use the quota.
-		 * If we own the next entry, it's a new packet. Send it up.
+		/* There is a tricky error noted by John Murphy,
+		 * <murf@perftech.com> to Russ Nelson: Even with
+		 * full-sized * buffers it's possible for a
+		 * jabber packet to use two buffers, with only
+		 * the last correctly noting the error.
 		 */
-		while(1) {
-			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-			if (status & OWN_BIT)
-				break;
-
-			/* There is a tricky error noted by John Murphy,
-			 * <murf@perftech.com> to Russ Nelson: Even with
-			 * full-sized * buffers it's possible for a
-			 * jabber packet to use two buffers, with only
-			 * the last correctly noting the error.
-			 */
-			if(status & ERR_BIT) {
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			/* check for STP and ENP */
-			if(!((status & STP_BIT) && (status & ENP_BIT))){
-				/* resetting flags */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				goto err_next_pkt;
-			}
-			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+		if (status & ERR_BIT) {
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		/* check for STP and ENP */
+		if (!((status & STP_BIT) && (status & ENP_BIT))){
+			/* resetting flags */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			goto err_next_pkt;
+		}
+		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 
 #if AMD8111E_VLAN_TAG_USED
-			vtag = status & TT_MASK;
-			/*MAC will strip vlan tag*/
-			if (vtag != 0)
-				min_pkt_len =MIN_PKT_LEN - 4;
-			else
+		vtag = status & TT_MASK;
+		/* MAC will strip vlan tag */
+		if (vtag != 0)
+			min_pkt_len = MIN_PKT_LEN - 4;
+		else
 #endif
-				min_pkt_len =MIN_PKT_LEN;
+			min_pkt_len = MIN_PKT_LEN;
 
-			if (pkt_len < min_pkt_len) {
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
-			if(--rx_pkt_limit < 0)
-				goto rx_not_empty;
-			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
-			if (!new_skb) {
-				/* if allocation fail,
-				 * ignore that pkt and go to next one
-				 */
-				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-				lp->drv_rx_errors++;
-				goto err_next_pkt;
-			}
+		if (pkt_len < min_pkt_len) {
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
+		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+		if (!new_skb) {
+			/* if allocation fail,
+			 * ignore that pkt and go to next one
+			 */
+			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+			lp->drv_rx_errors++;
+			goto err_next_pkt;
+		}
 
-			skb_reserve(new_skb, 2);
-			skb = lp->rx_skbuff[rx_index];
-			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[rx_index] = new_skb;
-			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
-								   new_skb->data,
-								   lp->rx_buff_len-2,
-								   PCI_DMA_FROMDEVICE);
+		skb_reserve(new_skb, 2);
+		skb = lp->rx_skbuff[rx_index];
+		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+				 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+		skb_put(skb, pkt_len);
+		lp->rx_skbuff[rx_index] = new_skb;
+		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+							   new_skb->data,
+							   lp->rx_buff_len-2,
+							   PCI_DMA_FROMDEVICE);
 
-			skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, dev);
 
 #if AMD8111E_VLAN_TAG_USED
-			if (vtag == TT_VLAN_TAGGED){
-				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-			}
+		if (vtag == TT_VLAN_TAGGED){
+			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+		}
 #endif
-			netif_receive_skb(skb);
-			/*COAL update rx coalescing parameters*/
-			lp->coal_conf.rx_packets++;
-			lp->coal_conf.rx_bytes += pkt_len;
-			num_rx_pkt++;
+		napi_gro_receive(napi, skb);
+		/* COAL update rx coalescing parameters */
+		lp->coal_conf.rx_packets++;
+		lp->coal_conf.rx_bytes += pkt_len;
+		num_rx_pkt++;
 
 err_next_pkt:
-			lp->rx_ring[rx_index].buff_phy_addr
-				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
-			lp->rx_ring[rx_index].buff_count =
-				cpu_to_le16(lp->rx_buff_len-2);
-			wmb();
-			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
-			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
-		}
-		/* Check the interrupt status register for more packets in the
-		 * mean time. Process them since we have not used up our quota.
-		 */
-		intr0 = readl(mmio + INT0);
-		/*Ack receive packets */
-		writel(intr0 & RINT0,mmio + INT0);
-
-	} while(intr0 & RINT0);
-
-	if (rx_pkt_limit > 0) {
+		lp->rx_ring[rx_index].buff_phy_addr
+			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
+		lp->rx_ring[rx_index].buff_count =
+			cpu_to_le16(lp->rx_buff_len-2);
+		wmb();
+		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+	}
+
+	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+		unsigned long flags;
+
 		/* Receive descriptor is empty now */
 		spin_lock_irqsave(&lp->lock, flags);
-		__napi_complete(napi);
 		writel(VAL0|RINTEN0, mmio + INTEN0);
 		writel(VAL2 | RDMD0, mmio + CMD0);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
-rx_not_empty:
 	return num_rx_pkt;
 }
11 changes: 3 additions & 8 deletions drivers/net/ethernet/amd/pcnet32.c
@@ -1350,23 +1350,18 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
 		pcnet32_restart(dev, CSR0_START);
 		netif_wake_queue(dev);
 	}
-	spin_unlock_irqrestore(&lp->lock, flags);
 
-	if (work_done < budget) {
-		spin_lock_irqsave(&lp->lock, flags);
-
-		__napi_complete(napi);
-
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		/* clear interrupt masks */
 		val = lp->a->read_csr(ioaddr, CSR3);
 		val &= 0x00ff;
 		lp->a->write_csr(ioaddr, CSR3, val);
 
 		/* Set interrupt enable. */
 		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
-
-		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return work_done;
 }

29 changes: 6 additions & 23 deletions drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
 		pr_info("mdio write timed out\n");
 }
 
-static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+static int ep93xx_rx(struct net_device *dev, int budget)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	int processed = 0;
 
 	while (processed < budget) {
 		int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 		skb_put(skb, length);
 		skb->protocol = eth_type_trans(skb, dev);
 
-		netif_receive_skb(skb);
+		napi_gro_receive(&ep->napi, skb);
 
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 	return processed;
 }
 
-static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
-{
-	struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
-	return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
-}
-
 static int ep93xx_poll(struct napi_struct *napi, int budget)
 {
 	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
 	struct net_device *dev = ep->dev;
-	int rx = 0;
+	int rx;
 
-poll_some_more:
-	rx = ep93xx_rx(dev, rx, budget);
-	if (rx < budget) {
-		int more = 0;
-
+	rx = ep93xx_rx(dev, budget);
+	if (rx < budget && napi_complete_done(napi, rx)) {
 		spin_lock_irq(&ep->rx_lock);
-		__napi_complete(napi);
 		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
-		if (ep93xx_have_more_rx(ep)) {
-			wrl(ep, REG_INTEN, REG_INTEN_TX);
-			wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
-			more = 1;
-		}
 		spin_unlock_irq(&ep->rx_lock);
-
-		if (more && napi_reschedule(napi))
-			goto poll_some_more;
 	}
 
 	if (rx) {
18 changes: 9 additions & 9 deletions drivers/net/ethernet/ibm/emac/mal.c
@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
 		int n;
 		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
 			continue;
-		n = mc->ops->poll_rx(mc->dev, budget);
+		n = mc->ops->poll_rx(mc->dev, budget - received);
 		if (n) {
 			received += n;
-			budget -= n;
-			if (budget <= 0)
-				goto more_work; // XXX What if this is the last one ?
+			if (received >= budget)
+				return budget;
 		}
 	}
 
-	/* We need to disable IRQs to protect from RXDE IRQ here */
-	spin_lock_irqsave(&mal->lock, flags);
-	__napi_complete(napi);
-	mal_enable_eob_irq(mal);
-	spin_unlock_irqrestore(&mal->lock, flags);
+	if (napi_complete_done(napi, received)) {
+		/* We need to disable IRQs to protect from RXDE IRQ here */
+		spin_lock_irqsave(&mal->lock, flags);
+		mal_enable_eob_irq(mal);
+		spin_unlock_irqrestore(&mal->lock, flags);
+	}
 
 	/* Check for "rotting" packet(s) */
 	list_for_each(l, &mal->poll_list) {
8 changes: 3 additions & 5 deletions drivers/net/ethernet/marvell/skge.c
@@ -3201,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
 	}
 }
 
-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
 {
 	struct skge_port *skge = container_of(napi, struct skge_port, napi);
 	struct net_device *dev = skge->netdev;
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
 
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+	for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
 		u32 control;
@@ -3236,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
 	wmb();
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
-	if (work_done < to_do) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
 
-		napi_gro_flush(napi, false);
 		spin_lock_irqsave(&hw->hw_lock, flags);
-		__napi_complete(napi);
 		hw->intr_mask |= napimask[skge->port];
 		skge_write32(hw, B0_IMSK, hw->intr_mask);
 		skge_read32(hw, B0_IMSK);
11 changes: 5 additions & 6 deletions drivers/net/ethernet/micrel/ks8695net.c
@@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
 			/* Relinquish the SKB to the network layer */
 			skb_put(skb, pktlen);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_receive_skb(skb);
+			napi_gro_receive(&ksp->napi, skb);
 
 			/* Record stats */
 			ndev->stats.rx_packets++;
@@ -561,18 +561,17 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
 static int ks8695_poll(struct napi_struct *napi, int budget)
 {
 	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
-	unsigned long work_done;
-
 	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
 	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+	int work_done;
 
 	work_done = ks8695_rx(ksp, budget);
 
-	if (work_done < budget) {
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&ksp->rx_lock, flags);
-		__napi_complete(napi);
-		/*enable rx interrupt*/
+		/* enable rx interrupt */
 		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
 		spin_unlock_irqrestore(&ksp->rx_lock, flags);
 	}