skge: add byte queue limit support
This also changes the cleanup logic slightly to aggregate
completed notifications for multiple packets.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
stephen hemminger authored and David S. Miller committed Jan 23, 2012
1 parent 302476c commit da057fb
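
For context, byte queue limits (BQL) hook into a driver in three places: report bytes handed to the hardware at transmit time with netdev_sent_queue(), report finished work in bulk with netdev_completed_queue(), and clear the accounting with netdev_reset_queue() whenever the ring is flushed. The sketch below shows that call pattern in isolation; the *_sketch() functions are hypothetical stand-ins for the driver paths touched by this diff, not actual skge code.

/*
 * Minimal sketch of the BQL call pattern this patch adopts.
 * Only netdev_sent_queue(), netdev_completed_queue() and
 * netdev_reset_queue() (declared in linux/netdevice.h) are real
 * interfaces; the *_sketch() functions are illustrative stand-ins.
 */
static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post descriptors to the hardware ring ... */

	/* account the queued bytes before starting the hardware */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

static void tx_done_sketch(struct net_device *dev)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;

	/*
	 * Walk the completed ring elements, accumulating
	 * pkts_compl++ and bytes_compl += skb->len per packet.
	 */

	/* one aggregated report per interrupt, not one per packet */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void tx_clean_sketch(struct net_device *dev)
{
	/* ... unmap and free everything left in the transmit ring ... */

	/* the ring is now empty, so reset the BQL byte count */
	netdev_reset_queue(dev);
}

Aggregating pkts_compl and bytes_compl is why the cleanup logic was split: skge_tx_unmap() only undoes the DMA mapping, so the completion loop in skge_tx_done() can count and free packets itself and issue a single netdev_completed_queue() call at the end.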
Showing 1 changed file with 24 additions and 13 deletions.

--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2817,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
 	wmb();
 
+	netdev_sent_queue(dev, skb->len);
+
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
 	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2858,11 +2860,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 
 /* Free resources associated with this ring element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
-			 u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+				 u32 control)
 {
-	struct pci_dev *pdev = skge->hw->pdev;
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2872,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
 			       dma_unmap_len(e, maplen),
 			       PCI_DMA_TODEVICE);
-
-	if (control & BMU_EOF) {
-		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
-			     "tx done slot %td\n", e - skge->tx_ring.start);
-
-		dev_kfree_skb(e->skb);
-	}
 }
 
 /* Free all buffers in transmit ring */
@@ -2889,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
 
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
-		skge_tx_free(skge, e, td->control);
+
+		skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+		if (td->control & BMU_EOF)
+			dev_kfree_skb(e->skb);
 		td->control = 0;
 	}
 
+	netdev_reset_queue(dev);
 	skge->tx_ring.to_clean = e;
 }
 
@@ -3157,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
@@ -3166,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
 		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, control);
+		skge_tx_unmap(skge->hw->pdev, e, control);
+
+		if (control & BMU_EOF) {
+			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+				     "tx done slot %td\n",
+				     e - skge->tx_ring.start);
+
+			pkts_compl++;
+			bytes_compl += e->skb->len;
+
+			dev_kfree_skb(e->skb);
+		}
 	}
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	skge->tx_ring.to_clean = e;
 
 	/* Can run lockless until we need to synchronize to restart queue. */
