mv643xx_eth: transmit multiqueue support
As all the infrastructure for multiple transmit queues already exists
in the driver, this patch is entirely trivial.

The individual transmit queues are still serialised by the driver's
per-port private spinlock, but that will disappear (i.e. be replaced
by the per-subqueue ->_xmit_lock) in a subsequent patch.
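
For readers new to the API: the per-subqueue pattern this patch switches to looks roughly like the sketch below. It assumes a simplified driver context; example_txq and example_tx_flow_control are hypothetical stand-ins, not code from this driver. The real calls are skb_get_queue_mapping(), netdev_get_tx_queue() and netif_tx_{stop,wake}_queue(), which act on a single transmit subqueue rather than on the whole device.

/*
 * Hypothetical sketch of per-subqueue tx flow control; example_txq
 * and example_tx_flow_control are illustrative stand-ins only.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_txq {
	int tx_ring_size;	/* descriptors in the ring */
	int tx_desc_count;	/* descriptors currently in use */
};

static void example_tx_flow_control(struct net_device *dev,
				    struct sk_buff *skb,
				    struct example_txq *txq)
{
	/* the core picks the subqueue before calling ->hard_start_xmit() */
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	/* stop only this subqueue if a worst-case skb no longer fits */
	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

	/* tx reclaim later wakes the same subqueue once room is back */
	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
		netif_tx_wake_queue(nq);
}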

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Lennert Buytenhek authored and committed on Sep 5, 2008
1 parent befefe2 commit e5ef1de
Showing 1 changed file with 18 additions and 22 deletions: drivers/net/mv643xx_eth.c
@@ -449,15 +449,10 @@ static void txq_disable(struct tx_queue *txq)
 static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != 0);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_queue(nq);
 }


@@ -827,8 +822,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
+	struct netdev_queue *nq;
 	unsigned long flags;
+	int entries_left;
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
@@ -838,15 +836,16 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
-	txq = mp->txq;
+	spin_lock_irqsave(&mp->lock, flags);
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == 0 && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -856,13 +855,9 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->index == 0) {
-		int entries_left;
-
-		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_SKB_FRAGS + 1)
-			netif_stop_queue(dev);
-	}
+	entries_left = txq->tx_ring_size - txq->tx_desc_count;
+	if (entries_left < MAX_SKB_FRAGS + 1)
+		netif_tx_stop_queue(nq);
 
 	spin_unlock_irqrestore(&mp->lock, flags);

@@ -2169,10 +2164,10 @@ static void tx_timeout_task(struct work_struct *ugly)
 
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 	if (netif_running(mp->dev)) {
-		netif_stop_queue(mp->dev);
+		netif_tx_stop_all_queues(mp->dev);
 		port_reset(mp);
 		port_start(mp);
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_all_queues(mp->dev);
 	}
 }

@@ -2546,7 +2541,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
 	if (!dev)
 		return -ENOMEM;

@@ -2559,6 +2554,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->dev = dev;
 
 	set_params(mp, pd);
+	dev->real_num_tx_queues = mp->txq_count;
 
 	spin_lock_init(&mp->lock);

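One note on the probe-time change above: alloc_etherdev_mq() allocates the net_device with room for the hardware's maximum of 8 transmit queues, and the later assignment to real_num_tx_queues trims the number of queues the stack will actually use down to what set_params() configured. A minimal sketch of that allocation pattern, with example_priv and example_alloc_mq_netdev as hypothetical stand-ins:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* hypothetical stand-in for the driver's private struct */
struct example_priv {
	unsigned int txq_count;	/* tx queues actually configured */
};

static struct net_device *example_alloc_mq_netdev(unsigned int txq_count)
{
	struct net_device *dev;

	/* allocate with room for the hardware maximum of 8 tx queues... */
	dev = alloc_etherdev_mq(sizeof(struct example_priv), 8);
	if (!dev)
		return NULL;

	/* ...then expose only the queues that are actually in use;
	 * direct assignment was the idiom at the time, as in the patch */
	dev->real_num_tx_queues = txq_count;

	return dev;
}

Queues between real_num_tx_queues and the allocated maximum still exist, but the core's queue selection never maps packets onto them.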