---
yaml
---
r: 111895
b: refs/heads/master
c: 8fd8921
h: refs/heads/master
i:
  111893: b81a318
  111891: 03ce089
  111887: b1715d0
v: v3
Lennert Buytenhek authored and Lennert Buytenhek committed Sep 14, 2008
1 parent 7d45d48 commit aaceb1a
Showing 2 changed files with 56 additions and 31 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1fa38c586e92cce4ce06bfc08ad3134b8445170b
+refs/heads/master: 8fd89211bf8e8e60415c66e5546c1478f5e8bc2b
trunk/drivers/net/mv643xx_eth.c: 85 changes (55 additions, 30 deletions)
@@ -337,6 +337,10 @@ struct tx_queue {
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 	struct sk_buff **tx_skb;
+
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
 };
 
 struct mv643xx_eth_private {
@@ -347,8 +351,6 @@ struct mv643xx_eth_private {
 
 	int phy_addr;
 
-	spinlock_t lock;
-
 	struct mib_counters mib_counters;
 	struct work_struct tx_timeout_task;
 	struct mii_if_info mii;
@@ -453,10 +455,12 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
-	spin_lock(&mp->lock);
-	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_tx_wake_queue(nq);
-	spin_unlock(&mp->lock);
+	if (netif_tx_queue_stopped(nq)) {
+		__netif_tx_lock(nq, smp_processor_id());
+		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+		__netif_tx_unlock(nq);
+	}
 }
 
 
@@ -785,45 +789,39 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
 	int queue;
 	struct tx_queue *txq;
 	struct netdev_queue *nq;
 	int entries_left;
 
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
+
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-		stats->tx_dropped++;
+		txq->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
 			   "failed to linearize skb with tiny "
 			   "unaligned fragment\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	queue = skb_get_queue_mapping(skb);
-	txq = mp->txq + queue;
-	nq = netdev_get_tx_queue(dev, queue);
-
-	spin_lock(&mp->lock);
-
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-		spin_unlock(&mp->lock);
 		if (net_ratelimit())
 			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
 	txq_submit_skb(txq, skb);
-	stats->tx_bytes += skb->len;
-	stats->tx_packets++;
+	txq->tx_bytes += skb->len;
+	txq->tx_packets++;
 	dev->trans_start = jiffies;
 
 	entries_left = txq->tx_ring_size - txq->tx_desc_count;
 	if (entries_left < MAX_SKB_FRAGS + 1)
 		netif_tx_stop_queue(nq);
 
-	spin_unlock(&mp->lock);
-
 	return NETDEV_TX_OK;
 }
 
@@ -832,10 +830,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 static void txq_kick(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	spin_lock(&mp->lock);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
 		goto out;
@@ -848,17 +847,18 @@ static void txq_kick(struct tx_queue *txq)
 	txq_enable(txq);
 
 out:
-	spin_unlock(&mp->lock);
+	__netif_tx_unlock(nq);
 
 	mp->work_tx_end &= ~(1 << txq->index);
 }
 
 static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	spin_lock(&mp->lock);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -897,9 +897,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		}
 
 		/*
-		 * Drop mp->lock while we free the skb.
+		 * Drop tx queue lock while we free the skb.
 		 */
-		spin_unlock(&mp->lock);
+		__netif_tx_unlock(nq);
 
 		if (cmd_sts & TX_FIRST_DESC)
 			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
@@ -909,14 +909,14 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		if (skb)
 			dev_kfree_skb(skb);
 
-		spin_lock(&mp->lock);
+		__netif_tx_lock(nq, smp_processor_id());
 	}
 
+	__netif_tx_unlock(nq);
+
 	if (reclaimed < budget)
 		mp->work_tx &= ~(1 << txq->index);
 
-	spin_unlock(&mp->lock);
-
 	return reclaimed;
 }
 
@@ -1123,7 +1123,31 @@ static int smi_reg_write(struct mv643xx_eth_private *mp, unsigned int addr,
 }
 
 
-/* mib counters *************************************************************/
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned long tx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_dropped = 0;
+	int i;
+
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		tx_packets += txq->tx_packets;
+		tx_bytes += txq->tx_bytes;
+		tx_dropped += txq->tx_dropped;
+	}
+
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->tx_dropped = tx_dropped;
+
+	return stats;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1355,6 +1379,7 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int i;
 
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
@@ -2138,6 +2163,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	free_irq(dev->irq, dev);
 
 	port_reset(mp);
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
 	for (i = 0; i < mp->rxq_count; i++)
@@ -2585,8 +2611,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	set_params(mp, pd);
 	dev->real_num_tx_queues = mp->txq_count;
 
-	spin_lock_init(&mp->lock);
-
 	mib_counters_clear(mp);
 	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
 
@@ -2612,6 +2636,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	BUG_ON(!res);
 	dev->irq = res->start;
 
+	dev->get_stats = mv643xx_eth_get_stats;
 	dev->hard_start_xmit = mv643xx_eth_xmit;
 	dev->open = mv643xx_eth_open;
 	dev->stop = mv643xx_eth_stop;