Commit 106488a
---
yaml
---
r: 171506
b: refs/heads/master
c: a3bc1f1
h: refs/heads/master
v: v3
Anton Vorontsov authored and David S. Miller committed Nov 12, 2009
1 parent 5e79908 commit 106488a
Showing 2 changed files with 20 additions and 13 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 836cf7faf8c75743477ed6ed341cce491f3183fb
+refs/heads/master: a3bc1f11e9b867a4f49505ecac486a33af248b2e
trunk/drivers/net/gianfar.c (31 changes: 19 additions & 12 deletions)
@@ -1928,14 +1928,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&tx_queue->txlock, flags);
-
 	/* check if there is space to queue this packet */
 	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_tx_stop_queue(txq);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1998,6 +1995,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
+	/*
+	 * We can work in parallel with gfar_clean_tx_ring(), except
+	 * when modifying num_txbdfree. Note that we didn't grab the lock
+	 * when we were reading the num_txbdfree and checking for available
+	 * space, that's because outside of this function it can only grow,
+	 * and once we've got needed space, it cannot suddenly disappear.
+	 *
+	 * The lock also protects us from gfar_error(), which can modify
+	 * regs->tstat and thus retrigger the transfers, which is why we
+	 * also must grab the lock before setting ready bit for the first
+	 * to be transmitted BD.
+	 */
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+
 	/*
 	 * The powerpc-specific eieio() is used, as wmb() has too strong
 	 * semantics (it requires synchronization between cacheable and
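
The comment added above is the crux of the patch: gfar_start_xmit() may read num_txbdfree without the lock because the cleanup path can only increase it, while every write to it, and the final kick of the hardware, stays serialized by txlock. A minimal sketch of that pattern follows, with hypothetical names (struct tx_ring, free_bds, ring_xmit) standing in for the driver's gfar_priv_tx_q, num_txbdfree, and gfar_start_xmit; it is not the driver's real code.

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    /* Hypothetical stand-in for gfar_priv_tx_q; not the driver's real layout. */
    struct tx_ring {
    	spinlock_t lock;	/* serializes writers of free_bds and the HW kick */
    	int free_bds;		/* grows in cleanup, shrinks only in xmit */
    };

    static int ring_xmit(struct tx_ring *r, int needed)
    {
    	unsigned long flags;

    	/*
    	 * Lockless read: the only concurrent writer (cleanup) can only
    	 * increase free_bds, so a stale value under-estimates the space.
    	 * Once the check passes, the space cannot disappear under us.
    	 */
    	if (needed > r->free_bds)
    		return -EBUSY;

    	/* ... set up all buffer descriptors except the READY bit ... */

    	spin_lock_irqsave(&r->lock, flags);
    	r->free_bds -= needed;	/* free_bds is only ever written under the lock */
    	/* ... set READY on the first BD and kick the hardware ... */
    	spin_unlock_irqrestore(&r->lock, flags);

    	return 0;
    }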
@@ -2225,6 +2236,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	skb_dirtytx = tx_queue->skb_dirtytx;
 
 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+		unsigned long flags;
+
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -2269,7 +2282,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
+		spin_lock_irqsave(&tx_queue->txlock, flags);
 		tx_queue->num_txbdfree += frags + 1;
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
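
The cleanup side is the counterpart: unmapping and freeing completed skbs now runs in parallel with gfar_start_xmit(), and only the free-count update takes the lock. Continuing the hypothetical sketch above (ring_clean standing in for gfar_clean_tx_ring):

    static int ring_clean(struct tx_ring *r)
    {
    	unsigned long flags;
    	int howmany = 0;

    	/* Walk completed descriptors; no lock needed to unmap/free skbs. */
    	while (0 /* ... next BD completed ... */) {
    		int freed = 1 /* + nr_frags of the completed skb */;

    		/* ... unmap DMA and free the skb ... */

    		spin_lock_irqsave(&r->lock, flags);
    		r->free_bds += freed;	/* the only writer besides ring_xmit() */
    		spin_unlock_irqrestore(&r->lock, flags);
    		howmany++;
    	}

    	return howmany;
    }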
@@ -2548,7 +2563,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	int tx_cleaned = 0, i, left_over_budget = budget;
 	unsigned long serviced_queues = 0;
 	int num_queues = 0;
-	unsigned long flags;
 
 	num_queues = gfargrp->num_rx_queues;
 	budget_per_queue = budget/num_queues;
@@ -2568,14 +2582,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 		rx_queue = priv->rx_queue[i];
 		tx_queue = priv->tx_queue[rx_queue->qindex];
 
-		/* If we fail to get the lock,
-		 * don't bother with the TX BDs */
-		if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
-			tx_cleaned += gfar_clean_tx_ring(tx_queue);
-			spin_unlock_irqrestore(&tx_queue->txlock,
-					flags);
-		}
-
+		tx_cleaned += gfar_clean_tx_ring(tx_queue);
 		rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
 				budget_per_queue);
 		rx_cleaned += rx_cleaned_per_queue;
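
Since gfar_clean_tx_ring() now takes the lock itself, and only around the free-count update, the NAPI poll loop can drop the spin_trylock_irqsave() dance; previously, a contended txlock meant TX completions were simply skipped for that poll pass (per the removed comment, "don't bother with the TX BDs"). In the hypothetical sketch, the poll path reduces to a plain call:

    static int ring_poll(struct tx_ring *r)
    {
    	/*
    	 * No trylock needed: ring_clean() locks internally, and only
    	 * briefly, so completions are never skipped just because the
    	 * xmit path happens to hold the lock.
    	 */
    	return ring_clean(r);
    }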
