Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 103608
b: refs/heads/master
c: c7e4f3b
h: refs/heads/master
v: v3
  • Loading branch information
David S. Miller committed Jul 18, 2008
1 parent 2268edf commit d7ece4b
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 37 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 78a5b30b7324b2d66bcf7d2e3935877d3c26497c
refs/heads/master: c7e4f3bbb4ba4e48ab3b529d5016e454cee1ccd6
3 changes: 0 additions & 3 deletions trunk/include/net/sch_generic.h
Original file line number Diff line number Diff line change
Expand Up @@ -180,9 +180,6 @@ static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
return qdisc->dev_queue->dev;
}

extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

static inline void sch_tree_lock(struct Qdisc *q)
{
spin_lock_bh(qdisc_root_lock(q));
Expand Down
36 changes: 3 additions & 33 deletions trunk/net/sched/sch_generic.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,44 +29,14 @@
/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
* queue->lock spinlock.
* qdisc_root_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via top level device
* spinlock queue->lock.
* - ingress filtering is serialized via top level device
* spinlock dev->rx_queue.lock.
* - enqueue, dequeue are serialized via qdisc root lock
* - ingress filtering is also serialized via qdisc root lock
* - updates to tree and tree walking are only done under the rtnl mutex.
*/

/*
 * qdisc_lock_tree - lock the whole qdisc tree of a device.
 * @dev: device whose queue locks are taken.
 *
 * With bottom halves disabled, acquires every TX queue lock in
 * ascending index order (0 .. num_tx_queues-1) and then the ingress
 * (rx_queue) lock.  The fixed ascending order presumably defines the
 * lock ordering against any other multi-queue locker — confirm against
 * other call sites.  Must be paired with qdisc_unlock_tree().
 *
 * NOTE(review): the sparse annotation below records only
 * dev->rx_queue.lock; the per-TX-queue locks taken in the loop are not
 * annotated.
 */
void qdisc_lock_tree(struct net_device *dev)
	__acquires(dev->rx_queue.lock)
{
	unsigned int i;

	/* Keep bottom halves off for the entire locked section. */
	local_bh_disable();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_lock(&txq->lock);
	}
	spin_lock(&dev->rx_queue.lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);

/*
 * qdisc_unlock_tree - undo qdisc_lock_tree().
 * @dev: device whose queue locks are released.
 *
 * Releases the ingress (rx_queue) lock first, then every TX queue lock
 * in ascending index order, and finally re-enables bottom halves.
 * Release order need not mirror the acquire order exactly; what matters
 * is that local_bh_enable() comes last, after all locks are dropped.
 *
 * NOTE(review): as with qdisc_lock_tree(), the sparse annotation covers
 * only dev->rx_queue.lock, not the per-TX-queue locks.
 */
void qdisc_unlock_tree(struct net_device *dev)
	__releases(dev->rx_queue.lock)
{
	unsigned int i;

	spin_unlock(&dev->rx_queue.lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_unlock(&txq->lock);
	}
	/* Re-enable bottom halves only after every lock is released. */
	local_bh_enable();
}
EXPORT_SYMBOL(qdisc_unlock_tree);

static inline int qdisc_qlen(struct Qdisc *q)
{
return q->q.qlen;
Expand Down

0 comments on commit d7ece4b

Please sign in to comment.