Commit 6e0ec1f
---
yaml
---
r: 103386
b: refs/heads/master
c: dc2b484
h: refs/heads/master
v: v3
David S. Miller committed Jul 9, 2008
1 parent eb44884 commit 6e0ec1f
Showing 13 changed files with 74 additions and 63 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 5ce2d488fe039ddd86a638496cf704df86c74eeb
+refs/heads/master: dc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84
8 changes: 4 additions & 4 deletions trunk/drivers/net/ifb.c
@@ -229,12 +229,12 @@ module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

/*
- * dev_ifb->queue_lock is usually taken after dev->ingress_lock,
+ * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
* reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->queue_lock with dev_ifb->ingress_lock.
+ * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
* But lockdep should know that ifb has different locks from dev.
*/
-static struct lock_class_key ifb_queue_lock_key;
+static struct lock_class_key ifb_tx_queue_lock_key;
static struct lock_class_key ifb_ingress_lock_key;


@@ -258,7 +258,7 @@ static int __init ifb_init_one(int index)
if (err < 0)
goto err;

-lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
+lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);

return 0;
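For context on the ifb hunk above: each static struct lock_class_key gives the locks keyed against it their own lockdep class, so ifb's queue lock may nest under another device's queue lock without triggering a false recursive-locking report. A minimal sketch of the pattern, with an illustrative helper name that is not part of this commit:

static struct lock_class_key example_tx_queue_lock_key;

/* Re-key an already initialized queue spinlock so lockdep tracks it
 * separately from other devices' queue locks; call it after the
 * device has been registered, as ifb_init_one() does above. */
static void example_set_queue_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->tx_queue.lock, &example_tx_queue_lock_key);
}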
4 changes: 2 additions & 2 deletions trunk/include/linux/netdevice.h
@@ -449,6 +449,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
#endif

struct netdev_queue {
+spinlock_t lock;
struct net_device *dev;
};

@@ -629,7 +630,7 @@ struct net_device
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */

struct netdev_queue rx_queue;
-struct netdev_queue tx_queue;
+struct netdev_queue tx_queue ____cacheline_aligned_in_smp;

/* ingress path synchronizer */
spinlock_t ingress_lock;
@@ -639,7 +640,6 @@ struct net_device
* Cache line mostly used on queue transmit path (qdisc)
*/
/* device queue lock */
-spinlock_t queue_lock ____cacheline_aligned_in_smp;
struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;
struct list_head qdisc_list;
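The net effect on callers: the lock moves from the device into the per-queue structure, and the tx queue inherits the old field's cacheline alignment. Code that used to take dev->queue_lock now reaches through the queue, roughly as in this sketch (hypothetical helper, not from the patch):

static void example_with_tx_queue_lock(struct net_device *dev)
{
        struct netdev_queue *txq = &dev->tx_queue;

        spin_lock_bh(&txq->lock);       /* was spin_lock_bh(&dev->queue_lock) */
        /* ... modify qdisc state serialized by the queue lock ... */
        spin_unlock_bh(&txq->lock);
}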
33 changes: 22 additions & 11 deletions trunk/net/core/dev.c
@@ -1667,6 +1667,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int dev_queue_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
+struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;

@@ -1699,22 +1700,23 @@ int dev_queue_xmit(struct sk_buff *skb)
}

gso:
-spin_lock_prefetch(&dev->queue_lock);
+txq = &dev->tx_queue;
+spin_lock_prefetch(&txq->lock);

/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rcu_read_lock_bh();

-/* Updates of qdisc are serialized by queue_lock.
+/* Updates of qdisc are serialized by queue->lock.
* The struct Qdisc which is pointed to by qdisc is now a
* rcu structure - it may be accessed without acquiring
* a lock (but the structure may be stale.) The freeing of the
* qdisc will be deferred until it's known that there are no
* more references to it.
*
* If the qdisc has an enqueue function, we still need to
- * hold the queue_lock before calling it, since queue_lock
+ * hold the queue->lock before calling it, since queue->lock
* also serializes access to the device queue.
*/

@@ -1724,19 +1726,19 @@ int dev_queue_xmit(struct sk_buff *skb)
#endif
if (q->enqueue) {
/* Grab device queue */
-spin_lock(&dev->queue_lock);
+spin_lock(&txq->lock);
q = dev->qdisc;
if (q->enqueue) {
/* reset queue_mapping to zero */
skb_set_queue_mapping(skb, 0);
rc = q->enqueue(skb, q);
qdisc_run(dev);
-spin_unlock(&dev->queue_lock);
+spin_unlock(&txq->lock);

rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
goto out;
}
-spin_unlock(&dev->queue_lock);
+spin_unlock(&txq->lock);
}

/* The device has no queue. Common case for software devices:
@@ -1919,14 +1921,17 @@ static void net_tx_action(struct softirq_action *h)

while (head) {
struct net_device *dev = head;
+struct netdev_queue *txq;
head = head->next_sched;

+txq = &dev->tx_queue;
+
smp_mb__before_clear_bit();
clear_bit(__LINK_STATE_SCHED, &dev->state);

-if (spin_trylock(&dev->queue_lock)) {
+if (spin_trylock(&txq->lock)) {
qdisc_run(dev);
-spin_unlock(&dev->queue_lock);
+spin_unlock(&txq->lock);
} else {
netif_schedule(dev);
}
@@ -3787,7 +3792,6 @@ int register_netdevice(struct net_device *dev)
BUG_ON(!dev_net(dev));
net = dev_net(dev);

-spin_lock_init(&dev->queue_lock);
spin_lock_init(&dev->_xmit_lock);
netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
dev->xmit_lock_owner = -1;
@@ -4072,10 +4076,17 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
return &dev->stats;
}

+static void netdev_init_one_queue(struct net_device *dev,
+struct netdev_queue *queue)
+{
+spin_lock_init(&queue->lock);
+queue->dev = dev;
+}
+
static void netdev_init_queues(struct net_device *dev)
{
-dev->rx_queue.dev = dev;
-dev->tx_queue.dev = dev;
+netdev_init_one_queue(dev, &dev->rx_queue);
+netdev_init_one_queue(dev, &dev->tx_queue);
}

/**
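Condensed, the enqueue half of dev_queue_xmit() now reads roughly as below. This is a sketch only: the GSO path, the lockless pre-check, the no-queue device case, and error handling are elided, and it assumes q->enqueue is non-NULL.

static int example_enqueue_path(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq = &dev->tx_queue;
        struct Qdisc *q;
        int rc;

        rcu_read_lock_bh();
        spin_lock(&txq->lock);          /* was &dev->queue_lock */
        q = dev->qdisc;                 /* re-read under the queue lock */
        skb_set_queue_mapping(skb, 0);
        rc = q->enqueue(skb, q);
        qdisc_run(dev);                 /* qdisc_run() is still per-device */
        spin_unlock(&txq->lock);
        rcu_read_unlock_bh();

        return rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
}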
10 changes: 5 additions & 5 deletions trunk/net/mac80211/main.c
@@ -636,7 +636,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)

/* ensure that TX flow won't interrupt us
* until the end of the call to requeue function */
-spin_lock_bh(&local->mdev->queue_lock);
+spin_lock_bh(&local->mdev->tx_queue.lock);

/* create a new queue for this aggregation */
ret = ieee80211_ht_agg_queue_add(local, sta, tid);
@@ -675,7 +675,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)

/* Will put all the packets in the new SW queue */
ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
-spin_unlock_bh(&local->mdev->queue_lock);
+spin_unlock_bh(&local->mdev->tx_queue.lock);
spin_unlock_bh(&sta->lock);

/* send an addBA request */
@@ -701,7 +701,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
err_unlock_queue:
kfree(sta->ampdu_mlme.tid_tx[tid]);
sta->ampdu_mlme.tid_tx[tid] = NULL;
-spin_unlock_bh(&local->mdev->queue_lock);
+spin_unlock_bh(&local->mdev->tx_queue.lock);
ret = -EBUSY;
err_unlock_sta:
spin_unlock_bh(&sta->lock);
@@ -875,10 +875,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)

/* avoid ordering issues: we are the only one that can modify
* the content of the qdiscs */
-spin_lock_bh(&local->mdev->queue_lock);
+spin_lock_bh(&local->mdev->tx_queue.lock);
/* remove the queue for this aggregation */
ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
-spin_unlock_bh(&local->mdev->queue_lock);
+spin_unlock_bh(&local->mdev->tx_queue.lock);

/* we just requeued the all the frames that were in the removed
* queue, and since we might miss a softirq we do netif_schedule.
2 changes: 1 addition & 1 deletion trunk/net/mac80211/wme.c
@@ -648,7 +648,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
}

/**
- * the caller needs to hold local->mdev->queue_lock
+ * the caller needs to hold local->mdev->tx_queue.lock
*/
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
struct sta_info *sta, u16 tid,
2 changes: 1 addition & 1 deletion trunk/net/sched/sch_api.c
@@ -606,7 +606,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
sch->stats_lock = &dev->ingress_lock;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else {
-sch->stats_lock = &dev->queue_lock;
+sch->stats_lock = &dev_queue->lock;
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
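The packet-scheduler hunks below all make the same substitution: a qdisc derives the lock guarding its statistics and rate estimators from the queue it is attached to, not from the device. As a one-line sketch (hypothetical helper name, not in the patch):

static spinlock_t *example_qdisc_stats_lock(struct Qdisc *sch)
{
        return &sch->dev_queue->lock;   /* was &qdisc_dev(sch)->queue_lock */
}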
8 changes: 4 additions & 4 deletions trunk/net/sched/sch_cbq.c
@@ -1746,10 +1746,10 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
#ifdef CONFIG_NET_CLS_ACT
struct cbq_sched_data *q = qdisc_priv(sch);

-spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+spin_lock_bh(&sch->dev_queue->lock);
if (q->rx_class == cl)
q->rx_class = NULL;
-spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+spin_unlock_bh(&sch->dev_queue->lock);
#endif

cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
-&qdisc_dev(sch)->queue_lock,
+&sch->dev_queue->lock,
tca[TCA_RATE]);
return 0;
}
@@ -1919,7 +1919,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t

if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
-&qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
+&sch->dev_queue->lock, tca[TCA_RATE]);

*arg = (unsigned long)cl;
return 0;
40 changes: 20 additions & 20 deletions trunk/net/sched/sch_generic.c
@@ -29,31 +29,31 @@
/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * queue->lock spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via top level device
- * spinlock dev->queue_lock.
+ * spinlock queue->lock.
* - ingress filtering is serialized via top level device
* spinlock dev->ingress_lock.
* - updates to tree and tree walking are only done under the rtnl mutex.
*/

void qdisc_lock_tree(struct net_device *dev)
-__acquires(dev->queue_lock)
+__acquires(dev->tx_queue.lock)
__acquires(dev->ingress_lock)
{
-spin_lock_bh(&dev->queue_lock);
+spin_lock_bh(&dev->tx_queue.lock);
spin_lock(&dev->ingress_lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);

void qdisc_unlock_tree(struct net_device *dev)
__releases(dev->ingress_lock)
-__releases(dev->queue_lock)
+__releases(dev->tx_queue.lock)
{
spin_unlock(&dev->ingress_lock);
-spin_unlock_bh(&dev->queue_lock);
+spin_unlock_bh(&dev->tx_queue.lock);
}
EXPORT_SYMBOL(qdisc_unlock_tree);

@@ -118,15 +118,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
}

/*
- * NOTE: Called under dev->queue_lock with locally disabled BH.
+ * NOTE: Called under queue->lock with locally disabled BH.
*
* __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. dev->queue_lock serializes queue accesses for
+ * device at a time. queue->lock serializes queue accesses for
* this device AND dev->qdisc pointer itself.
*
* netif_tx_lock serializes accesses to device driver.
*
- * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * queue->lock and netif_tx_lock are mutually exclusive,
* if one is grabbed, another must be free.
*
* Note, that this procedure can be called by a watchdog timer
@@ -148,14 +148,14 @@ static inline int qdisc_restart(struct net_device *dev)


/* And release queue */
-spin_unlock(&dev->queue_lock);
+spin_unlock(&q->dev_queue->lock);

HARD_TX_LOCK(dev, smp_processor_id());
if (!netif_subqueue_stopped(dev, skb))
ret = dev_hard_start_xmit(skb, dev);
HARD_TX_UNLOCK(dev);

-spin_lock(&dev->queue_lock);
+spin_lock(&q->dev_queue->lock);
q = dev->qdisc;

switch (ret) {
@@ -482,7 +482,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
sch = qdisc_alloc(dev_queue, ops);
if (IS_ERR(sch))
goto errout;
-sch->stats_lock = &dev->queue_lock;
+sch->stats_lock = &dev_queue->lock;
sch->parent = parentid;

if (!ops->init || ops->init(sch, NULL) == 0)
@@ -494,7 +494,7 @@ }
}
EXPORT_SYMBOL(qdisc_create_dflt);

-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
@@ -514,7 +514,7 @@ static void __qdisc_destroy(struct rcu_head *head)
kfree((char *) qdisc - qdisc->padded);
}

-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
@@ -566,13 +566,13 @@ void dev_activate(struct net_device *dev)
/* Delay activation until next carrier-on event */
return;

-spin_lock_bh(&dev->queue_lock);
+spin_lock_bh(&dev->tx_queue.lock);
rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
if (dev->qdisc != &noqueue_qdisc) {
dev->trans_start = jiffies;
dev_watchdog_up(dev);
}
-spin_unlock_bh(&dev->queue_lock);
+spin_unlock_bh(&dev->tx_queue.lock);
}

void dev_deactivate(struct net_device *dev)
@@ -581,15 +581,15 @@ void dev_deactivate(struct net_device *dev)
struct sk_buff *skb;
int running;

-spin_lock_bh(&dev->queue_lock);
+spin_lock_bh(&dev->tx_queue.lock);
qdisc = dev->qdisc;
dev->qdisc = &noop_qdisc;

qdisc_reset(qdisc);

skb = dev->gso_skb;
dev->gso_skb = NULL;
-spin_unlock_bh(&dev->queue_lock);
+spin_unlock_bh(&dev->tx_queue.lock);

kfree_skb(skb);

@@ -607,9 +607,9 @@ void dev_deactivate(struct net_device *dev)
* Double-check inside queue lock to ensure that all effects
* of the queue run are visible when we return.
*/
-spin_lock_bh(&dev->queue_lock);
+spin_lock_bh(&dev->tx_queue.lock);
running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-spin_unlock_bh(&dev->queue_lock);
+spin_unlock_bh(&dev->tx_queue.lock);

/*
* The running flag should never be set at this point because
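The trickiest invariant survives the rename: qdisc_restart() still drops the queue lock around the driver call so that queue->lock and netif_tx_lock are never held together. A simplified sketch of that dance after this patch (dequeue details, requeue handling, and return-code processing elided; HARD_TX_LOCK is the file-local helper used in the hunk above):

/* Called with q->dev_queue->lock held and BHs disabled. */
static int example_qdisc_restart(struct net_device *dev)
{
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb = q->dequeue(q);
        int ret = NETDEV_TX_BUSY;

        if (skb == NULL)
                return 0;

        spin_unlock(&q->dev_queue->lock);       /* release queue */

        HARD_TX_LOCK(dev, smp_processor_id());
        if (!netif_subqueue_stopped(dev, skb))
                ret = dev_hard_start_xmit(skb, dev);
        HARD_TX_UNLOCK(dev);

        spin_lock(&q->dev_queue->lock);         /* reacquire queue */
        return ret;
}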
(Diffs for the remaining changed files were not loaded.)