net: sch_generic: avoid concurrent reset and enqueue op for lockless qdisc

Currently there are concurrent reset and enqueue operations on the
same lockless qdisc: nothing synchronizes q->enqueue() in
__dev_xmit_skb() with the qdisc reset operation in qdisc_deactivate()
called by dev_deactivate_queue(). This can cause an out-of-bounds
access to priv->ring[] in the hns3 driver if the user requests a
smaller queue num while __dev_xmit_skb() is still enqueuing an skb
with a larger queue_mapping after the corresponding qdisc has been
reset, and hns3_nic_net_xmit() is later called with that skb.
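
To make the failure mode concrete, here is a minimal userspace
analogue of the race (illustrative only, not kernel code; all names
are made up): one thread plays the unsynchronized q->enqueue() side
and indexes a ring array with a queue index chosen against the old
queue count, while another thread plays the reset/reconfigure side
and shrinks the array. Nothing orders the two, so the first thread
can index past the new end of the array, just as hns3_nic_net_xmit()
can index past the shrunken priv->ring[]:

/* race.c - userspace sketch of the reset vs. enqueue race.
 * Illustrative only; none of these names exist in the kernel.
 * Build: gcc -pthread -fsanitize=address race.c
 * The overflow is racy, so ASan may only report it on some runs.
 */
#include <pthread.h>
#include <stdlib.h>

#define OLD_QUEUES	16
#define NEW_QUEUES	8

static int *ring;			/* stands in for priv->ring[] */
static int ring_cnt = OLD_QUEUES;

/* plays __dev_xmit_skb(): enqueue with a queue index chosen while
 * the device still had OLD_QUEUES queues
 */
static void *xmit_side(void *unused)
{
	int queue_mapping = OLD_QUEUES - 1;

	(void)unused;
	ring[queue_mapping]++;		/* unsynchronized: may land past
					 * the new end of ring[] after
					 * the shrink below
					 */
	return NULL;
}

/* plays the qdisc reset plus driver reconfiguration to fewer queues */
static void *reset_side(void *unused)
{
	(void)unused;
	ring_cnt = NEW_QUEUES;
	ring = realloc(ring, ring_cnt * sizeof(*ring));
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	ring = calloc(OLD_QUEUES, sizeof(*ring));
	pthread_create(&a, NULL, xmit_side, NULL);
	pthread_create(&b, NULL, reset_side, NULL);
	pthread_join(&a, NULL);
	pthread_join(&b, NULL);
	free(ring);
	return 0;
}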

Reuse the existing synchronize_net() in dev_deactivate_many() to
make sure that any skb with a larger queue_mapping enqueued to the
old qdisc (which is saved in dev_queue->qdisc_sleeping) is always
purged when dev_reset_queue() is called.
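
Roughly, the ordering dev_deactivate_many() ends up with after this
patch looks like the sketch below (condensed from the diff that
follows; the watchdog, ingress-queue and busy-wait handling are
omitted):

void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list)
		/* 1) swap dev_queue->qdisc to noop_qdisc and mark the
		 *    old qdisc __QDISC_STATE_DEACTIVATED; new senders
		 *    either see the flag or never find the old qdisc
		 */
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);

	/* 2) wait for every in-flight __dev_xmit_skb() to leave its
	 *    RCU read-side section, i.e. for any enqueue to the old
	 *    qdisc (still reachable via dev_queue->qdisc_sleeping)
	 *    to finish
	 */
	synchronize_net();

	list_for_each_entry(dev, head, close_list)
		/* 3) only now purge the old qdiscs: no one can still
		 *    be enqueuing, so every stale skb is freed
		 */
		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
}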

Fixes: 6b3ba91 ("net: sched: allow qdiscs to handle locking")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Yunsheng Lin authored and David S. Miller committed Sep 10, 2020
1 parent edecfa9 commit 2fb541c
Showing 1 changed file with 33 additions and 15 deletions.
net/sched/sch_generic.c
@@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate);
 
 static void qdisc_deactivate(struct Qdisc *qdisc)
 {
-	bool nolock = qdisc->flags & TCQ_F_NOLOCK;
-
 	if (qdisc->flags & TCQ_F_BUILTIN)
 		return;
-	if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
-		return;
 
-	if (nolock)
-		spin_lock_bh(&qdisc->seqlock);
-	spin_lock_bh(qdisc_lock(qdisc));
-
 	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
-
-	qdisc_reset(qdisc);
-
-	spin_unlock_bh(qdisc_lock(qdisc));
-	if (nolock)
-		spin_unlock_bh(&qdisc->seqlock);
 }
 
 static void dev_deactivate_queue(struct net_device *dev,
@@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev,
 	}
 }
 
+static void dev_reset_queue(struct net_device *dev,
+			    struct netdev_queue *dev_queue,
+			    void *_unused)
+{
+	struct Qdisc *qdisc;
+	bool nolock;
+
+	qdisc = dev_queue->qdisc_sleeping;
+	if (!qdisc)
+		return;
+
+	nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+	if (nolock)
+		spin_lock_bh(&qdisc->seqlock);
+	spin_lock_bh(qdisc_lock(qdisc));
+
+	qdisc_reset(qdisc);
+
+	spin_unlock_bh(qdisc_lock(qdisc));
+	if (nolock)
+		spin_unlock_bh(&qdisc->seqlock);
+}
+
 static bool some_qdisc_is_busy(struct net_device *dev)
 {
 	unsigned int i;
@@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head)
 		dev_watchdog_down(dev);
 	}
 
-	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls or
+	 * outstanding qdisc enqueuing calls.
 	 * This is avoided if all devices are in dismantle phase :
 	 * Caller will call synchronize_net() for us
 	 */
 	synchronize_net();
 
+	list_for_each_entry(dev, head, close_list) {
+		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
+
+		if (dev_ingress_queue(dev))
+			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
+	}
+
 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev)) {
