Skip to content

Commit

Permalink
net: Separate the close_list and the unreg_list v2
Browse files Browse the repository at this point in the history
Separate the unreg_list and the close_list in dev_close_many preventing
dev_close_many from permuting the unreg_list.  The permutations of the
unreg_list have resulted in cases where the loopback device is accessed
after it has been freed in code such as dst_ifdown, resulting in subtle
memory corruption.

This is the second bug from sharing the storage between the close_list
and the unreg_list.  The issues that crop up with sharing are
apparently too subtle to show up in normal testing or usage, so let's
forget about being clever and use two separate lists.

v2: Make all callers pass in a close_list to dev_close_many

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Eric W. Biederman authored and David S. Miller committed Oct 7, 2013
1 parent d639fea commit 5cde282
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 14 deletions.
1 change: 1 addition & 0 deletions include/linux/netdevice.h
Original file line number Diff line number Diff line change
Expand Up @@ -1143,6 +1143,7 @@ struct net_device {
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;

/* directly linked devices, like slaves for bonding */
struct {
Expand Down
25 changes: 14 additions & 11 deletions net/core/dev.c
Original file line number Diff line number Diff line change
Expand Up @@ -1307,7 +1307,7 @@ static int __dev_close_many(struct list_head *head)
ASSERT_RTNL();
might_sleep();

list_for_each_entry(dev, head, unreg_list) {
list_for_each_entry(dev, head, close_list) {
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

clear_bit(__LINK_STATE_START, &dev->state);
Expand All @@ -1323,7 +1323,7 @@ static int __dev_close_many(struct list_head *head)

dev_deactivate_many(head);

list_for_each_entry(dev, head, unreg_list) {
list_for_each_entry(dev, head, close_list) {
const struct net_device_ops *ops = dev->netdev_ops;

/*
Expand Down Expand Up @@ -1351,7 +1351,7 @@ static int __dev_close(struct net_device *dev)
/* Temporarily disable netpoll until the interface is down */
netpoll_rx_disable(dev);

list_add(&dev->unreg_list, &single);
list_add(&dev->close_list, &single);
retval = __dev_close_many(&single);
list_del(&single);

Expand All @@ -1362,21 +1362,20 @@ static int __dev_close(struct net_device *dev)
static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(tmp_list);

list_for_each_entry_safe(dev, tmp, head, unreg_list)
/* Remove the devices that don't need to be closed */
list_for_each_entry_safe(dev, tmp, head, close_list)
if (!(dev->flags & IFF_UP))
list_move(&dev->unreg_list, &tmp_list);
list_del_init(&dev->close_list);

__dev_close_many(head);

list_for_each_entry(dev, head, unreg_list) {
list_for_each_entry_safe(dev, tmp, head, close_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_DOWN, dev);
list_del_init(&dev->close_list);
}

/* rollback_registered_many needs the complete original list */
list_splice(&tmp_list, head);
return 0;
}

Expand All @@ -1397,7 +1396,7 @@ int dev_close(struct net_device *dev)
/* Block netpoll rx while the interface is going down */
netpoll_rx_disable(dev);

list_add(&dev->unreg_list, &single);
list_add(&dev->close_list, &single);
dev_close_many(&single);
list_del(&single);

Expand Down Expand Up @@ -5439,6 +5438,7 @@ static void net_set_todo(struct net_device *dev)
static void rollback_registered_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);

BUG_ON(dev_boot_phase);
ASSERT_RTNL();
Expand All @@ -5461,7 +5461,9 @@ static void rollback_registered_many(struct list_head *head)
}

/* If device is running, close it first. */
dev_close_many(head);
list_for_each_entry(dev, head, unreg_list)
list_add_tail(&dev->close_list, &close_head);
dev_close_many(&close_head);

list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
Expand Down Expand Up @@ -6257,6 +6259,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,

INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->close_list);
INIT_LIST_HEAD(&dev->link_watch_list);
INIT_LIST_HEAD(&dev->adj_list.upper);
INIT_LIST_HEAD(&dev->adj_list.lower);
Expand Down
6 changes: 3 additions & 3 deletions net/sched/sch_generic.c
Original file line number Diff line number Diff line change
Expand Up @@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
struct net_device *dev;
bool sync_needed = false;

list_for_each_entry(dev, head, unreg_list) {
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
&noop_qdisc);
if (dev_ingress_queue(dev))
Expand All @@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
synchronize_net();

/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, unreg_list)
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
yield();
}
Expand All @@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
{
LIST_HEAD(single);

list_add(&dev->unreg_list, &single);
list_add(&dev->close_list, &single);
dev_deactivate_many(&single);
list_del(&single);
}
Expand Down

0 comments on commit 5cde282

Please sign in to comment.