vxlan: use exit_batch_rtnl() method
exit_batch_rtnl() is called while RTNL is held,
and devices to be unregistered can be queued in the dev_kill_list.

This saves one rtnl_lock()/rtnl_unlock() pair per netns
and one unregister_netdevice_many() call.
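
For context, a rough driver-side sketch of the before/after pattern (illustrative
only: the foo_* names and foo_link_ops are hypothetical and not part of this patch):

    /* Old style: ->exit_batch must take RTNL itself and flush its own
     * kill list with unregister_netdevice_many().
     */
    static void foo_exit_batch(struct list_head *net_list)
    {
            struct net_device *dev, *aux;
            struct net *net;
            LIST_HEAD(list);

            rtnl_lock();
            list_for_each_entry(net, net_list, exit_list)
                    for_each_netdev_safe(net, dev, aux)
                            if (dev->rtnl_link_ops == &foo_link_ops)
                                    unregister_netdevice_queue(dev, &list);
            unregister_netdevice_many(&list);
            rtnl_unlock();
    }

    /* New style: ->exit_batch_rtnl already runs under RTNL; devices queued
     * on dev_to_kill are unregistered once by the core for all drivers.
     */
    static void foo_exit_batch_rtnl(struct list_head *net_list,
                                    struct list_head *dev_to_kill)
    {
            struct net_device *dev, *aux;
            struct net *net;

            list_for_each_entry(net, net_list, exit_list)
                    for_each_netdev_safe(net, dev, aux)
                            if (dev->rtnl_link_ops == &foo_link_ops)
                                    unregister_netdevice_queue(dev, dev_to_kill);
    }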

v4: (Paolo feedback: https://netdev-3.bots.linux.dev/vmksft-net/results/453141/17-udpgro-fwd-sh/stdout)
  - Changed vxlan_destroy_tunnels() to use vxlan_dellink()
    instead of unregister_netdevice_queue() to properly remove
    devices from vn->vxlan_list (see the sketch after this list).
  - vxlan_destroy_tunnels() can simply iterate over one list
    (vn->vxlan_list), which is the most efficient way to find all devices.
  - Moved sanity checks in a separate vxlan_exit_net() method.
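
To see why the first item matters: each vxlan_dev is linked into the
per-netns vn->vxlan_list through its 'next' member, so queueing only the
netdev unregister would leave a stale entry on that list. A simplified
illustration of the difference (hypothetical helper names; not the actual
vxlan_core.c bodies, which also flush forwarding state):

    static void queue_only(struct vxlan_dev *vxlan, struct list_head *head)
    {
            /* The device goes away, but vxlan stays on vn->vxlan_list. */
            unregister_netdevice_queue(vxlan->dev, head);
    }

    static void dellink_style(struct vxlan_dev *vxlan, struct list_head *head)
    {
            /* Unlink from vn->vxlan_list first, then queue the unregister;
             * this is what going through vxlan_dellink() achieves.
             */
            list_del(&vxlan->next);
            unregister_netdevice_queue(vxlan->dev, head);
    }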

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Antoine Tenart <atenart@kernel.org>
Link: https://lore.kernel.org/r/20240206144313.2050392-10-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Eric Dumazet authored and Jakub Kicinski committed Feb 8, 2024
1 parent 70f16ea commit 110d304
Showing 1 changed file with 19 additions and 31 deletions.
50 changes: 19 additions & 31 deletions drivers/net/vxlan/vxlan_core.c
@@ -4826,55 +4826,43 @@ static __net_init int vxlan_init_net(struct net *net)
                                          NULL);
 }
 
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+                                             struct list_head *dev_to_kill)
 {
-        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
         struct vxlan_dev *vxlan, *next;
-        struct net_device *dev, *aux;
 
-        for_each_netdev_safe(net, dev, aux)
-                if (dev->rtnl_link_ops == &vxlan_link_ops)
-                        unregister_netdevice_queue(dev, head);
-
-        list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
-                /* If vxlan->dev is in the same netns, it has already been added
-                 * to the list by the previous loop.
-                 */
-                if (!net_eq(dev_net(vxlan->dev), net))
-                        unregister_netdevice_queue(vxlan->dev, head);
-        }
+        list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+                vxlan_dellink(vxlan->dev, dev_to_kill);
 }
 
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+                                             struct list_head *dev_to_kill)
 {
         struct net *net;
-        LIST_HEAD(list);
-        unsigned int h;
 
+        ASSERT_RTNL();
         list_for_each_entry(net, net_list, exit_list) {
                 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 
-                unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
-        }
-        rtnl_lock();
-        list_for_each_entry(net, net_list, exit_list)
-                vxlan_destroy_tunnels(net, &list);
+                __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
 
-        unregister_netdevice_many(&list);
-        rtnl_unlock();
+                vxlan_destroy_tunnels(vn, dev_to_kill);
+        }
+}
 
-        list_for_each_entry(net, net_list, exit_list) {
-                struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+        unsigned int h;
 
-                for (h = 0; h < PORT_HASH_SIZE; ++h)
-                        WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
-        }
+        for (h = 0; h < PORT_HASH_SIZE; ++h)
+                WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
 static struct pernet_operations vxlan_net_ops = {
         .init = vxlan_init_net,
-        .exit_batch = vxlan_exit_batch_net,
+        .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+        .exit = vxlan_exit_net,
         .id = &vxlan_net_id,
         .size = sizeof(struct vxlan_net),
 };
