From 0734d7c3d93cdcb8a56ce914d3c661300f24434d Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 14 Jan 2025 20:55:27 +0000
Subject: [PATCH 1/5] net: expedite synchronize_net() for cleanup_net()

cleanup_net() is the single thread responsible for netns dismantles,
and a serious bottleneck.

Before we can get per-netns RTNL, make sure all synchronize_net()
calls made from this thread use synchronize_rcu_expedited().

v3: deal with CONFIG_NET_NS=n

Signed-off-by: Eric Dumazet
Reviewed-by: Jesse Brandeburg
Link: https://patch.msgid.link/20250114205531.967841-2-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 include/net/net_namespace.h |  2 ++
 net/core/dev.c              | 11 ++++++++++-
 net/core/net_namespace.c    |  5 +++++
 3 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 5a2a0df8ad91b..0f5eb9db0c626 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -210,6 +210,8 @@ void net_ns_barrier(void);
 struct ns_common *get_net_ns(struct ns_common *ns);
 struct net *get_net_ns_by_fd(int fd);
 
+extern struct task_struct *cleanup_net_task;
+
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index 47e6b0f73cfc7..115a7a0a11042 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10099,6 +10099,15 @@ static void dev_index_release(struct net *net, int ifindex)
 	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
 }
 
+static bool from_cleanup_net(void)
+{
+#ifdef CONFIG_NET_NS
+	return current == cleanup_net_task;
+#else
+	return false;
+#endif
+}
+
 /* Delayed registration/unregisteration */
 LIST_HEAD(net_todo_list);
 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
@@ -11474,7 +11483,7 @@ EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
 void synchronize_net(void)
 {
 	might_sleep();
-	if (rtnl_is_locked())
+	if (from_cleanup_net() || rtnl_is_locked())
 		synchronize_rcu_expedited();
 	else
 		synchronize_rcu();
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b5cd3ae4f04cf..cb39a12b2f829 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -588,6 +588,8 @@ static void unhash_nsid(struct net *net, struct net *last)
 
 static LLIST_HEAD(cleanup_list);
 
+struct task_struct *cleanup_net_task;
+
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
@@ -596,6 +598,8 @@ static void cleanup_net(struct work_struct *work)
 	LIST_HEAD(net_exit_list);
 	LIST_HEAD(dev_kill_list);
 
+	cleanup_net_task = current;
+
 	/* Atomically snapshot the list of namespaces to cleanup */
 	net_kill_list = llist_del_all(&cleanup_list);
 
@@ -670,6 +674,7 @@ static void cleanup_net(struct work_struct *work)
 		put_user_ns(net->user_ns);
 		net_free(net);
 	}
+	cleanup_net_task = NULL;
 }
 
 /**
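The net effect of the three hunks above is compact enough to read in one
place. The following condensed extract shows the resulting code paths once
the patch is applied (an illustration assembled from the hunks, not an
additional change):

    /* net/core/net_namespace.c: the dismantle worker publishes its task. */
    static void cleanup_net(struct work_struct *work)
    {
            cleanup_net_task = current;     /* single writer: this worker */
            /* ... dismantle every netns queued on cleanup_list ... */
            cleanup_net_task = NULL;
    }

    /* net/core/dev.c: grace periods requested by that thread are expedited. */
    void synchronize_net(void)
    {
            might_sleep();
            if (from_cleanup_net() || rtnl_is_locked())
                    synchronize_rcu_expedited();
            else
                    synchronize_rcu();
    }

Note that reading cleanup_net_task needs no locking: only the cleanup_net()
worker writes it, and every other thread compares it against its own
current, which can never match.
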
From 8a2b61e9e87936649073e287242ccdcbfb636906 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 14 Jan 2025 20:55:28 +0000
Subject: [PATCH 2/5] net: no longer assume RTNL is held in
 flush_all_backlogs()

flush_all_backlogs() uses per-cpu and static data to hold its
temporary data, on the assumption it is called under RTNL protection.

The following patch in the series will break this assumption.

Use a dynamically allocated piece of memory instead.

In the unlikely case the allocation fails, use boot-time allocated
memory.

Signed-off-by: Eric Dumazet
Reviewed-by: Jesse Brandeburg
Link: https://patch.msgid.link/20250114205531.967841-3-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c | 53 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 18 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 115a7a0a11042..41da51f95486a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6013,8 +6013,6 @@ void netif_receive_skb_list(struct list_head *head)
 }
 EXPORT_SYMBOL(netif_receive_skb_list);
 
-static DEFINE_PER_CPU(struct work_struct, flush_works);
-
 /* Network device is going away, flush any packets still pending */
 static void flush_backlog(struct work_struct *work)
 {
@@ -6071,36 +6069,54 @@ static bool flush_required(int cpu)
 	return true;
 }
 
+struct flush_backlogs {
+	cpumask_t flush_cpus;
+	struct work_struct w[];
+};
+
+static struct flush_backlogs *flush_backlogs_alloc(void)
+{
+	return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
+		       GFP_KERNEL);
+}
+
+static struct flush_backlogs *flush_backlogs_fallback;
+static DEFINE_MUTEX(flush_backlogs_mutex);
+
 static void flush_all_backlogs(void)
 {
-	static cpumask_t flush_cpus;
+	struct flush_backlogs *ptr = flush_backlogs_alloc();
 	unsigned int cpu;
 
-	/* since we are under rtnl lock protection we can use static data
-	 * for the cpumask and avoid allocating on stack the possibly
-	 * large mask
-	 */
-	ASSERT_RTNL();
+	if (!ptr) {
+		mutex_lock(&flush_backlogs_mutex);
+		ptr = flush_backlogs_fallback;
+	}
+	cpumask_clear(&ptr->flush_cpus);
 
 	cpus_read_lock();
-	cpumask_clear(&flush_cpus);
 	for_each_online_cpu(cpu) {
 		if (flush_required(cpu)) {
-			queue_work_on(cpu, system_highpri_wq,
-				      per_cpu_ptr(&flush_works, cpu));
-			cpumask_set_cpu(cpu, &flush_cpus);
+			INIT_WORK(&ptr->w[cpu], flush_backlog);
+			queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
+			__cpumask_set_cpu(cpu, &ptr->flush_cpus);
 		}
 	}
 
 	/* we can have in flight packet[s] on the cpus we are not flushing,
 	 * synchronize_net() in unregister_netdevice_many() will take care of
-	 * them
+	 * them.
 	 */
-	for_each_cpu(cpu, &flush_cpus)
-		flush_work(per_cpu_ptr(&flush_works, cpu));
+	for_each_cpu(cpu, &ptr->flush_cpus)
+		flush_work(&ptr->w[cpu]);
 
 	cpus_read_unlock();
+
+	if (ptr != flush_backlogs_fallback)
+		kfree(ptr);
+	else
+		mutex_unlock(&flush_backlogs_mutex);
 }
 
 static void net_rps_send_ipi(struct softnet_data *remsd)
@@ -12313,12 +12329,13 @@ static int __init net_dev_init(void)
 	 *	Initialise the packet receive queues.
 	 */
 
+	flush_backlogs_fallback = flush_backlogs_alloc();
+	if (!flush_backlogs_fallback)
+		goto out;
+
 	for_each_possible_cpu(i) {
-		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-		INIT_WORK(flush, flush_backlog);
-
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 #ifdef CONFIG_XFRM_OFFLOAD
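For reference, struct_size_t(struct flush_backlogs, w, nr_cpu_ids)
evaluates to sizeof(struct flush_backlogs) + nr_cpu_ids *
sizeof(struct work_struct), i.e. the cpumask header followed by one work
item per possible CPU. A condensed sketch of the resulting allocation
discipline (names as in the patch, comments editorial):

    struct flush_backlogs *ptr = flush_backlogs_alloc();    /* GFP_KERNEL */

    if (!ptr) {                             /* allocation failed: */
            mutex_lock(&flush_backlogs_mutex);  /* serialize users of ... */
            ptr = flush_backlogs_fallback;  /* ... the one boot-time copy */
    }
    /* ... queue and flush one work item per CPU via ptr->w[cpu] ... */
    if (ptr != flush_backlogs_fallback)
            kfree(ptr);
    else
            mutex_unlock(&flush_backlogs_mutex);

Because flush_backlogs_fallback is allocated once in net_dev_init() and
guarded by flush_backlogs_mutex, flush_all_backlogs() keeps working under
memory pressure without relying on RTNL for mutual exclusion.
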
From cfa579f6665635b72d4a075fc91eb144c2b0f74e Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 14 Jan 2025 20:55:29 +0000
Subject: [PATCH 3/5] net: no longer hold RTNL while calling
 flush_all_backlogs()

flush_all_backlogs() is called from unregister_netdevice_many_notify()
as part of netdevice dismantles.

This is currently called under RTNL, and can last up to 50 ms on busy
hosts.

There is no reason to hold RTNL at this stage if our caller is
cleanup_net(): the netns are no longer visible, devices are in
NETREG_UNREGISTERING state, and no other thread can mess with our state
while RTNL is temporarily released.

In order to provide isolation, this patch provides a separate
'net_todo_list' for cleanup_net().

Signed-off-by: Eric Dumazet
Reviewed-by: Jesse Brandeburg
Link: https://patch.msgid.link/20250114205531.967841-4-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 41da51f95486a..d652ea66e5755 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10124,14 +10124,37 @@ static bool from_cleanup_net(void)
 #endif
 }
 
+static void rtnl_drop_if_cleanup_net(void)
+{
+	if (from_cleanup_net())
+		__rtnl_unlock();
+}
+
+static void rtnl_acquire_if_cleanup_net(void)
+{
+	if (from_cleanup_net())
+		rtnl_lock();
+}
+
 /* Delayed registration/unregisteration */
 LIST_HEAD(net_todo_list);
+static LIST_HEAD(net_todo_list_for_cleanup_net);
+
+/* TODO: net_todo_list/net_todo_list_for_cleanup_net should probably
+ * be provided by callers, instead of being static, rtnl protected.
+ */
+static struct list_head *todo_list(void)
+{
+	return from_cleanup_net() ? &net_todo_list_for_cleanup_net :
+				    &net_todo_list;
+}
+
 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 atomic_t dev_unreg_count = ATOMIC_INIT(0);
 
 static void net_set_todo(struct net_device *dev)
 {
-	list_add_tail(&dev->todo_list, &net_todo_list);
+	list_add_tail(&dev->todo_list, todo_list());
 }
 
 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -10979,7 +11002,7 @@ void netdev_run_todo(void)
 #endif
 
 	/* Snapshot list, allow later requests */
-	list_replace_init(&net_todo_list, &list);
+	list_replace_init(todo_list(), &list);
 
 	__rtnl_unlock();
 
@@ -11602,8 +11625,10 @@ void unregister_netdevice_many_notify(struct list_head *head,
 		unlist_netdevice(dev);
 		WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
 	}
-	flush_all_backlogs();
 
+	rtnl_drop_if_cleanup_net();
+	flush_all_backlogs();
+	rtnl_acquire_if_cleanup_net();
 	synchronize_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
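The safety argument in the changelog can be pictured as follows (editorial
illustration of the window this patch opens; both helpers are defined in
the hunk above):

    cleanup_net() worker                     any other RTNL user
    --------------------                     -------------------
    rtnl_lock()
    unlist_netdevice(dev)
    dev->reg_state = NETREG_UNREGISTERING
    rtnl_drop_if_cleanup_net()   --------->  rtnl_lock() may now succeed,
    flush_all_backlogs()                     but the dying netns and its
      (runs without RTNL, up to ~50 ms)      devices are already unlisted
    rtnl_acquire_if_cleanup_net() <--------  and unreachable
    synchronize_net()

The separate net_todo_list_for_cleanup_net exists for the same reason:
while RTNL is dropped, another thread may run netdev_run_todo(), and
todo_list() keeps it from snapshotting todo entries that belong to
cleanup_net().
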
From ae646f1a0bb97401bac0044bbe2a179a1e38b408 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 14 Jan 2025 20:55:30 +0000
Subject: [PATCH 4/5] net: reduce RTNL hold duration in
 unregister_netdevice_many_notify() (part 1)

Two synchronize_net() calls are currently done while holding RTNL.

This is a source of RTNL contention in workloads adding and deleting
many network namespaces per second, because synchronize_rcu() and
synchronize_rcu_expedited() can take 60+ ms in some cases.

For cleanup_net() use, temporarily release RTNL while calling the last
synchronize_net().

This should be safe, because devices are no longer visible to other
threads at this point.

In any case, the new netdev_lock() / netdev_unlock() infrastructure
that we are adding should allow us to fix potential issues, with a
combination of a per-device mutex and dev->reg_state awareness.

Signed-off-by: Eric Dumazet
Reviewed-by: Jesse Brandeburg
Link: https://patch.msgid.link/20250114205531.967841-5-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/core/dev.c b/net/core/dev.c
index d652ea66e5755..3924a4af68b81 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -11629,6 +11629,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
 	rtnl_drop_if_cleanup_net();
 	flush_all_backlogs();
 	rtnl_acquire_if_cleanup_net();
+	/* TODO: move this before the prior rtnl_acquire_if_cleanup_net() */
 	synchronize_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
@@ -11689,7 +11690,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
 #endif
 	}
 
+	rtnl_drop_if_cleanup_net();
 	synchronize_net();
+	rtnl_acquire_if_cleanup_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
 		netdev_put(dev, &dev->dev_registered_tracker);

From 83419b61d187ce22aa3da5ffdda850fca3a12600 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 14 Jan 2025 20:55:31 +0000
Subject: [PATCH 5/5] net: reduce RTNL hold duration in
 unregister_netdevice_many_notify() (part 2)

One synchronize_net() call is currently done while holding RTNL.

This is a source of RTNL contention in workloads adding and deleting
many network namespaces per second, because synchronize_rcu() and
synchronize_rcu_expedited() can take 60+ ms in some cases.

For cleanup_net() use, temporarily release RTNL while calling the last
synchronize_net().

This should be safe, because devices are no longer visible to other
threads after the unlist_netdevice() call and setting dev->reg_state to
NETREG_UNREGISTERING.

In any case, the new netdev_lock() / netdev_unlock() infrastructure
that we are adding should allow us to fix potential issues, with a
combination of a per-device mutex and dev->reg_state awareness.

Signed-off-by: Eric Dumazet
Reviewed-by: Jesse Brandeburg
Link: https://patch.msgid.link/20250114205531.967841-6-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 3924a4af68b81..3c87cb1cb8771 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -11628,9 +11628,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
 
 	rtnl_drop_if_cleanup_net();
 	flush_all_backlogs();
-	rtnl_acquire_if_cleanup_net();
-	/* TODO: move this before the prior rtnl_acquire_if_cleanup_net() */
 	synchronize_net();
+	rtnl_acquire_if_cleanup_net();
 
 	list_for_each_entry(dev, head, unreg_list) {
 		struct sk_buff *skb = NULL;
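With parts 1 and 2 applied, the relevant section of
unregister_netdevice_many_notify() ends up as below (condensed, editorial
view of the final code); for a cleanup_net() caller, both synchronize_net()
calls touched by this series now run outside RTNL:

    	rtnl_drop_if_cleanup_net();
    	flush_all_backlogs();
    	synchronize_net();              /* expedited, RTNL not held */
    	rtnl_acquire_if_cleanup_net();
    	...
    	rtnl_drop_if_cleanup_net();
    	synchronize_net();              /* expedited, RTNL not held */
    	rtnl_acquire_if_cleanup_net();

For every other caller the two helpers are no-ops, so the locking rules on
the regular RTNL path are unchanged.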