net: Make cleanup_list and net::cleanup_list of llist type
This simplifies cleanup queueing and makes the cleanup lists use
llist primitives. Since llist has its own cmpxchg() ordering,
cleanup_list_lock is no longer needed.
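(Illustrative aside, not part of the patch: the lockless pattern llist
relies on can be mimicked with portable C11 atomics. The sketch below is
a userspace analogue of llist_add()/llist_del_all() under the made-up
names push()/pop_all(); the kernel's real implementation lives in
include/linux/llist.h.)

#include <stdatomic.h>
#include <stdio.h>

struct node {
        struct node *next;
};

struct lockless_list {
        _Atomic(struct node *) first;
};

/* Analogue of llist_add(): a cmpxchg loop replaces the spinlock. */
static void push(struct lockless_list *list, struct node *n)
{
        struct node *old = atomic_load_explicit(&list->first,
                                                memory_order_relaxed);
        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak_explicit(&list->first,
                                                        &old, n,
                                                        memory_order_release,
                                                        memory_order_relaxed));
}

/* Analogue of llist_del_all(): one xchg detaches the whole list. */
static struct node *pop_all(struct lockless_list *list)
{
        return atomic_exchange_explicit(&list->first, NULL,
                                        memory_order_acquire);
}

int main(void)
{
        struct lockless_list list = { NULL };
        struct node a, b, c;

        push(&list, &a);
        push(&list, &b);
        push(&list, &c);

        /* The consumer walks its private snapshot; entries come back
         * in LIFO order (c, b, a). */
        for (struct node *n = pop_all(&list); n; n = n->next)
                printf("popped %p\n", (void *)n);
        return 0;
}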

Also, struct llist_node is smaller than struct list_head, so we
save some bytes in struct net with this patch.
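(For scale: struct list_head carries two pointers where struct llist_node
carries one, so on a 64-bit build the field drops from 16 to 8 bytes. A
standalone check, mirroring the kernel definitions:)

#include <stdio.h>

/* Same shapes as the kernel's definitions. */
struct list_head { struct list_head *next, *prev; };
struct llist_node { struct llist_node *next; };

int main(void)
{
        /* Prints 16 and 8 on an LP64 target. */
        printf("list_head:  %zu bytes\n", sizeof(struct list_head));
        printf("llist_node: %zu bytes\n", sizeof(struct llist_node));
        return 0;
}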

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Kirill Tkhai authored and David S. Miller committed Feb 20, 2018
1 parent 19efbd9 commit 65b7b5b
Showing 2 changed files with 8 additions and 15 deletions.
include/net/net_namespace.h (3 changes: 2 additions & 1 deletion)

@@ -59,12 +59,13 @@ struct net {
 	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
-	struct list_head	cleanup_list;	/* namespaces on death row */
 	struct list_head	exit_list;	/* To linked to call pernet exit
 						 * methods on dead net (net_sem
 						 * read locked), or to unregister
 						 * pernet ops (net_sem wr locked).
 						 */
+	struct llist_node	cleanup_list;	/* namespaces on death row */
+
 	struct user_namespace	*user_ns;	/* Owning user namespace */
 	struct ucounts		*ucounts;
 	spinlock_t		nsid_lock;
net/core/net_namespace.c (20 changes: 6 additions & 14 deletions)

@@ -481,21 +481,18 @@ static void unhash_nsid(struct net *net, struct net *last)
 	spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
 	struct net *net, *tmp, *last;
-	struct list_head net_kill_list;
+	struct llist_node *net_kill_list;
 	LIST_HEAD(net_exit_list);
 	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
-	spin_lock_irq(&cleanup_list_lock);
-	list_replace_init(&cleanup_list, &net_kill_list);
-	spin_unlock_irq(&cleanup_list_lock);
+	net_kill_list = llist_del_all(&cleanup_list);
 again:
 	write = READ_ONCE(nr_sync_pernet_ops);
 	if (write)
@@ -510,7 +507,7 @@ static void cleanup_net(struct work_struct *work)
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	llist_for_each_entry(net, net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@
 	last = list_last_entry(&net_namespace_list, struct net, list);
 	rtnl_unlock();
 
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		unhash_nsid(net, last);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
@@ -585,12 +582,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	unsigned long flags;
-
-	spin_lock_irqsave(&cleanup_list_lock, flags);
-	list_add(&net->cleanup_list, &cleanup_list);
-	spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
+	llist_add(&net->cleanup_list, &cleanup_list);
 	queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);
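
(Aside: llist_for_each_entry() above recovers the containing struct net
from each embedded llist_node via container_of(). A minimal self-contained
sketch of that traversal, using a hypothetical toy_net as a stand-in for
struct net; this is not kernel code:)

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };

/* Toy stand-in for struct net with an embedded cleanup_list node. */
struct toy_net {
        int id;
        struct node cleanup_list;
};

int main(void)
{
        struct toy_net nets[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct node *head = NULL;

        /* Chain the embedded nodes, newest first (as llist_add would). */
        for (int i = 0; i < 3; i++) {
                nets[i].cleanup_list.next = head;
                head = &nets[i].cleanup_list;
        }

        /* What llist_for_each_entry() boils down to: step node to node,
         * converting each back to its containing structure. */
        for (struct node *n = head; n; n = n->next) {
                struct toy_net *net = container_of(n, struct toy_net,
                                                   cleanup_list);
                printf("cleaning up toy net %d\n", net->id);
        }
        return 0;
}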
