cgroup: remove hierarchy_mutex
It was introduced so memcg could iterate the cgroup hierarchy without
holding cgroup_mutex, but soon afterwards it was replaced by a lockless
scheme in memcg.

Nothing has used hierarchy_mutex since then, so remove it.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Li Zefan authored and Tejun Heo committed Jun 7, 2012
1 parent 967db0e commit 6be96a5
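For context, the lockless scheme mentioned in the commit message is RCU-based: instead of serializing against hierarchy changes with a mutex, memcg scans the subtree by css ID under rcu_read_lock() and pins each result with css_tryget(). The sketch below is illustrative only, loosely modeled on the memcg iterator in mm/memcontrol.c of this era; the helper name is hypothetical, and it assumes a subsystem that sets ss->use_id (as memcg does), since css_get_next() walks the css-ID idr.

#include <linux/cgroup.h>

/*
 * Hypothetical helper: visit every css in the subtree under @root
 * without taking cgroup_mutex or any per-subsystem mutex.  The css-ID
 * idr is safe to scan under rcu_read_lock(), and css_tryget() fails on
 * a css that is being destroyed, so racing with rmdir just means that
 * cgroup is skipped.
 */
static void scan_subtree_lockless(struct cgroup_subsys *ss,
				  struct cgroup_subsys_state *root)
{
	int nextid = 1;	/* css IDs are allocated from 1 */

	for (;;) {
		struct cgroup_subsys_state *css;
		bool pinned = false;
		int found;

		rcu_read_lock();
		css = css_get_next(ss, nextid, root, &found);
		if (css && css_tryget(css))
			pinned = true;
		rcu_read_unlock();

		if (!css)
			break;		/* whole subtree scanned */
		nextid = found + 1;	/* resume after this ID */
		if (!pinned)
			continue;	/* was being destroyed; skip it */

		/* ... use css here; no cgroup_mutex held ... */

		css_put(css);
	}
}

The design point that made hierarchy_mutex redundant is that readers tolerate failure: a lookup that races with rmdir simply skips the dying cgroup, which is exactly why the comment in include/linux/cgroup.h warns that a returned css may be "not populated yet" or "being destroyed".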
Showing 3 changed files with 3 additions and 61 deletions.
2 changes: 1 addition & 1 deletion Documentation/cgroups/cgroups.txt
@@ -656,7 +656,7 @@ example in cpusets, no task may attach before 'cpus' and 'mems' are set
 up.
 
 void bind(struct cgroup *root)
-(cgroup_mutex and ss->hierarchy_mutex held by caller)
+(cgroup_mutex held by caller)
 
 Called when a cgroup subsystem is rebound to a different hierarchy
 and root cgroup. Currently this will only involve movement between
17 changes: 2 additions & 15 deletions include/linux/cgroup.h
@@ -499,22 +499,9 @@ struct cgroup_subsys {
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
-	/*
-	 * Protects sibling/children links of cgroups in this
-	 * hierarchy, plus protects which hierarchy (or none) the
-	 * subsystem is a part of (i.e. root/sibling). To avoid
-	 * potential deadlocks, the following operations should not be
-	 * undertaken while holding any hierarchy_mutex:
-	 *
-	 * - allocating memory
-	 * - initiating hotplug events
-	 */
-	struct mutex hierarchy_mutex;
-	struct lock_class_key subsys_key;
-
 	/*
 	 * Link to parent, and list entry in parent's children.
-	 * Protected by this->hierarchy_mutex and cgroup_lock()
+	 * Protected by cgroup_lock()
 	 */
 	struct cgroupfs_root *root;
 	struct list_head sibling;
@@ -602,7 +589,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
  * the lifetime of cgroup_subsys_state is subsys's matter.
  *
  * Looking up and scanning function should be called under rcu_read_lock().
- * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
+ * Taking cgroup_mutex is not necessary for following calls.
  * But the css returned by this routine can be "not populated yet" or "being
  * destroyed". The caller should check css and cgroup's status.
  */
45 changes: 0 additions & 45 deletions kernel/cgroup.c
@@ -1073,28 +1073,24 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 			BUG_ON(cgrp->subsys[i]);
 			BUG_ON(!dummytop->subsys[i]);
 			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
-			mutex_lock(&ss->hierarchy_mutex);
 			cgrp->subsys[i] = dummytop->subsys[i];
 			cgrp->subsys[i]->cgroup = cgrp;
 			list_move(&ss->sibling, &root->subsys_list);
 			ss->root = root;
 			if (ss->bind)
 				ss->bind(cgrp);
-			mutex_unlock(&ss->hierarchy_mutex);
 			/* refcount was already taken, and we're keeping it */
 		} else if (bit & removed_bits) {
 			/* We're removing this subsystem */
 			BUG_ON(ss == NULL);
 			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
 			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
-			mutex_lock(&ss->hierarchy_mutex);
 			if (ss->bind)
 				ss->bind(dummytop);
 			dummytop->subsys[i]->cgroup = dummytop;
 			cgrp->subsys[i] = NULL;
 			subsys[i]->root = &rootnode;
 			list_move(&ss->sibling, &rootnode.subsys_list);
-			mutex_unlock(&ss->hierarchy_mutex);
 			/* subsystem is now free - drop reference on module */
 			module_put(ss->module);
 		} else if (bit & final_bits) {
@@ -3917,37 +3913,6 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
 		set_bit(CSS_CLEAR_CSS_REFS, &css->flags);
 }
 
-static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
-{
-	/* We need to take each hierarchy_mutex in a consistent order */
-	int i;
-
-	/*
-	 * No worry about a race with rebind_subsystems that might mess up the
-	 * locking order, since both parties are under cgroup_mutex.
-	 */
-	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup_subsys *ss = subsys[i];
-		if (ss == NULL)
-			continue;
-		if (ss->root == root)
-			mutex_lock(&ss->hierarchy_mutex);
-	}
-}
-
-static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
-{
-	int i;
-
-	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup_subsys *ss = subsys[i];
-		if (ss == NULL)
-			continue;
-		if (ss->root == root)
-			mutex_unlock(&ss->hierarchy_mutex);
-	}
-}
-
 /*
  * cgroup_create - create a cgroup
  * @parent: cgroup that will be parent of the new cgroup
@@ -4008,9 +3973,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 			ss->post_clone(cgrp);
 	}
 
-	cgroup_lock_hierarchy(root);
 	list_add(&cgrp->sibling, &cgrp->parent->children);
-	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups++;
 
 	err = cgroup_create_dir(cgrp, dentry, mode);
@@ -4037,9 +4000,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
  err_remove:
 
-	cgroup_lock_hierarchy(root);
 	list_del(&cgrp->sibling);
-	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups--;
 
  err_destroy:
@@ -4247,10 +4208,8 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	list_del_init(&cgrp->release_list);
 	raw_spin_unlock(&release_list_lock);
 
-	cgroup_lock_hierarchy(cgrp->root);
 	/* delete this cgroup from parent->children */
 	list_del_init(&cgrp->sibling);
-	cgroup_unlock_hierarchy(cgrp->root);
 
 	list_del_init(&cgrp->allcg_node);
 
@@ -4324,8 +4283,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 	 * need to invoke fork callbacks here. */
 	BUG_ON(!list_empty(&init_task.tasks));
 
-	mutex_init(&ss->hierarchy_mutex);
-	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
 	ss->active = 1;
 
 	/* this function shouldn't be used with modular subsystems, since they
@@ -4452,8 +4409,6 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	}
 	write_unlock(&css_set_lock);
 
-	mutex_init(&ss->hierarchy_mutex);
-	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
 	ss->active = 1;
 
 	/* success! */
