Commit a1ac813
---
r: 281466
b: refs/heads/master
c: 134d337
h: refs/heads/master
v: v3
Tejun Heo committed Dec 13, 2011
1 parent 174195d commit a1ac813
Showing 2 changed files with 43 additions and 25 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cd3d095275374220921fcf0d4e0c16584b26ddbc
+refs/heads/master: 134d33737f9015761c3832f6b268fae6274aac7f
66 changes: 42 additions & 24 deletions trunk/kernel/cgroup.c
@@ -1757,6 +1757,11 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 }
 EXPORT_SYMBOL_GPL(cgroup_path);
 
+struct task_and_cgroup {
+        struct task_struct *task;
+        struct cgroup *cgrp;
+};
+
 /*
  * cgroup_task_migrate - move a task from one cgroup to another.
  *
@@ -2008,15 +2013,15 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-        int retval, i, group_size;
+        int retval, i, group_size, nr_migrating_tasks;
         struct cgroup_subsys *ss, *failed_ss = NULL;
         bool cancel_failed_ss = false;
         /* guaranteed to be initialized later, but the compiler needs this */
         struct cgroup *oldcgrp = NULL;
         struct css_set *oldcg;
         struct cgroupfs_root *root = cgrp->root;
         /* threadgroup list cursor and array */
         struct task_struct *tsk;
+        struct task_and_cgroup *tc;
         struct flex_array *group;
         /*
          * we need to make sure we have css_sets for all the tasks we're
@@ -2035,8 +2040,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          */
         group_size = get_nr_threads(leader);
         /* flex_array supports very large thread-groups better than kmalloc. */
-        group = flex_array_alloc(sizeof(struct task_struct *), group_size,
-                                 GFP_KERNEL);
+        group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
         if (!group)
                 return -ENOMEM;
         /* pre-allocate to guarantee space while iterating in rcu read-side. */
@@ -2060,8 +2064,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         }
         /* take a reference on each task in the group to go in the array. */
         tsk = leader;
-        i = 0;
+        i = nr_migrating_tasks = 0;
         do {
+                struct task_and_cgroup ent;
+
                 /* @tsk either already exited or can't exit until the end */
                 if (tsk->flags & PF_EXITING)
                         continue;
@@ -2073,14 +2079,23 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                  * saying GFP_ATOMIC has no effect here because we did prealloc
                  * earlier, but it's good form to communicate our expectations.
                  */
-                retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+                ent.task = tsk;
+                ent.cgrp = task_cgroup_from_root(tsk, root);
+                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                 BUG_ON(retval != 0);
                 i++;
+                if (ent.cgrp != cgrp)
+                        nr_migrating_tasks++;
         } while_each_thread(leader, tsk);
         /* remember the number of threads in the array for later. */
         group_size = i;
         read_unlock(&tasklist_lock);
 
+        /* methods shouldn't be called if no task is actually migrating */
+        retval = 0;
+        if (!nr_migrating_tasks)
+                goto out_put_tasks;
+
         /*
          * step 1: check that we can legitimately attach to the cgroup.
          */
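
The hunk above is the core of the change: each flex_array slot now holds a task-and-cgroup pair recorded while tasklist_lock is held, and nr_migrating_tasks counts how many of those pairs actually need to move. Purely as an editor's illustration of that by-value flex_array pattern (not code from this commit), here is a minimal sketch; struct snap, record_and_count() and the void * stand-ins are invented names, while flex_array_alloc()/flex_array_put()/flex_array_get()/flex_array_free() are the same kernel API the patch itself calls.

```c
#include <linux/flex_array.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct snap {
	void *task;	/* stand-in for a struct task_struct pointer */
	void *cgrp;	/* stand-in for that task's current cgroup   */
};

static int record_and_count(void **tasks, void **old_cgrps, int n, void *dest)
{
	struct flex_array *fa;
	struct snap ent, *sp;
	int i, ret, nr_moving = 0;

	/* size elements by the whole struct, as the patch does with sizeof(*tc) */
	fa = flex_array_alloc(sizeof(ent), n, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		ent.task = tasks[i];
		ent.cgrp = old_cgrps[i];
		/* flex_array_put() copies the struct into slot i */
		ret = flex_array_put(fa, i, &ent, GFP_KERNEL);
		if (ret)
			goto out;
		if (ent.cgrp != dest)
			nr_moving++;	/* same idea as nr_migrating_tasks */
	}

	/* later passes read the stored copy back and skip non-movers */
	for (i = 0; i < n; i++) {
		sp = flex_array_get(fa, i);
		if (sp->cgrp == dest)
			continue;
		/* per-task work for tasks that actually change groups */
	}
	ret = nr_moving;
out:
	flex_array_free(fa);
	return ret;
}
```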
@@ -2096,8 +2111,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                 if (ss->can_attach_task) {
                         /* run on each task in the threadgroup. */
                         for (i = 0; i < group_size; i++) {
-                                tsk = flex_array_get_ptr(group, i);
-                                retval = ss->can_attach_task(cgrp, tsk);
+                                tc = flex_array_get(group, i);
+                                if (tc->cgrp == cgrp)
+                                        continue;
+                                retval = ss->can_attach_task(cgrp, tc->task);
                                 if (retval) {
                                         failed_ss = ss;
                                         cancel_failed_ss = true;
@@ -2113,18 +2130,17 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          */
         INIT_LIST_HEAD(&newcg_list);
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
+                tc = flex_array_get(group, i);
                 /* nothing to do if this task is already in the cgroup */
-                oldcgrp = task_cgroup_from_root(tsk, root);
-                if (cgrp == oldcgrp)
+                if (tc->cgrp == cgrp)
                         continue;
                 /* get old css_set pointer */
-                task_lock(tsk);
-                oldcg = tsk->cgroups;
+                task_lock(tc->task);
+                oldcg = tc->task->cgroups;
                 get_css_set(oldcg);
-                task_unlock(tsk);
+                task_unlock(tc->task);
                 /* see if the new one for us is already in the list? */
-                if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
+                if (css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list)) {
                         /* was already there, nothing to do. */
                         put_css_set(oldcg);
                 } else {
@@ -2147,17 +2163,16 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                         ss->pre_attach(cgrp);
         }
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
+                tc = flex_array_get(group, i);
                 /* leave current thread as it is if it's already there */
-                oldcgrp = task_cgroup_from_root(tsk, root);
-                if (cgrp == oldcgrp)
+                if (tc->cgrp == cgrp)
                         continue;
-                retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
+                retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
                 BUG_ON(retval);
                 /* attach each task to each subsystem */
                 for_each_subsys(root, ss) {
                         if (ss->attach_task)
-                                ss->attach_task(cgrp, tsk);
+                                ss->attach_task(cgrp, tc->task);
                 }
         }
         /* nothing is sensitive to fork() after this point. */
@@ -2168,8 +2183,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          * being moved, this call will need to be reworked to communicate that.
          */
         for_each_subsys(root, ss) {
-                if (ss->attach)
-                        ss->attach(ss, cgrp, oldcgrp, leader);
+                if (ss->attach) {
+                        tc = flex_array_get(group, 0);
+                        ss->attach(ss, cgrp, tc->cgrp, tc->task);
+                }
         }
 
         /*
@@ -2198,10 +2215,11 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                                 ss->cancel_attach(ss, cgrp, leader);
                 }
         }
+out_put_tasks:
         /* clean up the array of referenced threads in the group. */
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
-                put_task_struct(tsk);
+                tc = flex_array_get(group, i);
+                put_task_struct(tc->task);
         }
 out_free_group_list:
         flex_array_free(group);
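
Taken together, the hunks change cgroup_attach_proc() so that each thread is recorded once together with its old cgroup, per-task callbacks are skipped for threads already in the destination, and no subsystem method runs at all when nothing would migrate. The toy program below sketches only that control flow; thread_snap, attach_group() and the integer group ids are invented for illustration, and nothing here is kernel code.

```c
#include <stdio.h>

/* Snapshot of one thread plus the group it was in when scanned. */
struct thread_snap {
	int tid;	/* stands in for struct task_struct *        */
	int old_grp;	/* stands in for the thread's current cgroup */
};

static int attach_group(const struct thread_snap *snaps, int n, int dest_grp)
{
	int i, nr_moving = 0;

	/* pass 1: count movers, mirroring the do { } while_each_thread() scan */
	for (i = 0; i < n; i++)
		if (snaps[i].old_grp != dest_grp)
			nr_moving++;

	/* "methods shouldn't be called if no task is actually migrating" */
	if (!nr_moving)
		return 0;

	/* pass 2: per-thread work, skipping threads already in dest_grp */
	for (i = 0; i < n; i++) {
		if (snaps[i].old_grp == dest_grp)
			continue;
		printf("migrate tid %d: group %d -> %d\n",
		       snaps[i].tid, snaps[i].old_grp, dest_grp);
	}
	return nr_moving;
}

int main(void)
{
	struct thread_snap snaps[] = {
		{ .tid = 101, .old_grp = 1 },
		{ .tid = 102, .old_grp = 2 },	/* already in the destination */
		{ .tid = 103, .old_grp = 1 },
	};
	int moved = attach_group(snaps, 3, 2);

	printf("%d thread(s) migrated\n", moved);
	return 0;
}
```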
