
Commit

---
r: 281465
b: refs/heads/master
c: cd3d095
h: refs/heads/master
i:
  281463: aa8a15c
v: v3
Tejun Heo committed Dec 13, 2011
1 parent 89469a5 commit 174195d
Showing 2 changed files with 28 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 77e4ef99d1c596a31747668e5fd837f77b6349b6
+refs/heads/master: cd3d095275374220921fcf0d4e0c16584b26ddbc
62 changes: 27 additions & 35 deletions trunk/kernel/cgroup.c
@@ -1762,7 +1762,7 @@ EXPORT_SYMBOL_GPL(cgroup_path);
  *
  * 'guarantee' is set if the caller promises that a new css_set for the task
  * will already exist. If not set, this function might sleep, and can fail with
- * -ENOMEM. Otherwise, it can only fail with -ESRCH.
+ * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
 static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 			       struct task_struct *tsk, bool guarantee)
@@ -1800,13 +1800,9 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	}
 	put_css_set(oldcg);
 
-	/* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
+	/* @tsk can't exit as its threadgroup is locked */
 	task_lock(tsk);
-	if (tsk->flags & PF_EXITING) {
-		task_unlock(tsk);
-		put_css_set(newcg);
-		return -ESRCH;
-	}
+	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	rcu_assign_pointer(tsk->cgroups, newcg);
 	task_unlock(tsk);
 
@@ -1832,8 +1828,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
  * @cgrp: the cgroup the task is attaching to
  * @tsk: the task to be attached
  *
- * Call holding cgroup_mutex. May take task_lock of
- * the task 'tsk' during call.
+ * Call with cgroup_mutex and threadgroup locked. May take task_lock of
+ * @tsk during call.
  */
 int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
@@ -1842,6 +1838,10 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
 
+	/* @tsk either already exited or can't exit until the end */
+	if (tsk->flags & PF_EXITING)
+		return -ESRCH;
+
 	/* Nothing to do if the task is already in that cgroup */
 	oldcgrp = task_cgroup_from_root(tsk, root);
 	if (cgrp == oldcgrp)
@@ -2062,6 +2062,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	tsk = leader;
 	i = 0;
 	do {
+		/* @tsk either already exited or can't exit until the end */
+		if (tsk->flags & PF_EXITING)
+			continue;
+
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
 		get_task_struct(tsk);
@@ -2116,11 +2120,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		/* get old css_set pointer */
 		task_lock(tsk);
-		if (tsk->flags & PF_EXITING) {
-			/* ignore this task if it's going away */
-			task_unlock(tsk);
-			continue;
-		}
 		oldcg = tsk->cgroups;
 		get_css_set(oldcg);
 		task_unlock(tsk);
@@ -2153,16 +2152,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		oldcgrp = task_cgroup_from_root(tsk, root);
 		if (cgrp == oldcgrp)
 			continue;
-		/* if the thread is PF_EXITING, it can just get skipped. */
 		retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
-		if (retval == 0) {
-			/* attach each task to each subsystem */
-			for_each_subsys(root, ss) {
-				if (ss->attach_task)
-					ss->attach_task(cgrp, tsk);
-			}
-		} else {
-			BUG_ON(retval != -ESRCH);
-		}
+		BUG_ON(retval);
+		/* attach each task to each subsystem */
+		for_each_subsys(root, ss) {
+			if (ss->attach_task)
+				ss->attach_task(cgrp, tsk);
+		}
 	}
 	/* nothing is sensitive to fork() after this point. */
@@ -2215,8 +2210,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
- * function to attach either it or all tasks in its threadgroup. Will take
- * cgroup_mutex; may take task_lock of task.
+ * function to attach either it or all tasks in its threadgroup. Will lock
+ * cgroup_mutex and threadgroup; may take task_lock of task.
  */
 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 {
@@ -2243,11 +2238,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 		 * detect it later.
 		 */
 		tsk = tsk->group_leader;
-	} else if (tsk->flags & PF_EXITING) {
-		/* optimization for the single-task-only case */
-		rcu_read_unlock();
-		cgroup_unlock();
-		return -ESRCH;
 	}
 	/*
 	 * even if we're attaching all tasks in the thread group, we
@@ -2271,13 +2261,15 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 		get_task_struct(tsk);
 	}
 
-	if (threadgroup) {
-		threadgroup_lock(tsk);
+	threadgroup_lock(tsk);
+
+	if (threadgroup)
 		ret = cgroup_attach_proc(cgrp, tsk);
-		threadgroup_unlock(tsk);
-	} else {
+	else
 		ret = cgroup_attach_task(cgrp, tsk);
-	}
+
+	threadgroup_unlock(tsk);
+
 	put_task_struct(tsk);
 	cgroup_unlock();
 	return ret;

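Taken together, the hunks above centralize the PF_EXITING handling: threadgroup_lock() is now held across both attach paths, each entry point rejects an exiting task once (with -ESRCH or by skipping it), and cgroup_task_migrate() merely warns if it ever sees one, since a task whose threadgroup is locked can no longer start exiting. The userspace sketch below illustrates that pattern under the assumption that a single pthread mutex stands in for threadgroup_lock(); struct task, group_lock, migrate_task, and attach_task are invented names for illustration, not kernel APIs.

/*
 * Minimal userspace sketch (not kernel code) of the locking pattern
 * this commit establishes: take the group-wide lock, check the exit
 * flag once at the entry point, and let the migration step assume the
 * task can no longer start exiting. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool exiting;	/* stands in for PF_EXITING */
	int cgroup;	/* stands in for tsk->cgroups */
};

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Analogue of cgroup_task_migrate() after the commit: the caller holds
 * group_lock, so t->exiting cannot become true underneath us. Seeing it
 * set here would be a caller bug, not a race, hence a warning rather
 * than an error return.
 */
static int migrate_task(struct task *t, int new_cgroup)
{
	if (t->exiting) {	/* the kernel version uses WARN_ON_ONCE() */
		fprintf(stderr, "bug: migrating an exiting task\n");
		return -1;
	}
	t->cgroup = new_cgroup;
	return 0;
}

/*
 * Analogue of attach_task_by_pid() after the commit: lock the group,
 * fail with "no such task" once up front, then migrate without
 * re-checking at every interior step.
 */
static int attach_task(struct task *t, int new_cgroup)
{
	int ret;

	pthread_mutex_lock(&group_lock);
	if (t->exiting) {	/* the single -ESRCH check that remains */
		pthread_mutex_unlock(&group_lock);
		return -1;
	}
	ret = migrate_task(t, new_cgroup);
	pthread_mutex_unlock(&group_lock);
	return ret;
}

int main(void)
{
	struct task t = { .exiting = false, .cgroup = 0 };
	int ret = attach_task(&t, 1);

	printf("attach returned %d, task is now in cgroup %d\n", ret, t.cgroup);
	return 0;
}

As in the commit, excluding exit for the whole critical section lets the scattered interior -ESRCH error paths collapse into one up-front check.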