diff --git a/[refs] b/[refs]
index 02ca2f54b8f8..7e3ff072668f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 86f82d561864e902c70282b6f17cf590c0f34691
+refs/heads/master: c4c27fbdda4e8ba87806c415b6d15266b07bce4b
diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c
index b2f203f25ec8..ad8eae5bb801 100644
--- a/trunk/kernel/cgroup.c
+++ b/trunk/kernel/cgroup.c
@@ -60,6 +60,7 @@
 #include <linux/eventfd.h>
 #include <linux/poll.h>
 #include <linux/flex_array.h> /* used in cgroup_attach_proc */
+#include <linux/kthread.h>
 
 #include <linux/atomic.h>
 
@@ -2225,6 +2226,18 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 
 	if (threadgroup)
 		tsk = tsk->group_leader;
+
+	/*
+	 * Workqueue threads may acquire PF_THREAD_BOUND and become
+	 * trapped in a cpuset, or RT worker may be born in a cgroup
+	 * with no rt_runtime allocated. Just say no.
+	 */
+	if (tsk == kthreadd_task || (tsk->flags & PF_THREAD_BOUND)) {
+		ret = -EINVAL;
+		rcu_read_unlock();
+		goto out_unlock_cgroup;
+	}
+
 	get_task_struct(tsk);
 	rcu_read_unlock();
 
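
For illustration only, not part of the commit: a minimal userspace sketch of the behaviour the new check enforces. It assumes a cpuset hierarchy is mounted with an existing child group at the hypothetical path /sys/fs/cgroup/cpuset/test, and that kthreadd runs as PID 2 as it conventionally does. With this change, writing that PID into the group's tasks file should fail with EINVAL instead of moving the kernel thread into the cpuset.

/* sketch: expect EINVAL when trying to cgroup-attach kthreadd (PID 2 assumed) */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical cgroup path; adjust to a group that exists on the test box */
	const char *tasks = "/sys/fs/cgroup/cpuset/test/tasks";
	FILE *f = fopen(tasks, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* the flush issues the write that attach_task_by_pid() now rejects */
	if (fprintf(f, "2\n") < 0 || fflush(f) != 0)
		printf("rejected as expected: %s\n", strerror(errno));
	else
		printf("attach unexpectedly succeeded\n");

	fclose(f);
	return 0;
}

Before the check, such a write could succeed and, as the added comment notes, leave a PF_THREAD_BOUND worker trapped in a cpuset or an RT worker in a cgroup with no rt_runtime allocated.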