diff --git a/[refs] b/[refs]
index 8e1d584b5b42..b5f041de964e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: db7f47cf4805e30decb0841764b21b7c4000f7dc
+refs/heads/master: 6d7b2f5f9e88902b19f91d0c8a7ef58a5455f1a2
diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c
index 2b93b50cbe4b..026faccca869 100644
--- a/trunk/kernel/cpuset.c
+++ b/trunk/kernel/cpuset.c
@@ -1342,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 			     struct cgroup *cont, struct task_struct *tsk)
 {
 	struct cpuset *cs = cgroup_cs(cont);
-	int ret = 0;
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (tsk->flags & PF_THREAD_BOUND) {
-		mutex_lock(&callback_mutex);
-		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
-			ret = -EINVAL;
-		mutex_unlock(&callback_mutex);
-	}
+	/*
+	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
+	 * cannot change their cpu affinity and isolating such threads by their
+	 * set of allowed nodes is unnecessary. Thus, cpusets are not
+	 * applicable for such threads. This prevents checking for success of
+	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
+	 * be changed.
+	 */
+	if (tsk->flags & PF_THREAD_BOUND)
+		return -EINVAL;
 
-	return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
+	return security_task_setscheduler(tsk, 0, NULL);
 }
 
 static void cpuset_attach(struct cgroup_subsys *ss,
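
Context for the rejected case: a kernel thread pinned with kthread_bind() gets PF_THREAD_BOUND set, and set_cpus_allowed_ptr() refuses to change the affinity of such a task. The old code therefore allowed the attach only when the cpuset's mask happened to equal the task's mask; the new code rejects these tasks outright. Below is a minimal user-space model of the post-patch control flow, not kernel code: cpuset_model, task_model, and can_attach() are simplified stand-ins invented for illustration, and security_task_setscheduler() is elided. The PF_THREAD_BOUND value is taken from kernel headers of this era.

#include <stdio.h>

#define PF_THREAD_BOUND	0x04000000	/* as in contemporaneous <linux/sched.h> */
#define ENOSPC	28
#define EINVAL	22

/* Simplified stand-ins for the kernel types (hypothetical). */
struct cpuset_model {
	unsigned long cpus_allowed;	/* bitmask of CPUs; 0 means empty */
	unsigned long mems_allowed;	/* bitmask of memory nodes; 0 means empty */
};

struct task_model {
	unsigned int flags;
};

/* Mirrors the post-patch cpuset_can_attach() decision order. */
static int can_attach(const struct cpuset_model *cs, const struct task_model *tsk)
{
	/* An empty cpuset has no resources to offer. */
	if (cs->cpus_allowed == 0 || cs->mems_allowed == 0)
		return -ENOSPC;

	/*
	 * A kthread pinned by kthread_bind() cannot have its affinity
	 * changed, so attaching it to a cpuset is rejected up front.
	 */
	if (tsk->flags & PF_THREAD_BOUND)
		return -EINVAL;

	return 0;	/* security_task_setscheduler() check elided here */
}

int main(void)
{
	struct cpuset_model cs = { .cpus_allowed = 0x3, .mems_allowed = 0x1 };
	struct task_model bound = { .flags = PF_THREAD_BOUND };
	struct task_model normal = { .flags = 0 };

	printf("bound kthread: %d\n", can_attach(&cs, &bound));	/* -22 (-EINVAL) */
	printf("normal task:   %d\n", can_attach(&cs, &normal));	/* 0 */
	return 0;
}

As the added kernel comment explains, the design payoff of rejecting at attach time is that the cpuset code never has to verify set_cpus_allowed_ptr() success for every attached task before it may change cpus_allowed later: no PF_THREAD_BOUND task can be attached in the first place.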