
Commit

---
yaml
---
r: 191404
b: refs/heads/master
c: 9084bb8
h: refs/heads/master
v: v3
Oleg Nesterov authored and Ingo Molnar committed Apr 2, 2010
1 parent 1862f62 commit cafc0a5
Showing 4 changed files with 51 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6a1bdc1b577ebcb65f6603c57f8347309bc4ab13
+refs/heads/master: 9084bb8246ea935b98320554229e2f371f7f52fa
7 changes: 7 additions & 0 deletions trunk/include/linux/cpuset.h
@@ -21,6 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -101,6 +102,12 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, cpu_possible_mask);
 }
 
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+{
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
+}
+
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 {
 	return node_possible_map;
42 changes: 42 additions & 0 deletions trunk/kernel/cpuset.c
@@ -2188,6 +2188,48 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	mutex_unlock(&callback_mutex);
 }
 
+int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+{
+	const struct cpuset *cs;
+	int cpu;
+
+	rcu_read_lock();
+	cs = task_cs(tsk);
+	if (cs)
+		cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+	rcu_read_unlock();
+
+	/*
+	 * We own tsk->cpus_allowed, nobody can change it under us.
+	 *
+	 * But we used cs && cs->cpus_allowed lockless and thus can
+	 * race with cgroup_attach_task() or update_cpumask() and get
+	 * the wrong tsk->cpus_allowed. However, both cases imply the
+	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
+	 * which takes task_rq_lock().
+	 *
+	 * If we are called after it dropped the lock we must see all
+	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
+	 * set any mask even if it is not right from task_cs() pov,
+	 * the pending set_cpus_allowed_ptr() will fix things.
+	 */
+
+	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
+	if (cpu >= nr_cpu_ids) {
+		/*
+		 * Either tsk->cpus_allowed is wrong (see above) or it
+		 * is actually empty. The latter case is only possible
+		 * if we are racing with remove_tasks_in_empty_cpuset().
+		 * Like above we can temporary set any mask and rely on
+		 * set_cpus_allowed_ptr() as synchronization point.
+		 */
+		cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+		cpu = cpumask_any(cpu_active_mask);
+	}
+
+	return cpu;
+}
+
 void cpuset_init_current_mems_allowed(void)
 {
 	nodes_setall(current->mems_allowed);
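For readers who want to experiment with the selection policy outside the kernel, here is a minimal user-space sketch of the same two-step fallback: prefer a CPU that is both allowed for the task and still active, and if that intersection is empty, widen the allowed set to every possible CPU and take any active one. The names (pick_fallback_cpu, first_cpu, allowed, active) and the plain-bitmask stand-in for struct cpumask are illustrative only, not kernel API.

/* Illustrative user-space model of the fallback policy above; not kernel code. */
#include <stdio.h>

#define NR_CPUS  8
#define ALL_CPUS ((1UL << NR_CPUS) - 1)	/* plays the role of cpu_possible_mask */

/* Return the lowest set bit, or NR_CPUS if the mask is empty
 * (mirrors the "cpu >= nr_cpu_ids" check in the kernel code). */
static int first_cpu(unsigned long mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return NR_CPUS;
}

static int pick_fallback_cpu(unsigned long *allowed, unsigned long active)
{
	/* Prefer a CPU that is both allowed for the task and still active. */
	int cpu = first_cpu(*allowed & active);

	if (cpu >= NR_CPUS) {
		/* The allowed mask is stale or empty: widen it to all
		 * possible CPUs and take any active one. */
		*allowed = ALL_CPUS;
		cpu = first_cpu(active);
	}
	return cpu;
}

int main(void)
{
	unsigned long allowed = 0x30;	/* task allowed on CPUs 4-5 */
	unsigned long active  = 0x0f;	/* but only CPUs 0-3 are still active */

	printf("fallback CPU: %d\n", pick_fallback_cpu(&allowed, active));
	printf("allowed mask is now 0x%lx\n", allowed);
	return 0;
}

Compiled with any C99 compiler (e.g. cc -std=c99), this prints CPU 0 and shows the allowed mask widened to 0xff, matching the temporary over-wide mask that the kernel relies on a later set_cpus_allowed_ptr() call to correct.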
4 changes: 1 addition & 3 deletions trunk/kernel/sched.c
@@ -2300,9 +2300,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)

 	/* No more Mr. Nice Guy. */
 	if (unlikely(dest_cpu >= nr_cpu_ids)) {
-		cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
-		dest_cpu = cpumask_any(cpu_active_mask);
-
+		dest_cpu = cpuset_cpus_allowed_fallback(p);
 		/*
 		 * Don't tell them about moving exiting tasks or
 		 * kernel threads (both mm NULL), since they never
