Commit 2f554d4

---
r: 178032
b: refs/heads/master
c: 5da9a0f
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Dec 16, 2009
1 parent be4cc73 commit 2f554d4
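
In short: the fallback-CPU selection logic that move_task_off_dead_cpu() open-coded is split out into a new helper, select_fallback_rq(), which select_task_rq() now uses as well. The helper tries, in order: an allowed, online CPU on the task's node; any allowed, online CPU; and, as a last resort, any online CPU after widening the task's affinity to its cpuset, printing a rate-limited notice when a user task (p->mm != NULL) loses its affinity.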
Showing 2 changed files with 41 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3802290628348674985d14914f9bfee7b9084548
+refs/heads/master: 5da9a0fb673a0ea0a093862f95f6b89b3390c31e
75 changes: 40 additions & 35 deletions trunk/kernel/sched.c
@@ -2317,6 +2317,43 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+        int dest_cpu;
+        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+        /* Look for allowed, online CPU in same node. */
+        for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                        return dest_cpu;
+
+        /* Any allowed, online CPU? */
+        dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+        if (dest_cpu < nr_cpu_ids)
+                return dest_cpu;
+
+        /* No more Mr. Nice Guy. */
+        if (dest_cpu >= nr_cpu_ids) {
+                rcu_read_lock();
+                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+                rcu_read_unlock();
+                dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+                /*
+                 * Don't tell them about moving exiting tasks or
+                 * kernel threads (both mm NULL), since they never
+                 * leave kernel.
+                 */
+                if (p->mm && printk_ratelimit()) {
+                        printk(KERN_INFO "process %d (%s) no "
+                                        "longer affine to cpu%d\n",
+                                        task_pid_nr(p), p->comm, cpu);
+                }
+        }
+
+        return dest_cpu;
+}
+
 /*
  * Called from:
  *
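
For illustration, the helper's three-tier search can be sketched in plain userspace C. Everything below (toy_mask, first_match(), toy_select_fallback(), the 8-CPU NR_CPUS) is a hypothetical stand-in for the kernel's cpumask machinery, not a real API; it only mirrors the order of the fallback tiers:

#include <stdio.h>

#define NR_CPUS 8

/* One bit per CPU: a toy stand-in for struct cpumask. */
typedef unsigned int toy_mask;

/* Lowest set bit common to both masks, or NR_CPUS if none
 * (mirrors the "dest_cpu >= nr_cpu_ids" convention above). */
static int first_match(toy_mask a, toy_mask b)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (a & b & (1u << cpu))
                        return cpu;
        return NR_CPUS;
}

static int toy_select_fallback(toy_mask allowed, toy_mask node,
                               toy_mask active, toy_mask cpuset)
{
        int cpu;

        /* 1) Look for an allowed, online CPU in the same node. */
        cpu = first_match(allowed & node, active);
        if (cpu < NR_CPUS)
                return cpu;

        /* 2) Any allowed, online CPU? */
        cpu = first_match(allowed, active);
        if (cpu < NR_CPUS)
                return cpu;

        /* 3) No more Mr. Nice Guy: widen affinity to the cpuset. */
        return first_match(cpuset, active);
}

int main(void)
{
        /* Task allowed on CPUs 2-3, its node spans CPUs 0-3,
         * but only CPU 5 is online: tiers 1 and 2 fail, tier 3
         * falls back to CPU 5 via the cpuset. */
        toy_mask allowed = 0x0c, node = 0x0f, active = 0x20, cpuset = 0xff;

        printf("fallback -> cpu%d\n",
               toy_select_fallback(allowed, node, active, cpuset));
        return 0;
}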
@@ -2343,14 +2380,8 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
          *   not worry about this generic constraint ]
          */
         if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-                     !cpu_active(cpu))) {
-
-                cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-                /*
-                 * XXX: race against hot-plug modifying cpu_active_mask
-                 */
-                BUG_ON(cpu >= nr_cpu_ids);
-        }
+                     !cpu_active(cpu)))
+                cpu = select_fallback_rq(task_cpu(p), p);
 
         return cpu;
 }
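
Besides removing duplication, this hunk drops the open-coded cpumask_any_and() together with the XXX comment that flagged it as racy against hotplug changes to cpu_active_mask; instead of tripping a BUG_ON() when no CPU is found, select_task_rq() now degrades gracefully through the helper's cpuset tier.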
@@ -7319,36 +7350,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
         int dest_cpu;
-        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-        /* Look for allowed, online CPU in same node. */
-        for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-                        goto move;
-
-        /* Any allowed, online CPU? */
-        dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-        if (dest_cpu < nr_cpu_ids)
-                goto move;
-
-        /* No more Mr. Nice Guy. */
-        if (dest_cpu >= nr_cpu_ids) {
-                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-                dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-                /*
-                 * Don't tell them about moving exiting tasks or
-                 * kernel threads (both mm NULL), since they never
-                 * leave kernel.
-                 */
-                if (p->mm && printk_ratelimit()) {
-                        pr_info("process %d (%s) no longer affine to cpu%d\n",
-                                task_pid_nr(p), p->comm, dead_cpu);
-                }
-        }
+        dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
         /* It can have affinity changed while we were choosing. */
         if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                 goto again;
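
Note that the again: retry loop survives the refactor: if the task's affinity changes while a destination is being chosen, __migrate_task_irq() fails and selection restarts. Only the move: label and its gotos disappear, since select_fallback_rq() returns its pick directly.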
