sched: Avoid SMT siblings in select_idle_sibling() if possible
Keep select_idle_sibling() from picking a sibling thread if there's
an idle core that shares cache.

This fixes SMT balancing in the increasingly common case where an idle
shared-cache core is available to balance to.

Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1321350377.1421.55.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Nov 16, 2011
1 parent f1c6f1a commit 4dcfe10
1 changed file: kernel/sched_fair.c (28 additions, 14 deletions)
@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
 			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+					tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
 	}
+done:
	rcu_read_unlock();
 
 	return target;
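For readers who don't live in kernel/sched_fair.c, the policy the patch implements is easy to model in isolation: first look for a core whose hardware threads are all idle, and only fall back to any idle CPU (possibly the SMT sibling of a busy thread) if no fully idle core exists. Below is a minimal, self-contained user-space sketch of that two-pass search; the cpu_core[] topology table and idle[] array are hypothetical stand-ins for the kernel's sched_group topology and idle_cpu() test, not kernel APIs.

/*
 * Toy model of the two-pass idle-CPU search from the patch above.
 * cpu_core[] and idle[] are hypothetical stand-ins for the kernel's
 * topology and idle-state information; they are NOT kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Two hardware threads per core: cpus {0,1} share core 0, etc. */
static const int cpu_core[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const bool idle[NR_CPUS]    = { false, true, true, true,
                                       false, false, true, false };

/* Pass 1: find a CPU on a core whose threads are ALL idle. */
static int find_idle_core(void)
{
        for (int i = 0; i < NR_CPUS; i++) {
                if (!idle[i])
                        continue;
                bool whole_core_idle = true;
                for (int j = 0; j < NR_CPUS; j++) {
                        if (cpu_core[j] == cpu_core[i] && !idle[j])
                                whole_core_idle = false;
                }
                if (whole_core_idle)
                        return i;
        }
        return -1;
}

/* Pass 2 fallback: any idle CPU, even a busy core's SMT sibling. */
static int find_idle_cpu(void)
{
        for (int i = 0; i < NR_CPUS; i++)
                if (idle[i])
                        return i;
        return -1;
}

int main(void)
{
        int target = find_idle_core();
        if (target < 0)
                target = find_idle_cpu();
        /* cpu 1 is idle, but core 0 has a busy thread; core 1
         * (cpus 2 and 3) is fully idle, so cpu 2 wins. */
        printf("selected cpu %d\n", target);
        return 0;
}

In the patch itself the same split is expressed through domain flags: the first pass skips SD_SHARE_CPUPOWER (SMT) domains and accepts a sched_group only when every CPU in it is idle, while the smt = 1 retry relaxes that so a lone idle sibling thread can still win.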
