From bbffc2351a00b96c659cc4883c093fc1836347a8 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 28 Jan 2013 12:19:25 +0100 Subject: [PATCH] --- yaml --- r: 350131 b: refs/heads/master c: e0a79f529d5ba2507486d498b25da40911d95cf6 h: refs/heads/master i: 350129: aff265ccd8179ae06a198e85582cb6b16aaddbef 350127: 4f15255b18867c395e958516d614f755e0b68a3b v: v3 --- [refs] | 2 +- trunk/kernel/sched/fair.c | 21 +++++++-------------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/[refs] b/[refs] index 071493d1ef9b..d1b034d2d36a 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 60334caf37dc7c59120b21faa625534a6fffead0 +refs/heads/master: e0a79f529d5ba2507486d498b25da40911d95cf6 diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c index 8dbee9f4ceb2..ed18c74db017 100644 --- a/trunk/kernel/sched/fair.c +++ b/trunk/kernel/sched/fair.c @@ -3252,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) */ static int select_idle_sibling(struct task_struct *p, int target) { - int cpu = smp_processor_id(); - int prev_cpu = task_cpu(p); struct sched_domain *sd; struct sched_group *sg; - int i; + int i = task_cpu(p); - /* - * If the task is going to be woken-up on this cpu and if it is - * already idle, then it is the right target. - */ - if (target == cpu && idle_cpu(cpu)) - return cpu; + if (idle_cpu(target)) + return target; /* - * If the task is going to be woken-up on the cpu where it previously - * ran and if it is currently idle, then it the right target. + * If the previous cpu is cache affine and idle, don't be stupid. */ - if (target == prev_cpu && idle_cpu(prev_cpu)) - return prev_cpu; + if (i != target && cpus_share_cache(i, target) && idle_cpu(i)) + return i; /* * Otherwise, iterate the domains and find an elegible idle cpu. 
@@ -3284,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target) goto next; for_each_cpu(i, sched_group_cpus(sg)) { - if (!idle_cpu(i)) + if (i == target || !idle_cpu(i)) goto next; }