From c9d320c6c297bb22f22ec28dc667af217b9709f5 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 19 Jul 2007 21:28:35 +0200
Subject: [PATCH]

--- yaml ---
r: 61769
b: refs/heads/master
c: 9439aab8dbc33c2c03c3a19dba267360383ba38c
h: refs/heads/master
i:
  61767: ce25915343b1e66593ff69d1f27ecbec70c83ee8
v: v3
---
 [refs]               | 2 +-
 trunk/kernel/sched.c | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/[refs] b/[refs]
index 42a63f11314d..0e3207de3015 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c41917df8a1adde34864116ce2231a7fe308d2ff
+refs/heads/master: 9439aab8dbc33c2c03c3a19dba267360383ba38c

diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 645256b228c3..e36d99d1ddb1 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2235,7 +2235,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 			rq = cpu_rq(i);
 
-			if (*sd_idle && !idle_cpu(i))
+			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
 
 			/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2257,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
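
Editor's note: the second hunk above restricts the "only the group's designated
balance CPU may balance at this domain and above" rule to the non-newly-idle
case, so that every CPU is allowed to run the newly idle load balance. (The
first hunk separately switches the SMT sd_idle check from !idle_cpu(i) to
rq->nr_running.) Below is a minimal standalone sketch of that eligibility
rule, not kernel code: the helper name may_do_balance(), the reduced
cpu_idle_type enum and the main() driver are assumptions made for
illustration; only the condition itself mirrors the patched hunk.

    /*
     * Illustrative sketch only -- not kernel code.  The helper name and the
     * reduced enum are assumptions; the condition mirrors the patched hunk.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

    /*
     * Old rule: in the local group, only the designated balance CPU (the
     * first idle CPU, or the group's first CPU) may balance at this domain
     * and above.
     * New rule: a newly idle CPU is exempt from that restriction, so every
     * CPU may run the newly idle load balance.
     */
    static bool may_do_balance(enum cpu_idle_type idle, bool local_group,
                               int balance_cpu, int this_cpu)
    {
            if (idle != CPU_NEWLY_IDLE && local_group &&
                balance_cpu != this_cpu)
                    return false;   /* would set *balance = 0 and bail out */
            return true;
    }

    int main(void)
    {
            /* CPU 3 is not the group's balance CPU (CPU 0)... */
            printf("periodic balance allowed: %d\n",
                   may_do_balance(CPU_NOT_IDLE, true, 0, 3));   /* prints 0 */
            /* ...but it may still balance when it has just become idle. */
            printf("newly idle balance allowed: %d\n",
                   may_do_balance(CPU_NEWLY_IDLE, true, 0, 3)); /* prints 1 */
            return 0;
    }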