diff --git a/[refs] b/[refs]
index 42a63f11314d..0e3207de3015 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c41917df8a1adde34864116ce2231a7fe308d2ff
+refs/heads/master: 9439aab8dbc33c2c03c3a19dba267360383ba38c
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 645256b228c3..e36d99d1ddb1 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2235,7 +2235,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 			rq = cpu_rq(i);
 
-			if (*sd_idle && !idle_cpu(i))
+			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
 
 			/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2257,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+					balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
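
Taken together, the two sched.c hunks adjust find_busiest_group(): the first
clears *sd_idle only when the remote runqueue actually has runnable tasks
(rq->nr_running) rather than relying on !idle_cpu(i), and the second exempts
newly idle cpus from the rule that only balance_cpu may balance the local
group. Below is a minimal standalone sketch of the patched eligibility test;
the enum constants mirror the kernel's cpu_idle_type, but may_balance() and
its flattened parameter list are hypothetical stand-ins for the state that
find_busiest_group() actually carries, not kernel code:

    #include <stdbool.h>

    /* Mirrors the kernel's enum cpu_idle_type constants. */
    enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

    /*
     * Hypothetical distillation of the patched check.  Before the
     * patch, any cpu in the local group other than the designated
     * balance_cpu aborted load balancing, even when it had just run
     * out of tasks.  After the patch, a newly idle cpu is always
     * allowed to attempt the newly idle balance.
     */
    static bool may_balance(enum cpu_idle_type idle, bool local_group,
                            int balance_cpu, int this_cpu)
    {
            if (idle != CPU_NEWLY_IDLE && local_group &&
                balance_cpu != this_cpu)
                    return false;   /* defer to balance_cpu for this domain */
            return true;
    }

Presumably the CPU_NEWLY_IDLE exemption keeps newly idle balancing
responsive, per the patched comment: a cpu that has just gone idle pulls
work itself instead of waiting for balance_cpu to act on its behalf.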