From 05221bbb8758d049ac80d251cbb71d01d196b46f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 9 Jul 2010 15:15:43 +0200
Subject: [PATCH]

--- yaml ---
r: 205437
b: refs/heads/master
c: bbc8cb5baead9607309583b20873ab0cc8d89eaf
h: refs/heads/master
i:
  205435: a07b78a343a03c3f3927d4d3bb69362f43aba7ca
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/sched_fair.c | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index 0f0b1ebbafa2..90b0f81ef650 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5343bdb8fd076f16edc9d113a9e35e2a1d1f4966
+refs/heads/master: bbc8cb5baead9607309583b20873ab0cc8d89eaf
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index e44a591531a1..c9ac09760953 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -2425,14 +2425,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * domains. In the newly idle case, we will allow all the cpu's
 	 * to do the newly idle load balance.
 	 */
-	if (idle != CPU_NEWLY_IDLE && local_group &&
-	    balance_cpu != this_cpu) {
-		*balance = 0;
-		return;
+	if (idle != CPU_NEWLY_IDLE && local_group) {
+		if (balance_cpu != this_cpu) {
+			*balance = 0;
+			return;
+		}
+		update_group_power(sd, this_cpu);
 	}
 
-	update_group_power(sd, this_cpu);
-
 	/* Adjust by relative CPU power of the group */
 	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
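
For illustration, a minimal standalone sketch of the restructured check follows.
It is not the kernel code: the stub types and the trimmed-down
update_sg_lb_stats() signature are assumptions made for the sketch (the real
function takes many more parameters and fills in struct sg_lb_stats). What it
shows is the net effect of the hunk above: on a regular (non-newly-idle)
balance pass over the local group, only the designated balance_cpu proceeds,
and only that CPU now calls update_group_power(), instead of every CPU calling
it unconditionally after the early-return check.

	/* sketch.c: simplified stand-ins, not kernel definitions */
	#include <stdio.h>

	enum cpu_idle_type { CPU_NEWLY_IDLE, CPU_NOT_IDLE };

	struct sched_domain { int unused; };

	/* Stub standing in for the kernel's update_group_power(). */
	static void update_group_power(struct sched_domain *sd, int this_cpu)
	{
		printf("cpu %d refreshes the local group's cpu_power\n", this_cpu);
	}

	/* Hypothetical, trimmed-down signature for illustration only. */
	static void update_sg_lb_stats(struct sched_domain *sd, int this_cpu,
				       int balance_cpu, enum cpu_idle_type idle,
				       int local_group, int *balance)
	{
		if (idle != CPU_NEWLY_IDLE && local_group) {
			if (balance_cpu != this_cpu) {
				/* Not the designated balancer: abort this pass. */
				*balance = 0;
				return;
			}
			/* Only the balance CPU updates the group's power. */
			update_group_power(sd, this_cpu);
		}
		/* ... compute sgs->avg_load etc. ... */
	}

	int main(void)
	{
		struct sched_domain sd = { 0 };
		int balance = 1;

		/* cpu 1 is not the balance cpu (0): it bails out early. */
		update_sg_lb_stats(&sd, 1, 0, CPU_NOT_IDLE, 1, &balance);
		printf("balance = %d\n", balance);

		/* cpu 0 is the balance cpu: it refreshes group power. */
		balance = 1;
		update_sg_lb_stats(&sd, 0, 0, CPU_NOT_IDLE, 1, &balance);
		return 0;
	}

Before the patch, the update_group_power() call sat below the early-return
block and so ran for every group and every CPU that reached it; hoisting it
inside the local_group branch restricts it to the one CPU already chosen to
do the balancing, which is the reduction in calls this change is after.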