diff --git a/[refs] b/[refs]
index 0bcd9c4c8c3e..fb70f2203e2c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f492e12ef050e02bf0185b6b57874992591b9be1
+refs/heads/master: 8f190fb3f7a405682666d3723f6ec370b5afe4da
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index de5ab1239e04..0b482f5b5b3b 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -2465,7 +2465,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * to do the newly idle load balance.
 	 */
 	if (idle != CPU_NEWLY_IDLE && local_group &&
-	    balance_cpu != this_cpu && balance) {
+	    balance_cpu != this_cpu) {
 		*balance = 0;
 		return;
 	}
@@ -2528,7 +2528,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 				local_group, cpus, balance, &sgs);
 
-		if (local_group && balance && !(*balance))
+		if (local_group && !(*balance))
 			return;
 
 		sds->total_load += sgs.group_load;
@@ -2720,7 +2720,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * 5) The imbalance is within the specified limit.
 	 * 6) Any rebalance would lead to ping-pong
 	 */
-	if (balance && !(*balance))
+	if (!(*balance))
 		goto ret;
 
 	if (!sds.busiest || sds.busiest_nr_running == 0)
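
Note on the hunks above: each one drops a "balance &&" NULL guard while the code continues to dereference *balance, so the change is only safe if every caller of these load-balancing helpers passes a valid pointer. The following is a minimal standalone C sketch of that reasoning, not kernel code; the names toy_update_stats and toy_balance_cpu are invented for illustration and do not exist in sched_fair.c.

/*
 * Toy model: the callee writes through *balance unconditionally,
 * mirroring the "+" lines of the patch.
 */
#include <stdio.h>

static void toy_update_stats(int this_cpu, int balance_cpu, int *balance)
{
	if (balance_cpu != this_cpu)	/* no "&& balance" guard needed */
		*balance = 0;
}

/*
 * The only call site passes the address of a local variable, so the
 * pointer can never be NULL and a NULL check in the callee is dead code.
 */
static void toy_balance_cpu(int this_cpu)
{
	int balance = 1;

	toy_update_stats(this_cpu, 0, &balance);
	if (!balance)
		printf("cpu %d: another CPU owns this balance pass\n", this_cpu);
}

int main(void)
{
	toy_balance_cpu(1);
	return 0;
}

Under that assumption the removed checks were redundant; if any caller could legitimately pass NULL, the unconditional *balance = 0 in update_sg_lb_stats() would instead become a NULL dereference.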