diff --git a/[refs] b/[refs]
index 789bf3fce22b..3bab684dd5a3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: de5eb2dd7f171ee8a45d23cd41aa2efe9ab922b3
+refs/heads/master: cfc03118047172f5bdc58d63c607d16d33ce5305
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 726e12905725..dfa92b7b3dec 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -5026,8 +5026,21 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.cpus		= cpus,
 	};
 
+	/*
+	 * For NEWLY_IDLE load balancing, we don't need to consider
+	 * other cpus in our group.
+	 */
+	if (idle == CPU_NEWLY_IDLE) {
+		env.dst_grpmask = NULL;
+		/*
+		 * max_lb_iterations is not used in this case; it will be
+		 * removed in a following patch.
+		 */
+		max_lb_iterations = 0;
+	} else
+		max_lb_iterations = cpumask_weight(env.dst_grpmask);
+
 	cpumask_copy(cpus, cpu_active_mask);
-	max_lb_iterations = cpumask_weight(env.dst_grpmask);
 
 	schedstat_inc(sd, lb_count[idle]);
 
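
For reference, below is a minimal userspace sketch of the control flow this hunk introduces in load_balance(): on the CPU_NEWLY_IDLE path the destination group mask is dropped (only the newly idle cpu itself is a valid destination) and max_lb_iterations becomes unused, while other idle types keep computing it from the group mask. The enum, struct cpumask, and weight() helper here are simplified stand-ins of my own, not the kernel's cpu_idle_type, cpumask, or cpumask_weight(); only the branch structure mirrors the patch.

/* sketch.c - build with: cc -o sketch sketch.c (gcc or clang) */
#include <stdio.h>

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

struct cpumask { unsigned long bits; };	/* stand-in: one word of cpus */

/* Stand-in for cpumask_weight(): number of cpus set in the mask. */
static int weight(const struct cpumask *mask)
{
	return __builtin_popcountl(mask->bits);
}

int main(void)
{
	struct cpumask group_cpus = { .bits = 0xf };	/* 4 cpus in the group */
	const struct cpumask *dst_grpmask = &group_cpus;
	enum cpu_idle_type idle = CPU_NEWLY_IDLE;
	int max_lb_iterations;

	if (idle == CPU_NEWLY_IDLE) {
		/* Only the newly idle cpu is a destination; no group mask. */
		dst_grpmask = NULL;
		max_lb_iterations = 0;	/* unused on this path */
	} else {
		max_lb_iterations = weight(dst_grpmask);
	}

	printf("dst_grpmask=%p max_lb_iterations=%d\n",
	       (void *)dst_grpmask, max_lb_iterations);
	return 0;
}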