diff --git a/[refs] b/[refs]
index 3b78fd5390a2..01b882d82a09 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 642dbc39ab1ea00f47e0fee1b8e8a27da036d940
+refs/heads/master: f1cd0858100c67273f2c74344e0c464344c4a982
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 1c977350e322..25aaf93281de 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -5080,17 +5080,17 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		double_rq_unlock(env.dst_rq, busiest);
 		local_irq_restore(flags);
 
-		if (env.flags & LBF_NEED_BREAK) {
-			env.flags &= ~LBF_NEED_BREAK;
-			goto more_balance;
-		}
-
 		/*
 		 * some other cpu did the load balance for us.
 		 */
 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
 			resched_cpu(env.dst_cpu);
 
+		if (env.flags & LBF_NEED_BREAK) {
+			env.flags &= ~LBF_NEED_BREAK;
+			goto more_balance;
+		}
+
 		/*
 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
 		 * us and move them to an alternate dst_cpu in our sched_group