sched: fix newidle smp group balancing
Re-compute the shares on newidle - so we can make a decision based on
recent data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent c8cba85 commit 3e5459b
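
The patch below adds update_shares_locked(), which drops the run queue lock around the share recomputation and re-takes it before the result is used, and calls it from load_balance_newidle(). As a rough illustration of that lock-drop pattern only, not the kernel code, here is a minimal user-space sketch; the runqueue struct, recompute_shares(), and the pthread mutex standing in for rq->lock are simplified assumptions.

/*
 * Illustrative user-space analogue of the pattern behind
 * update_shares_locked(); a simplified assumption, not kernel code.
 * The recomputation may need to take other locks, so the caller's
 * lock is dropped around it and re-acquired before the fresh data
 * is used for the balancing decision.  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>

struct runqueue {
	pthread_mutex_t lock;
	int shares;			/* stand-in for per-group share data */
};

/* Expensive; in the kernel, update_shares() takes the locks it needs. */
static void recompute_shares(struct runqueue *rq)
{
	rq->shares += 1;		/* pretend we walked the task-group tree */
}

/* Mirrors update_shares_locked(): drop the lock, recompute, re-take it. */
static void recompute_shares_locked(struct runqueue *rq)
{
	pthread_mutex_unlock(&rq->lock);
	recompute_shares(rq);
	pthread_mutex_lock(&rq->lock);
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .shares = 0 };

	pthread_mutex_lock(&rq.lock);	/* caller holds the lock, as in newidle balance */
	recompute_shares_locked(&rq);	/* refresh shares before deciding */
	printf("balance decision sees shares = %d\n", rq.shares);
	pthread_mutex_unlock(&rq.lock);
	return 0;
}

In the actual diff the helper is invoked twice in load_balance_newidle(): at the redo: label, so find_busiest_group() decides on fresh share values, and again after tasks have been moved.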
Showing 1 changed file with 13 additions and 0 deletions: kernel/sched.c
@@ -1579,6 +1579,13 @@ static void update_shares(struct sched_domain *sd)
 	walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
 }
 
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+	spin_unlock(&rq->lock);
+	update_shares(sd);
+	spin_lock(&rq->lock);
+}
+
 static void update_h_load(int cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
@@ -1595,6 +1602,10 @@ static inline void update_shares(struct sched_domain *sd)
 {
 }
 
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
 #endif
 
 #endif
@@ -3543,6 +3554,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
+	update_shares_locked(this_rq, sd);
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, cpus, NULL);
 	if (!group) {
@@ -3586,6 +3598,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	} else
 		sd->nr_balance_failed = 0;
 
+	update_shares_locked(this_rq, sd);
 	return ld_moved;
 
 out_balanced:
