From 408a315356b80a15ab833075f289e7c4bb73a8ed Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 11 May 2012 00:12:02 +0200
Subject: [PATCH]

--- yaml ---
r: 305001
b: refs/heads/master
c: 04f733b4afac5dc93ae9b0a8703c60b87def491e
h: refs/heads/master
i:
  304999: 2d42a2aa8bdec2747c530faef1addc5ae24d75a9
v: v3
---
 [refs]                      |  2 +-
 trunk/include/linux/sched.h |  1 -
 trunk/kernel/sched/core.c   |  2 --
 trunk/kernel/sched/fair.c   | 19 +++++++------------
 4 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/[refs] b/[refs]
index 8a789a43461d..4707cdb8b2d9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 316ad248307fba13be40f01e92a22b89457c32bc
+refs/heads/master: 04f733b4afac5dc93ae9b0a8703c60b87def491e
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 3cbfb55bde25..4a559bf0622f 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -927,7 +927,6 @@ struct sched_group_power {
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
 	atomic_t ref;
-	int balance_cpu;
 
 	unsigned int group_weight;
 	struct sched_group_power *sgp;
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 0738036fa569..24922b7ff567 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -5976,7 +5976,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
-		sg->balance_cpu = -1;
 
 		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;
@@ -6052,7 +6051,6 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
-		sg->balance_cpu = -1;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 9bd3366dbb1c..a259a614b394 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -3776,8 +3776,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
-	unsigned int balance_cpu = -1;
-	unsigned long balance_load = ~0UL;
+	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long avg_load_per_task = 0;
 	int i;
 
@@ -3794,11 +3793,12 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		/* Bias balancing toward cpus of our domain */
 		if (local_group) {
-			load = target_load(i, load_idx);
-			if (load < balance_load || idle_cpu(i)) {
-				balance_load = load;
+			if (idle_cpu(i) && !first_idle_cpu) {
+				first_idle_cpu = 1;
 				balance_cpu = i;
 			}
+
+			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
 			if (load > max_cpu_load) {
@@ -3824,8 +3824,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	 */
 	if (local_group) {
 		if (env->idle != CPU_NEWLY_IDLE) {
-			if (balance_cpu != env->dst_cpu ||
-			    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
+			if (balance_cpu != env->dst_cpu) {
 				*balance = 0;
 				return;
 			}
@@ -4919,7 +4918,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long interval;
-	struct sched_domain *sd, *last = NULL;
+	struct sched_domain *sd;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
@@ -4929,7 +4928,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
-		last = sd;
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
@@ -4974,9 +4972,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		if (!balance)
 			break;
 	}
-	for (sd = last; sd; sd = sd->child)
-		(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
-
 	rcu_read_unlock();
 
 	/*
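(Illustration only, not part of the patch.) A minimal user-space sketch of the
selection pattern the fair.c hunks restore in update_sg_lb_stats(): within the
local group, remember the first idle CPU (first_idle_cpu/balance_cpu) instead
of tracking the minimally loaded one, fall back to the first CPU of the group
when none is idle, and let only that CPU proceed with the balance (the
"balance_cpu != env->dst_cpu" check). struct cpu_stat and pick_balance_cpu()
are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

/* Hypothetical stand-in for per-cpu state; not a kernel structure. */
struct cpu_stat {
	int cpu;
	int idle;	/* stand-in for idle_cpu(cpu) */
};

/*
 * Pick the CPU that owns load balancing for this group: the first idle
 * CPU if there is one, otherwise the first CPU of the group, mirroring
 * the first_idle_cpu/balance_cpu logic the patch restores.
 */
static int pick_balance_cpu(const struct cpu_stat *cpus, int nr)
{
	int balance_cpu = -1, first_idle_cpu = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (cpus[i].idle && !first_idle_cpu) {
			first_idle_cpu = 1;
			balance_cpu = cpus[i].cpu;
		}
	}

	if (balance_cpu == -1 && nr > 0)
		balance_cpu = cpus[0].cpu;	/* no idle CPU: first of group */

	return balance_cpu;
}

int main(void)
{
	struct cpu_stat group[] = {
		{ .cpu = 0, .idle = 0 },
		{ .cpu = 1, .idle = 1 },
		{ .cpu = 2, .idle = 1 },
		{ .cpu = 3, .idle = 0 },
	};
	int dst_cpu = 2;	/* CPU currently running the balance pass */
	int balance_cpu = pick_balance_cpu(group, 4);

	/* Mirrors the "balance_cpu != env->dst_cpu" bail-out in the patch. */
	printf("balance_cpu=%d dst_cpu=%d -> %s\n", balance_cpu, dst_cpu,
	       balance_cpu == dst_cpu ? "balance here" : "skip, owned by another cpu");
	return 0;
}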