From 07a47f9da976cfe523f7f413ca7551fdc0613a4c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 10 Sep 2009 14:40:57 +0200
Subject: [PATCH]

--- yaml ---
r: 162999
b: refs/heads/master
c: ae154be1f34a674e6cbb43ccf6e442f56acd7a70
h: refs/heads/master
i:
  162997: c43d7b1bc219f25596bb4b865c9ee7ecc37e168e
  162995: 43627d99ac62465a08ce6f23b6eb922def76cef3
  162991: e9637286736752ab53d993c9cb1a8bbac2952dbc
v: v3
---

 [refs]                    |  2 +-
 trunk/kernel/sched.c      | 40 +++++++++++++++++++--------------------
 trunk/kernel/sched_fair.c | 21 +++++++++++++++++---
 3 files changed, 39 insertions(+), 24 deletions(-)

diff --git a/[refs] b/[refs]
index 97de2b1baf20..f06a40b840dd 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c88d5910890ad35af283344417891344604f0438
+refs/heads/master: ae154be1f34a674e6cbb43ccf6e442f56acd7a70
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 6c819f338b11..f0ccb8b926c8 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -1538,6 +1538,26 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -3982,26 +4002,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 09d19f77eb3a..eaa00014b499 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1333,10 +1333,25 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 
 	for_each_domain(cpu, tmp) {
 		/*
-		 * If power savings logic is enabled for a domain, stop there.
+		 * If power savings logic is enabled for a domain, see if we
+		 * are not overloaded, if so, don't balance wider.
 		 */
-		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-			break;
+		if (tmp->flags & SD_POWERSAVINGS_BALANCE) {
+			unsigned long power = 0;
+			unsigned long nr_running = 0;
+			unsigned long capacity;
+			int i;
+
+			for_each_cpu(i, sched_domain_span(tmp)) {
+				power += power_of(i);
+				nr_running += cpu_rq(i)->cfs.nr_running;
+			}
+
+			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+
+			if (nr_running/2 < capacity)
+				break;
+		}
 
 		switch (flag) {
 		case SD_BALANCE_WAKE:
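
A quick standalone sketch of the overload test this patch adds to
select_task_rq_fair(): capacity is the domain's summed cpu_power rounded to
whole CPUs, and the domain walk stops at a SD_POWERSAVINGS_BALANCE domain only
while nr_running/2 stays below that capacity. Everything below is assumed for
illustration (the userspace re-definition of DIV_ROUND_CLOSEST and the example
power/task numbers), not values taken from a running kernel.

/*
 * Illustrative sketch, not part of the patch: the arithmetic of the
 * "are we overloaded?" check, compilable in userspace.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
/* Same rounding as the kernel macro for unsigned operands. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	/* Assumed example: four CPUs at nominal power. */
	unsigned long power = 4 * SCHED_LOAD_SCALE;
	/* Assumed example: seven runnable CFS tasks in the domain. */
	unsigned long nr_running = 7;

	/* 4608/1024 rounds to 4: the domain is "worth" four CPUs. */
	unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

	/* 7/2 = 3 < 4: not overloaded, stop at this powersave domain. */
	if (nr_running / 2 < capacity)
		printf("not overloaded, stop here (capacity=%lu)\n", capacity);
	else
		printf("overloaded, balance wider (capacity=%lu)\n", capacity);

	return 0;
}

With these numbers, capacity is 4, so up to 7 runnable tasks (7/2 = 3 < 4)
still count as not overloaded and the walk breaks early; an 8th task makes
8/2 = 4, the test fails, and balancing continues into wider domains.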