
Commit

---
yaml
---
r: 162999
b: refs/heads/master
c: ae154be
h: refs/heads/master
i:
  162997: c43d7b1
  162995: 43627d9
  162991: e963728
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 15, 2009
1 parent 50aa2ae commit 07a47f9
Showing 3 changed files with 39 additions and 24 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c88d5910890ad35af283344417891344604f0438
+refs/heads/master: ae154be1f34a674e6cbb43ccf6e442f56acd7a70
40 changes: 20 additions & 20 deletions trunk/kernel/sched.c
@@ -1538,6 +1538,26 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -3982,26 +4002,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
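The two helpers above are simply relocated: the commit adds group_of() and power_of() near target_load() and deletes the identical definitions from their old spot after find_busiest_group(), presumably so that the wake-up path in sched_fair.c can reach power_of(). The stand-alone sketch below (plain user-space C, not kernel code; domain_of() and the lookup table are invented stand-ins for rcu_dereference(cpu_rq(cpu)->sd)) just illustrates the fallback: a CPU with no sched_domain attached reports the default SCHED_LOAD_SCALE rather than a group's accumulated cpu_power.

/*
 * Stand-alone sketch, not kernel code: power_of()'s fallback behaviour.
 * The struct layouts and the table-based domain_of() lookup are simplified
 * stand-ins for the real scheduler structures.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

struct sched_group  { unsigned long cpu_power; };
struct sched_domain { struct sched_group *groups; };

/* stand-in for rcu_dereference(cpu_rq(cpu)->sd) */
static struct sched_domain *domain_of(int cpu, struct sched_domain **sds)
{
	return sds[cpu];
}

static unsigned long power_of(int cpu, struct sched_domain **sds)
{
	struct sched_domain *sd = domain_of(cpu, sds);

	if (!sd)
		return SCHED_LOAD_SCALE;	/* no domain attached: assume one nominal CPU */

	return sd->groups->cpu_power;
}

int main(void)
{
	struct sched_group  g = { .cpu_power = 2 * SCHED_LOAD_SCALE };
	struct sched_domain d = { .groups = &g };
	struct sched_domain *sds[2] = { &d, NULL };

	printf("cpu0 power: %lu\n", power_of(0, sds));	/* 2048: taken from the group */
	printf("cpu1 power: %lu\n", power_of(1, sds));	/* 1024: fallback default */
	return 0;
}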
21 changes: 18 additions & 3 deletions trunk/kernel/sched_fair.c
@@ -1333,10 +1333,25 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 
 	for_each_domain(cpu, tmp) {
 		/*
-		 * If power savings logic is enabled for a domain, stop there.
+		 * If power savings logic is enabled for a domain, see if we
+		 * are not overloaded, if so, don't balance wider.
 		 */
-		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-			break;
+		if (tmp->flags & SD_POWERSAVINGS_BALANCE) {
+			unsigned long power = 0;
+			unsigned long nr_running = 0;
+			unsigned long capacity;
+			int i;
+
+			for_each_cpu(i, sched_domain_span(tmp)) {
+				power += power_of(i);
+				nr_running += cpu_rq(i)->cfs.nr_running;
+			}
+
+			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+
+			if (nr_running/2 < capacity)
+				break;
+		}
 
 		switch (flag) {
 		case SD_BALANCE_WAKE:
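Read in isolation, the new branch sums cpu_power and the number of runnable CFS tasks over every CPU in the domain, rounds the summed power to a whole number of nominal CPUs, and stops widening the wake-up balancing scope only while the domain holds roughly fewer than two runnable tasks per CPU of capacity. The sketch below is a self-contained illustration of that arithmetic, not kernel code: the per-CPU values are made up and DIV_ROUND_CLOSEST is a simplified version that only handles non-negative operands.

/*
 * Stand-alone sketch, not kernel code: the overload test added above.
 * Per-CPU power and nr_running values are invented for illustration.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* e.g. a 4-CPU domain where one CPU contributes only half its power */
	unsigned long cpu_power[]   = { 1024, 1024, 1024, 512 };
	unsigned long cfs_running[] = { 3, 2, 1, 1 };
	unsigned long power = 0, nr_running = 0, capacity;
	int i;

	for (i = 0; i < 4; i++) {
		power      += cpu_power[i];
		nr_running += cfs_running[i];
	}

	/* 3584 / 1024 rounds to 4 CPUs worth of capacity */
	capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

	if (nr_running / 2 < capacity)	/* 7/2 = 3 < 4 */
		printf("not overloaded: stop at this domain (power savings)\n");
	else
		printf("overloaded: keep balancing in wider domains\n");

	return 0;
}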

0 comments on commit 07a47f9
