Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 205421
b: refs/heads/master
c: 9d5efe0
h: refs/heads/master
i:
  205419: 515e4fa
v: v3
  • Loading branch information
Srivatsa Vaddagiri authored and Ingo Molnar committed Jun 9, 2010
1 parent 15dd66f commit 5b919f8
Show file tree
Hide file tree
Showing 3 changed files with 45 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 83cd4fe27ad8446619b2e030b171b858501de87d
refs/heads/master: 9d5efe05eb0c904545a28b19c18b949f23334de0
2 changes: 1 addition & 1 deletion trunk/include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -857,7 +857,7 @@ struct sched_group {
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
unsigned int cpu_power;
unsigned int cpu_power, cpu_power_orig;

/*
* The CPUs this group covers.
Expand Down
53 changes: 43 additions & 10 deletions trunk/kernel/sched_fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -2285,13 +2285,6 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
unsigned long power = SCHED_LOAD_SCALE;
struct sched_group *sdg = sd->groups;

if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
else
power *= default_scale_freq_power(sd, cpu);

power >>= SCHED_LOAD_SHIFT;

if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
if (sched_feat(ARCH_POWER))
power *= arch_scale_smt_power(sd, cpu);
Expand All @@ -2301,6 +2294,15 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power >>= SCHED_LOAD_SHIFT;
}

sdg->cpu_power_orig = power;

if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
else
power *= default_scale_freq_power(sd, cpu);

power >>= SCHED_LOAD_SHIFT;

power *= scale_rt_power(cpu);
power >>= SCHED_LOAD_SHIFT;

Expand Down Expand Up @@ -2333,6 +2335,31 @@ static void update_group_power(struct sched_domain *sd, int cpu)
sdg->cpu_power = power;
}

/*
* Try and fix up capacity for tiny siblings, this is needed when
* things like SD_ASYM_PACKING need f_b_g to select another sibling
* which on its own isn't powerful enough.
*
* See update_sd_pick_busiest() and check_asym_packing().
*/
/*
 * Decide whether a group whose rounded capacity came out as zero should
 * still be treated as having capacity 1.  Needed so that features such as
 * SD_ASYM_PACKING can make find_busiest_group() pick a sibling that is
 * individually too weak to register as a full capacity unit.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
	 * Only the SMT-sibling level can legitimately sit significantly
	 * below SCHED_LOAD_SCALE; anything else keeps capacity 0.
	 */
	if (sd->level != SD_LV_SIBLING)
		return 0;

	/*
	 * Grant one unit of capacity when less than ~90% of the original
	 * cpu_power remains (32/29 scaled integer comparison avoids
	 * floating point).
	 */
	return group->cpu_power * 32 < group->cpu_power_orig * 29;
}

/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @sd: The sched_domain whose statistics are to be updated.
Expand Down Expand Up @@ -2426,6 +2453,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,

sgs->group_capacity =
DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
}

/**
Expand Down Expand Up @@ -2724,8 +2753,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *
find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
unsigned long imbalance, const struct cpumask *cpus)
find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
enum cpu_idle_type idle, unsigned long imbalance,
const struct cpumask *cpus)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
Expand All @@ -2736,6 +2766,9 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
unsigned long wl;

if (!capacity)
capacity = fix_small_capacity(sd, group);

if (!cpumask_test_cpu(i, cpus))
continue;

Expand Down Expand Up @@ -2852,7 +2885,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
goto out_balanced;
}

busiest = find_busiest_queue(group, idle, imbalance, cpus);
busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
Expand Down

0 comments on commit 5b919f8

Please sign in to comment.