Commit f3a1a76

---
r: 182523
b: refs/heads/master
c: dd5feea
h: refs/heads/master
i:
  182521: abf06d8
  182519: 7be6961
v: v3
Suresh Siddha authored and Ingo Molnar committed Feb 26, 2010
1 parent 087171e commit f3a1a76
Showing 2 changed files with 44 additions and 34 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 83ab0aa0d5623d823444db82c3b3c34d7ec364ae
+refs/heads/master: dd5feea14a7de4edbd9f36db1a2db785de91b88d
76 changes: 43 additions & 33 deletions trunk/kernel/sched_fair.c
@@ -2097,6 +2097,7 @@ struct sd_lb_stats {
         unsigned long max_load;
         unsigned long busiest_load_per_task;
         unsigned long busiest_nr_running;
+        unsigned long busiest_group_capacity;
 
         int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2416,14 +2417,12 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         unsigned long load, max_cpu_load, min_cpu_load;
         int i;
         unsigned int balance_cpu = -1, first_idle_cpu = 0;
-        unsigned long sum_avg_load_per_task;
-        unsigned long avg_load_per_task;
+        unsigned long avg_load_per_task = 0;
 
         if (local_group)
                 balance_cpu = group_first_cpu(group);
 
         /* Tally up the load of all CPUs in the group */
-        sum_avg_load_per_task = avg_load_per_task = 0;
         max_cpu_load = 0;
         min_cpu_load = ~0UL;
 
@@ -2453,7 +2452,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                 sgs->sum_nr_running += rq->nr_running;
                 sgs->sum_weighted_load += weighted_cpuload(i);
 
-                sum_avg_load_per_task += cpu_avg_load_per_task(i);
         }
 
         /*
@@ -2473,7 +2471,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         /* Adjust by relative CPU power of the group */
         sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
-
         /*
          * Consider the group unbalanced when the imbalance is larger
          * than the average weight of two tasks.
@@ -2483,8 +2480,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
          * normalized nr_running number somewhere that negates
          * the hierarchy?
          */
-        avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
-                group->cpu_power;
+        if (sgs->sum_nr_running)
+                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
         if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
                 sgs->group_imb = 1;
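
For illustration, a minimal userspace sketch of the group-imbalance heuristic as it stands after this hunk: avg_load_per_task now comes from the sum_weighted_load / sum_nr_running totals that update_sg_lb_stats() already accumulates, and a group is flagged imbalanced when the spread between its busiest and idlest CPU exceeds two average tasks. The load values below are made up, and SCHED_LOAD_SCALE = 1024 (the nice-0 task weight) is assumed; this is a sketch of the arithmetic, not kernel code.

#include <stdio.h>

int main(void)
{
        /* Hypothetical per-group totals, as update_sg_lb_stats() would tally them. */
        unsigned long sum_weighted_load = 3072; /* three nice-0 tasks of weight 1024 */
        unsigned long sum_nr_running    = 3;
        unsigned long max_cpu_load      = 3072; /* all three tasks piled on one CPU */
        unsigned long min_cpu_load      = 0;    /* another CPU in the group is idle */
        unsigned long avg_load_per_task = 0;
        int group_imb;

        if (sum_nr_running)
                avg_load_per_task = sum_weighted_load / sum_nr_running;

        /*
         * Same heuristic as the hunk: flag the group as internally imbalanced
         * when the spread between its busiest and idlest CPU exceeds the
         * weight of two average tasks (3072 - 0 > 2 * 1024 here).
         */
        group_imb = (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task;

        printf("avg_load_per_task=%lu group_imb=%d\n", avg_load_per_task, group_imb);
        return 0;
}
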
@@ -2553,6 +2550,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                         sds->max_load = sgs.avg_load;
                         sds->busiest = group;
                         sds->busiest_nr_running = sgs.sum_nr_running;
+                        sds->busiest_group_capacity = sgs.group_capacity;
                         sds->busiest_load_per_task = sgs.sum_weighted_load;
                         sds->group_imb = sgs.group_imb;
                 }
@@ -2575,6 +2573,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 {
         unsigned long tmp, pwr_now = 0, pwr_move = 0;
         unsigned int imbn = 2;
+        unsigned long scaled_busy_load_per_task;
 
         if (sds->this_nr_running) {
                 sds->this_load_per_task /= sds->this_nr_running;
@@ -2585,8 +2584,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
                 sds->this_load_per_task =
                         cpu_avg_load_per_task(this_cpu);
 
-        if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
-                        sds->busiest_load_per_task * imbn) {
+        scaled_busy_load_per_task = sds->busiest_load_per_task
+                                         * SCHED_LOAD_SCALE;
+        scaled_busy_load_per_task /= sds->busiest->cpu_power;
+
+        if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
+                        (scaled_busy_load_per_task * imbn)) {
                 *imbalance = sds->busiest_load_per_task;
                 return;
         }
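
A rough worked example of the scaling those new lines perform: the raw per-task load is converted into the same cpu_power-scaled units that max_load and this_load already use, so a task sitting on a low-cpu_power CPU (an SMT sibling, say) weighs proportionally more in the comparison. SCHED_LOAD_SCALE = 1024 and a cpu_power of 589 are assumed below, illustrative values rather than anything taken from this diff.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL /* assumed nice-0 load unit, as in 2.6.33-era kernels */

int main(void)
{
        unsigned long busiest_load_per_task = 1024; /* one nice-0 task */
        unsigned long busiest_cpu_power     = 589;  /* e.g. one SMT sibling; illustrative */
        unsigned long scaled_busy_load_per_task;

        /* Mirror of the hunk: scale the raw per-task weight by SCHED_LOAD_SCALE / cpu_power. */
        scaled_busy_load_per_task = busiest_load_per_task * SCHED_LOAD_SCALE;
        scaled_busy_load_per_task /= busiest_cpu_power;

        /* 1024 -> 1780: on a low-cpu_power CPU one task looks correspondingly heavier. */
        printf("raw=%lu scaled=%lu\n", busiest_load_per_task, scaled_busy_load_per_task);
        return 0;
}
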
@@ -2637,7 +2640,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                 unsigned long *imbalance)
 {
-        unsigned long max_pull;
+        unsigned long max_pull, load_above_capacity = ~0UL;
+
+        sds->busiest_load_per_task /= sds->busiest_nr_running;
+        if (sds->group_imb) {
+                sds->busiest_load_per_task =
+                        min(sds->busiest_load_per_task, sds->avg_load);
+        }
+
         /*
          * In the presence of smp nice balancing, certain scenarios can have
          * max load less than avg load(as we skip the groups at or below
@@ -2648,9 +2658,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                 return fix_small_imbalance(sds, this_cpu, imbalance);
         }
 
-        /* Don't want to pull so many tasks that a group would go idle */
-        max_pull = min(sds->max_load - sds->avg_load,
-                        sds->max_load - sds->busiest_load_per_task);
+        if (!sds->group_imb) {
+                /*
+                 * Don't want to pull so many tasks that a group would go idle.
+                 */
+                load_above_capacity = (sds->busiest_nr_running -
+                                                sds->busiest_group_capacity);
+
+                load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
+
+                load_above_capacity /= sds->busiest->cpu_power;
+        }
+
+        /*
+         * We're trying to get all the cpus to the average_load, so we don't
+         * want to push ourselves above the average load, nor do we wish to
+         * reduce the max loaded cpu below the average load. At the same time,
+         * we also don't want to reduce the group load below the group capacity
+         * (so that we can implement power-savings policies etc). Thus we look
+         * for the minimum possible imbalance.
+         * Be careful of negative numbers as they'll appear as very large values
+         * with unsigned longs.
+         */
+        max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
         /* How much load to actually move to equalise the imbalance */
         *imbalance = min(max_pull * sds->busiest->cpu_power,
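
A standalone sketch of the arithmetic the rewritten block performs when the busiest group is not flagged group_imb: the load carried beyond the group's capacity is converted into the same scaled units as max_load and avg_load, and max_pull is capped by it before it feeds the *imbalance computation in the trailing context above. All statistics below are made up and SCHED_LOAD_SCALE = 1024 is assumed.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL /* assumed, as in 2.6.33-era kernels */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Made-up sched-domain statistics. */
        unsigned long max_load               = 3072; /* busiest group's scaled load */
        unsigned long avg_load               = 1536; /* domain-wide scaled average  */
        unsigned long busiest_nr_running     = 4;    /* tasks in the busiest group  */
        unsigned long busiest_group_capacity = 2;    /* e.g. a two-CPU group        */
        unsigned long busiest_cpu_power      = 2048; /* two CPUs worth of cpu_power */
        unsigned long load_above_capacity, max_pull;

        /* Load beyond what the group can run concurrently, in scaled units. */
        load_above_capacity = busiest_nr_running - busiest_group_capacity;
        load_above_capacity *= SCHED_LOAD_SCALE * SCHED_LOAD_SCALE;
        load_above_capacity /= busiest_cpu_power;    /* (4-2)*1024*1024/2048 = 1024 */

        /*
         * Pull no more than the excess over the average, and no more than the
         * excess over the group's capacity: min(3072 - 1536, 1024) = 1024.
         */
        max_pull = min_ul(max_load - avg_load, load_above_capacity);

        printf("load_above_capacity=%lu max_pull=%lu\n", load_above_capacity, max_pull);
        return 0;
}
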
@@ -2718,7 +2748,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
          * 4) This group is more busy than the avg busieness at this
          *    sched_domain.
          * 5) The imbalance is within the specified limit.
-         * 6) Any rebalance would lead to ping-pong
          */
         if (!(*balance))
                 goto ret;
@@ -2737,25 +2766,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
                 goto out_balanced;
 
-        sds.busiest_load_per_task /= sds.busiest_nr_running;
-        if (sds.group_imb)
-                sds.busiest_load_per_task =
-                        min(sds.busiest_load_per_task, sds.avg_load);
-
-        /*
-         * We're trying to get all the cpus to the average_load, so we don't
-         * want to push ourselves above the average load, nor do we wish to
-         * reduce the max loaded cpu below the average load, as either of these
-         * actions would just result in more rebalancing later, and ping-pong
-         * tasks around. Thus we look for the minimum possible imbalance.
-         * Negative imbalances (*we* are more loaded than anyone else) will
-         * be counted as no imbalance for these purposes -- we can't fix that
-         * by pulling tasks to us. Be careful of negative numbers as they'll
-         * appear as very large values with unsigned longs.
-         */
-        if (sds.max_load <= sds.busiest_load_per_task)
-                goto out_balanced;
-
         /* Looks like there is an imbalance. Compute it */
         calculate_imbalance(&sds, this_cpu, imbalance);
         return sds.busiest;
