
Commit

---
r: 288741
b: refs/heads/master
c: 4ec4412
h: refs/heads/master
i:
  288739: a809265
v: v3
Vincent Guittot authored and Ingo Molnar committed Jan 27, 2012
1 parent 26d5de3 commit 61cc70b
Showing 3 changed files with 18 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 39be350127ec60a078edffe5b4915dafba4ba514
+refs/heads/master: 4ec4412e1e91f44a3dcb97b6c9172a13fc78bac9
1 change: 1 addition & 0 deletions trunk/include/linux/sched.h
@@ -905,6 +905,7 @@ struct sched_group_power {
	 * single CPU.
	 */
	unsigned int power, power_orig;
+	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
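Note: the new next_update field holds an absolute jiffies deadline after which this group's cpu_power is considered stale. A minimal standalone sketch of the deadline-based throttling pattern (plain userspace C, not kernel code; the tick loop and the 4-tick interval are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

static uint64_t jiffies_now;    /* stand-in for the kernel's jiffies counter */
static uint64_t next_update;    /* absolute deadline, as in sched_group_power */

/* Refresh at most once per 'interval' ticks, like update_group_power(). */
static void maybe_refresh(uint64_t interval)
{
        if (jiffies_now >= next_update) {   /* the kernel uses time_after_eq() */
                next_update = jiffies_now + interval;
                printf("refresh at tick %llu\n", (unsigned long long)jiffies_now);
        }
}

int main(void)
{
        for (jiffies_now = 0; jiffies_now < 10; jiffies_now++)
                maybe_refresh(4);           /* prints at ticks 0, 4 and 8 */
        return 0;
}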
24 changes: 16 additions & 8 deletions trunk/kernel/sched/fair.c
@@ -215,6 +215,8 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 
 const struct sched_class fair_sched_class;
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -3776,6 +3778,11 @@ void update_group_power(struct sched_domain *sd, int cpu)
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long power;
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(sd->balance_interval);
+	interval = clamp(interval, 1UL, max_load_balance_interval);
+	sdg->sgp->next_update = jiffies + interval;
 
	if (!child) {
		update_cpu_power(sd, cpu);
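Note: the added lines convert the domain's balance_interval (in milliseconds) to jiffies and clamp it to [1, HZ/10] before arming the deadline. A hedged userspace sketch of just that arithmetic (HZ and the sample intervals are assumptions for illustration; msecs_to_jiffies and clamp are reimplemented locally):

#include <stdio.h>

#define HZ 1000UL                          /* assumed tick rate */
#define MAX_LB_INTERVAL (HZ / 10)          /* 100 jiffies, i.e. 100 ms at HZ=1000 */

/* simplified conversion; exact only when HZ divides 1000 */
static unsigned long msecs_to_jiffies(unsigned long ms)
{
        return ms * HZ / 1000;
}

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        unsigned long samples[] = { 0, 64, 500 };  /* balance_interval values in ms */

        for (int i = 0; i < 3; i++)
                printf("%4lu ms -> %3lu jiffies\n", samples[i],
                       clamp_ul(msecs_to_jiffies(samples[i]), 1UL, MAX_LB_INTERVAL));
        return 0;                                  /* 0 -> 1, 64 -> 64, 500 -> 100 */
}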
@@ -3883,12 +3890,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
	 * domains. In the newly idle case, we will allow all the cpu's
	 * to do the newly idle load balance.
	 */
-	if (idle != CPU_NEWLY_IDLE && local_group) {
-		if (balance_cpu != this_cpu) {
-			*balance = 0;
-			return;
-		}
-		update_group_power(sd, this_cpu);
+	if (local_group) {
+		if (idle != CPU_NEWLY_IDLE) {
+			if (balance_cpu != this_cpu) {
+				*balance = 0;
+				return;
+			}
+			update_group_power(sd, this_cpu);
+		} else if (time_after_eq(jiffies, group->sgp->next_update))
+			update_group_power(sd, this_cpu);
	}
 
	/* Adjust by relative CPU power of the group */
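Note: the new else-branch lets the newly-idle balancing path refresh the local group's power once the next_update deadline has passed, instead of never updating it there. time_after_eq() is the kernel's wraparound-safe jiffies comparison; a standalone sketch of the underlying idiom (32-bit counters assumed for illustration):

#include <stdint.h>
#include <stdio.h>

/* true if a is at or after b, correct even if the counter has wrapped */
static int my_time_after_eq(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}

int main(void)
{
        uint32_t near_wrap = 0xFFFFFFF0u;

        /* 0x10 lies "after" 0xFFFFFFF0 once the counter wraps past zero */
        printf("%d\n", my_time_after_eq(0x10u, near_wrap));   /* prints 1 */
        printf("%d\n", my_time_after_eq(near_wrap, 0x10u));   /* prints 0 */
        return 0;
}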
@@ -4945,8 +4955,6 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
 
 static DEFINE_SPINLOCK(balancing);
 
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
-
 /*
  * Scale the max load_balance interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
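Note: as a worked example of this trade-off, if the cap scales linearly with CPU count (the helper that follows this comment is not shown in the diff, so linear scaling is an assumption here), a 16-CPU machine at HZ=1000 would permit up to 16 * HZ/10 = 1600 jiffies, roughly 1.6 s, between power updates of a given domain.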
