Commit 43627d9

---
yaml
---
r: 162995
b: refs/heads/master
c: aaee120
h: refs/heads/master
i:
  162993: 3e6f8ef
  162991: e963728
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 15, 2009
1 parent 8245568 commit 43627d9
Showing 3 changed files with 146 additions and 147 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f5f08f39ee4c5fd0a757d25d9e04d696676b3df7
+refs/heads/master: aaee1203ca52b9db799433c33c9bffc33cdf8909
146 changes: 0 additions & 146 deletions trunk/kernel/sched.c
@@ -2269,152 +2269,6 @@ void kick_process(struct task_struct *p)
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;

do {
unsigned long load, avg_load;
int local_group;
int i;

/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_cpus(group),
&p->cpus_allowed))
continue;

local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));

/* Tally up the load of all CPUs in the group */
avg_load = 0;

for_each_cpu(i, sched_group_cpus(group)) {
/* Bias balancing toward cpus of our domain */
if (local_group)
load = source_load(i, load_idx);
else
load = target_load(i, load_idx);

avg_load += load;
}

/* Adjust by relative CPU power of the group */
avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

if (local_group) {
this_load = avg_load;
this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
}
} while (group = group->next, group != sd->groups);

if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
return idlest;
}

/*
* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
int idlest = -1;
int i;

/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
load = weighted_cpuload(i);

if (load < min_load || (load == min_load && i == this_cpu)) {
min_load = load;
idlest = i;
}
}

return idlest;
}

/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
* SD_BALANCE_EXEC.
*
* Balance, ie. select the least loaded group.
*
* Returns the target CPU number, or the same CPU if no balancing is needed.
*
* preempt must be disabled.
*/
static int sched_balance_self(int cpu, int flag)
{
struct task_struct *t = current;
struct sched_domain *tmp, *sd = NULL;

for_each_domain(cpu, tmp) {
/*
* If power savings logic is enabled for a domain, stop there.
*/
if (tmp->flags & SD_POWERSAVINGS_BALANCE)
break;
if (tmp->flags & flag)
sd = tmp;
}

if (sd)
update_shares(sd);

while (sd) {
struct sched_group *group;
int new_cpu, weight;

if (!(sd->flags & flag)) {
sd = sd->child;
continue;
}

group = find_idlest_group(sd, t, cpu);
if (!group) {
sd = sd->child;
continue;
}

new_cpu = find_idlest_cpu(group, t, cpu);
if (new_cpu == -1 || new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
continue;
}

/* Now try balancing at a lower domain level of new_cpu */
cpu = new_cpu;
weight = cpumask_weight(sched_domain_span(sd));
sd = NULL;
for_each_domain(cpu, tmp) {
if (weight <= cpumask_weight(sched_domain_span(tmp)))
break;
if (tmp->flags & flag)
sd = tmp;
}
/* while loop will break here if sd == NULL */
}

return cpu;
}

#endif /* CONFIG_SMP */

145 changes: 145 additions & 0 deletions trunk/kernel/sched_fair.c
@@ -1360,6 +1360,151 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
out:
return wake_idle(new_cpu, p);
}

/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;

do {
unsigned long load, avg_load;
int local_group;
int i;

/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_cpus(group),
&p->cpus_allowed))
continue;

local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));

/* Tally up the load of all CPUs in the group */
avg_load = 0;

for_each_cpu(i, sched_group_cpus(group)) {
/* Bias balancing toward cpus of our domain */
if (local_group)
load = source_load(i, load_idx);
else
load = target_load(i, load_idx);

avg_load += load;
}

/* Adjust by relative CPU power of the group */
avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

if (local_group) {
this_load = avg_load;
this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
}
} while (group = group->next, group != sd->groups);

if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
return idlest;
}
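
The group comparison above only pulls the task away from the local group when the idlest remote group is lighter by a configurable margin. A minimal, standalone sketch of that threshold arithmetic follows; prefer_remote_group() is a made-up helper, imbalance_pct = 125 is an assumed, typical value for CPU-level domains (the real value comes from the sched_domain), and none of this is part of the patch.

#include <stdio.h>

/*
 * Mirrors the final check in find_idlest_group(): the idlest remote group is
 * preferred only when 100 * this_load >= imbalance * min_load, i.e. the local
 * group is heavier by at least the configured margin.
 */
static int prefer_remote_group(unsigned long this_load, unsigned long min_load,
                               int imbalance_pct)
{
        int imbalance = 100 + (imbalance_pct - 100) / 2;

        /* find_idlest_group() returns NULL (stay local) in the opposite case */
        return !(100 * this_load < (unsigned long)imbalance * min_load);
}

int main(void)
{
        /* With imbalance_pct = 125, imbalance = 112: the remote group wins
         * only if the local group is more than ~12% heavier. */
        printf("%d\n", prefer_remote_group(1150, 1000, 125));  /* 1 */
        printf("%d\n", prefer_remote_group(1100, 1000, 125));  /* 0 */
        return 0;
}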

/*
* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
int idlest = -1;
int i;

/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
load = weighted_cpuload(i);

if (load < min_load || (load == min_load && i == this_cpu)) {
min_load = load;
idlest = i;
}
}

return idlest;
}
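
A tiny standalone illustration of the tie-break above: on equal load, the i == this_cpu test lets the current CPU overwrite an earlier candidate, so the task stays put rather than migrating for no gain. idlest_cpu() and the load values are invented for the example; this is not part of the patch.

#include <stdio.h>

/* Simplified model of find_idlest_cpu(): lowest load wins, this_cpu on ties */
static int idlest_cpu(const unsigned long *load, int nr_cpus, int this_cpu)
{
        unsigned long min_load = (unsigned long)-1;
        int idlest = -1;
        int i;

        for (i = 0; i < nr_cpus; i++) {
                if (load[i] < min_load ||
                    (load[i] == min_load && i == this_cpu)) {
                        min_load = load[i];
                        idlest = i;
                }
        }
        return idlest;
}

int main(void)
{
        unsigned long load[] = { 400, 200, 200 };

        /* CPUs 1 and 2 tie at 200; running on CPU 2, the tie-break keeps us there */
        printf("%d\n", idlest_cpu(load, 3, 2));  /* prints 2 */
        return 0;
}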

/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
* SD_BALANCE_EXEC.
*
* Balance, ie. select the least loaded group.
*
* Returns the target CPU number, or the same CPU if no balancing is needed.
*
* preempt must be disabled.
*/
static int sched_balance_self(int cpu, int flag)
{
struct task_struct *t = current;
struct sched_domain *tmp, *sd = NULL;

for_each_domain(cpu, tmp) {
/*
* If power savings logic is enabled for a domain, stop there.
*/
if (tmp->flags & SD_POWERSAVINGS_BALANCE)
break;
if (tmp->flags & flag)
sd = tmp;
}

if (sd)
update_shares(sd);

while (sd) {
struct sched_group *group;
int new_cpu, weight;

if (!(sd->flags & flag)) {
sd = sd->child;
continue;
}

group = find_idlest_group(sd, t, cpu);
if (!group) {
sd = sd->child;
continue;
}

new_cpu = find_idlest_cpu(group, t, cpu);
if (new_cpu == -1 || new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
continue;
}

/* Now try balancing at a lower domain level of new_cpu */
cpu = new_cpu;
weight = cpumask_weight(sched_domain_span(sd));
sd = NULL;
for_each_domain(cpu, tmp) {
if (weight <= cpumask_weight(sched_domain_span(tmp)))
break;
if (tmp->flags & flag)
sd = tmp;
}
/* while loop will break here if sd == NULL */
}

return cpu;
}
#endif /* CONFIG_SMP */
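
The two loops in sched_balance_self() drive the placement: the first climbs the domain hierarchy from the CPU's base domain upward, remembering the widest level that allows this kind of balancing and stopping at a power-savings level; the second then repeatedly picks the idlest group and CPU and drops to a lower level. Below is a minimal standalone model of the initial climb only; struct toy_domain, widest_domain() and the TOY_* flag values are invented for the sketch and are not part of the patch.

#include <stdio.h>

#define TOY_BALANCE_EXEC        0x01    /* stands in for SD_BALANCE_EXEC */
#define TOY_POWERSAVINGS        0x02    /* stands in for SD_POWERSAVINGS_BALANCE */

/* Toy sched_domain: each level spans more CPUs than its child */
struct toy_domain {
        int flags;
        int span_weight;                /* number of CPUs covered */
        struct toy_domain *parent;
};

/*
 * Mirrors the first loop of sched_balance_self(): walk upward, keep the
 * widest level with the wanted flag, stop climbing at a power-savings level.
 */
static struct toy_domain *widest_domain(struct toy_domain *base, int flag)
{
        struct toy_domain *tmp, *sd = NULL;

        for (tmp = base; tmp; tmp = tmp->parent) {
                if (tmp->flags & TOY_POWERSAVINGS)
                        break;
                if (tmp->flags & flag)
                        sd = tmp;
        }
        return sd;
}

int main(void)
{
        /* Invented hierarchy: a core level (2 CPUs) under a package level
         * (8 CPUs); both allow exec balancing, so the walk keeps the package */
        struct toy_domain pkg  = { TOY_BALANCE_EXEC, 8, NULL };
        struct toy_domain core = { TOY_BALANCE_EXEC, 2, &pkg };
        struct toy_domain *sd  = widest_domain(&core, TOY_BALANCE_EXEC);

        printf("balance across %d CPUs\n", sd ? sd->span_weight : 1);
        return 0;
}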

