Commit 85b8843
---
r: 35560
b: refs/heads/master
c: 0a2966b
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed Sep 26, 2006
1 parent 84572bf commit 85b8843
Showing 2 changed files with 47 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 656ddf798dbe588217c97e58b9cfdfce649ebdc3
refs/heads/master: 0a2966b48fb784e437520e400ddc94874ddbd4e8
54 changes: 46 additions & 8 deletions trunk/kernel/sched.c
@@ -238,6 +238,7 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
int cpu; /* cpu of this runqueue */

struct task_struct *migration_thread;
struct list_head migration_queue;
@@ -267,6 +268,15 @@ struct rq {

static DEFINE_PER_CPU(struct rq, runqueues);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
return rq->cpu;
#else
return 0;
#endif
}

/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
@@ -2211,7 +2221,8 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
*/
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum idle_type idle, int *sd_idle)
unsigned long *imbalance, enum idle_type idle, int *sd_idle,
cpumask_t *cpus)
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2248,7 +2259,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
sum_weighted_load = sum_nr_running = avg_load = 0;

for_each_cpu_mask(i, group->cpumask) {
struct rq *rq = cpu_rq(i);
struct rq *rq;

if (!cpu_isset(i, *cpus))
continue;

rq = cpu_rq(i);

if (*sd_idle && !idle_cpu(i))
*sd_idle = 0;
@@ -2466,13 +2482,17 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
static struct rq *
find_busiest_queue(struct sched_group *group, enum idle_type idle,
unsigned long imbalance)
unsigned long imbalance, cpumask_t *cpus)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
int i;

for_each_cpu_mask(i, group->cpumask) {

if (!cpu_isset(i, *cpus))
continue;

rq = cpu_rq(i);

if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
@@ -2511,20 +2531,23 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
cpumask_t cpus = CPU_MASK_ALL;

if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
!sched_smt_power_savings)
sd_idle = 1;

schedstat_inc(sd, lb_cnt[idle]);

group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
&cpus);
if (!group) {
schedstat_inc(sd, lb_nobusyg[idle]);
goto out_balanced;
}

busiest = find_busiest_queue(group, idle, imbalance);
busiest = find_busiest_queue(group, idle, imbalance, &cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -2549,8 +2572,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
double_rq_unlock(this_rq, busiest);

/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(all_pinned))
if (unlikely(all_pinned)) {
cpu_clear(cpu_of(busiest), cpus);
if (!cpus_empty(cpus))
goto redo;
goto out_balanced;
}
}

if (!nr_moved) {
@@ -2639,18 +2666,22 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
unsigned long imbalance;
int nr_moved = 0;
int sd_idle = 0;
cpumask_t cpus = CPU_MASK_ALL;

if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
sd_idle = 1;

schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
&sd_idle, &cpus);
if (!group) {
schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
goto out_balanced;
}

busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
&cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
goto out_balanced;
@@ -2668,6 +2699,12 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
minus_1_or_zero(busiest->nr_running),
imbalance, sd, NEWLY_IDLE, NULL);
spin_unlock(&busiest->lock);

if (!nr_moved) {
cpu_clear(cpu_of(busiest), cpus);
if (!cpus_empty(cpus))
goto redo;
}
}

if (!nr_moved) {
@@ -6747,6 +6784,7 @@ void __init sched_init(void)
rq->cpu_load[j] = 0;
rq->active_balance = 0;
rq->push_cpu = 0;
rq->cpu = i;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
#endif
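The substance of the patch is the new redo loop in load_balance() and load_balance_newidle(): balancing now starts from a candidate mask initialised to CPU_MASK_ALL, and when the chosen busiest runqueue turns out to be unusable (all of its tasks pinned by CPU affinity, or nothing movable in the newly-idle path), cpu_of(busiest) is cleared from the mask and find_busiest_group()/find_busiest_queue() are rerun over the remaining CPUs until either a usable queue is found or the mask is empty. Below is a minimal userspace sketch of that shrinking-candidate-set retry pattern; the names, data, and helpers are hypothetical stand-ins, only the control flow mirrors the hunks above.

#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

/* Hypothetical stand-ins for the kernel's cpumask helpers. */
static bool mask_empty(unsigned int mask)           { return mask == 0; }
static void mask_clear(unsigned int *mask, int cpu) { *mask &= ~(1u << cpu); }

/* Made-up per-CPU load and "all tasks pinned" state for the demo. */
static const int  load[NCPUS]       = { 10, 50, 30, 20 };
static const bool all_pinned[NCPUS] = { false, true, false, false };

/* Pick the most loaded CPU among the remaining candidates (-1 if none). */
static int find_busiest(unsigned int mask)
{
	int busiest = -1, max_load = 0;

	for (int i = 0; i < NCPUS; i++) {
		if (!(mask & (1u << i)))
			continue;               /* skip CPUs already ruled out */
		if (load[i] > max_load) {
			max_load = load[i];
			busiest = i;
		}
	}
	return busiest;
}

int main(void)
{
	unsigned int cpus = (1u << NCPUS) - 1;  /* like cpumask_t cpus = CPU_MASK_ALL */
	int busiest;

redo:
	busiest = find_busiest(cpus);
	if (busiest < 0) {
		puts("out_balanced: no candidate left");
		return 0;
	}
	if (all_pinned[busiest]) {
		/* As in the patch: drop the unusable CPU and retry the search. */
		mask_clear(&cpus, busiest);
		if (!mask_empty(cpus))
			goto redo;
		puts("out_balanced: every candidate was pinned");
		return 0;
	}
	printf("pull tasks from cpu %d\n", busiest);
	return 0;
}

With this retry in place, one fully pinned runqueue no longer aborts the whole balancing pass; the balancer simply keeps searching the remaining CPUs in the group.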
