Commit 15c8fae
---
yaml
---
r: 3285
b: refs/heads/master
c: 3950745
h: refs/heads/master
i:
  3283: ba6c1ad
v: v3
Nick Piggin authored and Linus Torvalds committed Jun 25, 2005
1 parent d3b9991 commit 15c8fae
Showing 2 changed files with 32 additions and 46 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 16cfb1c04c3cbe3759f339d3333e7e1e7d59712a
+refs/heads/master: 3950745131e23472fb5ace2ee4a2093e7590ec69
76 changes: 31 additions & 45 deletions trunk/kernel/sched.c
@@ -1995,7 +1995,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 				 * We've kicked active balancing, reset the failure
 				 * counter.
 				 */
-				sd->nr_balance_failed = sd->cache_nice_tries;
+				sd->nr_balance_failed = sd->cache_nice_tries+1;
 			}
 		} else
 			sd->nr_balance_failed = 0;
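
Aside: the one-line change above interacts with the kick threshold used earlier in load_balance(), which in sched.c of this vintage fires active balancing once nr_balance_failed exceeds cache_nice_tries+2 (that check is not shown in this excerpt, so treat it as an assumption here). A minimal userspace sketch of that gating; the struct and main() harness are invented for illustration, only the threshold and the new reset value mirror the kernel:

/*
 * Toy model of the nr_balance_failed gating -- an illustration only,
 * not kernel code. Assumes the load_balance() threshold is
 * "nr_balance_failed > cache_nice_tries + 2", as in sched.c of this era.
 */
#include <stdio.h>

struct toy_domain {
	unsigned int nr_balance_failed;
	unsigned int cache_nice_tries;
};

/* Returns 1 when a failed balance attempt should kick active balancing. */
static int failed_balance(struct toy_domain *sd)
{
	sd->nr_balance_failed++;
	if (sd->nr_balance_failed > sd->cache_nice_tries + 2) {
		/* Kicked active balancing; reset the failure counter
		 * to cache_nice_tries+1, as in the patched line above. */
		sd->nr_balance_failed = sd->cache_nice_tries + 1;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_domain sd = { .nr_balance_failed = 0, .cache_nice_tries = 2 };
	int i;

	for (i = 1; i <= 10; i++) {
		int kick = failed_balance(&sd);
		printf("attempt %2d: kick=%d failed=%u\n",
		       i, kick, sd.nr_balance_failed);
	}
	return 0;
}

With cache_nice_tries = 2 the kick fires when the counter reaches 5; resetting it to 3 rather than 2 means the next kick needs only two further failed attempts instead of three, so the domain stays closer to the active-balancing threshold once it has already proven hard to balance.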
@@ -2106,56 +2106,42 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
 {
 	struct sched_domain *sd;
-	struct sched_group *cpu_group;
 	runqueue_t *target_rq;
-	cpumask_t visited_cpus;
-	int cpu;
+	int target_cpu = busiest_rq->push_cpu;
+
+	if (busiest_rq->nr_running <= 1)
+		/* no task to move */
+		return;
+
+	target_rq = cpu_rq(target_cpu);
 
 	/*
-	 * Search for suitable CPUs to push tasks to in successively higher
-	 * domains with SD_LOAD_BALANCE set.
+	 * This condition is "impossible", if it occurs
+	 * we need to fix it. Originally reported by
+	 * Bjorn Helgaas on a 128-cpu setup.
 	 */
-	visited_cpus = CPU_MASK_NONE;
-	for_each_domain(busiest_cpu, sd) {
-		if (!(sd->flags & SD_LOAD_BALANCE))
-			/* no more domains to search */
-			break;
+	BUG_ON(busiest_rq == target_rq);
 
-		schedstat_inc(sd, alb_cnt);
+	/* move a task from busiest_rq to target_rq */
+	double_lock_balance(busiest_rq, target_rq);
 
-		cpu_group = sd->groups;
-		do {
-			for_each_cpu_mask(cpu, cpu_group->cpumask) {
-				if (busiest_rq->nr_running <= 1)
-					/* no more tasks left to move */
-					return;
-				if (cpu_isset(cpu, visited_cpus))
-					continue;
-				cpu_set(cpu, visited_cpus);
-				if (!cpu_and_siblings_are_idle(cpu) || cpu == busiest_cpu)
-					continue;
-
-				target_rq = cpu_rq(cpu);
-				/*
-				 * This condition is "impossible", if it occurs
-				 * we need to fix it. Originally reported by
-				 * Bjorn Helgaas on a 128-cpu setup.
-				 */
-				BUG_ON(busiest_rq == target_rq);
-
-				/* move a task from busiest_rq to target_rq */
-				double_lock_balance(busiest_rq, target_rq);
-				if (move_tasks(target_rq, cpu, busiest_rq,
-						1, sd, SCHED_IDLE, NULL)) {
-					schedstat_inc(sd, alb_pushed);
-				} else {
-					schedstat_inc(sd, alb_failed);
-				}
-				spin_unlock(&target_rq->lock);
-			}
-			cpu_group = cpu_group->next;
-		} while (cpu_group != sd->groups);
-	}
+	/* Search for an sd spanning us and the target CPU. */
+	for_each_domain(target_cpu, sd)
+		if ((sd->flags & SD_LOAD_BALANCE) &&
+			cpu_isset(busiest_cpu, sd->span))
+				break;
+
+	if (unlikely(sd == NULL))
+		goto out;
+
+	schedstat_inc(sd, alb_cnt);
+
+	if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
+		schedstat_inc(sd, alb_pushed);
+	else
+		schedstat_inc(sd, alb_failed);
+out:
+	spin_unlock(&target_rq->lock);
 }
 
 /*
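
The rewritten function replaces the old nested walk over every group and CPU with a single upward walk of the domain hierarchy from the target CPU, stopping at the first balancing domain whose span covers the busiest CPU. A standalone sketch of that lookup shape, using a toy linked-list domain with a plain bitmask span in place of the kernel's sched_domain and cpumask_t; all names here are invented for illustration, not the kernel's API:

/*
 * Toy model of the domain lookup added above: walk from a CPU's lowest
 * domain toward the root and return the first domain that both allows
 * load balancing and spans the other CPU.
 */
#include <stdio.h>

#define TOY_SD_LOAD_BALANCE 0x01

struct toy_domain {
	unsigned int flags;
	unsigned long span;          /* bit N set => CPU N is in this domain */
	struct toy_domain *parent;   /* next, wider domain; NULL past the root */
};

static struct toy_domain *find_spanning_domain(struct toy_domain *sd, int other_cpu)
{
	for (; sd; sd = sd->parent)
		if ((sd->flags & TOY_SD_LOAD_BALANCE) &&
		    (sd->span & (1UL << other_cpu)))
			return sd;
	return NULL;                 /* the "impossible" case: no shared domain */
}

int main(void)
{
	/* Two-level hierarchy: SMT pair {0,1} nested inside a node {0,1,2,3}. */
	struct toy_domain node = { TOY_SD_LOAD_BALANCE, 0x0F, NULL };
	struct toy_domain smt  = { TOY_SD_LOAD_BALANCE, 0x03, &node };

	struct toy_domain *sd = find_spanning_domain(&smt, 3);
	printf("CPU 3 found in %s domain\n",
	       sd == &smt ? "SMT" : sd == &node ? "node" : "no");
	return 0;
}

Searching from target_cpu bounds the work by the depth of the hierarchy rather than the number of CPUs and groups, which is why the diff can drop the visited_cpus bookkeeping; the unlikely(sd == NULL) guard covers the case where no shared balancing domain is found.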
