sched: Remove rq_iterator usage from load_balance_fair
Since we only ever iterate the fair class, do away with this abstraction.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jan 21, 2010
1 parent 3d45fd8 commit ee00e66
Showing 1 changed file with 29 additions and 51 deletions.
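
The abstraction in question is struct rq_iterator: a start/next callback pair plus a void *arg cursor that balance_tasks() used so it would not have to know which sched class's tasks it was walking. Since only the fair class was ever walked this way, the callbacks can be replaced by a direct walk of the cfs_rq task list. A minimal user-space sketch of the before/after shape (illustrative only; names such as struct iter, it_start and sum_direct are invented here, not kernel code):

/*
 * Illustration only -- a user-space analogue of the refactor.
 * "Before": iteration hidden behind function pointers.
 * "After": a plain loop over the one list we ever iterate.
 */
#include <stdio.h>

struct task { int weight; struct task *next; };

/* Before: an rq_iterator-like abstraction over the list. */
struct iter {
        struct task *(*start)(void *arg);
        struct task *(*next)(void *arg);
        void *arg;                      /* cursor state hides behind void * */
};

static struct task *cursor;
static struct task *it_start(void *arg) { cursor = arg; return cursor; }
static struct task *it_next(void *arg)  { (void)arg; cursor = cursor->next; return cursor; }

static long sum_via_iterator(struct iter *it)
{
        long sum = 0;
        struct task *p;

        for (p = it->start(it->arg); p; p = it->next(it->arg))
                sum += p->weight;
        return sum;
}

/* After: only one list type is ever walked, so loop over it directly. */
static long sum_direct(struct task *head)
{
        long sum = 0;
        struct task *p;

        for (p = head; p; p = p->next)
                sum += p->weight;
        return sum;
}

int main(void)
{
        struct task c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct iter it = { it_start, it_next, &a };

        printf("%ld %ld\n", sum_via_iterator(&it), sum_direct(&a)); /* 6 6 */
        return 0;
}

Dropping the indirection is what lets the patch below turn the open-coded start/goto/next cursor in balance_tasks() into an ordinary list_for_each_entry_safe() loop.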
kernel/sched_fair.c: 29 additions & 51 deletions

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1866,26 +1866,9 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
              unsigned long max_load_move, struct sched_domain *sd,
              enum cpu_idle_type idle, int *all_pinned,
-             int *this_best_prio, struct rq_iterator *iterator);
+             int *this_best_prio, struct cfs_rq *busiest_cfs_rq);
 
 
-static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-               unsigned long max_load_move, struct sched_domain *sd,
-               enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
-               struct cfs_rq *cfs_rq)
-{
-       struct rq_iterator cfs_rq_iterator;
-
-       cfs_rq_iterator.start = load_balance_start_fair;
-       cfs_rq_iterator.next = load_balance_next_fair;
-       cfs_rq_iterator.arg = cfs_rq;
-
-       return balance_tasks(this_rq, this_cpu, busiest,
-                      max_load_move, sd, idle, all_pinned,
-                      this_best_prio, &cfs_rq_iterator);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
@@ -1915,9 +1898,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                rem_load = (u64)rem_load_move * busiest_weight;
                rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-               moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
+               moved_load = balance_tasks(this_rq, this_cpu, busiest,
                                rem_load, sd, idle, all_pinned, this_best_prio,
-                               tg->cfs_rq[busiest_cpu]);
+                               busiest_cfs_rq);
 
                if (!moved_load)
                        continue;
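
A note on the context lines above: in the group-scheduling path, each group's slice of the load to move is scaled as rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1), where the "+ 1" presumably just keeps the divisor nonzero for an unloaded group. A quick standalone illustration with made-up numbers (not kernel code):

/* Made-up numbers, illustration only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t rem_load_move  = 2048; /* total load left to move        */
        uint64_t busiest_weight = 512;  /* group's weight on busiest cpu  */
        uint64_t busiest_h_load = 1024; /* group's hierarchical load      */

        /* The kernel uses div_u64() here; plain / is fine in user space. */
        uint64_t rem_load = rem_load_move * busiest_weight
                                / (busiest_h_load + 1);

        printf("rem_load = %llu\n", (unsigned long long)rem_load); /* 1023 */
        return 0;
}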
@@ -1940,7 +1923,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
 {
-       return __load_balance_fair(this_rq, this_cpu, busiest,
+       return balance_tasks(this_rq, this_cpu, busiest,
                        max_load_move, sd, idle, all_pinned,
                        this_best_prio, &busiest->cfs);
 }
@@ -2050,53 +2033,48 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
              unsigned long max_load_move, struct sched_domain *sd,
              enum cpu_idle_type idle, int *all_pinned,
-             int *this_best_prio, struct rq_iterator *iterator)
+             int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
        int loops = 0, pulled = 0, pinned = 0;
-       struct task_struct *p;
        long rem_load_move = max_load_move;
+       struct task_struct *p, *n;
 
        if (max_load_move == 0)
                goto out;
 
        pinned = 1;
 
-       /*
-        * Start the load-balancing iterator:
-        */
-       p = iterator->start(iterator->arg);
-next:
-       if (!p || loops++ > sysctl_sched_nr_migrate)
-               goto out;
+       list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+               if (loops++ > sysctl_sched_nr_migrate)
+                       break;
 
-       if ((p->se.load.weight >> 1) > rem_load_move ||
-           !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
-               p = iterator->next(iterator->arg);
-               goto next;
-       }
+               if ((p->se.load.weight >> 1) > rem_load_move ||
+                   !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+                       continue;
 
-       pull_task(busiest, p, this_rq, this_cpu);
-       pulled++;
-       rem_load_move -= p->se.load.weight;
+               pull_task(busiest, p, this_rq, this_cpu);
+               pulled++;
+               rem_load_move -= p->se.load.weight;
 
 #ifdef CONFIG_PREEMPT
-       /*
-        * NEWIDLE balancing is a source of latency, so preemptible kernels
-        * will stop after the first task is pulled to minimize the critical
-        * section.
-        */
-       if (idle == CPU_NEWLY_IDLE)
-               goto out;
+               /*
+                * NEWIDLE balancing is a source of latency, so preemptible
+                * kernels will stop after the first task is pulled to minimize
+                * the critical section.
+                */
+               if (idle == CPU_NEWLY_IDLE)
+                       break;
 #endif
 
-       /*
-        * We only want to steal up to the prescribed amount of weighted load.
-        */
-       if (rem_load_move > 0) {
-               if (p->prio < *this_best_prio)
-                       *this_best_prio = p->prio;
-               p = iterator->next(iterator->arg);
-               goto next;
-       }
+               /*
+                * We only want to steal up to the prescribed amount of
+                * weighted load.
+                */
+               if (rem_load_move <= 0)
+                       break;
+
+               if (p->prio < *this_best_prio)
+                       *this_best_prio = p->prio;
+       }
 out:
        /*
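
One detail of the new loop is worth spelling out: pull_task() dequeues p from the busiest runqueue, which unlinks it from the very list being walked, so iterating with a plain "p = next entry" after the body would follow a stale pointer. list_for_each_entry_safe() avoids that by caching the successor in n before the body runs. A standalone sketch of the same hazard and fix (user-space, invented names, not the kernel's list.h):

/*
 * Illustration only: why the _safe iteration variant is needed when the
 * loop body can unlink the current node (as pull_task() does above).
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

int main(void)
{
        struct node *head = NULL, *p, *n;
        struct node **prev = &head;

        for (int i = 3; i >= 1; i--) {  /* build the list 1 -> 2 -> 3 */
                p = malloc(sizeof(*p));
                p->val = i;
                p->next = head;
                head = p;
        }

        /*
         * "Safe" walk: remember the successor before the body may free p.
         * Advancing via p->next after free(p) would be a use-after-free.
         */
        for (p = head; p; p = n) {
                n = p->next;
                if (p->val == 2) {      /* "migrate" node 2 off the list */
                        *prev = n;
                        free(p);
                        continue;
                }
                prev = &p->next;
        }

        for (p = head; p; p = p->next)
                printf("%d ", p->val);  /* prints: 1 3 */
        printf("\n");
        return 0;
}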
