
Commit eac3ce1

---
r: 257172
b: refs/heads/master
c: 9763b67
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jul 21, 2011
1 parent e49d5d4 commit eac3ce1
Showing 3 changed files with 36 additions and 38 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9598c82dcacadc3b9daa8170613fd054c6124d30
+refs/heads/master: 9763b67fb9f3050c6da739105888327587c30c4d
32 changes: 0 additions & 32 deletions trunk/kernel/sched.c
@@ -1568,38 +1568,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return rq->avg_load_per_task;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
 #ifdef CONFIG_PREEMPT
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
40 changes: 35 additions & 5 deletions trunk/kernel/sched_fair.c
@@ -2232,26 +2232,56 @@ static void update_shares(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 	rcu_read_lock();
+	/*
+	 * Iterates the task_group tree in a bottom up fashion, see
+	 * list_add_leaf_cfs_rq() for details.
+	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq)
 		update_shares_cpu(cfs_rq->tg, cpu);
 	rcu_read_unlock();
 }
 
+/*
+ * Compute the cpu's hierarchical load factor for each task group.
+ * This needs to be done in a top-down fashion because the load of a child
+ * group is a fraction of its parents load.
+ */
+static int tg_load_down(struct task_group *tg, void *data)
+{
+	unsigned long load;
+	long cpu = (long)data;
+
+	if (!tg->parent) {
+		load = cpu_rq(cpu)->load.weight;
+	} else {
+		load = tg->parent->cfs_rq[cpu]->h_load;
+		load *= tg->se[cpu]->load.weight;
+		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+	}
+
+	tg->cfs_rq[cpu]->h_load = load;
+
+	return 0;
+}
+
+static void update_h_load(long cpu)
+{
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned)
 {
 	long rem_load_move = max_load_move;
-	int busiest_cpu = cpu_of(busiest);
-	struct task_group *tg;
+	struct cfs_rq *busiest_cfs_rq;
 
 	rcu_read_lock();
-	update_h_load(busiest_cpu);
+	update_h_load(cpu_of(busiest));
 
-	list_for_each_entry_rcu(tg, &task_groups, list) {
-		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
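As a reading aid only (not part of the commit): the tg_load_down() recursion moved into sched_fair.c above can be modeled in plain userspace C. The sketch below is a minimal, hypothetical example; toy_group, toy_load_down and the concrete weights are made up, assuming the relation h_load(child) = h_load(parent) * se_weight(child) / (cfs_rq_weight(parent) + 1), with the root's h_load taken from the runqueue weight.

/* Toy userspace model of the h_load recursion -- illustrative only. */
#include <stdio.h>

struct toy_group {
	struct toy_group *parent;
	unsigned long se_weight;	/* weight of the group's entity in its parent */
	unsigned long cfs_weight;	/* total weight queued on the group's cfs_rq */
	unsigned long h_load;		/* hierarchical load factor (computed) */
};

/* Parents must be visited before children, i.e. a top-down walk. */
static void toy_load_down(struct toy_group *tg, unsigned long rq_weight)
{
	if (!tg->parent)
		tg->h_load = rq_weight;
	else
		tg->h_load = tg->parent->h_load * tg->se_weight /
			     (tg->parent->cfs_weight + 1);
}

int main(void)
{
	/* hypothetical hierarchy: root -> A -> A1, weights made up */
	struct toy_group root = { NULL,  0,    3072, 0 };
	struct toy_group a    = { &root, 1024, 2048, 0 };
	struct toy_group a1   = { &a,    512,  512,  0 };
	unsigned long rq_weight = 3072;	/* stand-in for cpu_rq(cpu)->load.weight */

	toy_load_down(&root, rq_weight);	/* 3072 */
	toy_load_down(&a, rq_weight);		/* 3072 * 1024 / 3073 == 1023 */
	toy_load_down(&a1, rq_weight);		/* 1023 * 512 / 2049 == 255 */

	printf("root=%lu A=%lu A1=%lu\n", root.h_load, a.h_load, a1.h_load);
	return 0;
}

Visiting parents before children mirrors the top-down traversal that walk_tg_tree() performs when it applies tg_load_down() in the kernel code above.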
