Commit 47f4cc6
---
yaml
---
r: 93405
b: refs/heads/master
c: 3f5087a
h: refs/heads/master
i:
  93403: 715c6d2
v: v3
Peter Zijlstra authored and Ingo Molnar committed Apr 24, 2008
1 parent 30d193c commit 47f4cc6
Showing 2 changed files with 3 additions and 46 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 126e01bf92dfc5f0ba91e88be02c473e1506d7d9
+refs/heads/master: 3f5087a2bae5d1ce10a3d698dec8f879a96f5419
trunk/kernel/sched.c (47 changes: 2 additions & 45 deletions)
@@ -1656,42 +1656,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
         aggregate(tg, sd)->task_weight = task_weight;
 }
 
-/*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-        int i, max_cpu = smp_processor_id();
-        unsigned long rq_weight = 0;
-        unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-        for_each_possible_cpu(i)
-                rq_weight += tg->cfs_rq[i]->load.weight;
-
-        for_each_possible_cpu(i) {
-                /*
-                 * divide shares proportional to the rq_weights.
-                 */
-                shares = tg->shares * tg->cfs_rq[i]->load.weight;
-                shares /= rq_weight + 1;
-
-                tg->cfs_rq[i]->shares = shares;
-
-                if (shares > max_shares) {
-                        max_shares = shares;
-                        max_cpu = i;
-                }
-                shares_rem -= shares;
-        }
-
-        /*
-         * Ensure it all adds up to tg->shares; we can loose a few
-         * due to rounding down when computing the per-cpu shares.
-         */
-        if (shares_rem)
-                tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
 /*
  * Compute the weight of this group on the given cpus.
  */
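For reference, the block deleted above split tg->shares across the group's per-cpu cfs_rq[]s in proportion to each runqueue's load and handed the integer-division remainder to the CPU that received the largest slice. The following is a minimal user-space sketch of that arithmetic with made-up weights and names (NCPUS, rq_weight[], total_shares); it is not kernel code.

```c
/*
 * Standalone sketch of the arithmetic in the removed
 * __aggregate_redistribute_shares(): split a group's shares across
 * CPUs in proportion to per-CPU load, then give the rounding
 * remainder to the CPU with the largest slice.  All names and
 * values here are illustrative only.
 */
#include <stdio.h>

#define NCPUS 4

int main(void)
{
    unsigned long rq_weight[NCPUS] = { 1024, 2048, 512, 3072 };
    unsigned long total_shares = 1024;  /* plays the role of tg->shares */
    unsigned long shares[NCPUS];
    unsigned long sum_weight = 0, rem = total_shares, max_shares = 0;
    int i, max_cpu = 0;

    for (i = 0; i < NCPUS; i++)
        sum_weight += rq_weight[i];

    for (i = 0; i < NCPUS; i++) {
        /* proportional split; the +1 guards against dividing by zero */
        shares[i] = total_shares * rq_weight[i] / (sum_weight + 1);
        rem -= shares[i];
        if (shares[i] > max_shares) {
            max_shares = shares[i];
            max_cpu = i;
        }
    }

    /* floor division lost 'rem' shares; hand them to the busiest CPU */
    shares[max_cpu] += rem;

    for (i = 0; i < NCPUS; i++)
        printf("cpu%d: %lu shares\n", i, shares[i]);
    return 0;
}
```

With these sample weights the floor divisions drop 2 of the 1024 shares, and they end up on cpu3, mirroring the shares_rem fix-up at the end of the deleted function.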
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
         unsigned long shares = 0;
         int i;
 
-again:
         for_each_cpu_mask(i, sd->span)
                 shares += tg->cfs_rq[i]->shares;
 
-        /*
-         * When the span doesn't have any shares assigned, but does have
-         * tasks to run do a machine wide rebalance (should be rare).
-         */
-        if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-                __aggregate_redistribute_shares(tg);
-                goto again;
-        }
+        if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+                shares = tg->shares;
 
         aggregate(tg, sd)->shares = shares;
 }
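The two added lines replace the rebalance-and-retry loop with a plain clamp: when the domain's runqueues carry weight but no shares have been assigned yet, or when the per-cpu sum overshoots the group total, the aggregate falls back to tg->shares. Below is a standalone sketch of that condition with a hypothetical helper and made-up values; it is not the kernel function.

```c
/*
 * Sketch of the new clamp in aggregate_group_shares(): if the sched
 * domain's runqueues report no shares yet but do carry weight, or if
 * the summed per-CPU shares overshoot the group total, fall back to
 * the group total.  Helper name and values are illustrative only.
 */
#include <stdio.h>

static unsigned long clamp_group_shares(unsigned long sum_shares,
                                        unsigned long rq_weight,
                                        unsigned long tg_shares)
{
    if ((!sum_shares && rq_weight) || sum_shares > tg_shares)
        return tg_shares;
    return sum_shares;
}

int main(void)
{
    /* shares not distributed yet, but tasks queued: use the group total */
    printf("%lu\n", clamp_group_shares(0, 2048, 1024));    /* prints 1024 */
    /* ordinary case: keep the summed value */
    printf("%lu\n", clamp_group_shares(600, 2048, 1024));  /* prints 600 */
    /* sum overshoots the group total: clamp it */
    printf("%lu\n", clamp_group_shares(1500, 2048, 1024)); /* prints 1024 */
    return 0;
}
```

Compared with the deleted path, a span with queued tasks but no assigned shares now simply charges the full tg->shares instead of doing a machine-wide redistribution and retrying.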
