
Commit 04a5fbc

---
r: 121328
b: refs/heads/master
c: ec4e0e2
h: refs/heads/master
v: v3
Ken Chen authored and Ingo Molnar committed Nov 19, 2008
1 parent 6ae45db commit 04a5fbc
Showing 2 changed files with 16 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3ac3ba0b396fd99550e08034b0e4c27fdf39c252
+refs/heads/master: ec4e0e2fe018992d980910db901637c814575914
41 changes: 15 additions & 26 deletions trunk/kernel/sched.c
@@ -1453,35 +1453,21 @@ static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
                         unsigned long sd_shares, unsigned long sd_rq_weight)
 {
-        int boost = 0;
         unsigned long shares;
         unsigned long rq_weight;
 
         if (!tg->se[cpu])
                 return;
 
-        rq_weight = tg->cfs_rq[cpu]->load.weight;
-
-        /*
-         * If there are currently no tasks on the cpu pretend there is one of
-         * average load so that when a new task gets to run here it will not
-         * get delayed by group starvation.
-         */
-        if (!rq_weight) {
-                boost = 1;
-                rq_weight = NICE_0_LOAD;
-        }
-
-        if (unlikely(rq_weight > sd_rq_weight))
-                rq_weight = sd_rq_weight;
+        rq_weight = tg->cfs_rq[cpu]->rq_weight;
 
         /*
          *           \Sum shares * rq_weight
          * shares =  -----------------------
          *               \Sum rq_weight
          *
          */
-        shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+        shares = (sd_shares * rq_weight) / sd_rq_weight;
         shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
         if (abs(shares - tg->se[cpu]->load.weight) >
@@ -1490,11 +1476,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
                 unsigned long flags;
 
                 spin_lock_irqsave(&rq->lock, flags);
-                /*
-                 * record the actual number of shares, not the boosted amount.
-                 */
-                tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-                tg->cfs_rq[cpu]->rq_weight = rq_weight;
+                tg->cfs_rq[cpu]->shares = shares;
 
                 __set_se_shares(tg->se[cpu], shares);
                 spin_unlock_irqrestore(&rq->lock, flags);
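
The formula comment kept in the first hunk is the whole computation: each cpu gets a slice of the group's shares proportional to its runqueue weight, clamped to the allowed range. Below is a minimal user-space sketch of that arithmetic, not kernel code; the MIN_SHARES/MAX_SHARES values and the clamp helper are simplified stand-ins for the kernel's definitions.

/*
 * Minimal user-space sketch, not kernel code: MIN_SHARES, MAX_SHARES
 * and clamp_ul() are simplified stand-ins for the kernel's versions.
 */
#include <stdio.h>

#define MIN_SHARES      2UL
#define MAX_SHARES      (1UL << 18)

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                              unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* shares_i = sd_shares * rq_weight_i / \Sum rq_weight, clamped */
static unsigned long cpu_shares(unsigned long sd_shares,
                                unsigned long rq_weight,
                                unsigned long sd_rq_weight)
{
        return clamp_ul(sd_shares * rq_weight / sd_rq_weight,
                        MIN_SHARES, MAX_SHARES);
}

int main(void)
{
        /* a 1024-share group whose two cpus weigh 2048 and 1024 */
        unsigned long w[2] = { 2048, 1024 };
        unsigned long sum = w[0] + w[1];

        for (int i = 0; i < 2; i++)     /* prints 682, then 341 */
                printf("cpu%d: %lu shares\n", i, cpu_shares(1024, w[i], sum));
        return 0;
}

Worth noting: the old divisor (sd_rq_weight + 1) guarded against dividing by zero. After this patch the caller accumulates at least NICE_0_LOAD per cpu (see the tg_shares_up() hunk below), so the divisor can no longer be zero and the off-by-one bias is dropped.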
@@ -1508,13 +1490,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-        unsigned long rq_weight = 0;
+        unsigned long weight, rq_weight = 0;
         unsigned long shares = 0;
         struct sched_domain *sd = data;
         int i;
 
         for_each_cpu_mask(i, sd->span) {
-                rq_weight += tg->cfs_rq[i]->load.weight;
+                /*
+                 * If there are currently no tasks on the cpu pretend there
+                 * is one of average load so that when a new task gets to
+                 * run here it will not get delayed by group starvation.
+                 */
+                weight = tg->cfs_rq[i]->load.weight;
+                if (!weight)
+                        weight = NICE_0_LOAD;
+
+                tg->cfs_rq[i]->rq_weight = weight;
+                rq_weight += weight;
                 shares += tg->cfs_rq[i]->shares;
         }

@@ -1524,9 +1516,6 @@ static int tg_shares_up(struct task_group *tg, void *data)
         if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
                 shares = tg->shares;
 
-        if (!rq_weight)
-                rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-
         for_each_cpu_mask(i, sd->span)
                 update_group_shares_cpu(tg, i, shares, rq_weight);

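The rewritten loop in tg_shares_up() does two things at once: it boosts an idle cpu to the weight of one average task, and it caches the sampled value in cfs_rq->rq_weight, so update_group_shares_cpu() later redistributes against exactly the weights that produced the sum instead of re-reading load.weight after it may have changed. A sketch of that pass, under the same caveats as above (NICE_0_LOAD's value and the struct layout are simplified stand-ins):

#include <stdio.h>

#define NICE_0_LOAD     1024UL          /* stand-in for the kernel constant */

struct cfs_rq_sketch {
        unsigned long load_weight;      /* live load.weight sample */
        unsigned long rq_weight;        /* cached copy for redistribution */
};

static unsigned long sum_rq_weight(struct cfs_rq_sketch *cfs, int ncpus)
{
        unsigned long rq_weight = 0;

        for (int i = 0; i < ncpus; i++) {
                unsigned long weight = cfs[i].load_weight;

                /* idle cpu: pretend one task of average load is queued */
                if (!weight)
                        weight = NICE_0_LOAD;

                /* cache it so redistribution sees the same number */
                cfs[i].rq_weight = weight;
                rq_weight += weight;
        }
        return rq_weight;       /* >= ncpus * NICE_0_LOAD, never zero */
}

int main(void)
{
        struct cfs_rq_sketch cfs[2] = { { .load_weight = 3072 }, { 0 } };

        /* the idle second cpu is boosted to 1024: prints 4096 */
        printf("sd_rq_weight = %lu\n", sum_rq_weight(cfs, 2));
        return 0;
}

Since every cpu now contributes at least NICE_0_LOAD to the sum, the !rq_weight fallback removed in the last hunk had become unreachable.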
