Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 158345
b: refs/heads/master
c: 34d76c4
h: refs/heads/master
i:
  158343: 9a552f8
v: v3
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Aug 28, 2009
1 parent f8edffd commit 5febed2
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 22 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: a8af7246c114bfd939e539f9566b872c06f6225c
refs/heads/master: 34d76c41554a05425613d16efebb3069c4c545f0
50 changes: 29 additions & 21 deletions trunk/kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -1515,30 +1515,29 @@ static unsigned long cpu_avg_load_per_task(int cpu)

#ifdef CONFIG_FAIR_GROUP_SCHED

/*
 * Per-cpu scratch area for share redistribution: tg_shares_up()
 * snapshots each cpu's cfs_rq load weight into rq_weight[] so that
 * update_group_shares_cpu() reads the same values it summed.
 * NOTE(review): indexed by cpu id, NR_CPUS entries per cpu — sizable
 * on large NR_CPUS configs; presumably protected by the irq-disabled
 * section in tg_shares_up() (per-cpu, no cross-cpu access) — confirm.
 */
struct update_shares_data {
unsigned long rq_weight[NR_CPUS];
};

static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);

static void __set_se_shares(struct sched_entity *se, unsigned long shares);

/*
* Calculate and set the cpu's group shares.
*/
static void
update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares, unsigned long sd_rq_weight,
unsigned long sd_eff_weight)
static void update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares,
unsigned long sd_rq_weight,
struct update_shares_data *usd)
{
unsigned long rq_weight;
unsigned long shares;
unsigned long shares, rq_weight;
int boost = 0;

if (!tg->se[cpu])
return;

rq_weight = tg->cfs_rq[cpu]->rq_weight;
rq_weight = usd->rq_weight[cpu];
if (!rq_weight) {
boost = 1;
rq_weight = NICE_0_LOAD;
if (sd_rq_weight == sd_eff_weight)
sd_eff_weight += NICE_0_LOAD;
sd_rq_weight = sd_eff_weight;
}

/*
Expand All @@ -1555,6 +1554,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long flags;

spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
spin_unlock_irqrestore(&rq->lock, flags);
Expand All @@ -1568,25 +1568,31 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
*/
static int tg_shares_up(struct task_group *tg, void *data)
{
unsigned long weight, rq_weight = 0, eff_weight = 0;
unsigned long shares = 0;
unsigned long weight, rq_weight = 0, shares = 0;
struct update_shares_data *usd;
struct sched_domain *sd = data;
unsigned long flags;
int i;

if (!tg->se[0])
return 0;

local_irq_save(flags);
usd = &__get_cpu_var(update_shares_data);

for_each_cpu(i, sched_domain_span(sd)) {
weight = tg->cfs_rq[i]->load.weight;
usd->rq_weight[i] = weight;

/*
* If there are currently no tasks on the cpu pretend there
* is one of average load so that when a new task gets to
* run here it will not get delayed by group starvation.
*/
weight = tg->cfs_rq[i]->load.weight;
tg->cfs_rq[i]->rq_weight = weight;
rq_weight += weight;

if (!weight)
weight = NICE_0_LOAD;

eff_weight += weight;
rq_weight += weight;
shares += tg->cfs_rq[i]->shares;
}

Expand All @@ -1597,7 +1603,9 @@ static int tg_shares_up(struct task_group *tg, void *data)
shares = tg->shares;

for_each_cpu(i, sched_domain_span(sd))
update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
update_group_shares_cpu(tg, i, shares, rq_weight, usd);

local_irq_restore(flags);

return 0;
}
Expand Down

0 comments on commit 5febed2

Please sign in to comment.