sched: Fix division by zero - really
When re-computing the shares for each task group's cpu
representation we need the ratio of weight on each cpu vs the
total weight of the sched domain.

Since load-balancing is loosely (read not) synchronized, the
weight of individual cpus can change between doing the sum and
calculating the ratio.

The previous patch dealt with only one of the race scenarios;
this patch side-steps them all by saving a snapshot of all the
individual cpu weights, thereby always working on a consistent
set.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: torvalds@linux-foundation.org
Cc: jes@sgi.com
Cc: jens.axboe@oracle.com
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1251371336.18584.77.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
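
[Editor's note] To make the snapshot idea concrete, here is a minimal
userspace sketch (hypothetical names -- weight[], update_shares_snapshot()
-- not the kernel code): each per-cpu weight is read exactly once into a
local array, so the denominator is by construction the sum of the very
values that later appear in the numerators, and it can never be zero.

/*
 * Illustration only -- a minimal userspace sketch of the snapshot
 * scheme, with hypothetical names; this is not the kernel code.
 * weight[] stands in for the per-cpu runqueue weights, which the
 * real code may see change at any time.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS		4
#define NICE_0_LOAD	1024UL

static unsigned long weight[NR_CPUS];	/* updated concurrently in real life */

static void update_shares_snapshot(unsigned long tg_shares,
				   unsigned long shares[NR_CPUS])
{
	unsigned long snap[NR_CPUS];	/* one consistent set */
	unsigned long sum = 0;
	size_t i;

	for (i = 0; i < NR_CPUS; i++) {
		snap[i] = weight[i];	/* read each weight exactly once */
		/* an idle cpu pretends to carry one average task */
		sum += snap[i] ? snap[i] : NICE_0_LOAD;
	}

	/* sum >= NICE_0_LOAD, so the division below is always safe */
	for (i = 0; i < NR_CPUS; i++) {
		unsigned long w = snap[i] ? snap[i] : NICE_0_LOAD;
		shares[i] = tg_shares * w / sum;
	}
}

int main(void)
{
	unsigned long shares[NR_CPUS];
	size_t i;

	weight[0] = 2 * NICE_0_LOAD;	/* two nice-0 tasks on cpu0 */
	update_shares_snapshot(1024, shares);
	for (i = 0; i < NR_CPUS; i++)
		printf("cpu%zu: %lu\n", i, shares[i]);
	return 0;
}

With weight = {2048, 0, 0, 0} the sum is 2048 + 3*1024 = 5120, giving
shares of 409 for cpu0 and 204 for each idle cpu.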
Peter Zijlstra authored and Ingo Molnar committed Aug 28, 2009
1 parent a8af724 commit 34d76c4
 kernel/sched.c | 50 +++++++++++++++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 21 deletions(-)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1515,30 +1515,29 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
+struct update_shares_data {
+	unsigned long rq_weight[NR_CPUS];
+};
+
+static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
 /*
  * Calculate and set the cpu's group shares.
  */
-static void
-update_group_shares_cpu(struct task_group *tg, int cpu,
-			unsigned long sd_shares, unsigned long sd_rq_weight,
-			unsigned long sd_eff_weight)
+static void update_group_shares_cpu(struct task_group *tg, int cpu,
+				    unsigned long sd_shares,
+				    unsigned long sd_rq_weight,
+				    struct update_shares_data *usd)
 {
-	unsigned long rq_weight;
-	unsigned long shares;
+	unsigned long shares, rq_weight;
 	int boost = 0;
 
 	if (!tg->se[cpu])
 		return;
 
-	rq_weight = tg->cfs_rq[cpu]->rq_weight;
+	rq_weight = usd->rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
-		if (sd_rq_weight == sd_eff_weight)
-			sd_eff_weight += NICE_0_LOAD;
-		sd_rq_weight = sd_eff_weight;
 	}
 
 	/*
@@ -1555,6 +1554,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 		unsigned long flags;
 
 		spin_lock_irqsave(&rq->lock, flags);
+		tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
 		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
 		__set_se_shares(tg->se[cpu], shares);
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -1568,25 +1568,31 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, eff_weight = 0;
-	unsigned long shares = 0;
+	unsigned long weight, rq_weight = 0, shares = 0;
+	struct update_shares_data *usd;
 	struct sched_domain *sd = data;
+	unsigned long flags;
 	int i;
 
 	if (!tg->se[0])
 		return 0;
 
+	local_irq_save(flags);
+	usd = &__get_cpu_var(update_shares_data);
+
 	for_each_cpu(i, sched_domain_span(sd)) {
+		weight = tg->cfs_rq[i]->load.weight;
+		usd->rq_weight[i] = weight;
+
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
 		 * run here it will not get delayed by group starvation.
 		 */
-		weight = tg->cfs_rq[i]->load.weight;
-		tg->cfs_rq[i]->rq_weight = weight;
-		rq_weight += weight;
-
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		eff_weight += weight;
+		rq_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
@@ -1597,7 +1603,9 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+
+	local_irq_restore(flags);
 
 	return 0;
 }
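
[Editor's note] For contrast, a simplified reconstruction of how the
pre-patch scheme could still hit the division by zero -- one plausible
instance of the race the commit message describes, continuing the sketch
above with the same hypothetical weight[], NR_CPUS and NICE_0_LOAD; it is
not the old kernel code verbatim. The weights are read twice, once while
summing and once while dividing, so the two reads can disagree.

/*
 * Simplified reconstruction of the pre-patch scheme (hypothetical
 * names).  If every weight reads as zero in the first loop but one
 * becomes non-zero before the second, the boost path that would
 * substitute a safe denominator is skipped while the sum is still 0.
 */
static void update_shares_racy(unsigned long tg_shares,
			       unsigned long shares[NR_CPUS])
{
	unsigned long rq_sum = 0, eff_sum = 0;
	size_t i;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long w = weight[i];	/* first read */
		rq_sum += w;
		eff_sum += w ? w : NICE_0_LOAD;
	}

	/* a weight can become non-zero right here */

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long w = weight[i];	/* second, independent read */
		unsigned long sum = rq_sum;

		if (!w) {		/* the boost path repairs the sum... */
			w = NICE_0_LOAD;
			sum = eff_sum;
		}
		/* ...but a freshly non-zero w skips it, and if every
		 * weight was zero during the first loop, sum is still
		 * 0 here: division by zero. */
		shares[i] = tg_shares * w / sum;
	}
}

In the patch itself the snapshot lives in a per-cpu update_shares_data
buffer rather than on the stack, presumably because an unsigned
long[NR_CPUS] array can be sizeable on large configurations; the
local_irq_save()/local_irq_restore() pair around the walk then keeps this
cpu's buffer from being reused underneath it.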
