sched: Update tg->shares after cpu.shares write
Formerly, sched_group_set_shares() would force a rebalance by
overflowing domain share sums.  Now that per-cpu averages are
maintained, we can set the true weight directly by issuing an
update_cfs_shares() after a tg->shares update (a toy sketch of this
path follows the file summary below).

Also initialize the group entity's se->load to 0 for consistency,
since the correct weight is now set on enqueue.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.465521344@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Paul Turner authored and Ingo Molnar committed Nov 18, 2010
1 parent d6b5591 commit 9437178
Showing 1 changed file with 11 additions and 31 deletions.
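To make the change concrete before the diff, here is a minimal userspace sketch of the new path: a shares write walks each per-CPU group entity up the hierarchy, recomputing its effective weight from that CPU's fraction of the group load, instead of forcing a rebalance. This is a toy model, not kernel code: struct entity, update_shares(), set_group_shares(), and the load fields are illustrative stand-ins for the kernel's sched_entity, update_cfs_shares(), sched_group_set_shares(), and load-tracking state.

/*
 * Toy model of the new update path (illustrative names throughout):
 * update_shares() plays the role of update_cfs_shares(), deriving an
 * effective per-CPU weight from the group's configured shares scaled
 * by this CPU's fraction of the group load; set_group_shares() plays
 * the role of the new sched_group_set_shares() loop body for one CPU.
 */
#include <stdio.h>

struct entity {
	struct entity *parent;    /* NULL at the top of the hierarchy */
	unsigned long shares;     /* configured group weight */
	unsigned long cpu_load;   /* this CPU's portion of the group load */
	unsigned long total_load; /* group load summed over all CPUs */
	unsigned long weight;     /* effective per-CPU weight */
};

/* weight = shares * (cpu_load / total_load), guarding the empty case. */
static void update_shares(struct entity *se)
{
	se->weight = se->total_load ?
		se->shares * se->cpu_load / se->total_load : se->shares;
}

/* Set new shares, then recompute every level up to the root. */
static void set_group_shares(struct entity *se, unsigned long shares)
{
	se->shares = shares;
	for (; se; se = se->parent)	/* cf. for_each_sched_entity() */
		update_shares(se);
}

int main(void)
{
	struct entity root = { NULL,  1024, 1, 1, 1024 };
	struct entity grp  = { &root, 1024, 1, 2, 0 };

	set_group_shares(&grp, 2048);
	printf("grp effective weight: %lu\n", grp.weight); /* prints 1024 */
	return 0;
}

The printed weight is half the new shares value because, in this toy setup, the CPU in question carries half of the group's total load.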
kernel/sched.c
@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 		se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
 	se->parent = parent;
 }
 #endif
@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static DEFINE_MUTEX(shares_mutex);
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
 done:

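For reference, the rewritten sched_group_set_shares() above is reached from userspace through the cpu cgroup controller's cpu.shares file. A minimal example of exercising it, assuming a cgroup v1 hierarchy mounted at /sys/fs/cgroup/cpu and an existing group named mygroup (both assumptions about the local setup, not part of the commit):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed cgroup v1 layout; adjust the path for your system. */
	const char *path = "/sys/fs/cgroup/cpu/mygroup/cpu.shares";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* The write reaches sched_group_set_shares() via the cgroup core. */
	fprintf(f, "2048\n");
	fclose(f);
	return EXIT_SUCCESS;
}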