sched: hierarchical load vs affine wakeups
With hierarchical grouping we can't just compare task weight to rq weight - we
need to scale the weight appropriately.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent a8a51d5 commit bb3469a
Showing 1 changed file with 21 additions and 2 deletions.
kernel/sched_fair.c: 21 additions & 2 deletions

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1073,6 +1073,25 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 static const struct sched_class fair_sched_class;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long task_h_load(struct task_struct *p)
+{
+	unsigned long h_load = p->se.load.weight;
+	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+
+	update_h_load(task_cpu(p));
+
+	h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+
+	return h_load;
+}
+#else
+static unsigned long task_h_load(struct task_struct *p)
+{
+	return p->se.load.weight;
+}
+#endif
+
 static int
 wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
@@ -1093,9 +1112,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * of the current CPU:
 	 */
 	if (sync)
-		tl -= current->se.load.weight;
+		tl -= task_h_load(current);
 
-	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+	balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
 
 	/*
 	 * If the currently running task will sleep within
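For readers of the diff: calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load) effectively evaluates h_load * cfs_rq->h_load / cfs_rq->load.weight in the scheduler's fixed-point arithmetic, so a task's contribution to the wake_affine() comparison becomes its own weight scaled by the share of hierarchical load its group's cfs_rq carries. Below is a minimal sketch of that scaling; task_h_load_sketch and its parameter names are illustrative only, not part of the patch, and it omits the inverse-weight and overflow handling the real helper performs.

/*
 * Illustrative only: the arithmetic task_h_load() delegates to
 * calc_delta_mine(), without the fixed-point inverse-weight and
 * overflow handling of the real kernel helper.
 *
 *	h_load(p) ~= weight(p) * h_load(group cfs_rq) / weight(group cfs_rq)
 */
static unsigned long task_h_load_sketch(unsigned long task_weight,
					unsigned long group_h_load,
					unsigned long group_rq_weight)
{
	/* Fall back to the raw task weight if the group carries no load. */
	if (!group_rq_weight)
		return task_weight;

	return task_weight * group_h_load / group_rq_weight;
}

/*
 * Worked example: a nice-0 task (weight 1024) in a group whose cfs_rq
 * holds 4096 of weight but is entitled to only 512 of hierarchical load
 * contributes 1024 * 512 / 4096 = 128 to the wake_affine() balance test,
 * rather than the flat 1024 that "tl -= current->se.load.weight" used.
 */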
