Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 100232
b: refs/heads/master
c: 4be9daa
h: refs/heads/master
v: v3
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent 4c7e183 commit 1dc321f
Show file tree
Hide file tree
Showing 2 changed files with 41 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 42a3ac7d5cee89849448b41b86faeb86f98e92f6
refs/heads/master: 4be9daaa1b33701f011f4117f22dc1e45a3e6e34
49 changes: 40 additions & 9 deletions trunk/kernel/sched_fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -1074,22 +1074,53 @@ static inline int wake_idle(int cpu, struct task_struct *p)
static const struct sched_class fair_sched_class;

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long task_h_load(struct task_struct *p)
static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
{
unsigned long h_load = p->se.load.weight;
struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
struct sched_entity *se = tg->se[cpu];
long wg = wl;

update_h_load(task_cpu(p));
for_each_sched_entity(se) {
#define D(n) (likely(n) ? (n) : 1)

long S, Srw, rw, s, sn;

S = se->my_q->tg->shares;
s = se->my_q->shares;
rw = se->my_q->load.weight;

h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
Srw = S * rw / D(s);
sn = S * (rw + wl) / D(Srw + wg);

wl = sn - s;
wg = 0;
#undef D
}

return h_load;
return wl;
}

/*
 * Effective load delta on p's current CPU if p were dequeued:
 * p's own (negated) weight, run through the group hierarchy.
 */
static unsigned long task_load_sub(struct task_struct *p)
{
	long delta = -(long)p->se.load.weight;

	return effective_load(task_group(p), delta, task_cpu(p));
}

/*
 * Effective load delta on @cpu if p were enqueued there:
 * p's own weight, run through the group hierarchy.
 */
static unsigned long task_load_add(struct task_struct *p, int cpu)
{
	long delta = p->se.load.weight;

	return effective_load(task_group(p), delta, cpu);
}

#else
static unsigned long task_h_load(struct task_struct *p)

static unsigned long task_load_sub(struct task_struct *p)
{
return -p->se.load.weight;
}

/*
 * !CONFIG_FAIR_GROUP_SCHED: the effective load delta of adding @p is
 * simply its own weight; @cpu is unused in this configuration.
 */
static unsigned long task_load_add(struct task_struct *p, int cpu)
{
	unsigned long weight = p->se.load.weight;

	return weight;
}

#endif

static int
Expand All @@ -1112,9 +1143,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
* of the current CPU:
*/
if (sync)
tl -= task_h_load(current);
tl += task_load_sub(current);

balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;

/*
* If the currently running task will sleep within
Expand Down

0 comments on commit 1dc321f

Please sign in to comment.