---
r: 100239
b: refs/heads/master
c: 8337826
h: refs/heads/master
i:
  100237: b1d7c10
  100235: 6a9a261
  100231: 4c7e183
  100223: 9482a58
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent a023f47 commit cd9515b
Showing 3 changed files with 31 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 243e0e7b7d3b54749ece2e879ecd7e2a11874443
+refs/heads/master: 83378269a5fad98f562ebc0f09c349575e6cbfe1
4 changes: 4 additions & 0 deletions trunk/kernel/sched.c
@@ -365,6 +365,10 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#else

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}

#endif /* CONFIG_GROUP_SCHED */

48 changes: 26 additions & 22 deletions trunk/kernel/sched_fair.c
@@ -1074,10 +1074,10 @@ static inline int wake_idle(int cpu, struct task_struct *p)
static const struct sched_class fair_sched_class;

#ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
+static unsigned long effective_load(struct task_group *tg, int cpu,
+		unsigned long wl, unsigned long wg)
{
	struct sched_entity *se = tg->se[cpu];
-	long wg = wl;

	for_each_sched_entity(se) {
#define D(n) (likely(n) ? (n) : 1)
@@ -1092,33 +1092,26 @@ static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
		b = S*rw + s*wg;

		wl = s*(a-b)/D(b);
+		/*
+		 * Assume the group is already running and will
+		 * thus already be accounted for in the weight.
+		 *
+		 * That is, moving shares between CPUs, does not
+		 * alter the group weight.
+		 */
		wg = 0;
#undef D
	}

	return wl;
}

-static unsigned long task_load_sub(struct task_struct *p)
-{
-	return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-	return effective_load(task_group(p), p->se.load.weight, cpu);
-}
-
#else

-static unsigned long task_load_sub(struct task_struct *p)
+static inline unsigned long effective_load(struct task_group *tg, int cpu,
+		unsigned long wl, unsigned long wg)
{
-	return -p->se.load.weight;
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-	return p->se.load.weight;
+	return wl;
}

#endif
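
For orientation, a standalone userspace sketch of the arithmetic this hunk changes: effective_load() now takes the queue-weight delta (wl) and the group-weight delta (wg) as separate arguments, and after the first hierarchy level wg is zeroed because, as the new comment says, a running group is already accounted for in its parents' weight. The struct, helper and sample numbers below are hypothetical scaffolding, not kernel code; S, s and rw stand in for the per-level quantities defined in the lines elided from the hunk above.

/* toy_effective_load.c - illustration only, not kernel code */
#include <stdio.h>

struct level {			/* hypothetical stand-in for one group level */
	long S;			/* group's total shares */
	long s;			/* this CPU's slice of those shares */
	long rw;		/* this CPU's runqueue weight for the group */
};

static long toy_effective_load(const struct level *lv, int depth,
			       long wl, long wg)
{
	for (int i = 0; i < depth; i++) {
		long S = lv[i].S, s = lv[i].s, rw = lv[i].rw;
		long a = S * (rw + wl);		/* as in the hunk: a = S*(rw+wl) */
		long b = S * rw + s * wg;	/* as in the hunk: b = S*rw + s*wg */

		wl = s * (a - b) / (b ? b : 1);	/* wl = s*(a-b)/D(b) */
		wg = 0;	/* parents already carry this group's own weight */
	}
	return wl;
}

int main(void)
{
	/* one level: shares S=1024, this CPU holds s=512, queue weight rw=2048 */
	struct level lv[] = { { 1024, 512, 2048 } };

	/* load change seen one level up when a weight-1024 task is added here */
	printf("delta = %ld\n", toy_effective_load(lv, 1, 1024, 1024));
	return 0;
}

With these made-up numbers the program prints delta = 102: adding 1024 of queue weight on this CPU shows up one level higher as only about a tenth of that, which is the kind of per-level damping the separate wl/wg arguments let the wake_affine() changes below express.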
@@ -1130,8 +1123,10 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
	    unsigned int imbalance)
{
	struct task_struct *curr = this_rq->curr;
+	struct task_group *tg;
	unsigned long tl = this_load;
	unsigned long tl_per_task;
+	unsigned long weight;
	int balanced;

	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
@@ -1142,10 +1137,19 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
-	if (sync)
-		tl += task_load_sub(current);
+	if (sync) {
+		tg = task_group(current);
+		weight = current->se.load.weight;
+
+		tl += effective_load(tg, this_cpu, -weight, -weight);
+		load += effective_load(tg, prev_cpu, 0, -weight);
+	}
+
+	tg = task_group(p);
+	weight = p->se.load.weight;

-	balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
+	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
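
Read as arithmetic, the new test above compares this CPU's load, with the sync waker's weight removed and the wakee's weight added through effective_load(), against prev_cpu's load scaled by the domain's imbalance percentage. Below is a minimal sketch of that predicate using a flat stand-in modelled on the !CONFIG_FAIR_GROUP_SCHED stub (which simply returns wl); the function names and sample numbers are hypothetical, not kernel code.

/* toy_wake_affine.c - illustration only, not kernel code */
#include <stdbool.h>
#include <stdio.h>

/* Flat stand-in, mirroring the !CONFIG_FAIR_GROUP_SCHED stub above. */
static unsigned long flat_effective_load(unsigned long wl, unsigned long wg)
{
	(void)wg;
	return wl;
}

static bool toy_wake_affine(unsigned long this_load, unsigned long prev_load,
			    unsigned long curr_weight, unsigned long p_weight,
			    unsigned int imbalance, bool sync)
{
	unsigned long tl = this_load;

	if (sync) {
		/* negative deltas travel through unsigned arithmetic,
		 * as in the hunk above */
		tl += flat_effective_load(-curr_weight, -curr_weight);
		prev_load += flat_effective_load(0, -curr_weight);
	}

	return 100 * (tl + flat_effective_load(p_weight, p_weight)) <=
		imbalance * (prev_load + flat_effective_load(0, p_weight));
}

int main(void)
{
	/* this CPU: load 3072 incl. a 1024-weight waker doing a sync wakeup;
	 * prev CPU: load 3072; wakee weight 1024; imbalance threshold 125% */
	printf("affine wakeup allowed: %d\n",
	       toy_wake_affine(3072, 3072, 1024, 1024, 125, true));
	return 0;
}

With these numbers it prints 1: after discounting the sync waker, this CPU plus the wakee (3072) still fits within 125% of prev_cpu's load, so pulling the task to this CPU counts as balanced.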
