Commit

---
r: 97430
b: refs/heads/master
c: b3137bc
h: refs/heads/master
v: v3
Mike Galbraith authored and Ingo Molnar committed May 29, 2008
1 parent b5c9d15 commit 945c8ee
Showing 2 changed files with 15 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a381759d6ad5c5dea5a981918e0b4493e9b66ac7
+refs/heads/master: b3137bc8e77962a8e3b4dfdc1bcfd38e437bd278
25 changes: 14 additions & 11 deletions trunk/kernel/sched_fair.c
@@ -996,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	struct task_struct *curr = this_rq->curr;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	int balanced;
 
-	if (!(this_sd->flags & SD_WAKE_AFFINE))
+	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync)
+		tl -= current->se.load.weight;
+
+	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced && curr->sched_class == &fair_sched_class) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 				p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
@@ -1014,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current CPU:
-	 */
-	if (sync)
-		tl -= current->se.load.weight;
-
 	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-			100*(tl + p->se.load.weight) <= imbalance*load) {
+			balanced) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
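
The hunks above boil down to one predicate: a wakeup is treated as affine (the woken task p is pulled to the waking CPU) only if doing so keeps this CPU's load within the domain's imbalance limit, where a sync wakeup may first discount the waker's own weight because it is expected to sleep immediately. Below is a minimal user-space sketch of that predicate, for illustration only; the names wake_affine_balanced, this_load, prev_load, task_weight, sync_weight and imbalance_pct are assumptions made for this example, not the kernel's identifiers.

#include <stdio.h>

/*
 * Sketch of the "balanced" check factored out by the patch:
 *     100 * (tl + p->se.load.weight) <= imbalance * load
 * where tl has already been reduced by the waker's weight on a sync wakeup.
 * All parameter names here are illustrative, not kernel variables.
 */
static int wake_affine_balanced(unsigned long this_load,
                                unsigned long prev_load,
                                unsigned long task_weight,
                                unsigned long sync_weight,
                                unsigned int imbalance_pct)
{
        /* sync wakeup: the waker is about to sleep, so discount its weight */
        if (sync_weight)
                this_load -= sync_weight;

        return 100 * (this_load + task_weight) <= imbalance_pct * prev_load;
}

int main(void)
{
        /* busy waking CPU: pulling the task here would add imbalance -> 0 */
        printf("%d\n", wake_affine_balanced(2048, 1024, 1024, 0, 125));
        /* sync wakeup, waker's weight discounted: affine wakeup is fine -> 1 */
        printf("%d\n", wake_affine_balanced(1024, 1024, 1024, 1024, 125));
        return 0;
}

With this check now also gating the avg_overlap fast path (the first sync test in the hunk above) rather than only the later slow path, a sync wakeup can no longer return 1 on the overlap heuristic alone when the waking CPU is already loaded.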
