From a45ad0c2dbce5e26af433e85b424e5ab3cb6e958 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 7 Sep 2009 18:28:05 +0200
Subject: [PATCH]

--- yaml ---
r: 158377
b: refs/heads/master
c: 71a29aa7b600595d0ef373ea605ac656876d1f2f
h: refs/heads/master
i: 158375: 10fb5bc9656b78852fdc5f0c31dff8c1887ca554
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/sched_fair.c | 12 +++++++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index cf42643c75c1..965e9fd6d1a1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cdd2ab3de4301728b20efd6225681d3ff591a938
+refs/heads/master: 71a29aa7b600595d0ef373ea605ac656876d1f2f
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index d7fda41ddaf0..cc97ea498f24 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1262,7 +1262,17 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	tg = task_group(p);
 	weight = p->se.load.weight;
 
-	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+	/*
+	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
+	 * due to the sync cause above having dropped tl to 0, we'll always have
+	 * an imbalance, but there's really nothing you can do about that, so
+	 * that's good too.
+	 *
+	 * Otherwise check if either cpus are near enough in load to allow this
+	 * task to be woken on this_cpu.
+	 */
+	balanced = !tl ||
+		100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
 	/*
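
For readers skimming the hunk above, here is a minimal user-space sketch of the condition the patch introduces. It is illustrative only, not the kernel implementation: effective_load() is reduced to a hypothetical stand-in with group scheduling ignored, and the sched-domain imbalance_pct factor is passed in directly.

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's effective_load(tg, cpu, wl, wg):
 * with group scheduling ignored, a task's load contribution is simply its
 * weight.
 */
static long effective_load(long wl)
{
	return wl;
}

/*
 * Sketch of the new test: tl is this_cpu's load (already adjusted for a
 * sync wakeup), load is prev_cpu's load, weight is the woken task's weight,
 * imbalance is the imbalance percentage (e.g. 125 for 25% slack).
 */
static int wake_balanced(unsigned long tl, unsigned long load,
			 unsigned long weight, unsigned int imbalance)
{
	/*
	 * tl == 0: this_cpu is idle (possibly because the sync hint already
	 * subtracted the waker's weight), so treat the wakeup as balanced
	 * instead of reporting a spurious imbalance.
	 */
	if (!tl)
		return 1;

	/* Otherwise this_cpu plus the task must stay within imbalance% of prev_cpu. */
	return 100 * (tl + effective_load(weight)) <=
	       imbalance * (load + effective_load(0));
}

int main(void)
{
	printf("%d\n", wake_balanced(0, 2048, 1024, 125));    /* 1: this_cpu idle */
	printf("%d\n", wake_balanced(3072, 1024, 1024, 125)); /* 0: this_cpu busier */
	return 0;
}

The first call shows the case the patch targets: before this change, an idle this_cpu (tl dropped to 0 by the sync adjustment) could still fail the percentage comparison and be reported as imbalanced.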