From edbee93b51e4eecca59ce8ed8adbd26023648943 Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Tue, 8 Nov 2005 21:38:58 -0800
Subject: [PATCH]

--- yaml ---
r: 13462
b: refs/heads/master
c: 3b0bd9bc6f3b8a47853d1b1de4520de3878e8941
h: refs/heads/master
v: v3
---
 [refs]               |  2 +-
 trunk/kernel/sched.c | 41 +++++++++++++++++++++++------------------
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/[refs] b/[refs]
index a3525771d33e..270fc70ef43c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dad1c65c8000f4485d8602e1875ded77e0d72133
+refs/heads/master: 3b0bd9bc6f3b8a47853d1b1de4520de3878e8941
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index d9dbf8ee6ca4..ec9ea9119b98 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (idle == NOT_IDLE || rq->nr_running > 1)
 		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing the load is biased by
+		 * priority to create 'nice' support across cpus. When
+		 * idle rebalancing we should only bias the source_load if
+		 * there is more than one task running on that queue to
+		 * prevent idle rebalance from trying to pull tasks from a
+		 * queue with only one running task.
 		 */
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
+		source_load *= rq->prio_bias;
 
-	if (type == 0)
-		return load_now;
-
-	return min(cpu_load, load_now);
+	return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
@@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		return load_now;
+		target_load = load_now;
+	else
+		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE) {
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
-	return max(cpu_load, load_now);
+	if (idle == NOT_IDLE || rq->nr_running > 1)
+		target_load *= rq->prio_bias;
+
+	return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)
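
The following is a small standalone C sketch (not kernel code) of the post-patch
biasing rule in __source_load(). The struct rq_model, source_load_model(), the
min_ul() helper and the SCHED_LOAD_SCALE value of 128 are illustrative
assumptions for this example, not the kernel's actual definitions; it only
demonstrates what the new comment describes: the priority bias is applied when
busy rebalancing, or when the source queue has more than one runnable task, so
an idle rebalance is not tempted to pull the only task off another CPU.

/*
 * Standalone model of the patched __source_load() biasing logic.
 * Names mirror the kernel's but the types, the enum ordering and
 * SCHED_LOAD_SCALE = 128 are simplifying assumptions for this sketch.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

enum idle_type { NOT_IDLE, NEWLY_IDLE, SCHED_IDLE }; /* order arbitrary here */

struct rq_model {
	unsigned long nr_running;   /* tasks currently on the queue */
	unsigned long cpu_load[3];  /* decayed load history, index 0..2 */
	unsigned long prio_bias;    /* per-queue priority ('nice') bias */
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Post-patch shape: pick the raw load first, then apply the priority
 * bias only when busy rebalancing, or when the queue has more than one
 * runnable task, so an idle rebalance never tries to pull the sole task
 * off another CPU. */
static unsigned long source_load_model(const struct rq_model *rq, int type,
				       enum idle_type idle)
{
	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
	unsigned long load;

	if (type == 0)
		load = load_now;
	else
		load = min_ul(rq->cpu_load[type - 1], load_now);

	if (idle == NOT_IDLE || rq->nr_running > 1)
		load *= rq->prio_bias;

	return load;
}

int main(void)
{
	/* One niced task on the queue: idle rebalance sees the unbiased
	 * load, busy rebalance still sees the biased one. */
	struct rq_model rq = { .nr_running = 1,
			       .cpu_load = { 128, 128, 128 },
			       .prio_bias = 2 };

	printf("idle rebalance, 1 task : %lu\n",
	       source_load_model(&rq, 1, NEWLY_IDLE)); /* 128: no bias */
	printf("busy rebalance, 1 task : %lu\n",
	       source_load_model(&rq, 1, NOT_IDLE));   /* 256: biased  */

	rq.nr_running = 2;
	printf("idle rebalance, 2 tasks: %lu\n",
	       source_load_model(&rq, 1, NEWLY_IDLE)); /* 256: biased again */
	return 0;
}

With the values above the three lines print 128, 256 and 256: only the
single-task idle-rebalance case escapes the bias, which is exactly the case the
patch stops from looking artificially heavy. __target_load() follows the same
pattern with max() in place of min().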