From 873340373d5664e038a1442a0c1a0d3aba882f90 Mon Sep 17 00:00:00 2001
From: Gregory Haskins
Date: Mon, 29 Dec 2008 09:39:50 -0500
Subject: [PATCH]

--- yaml ---
r: 135675
b: refs/heads/master
c: 7e96fa5875d4a9be18d74d3ca7b90518d05bc426
h: refs/heads/master
i:
  135673: dbdca65167d633495493bed10de89cf6f1f9c01c
  135671: ac47e730d3b9c3c933001e60f6fdece4db2d7df9
v: v3
---
 [refs]               |  2 +-
 trunk/kernel/sched.c | 18 +++++++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 3690f9484932..49f64c125c44 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 777c2f389e463428fd7e2871051a84d7fe84b172
+refs/heads/master: 7e96fa5875d4a9be18d74d3ca7b90518d05bc426

diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 7729f9a45a8b..94d9a6c5ff94 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2984,6 +2984,16 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	pulled++;
 	rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * NEWIDLE balancing is a source of latency, so preemptible kernels
+	 * will stop after the first task is pulled to minimize the critical
+	 * section.
+	 */
+	if (idle == CPU_NEWLY_IDLE)
+		goto out;
+#endif
+
 	/*
 	 * We only want to steal up to the prescribed amount of weighted load.
 	 */
@@ -3030,9 +3040,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 				sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
-
+#endif
 	} while (class && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
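
Note on the change above: the hunks make NEWIDLE (newly-idle) balancing pull at most one task per pass when CONFIG_PREEMPT is set, so the time spent inside the balancing critical section, with runqueue locks held, stays short. The stand-alone C sketch below only illustrates that control flow; it is not kernel code, and the names toy_rq, pull_tasks() and TOY_PREEMPT are invented stand-ins for the real rq, balance_tasks()/move_tasks() and CONFIG_PREEMPT.

/*
 * Stand-alone sketch, not kernel code: toy_rq, pull_tasks() and
 * TOY_PREEMPT are invented stand-ins for rq, balance_tasks()/move_tasks()
 * and CONFIG_PREEMPT, used only to show the control flow of the patch.
 */
#include <stdio.h>

#define TOY_PREEMPT 1			/* stand-in for CONFIG_PREEMPT */

enum toy_idle { TOY_NOT_IDLE, TOY_NEWLY_IDLE };

struct toy_rq {
	int nr_running;			/* tasks queued on this runqueue */
	int load;			/* summed task weight, arbitrary units */
};

/*
 * Pull tasks (weight 1 each) from busiest into this_rq until max_load
 * has been moved.  With TOY_PREEMPT, a newly-idle CPU stops after the
 * first pull so the simulated "locks held" section stays short.
 */
static int pull_tasks(struct toy_rq *this_rq, struct toy_rq *busiest,
		      int max_load, enum toy_idle idle)
{
	int moved = 0;

	while (busiest->nr_running > 0 && moved < max_load) {
		busiest->nr_running--;
		busiest->load--;
		this_rq->nr_running++;
		this_rq->load++;
		moved++;

#if TOY_PREEMPT
		if (idle == TOY_NEWLY_IDLE)
			break;		/* mirrors the "goto out" above */
#endif
	}
	return moved;
}

int main(void)
{
	struct toy_rq this_rq = { 0, 0 };
	struct toy_rq busiest = { 8, 8 };
	int moved = pull_tasks(&this_rq, &busiest, 4, TOY_NEWLY_IDLE);

	printf("moved %d task(s); this_rq: %d running, busiest: %d running\n",
	       moved, this_rq.nr_running, busiest.nr_running);
	return 0;
}

The trade-off this models is the one named in the patch comments: a non-preemptible build keeps pulling until the prescribed amount of load has moved, while a preemptible build accepts a less complete balance in exchange for a bounded critical section.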