From ac90ebfb20fa3eb4edcf2ad57d73e533468e2f81 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 25 Jun 2005 14:57:30 -0700
Subject: [PATCH]

--- yaml ---
r: 3301
b: refs/heads/master
c: 77391d71681d05d2f4502f91ad62618522abf624
h: refs/heads/master
i:
  3299: b1e50c9c3c2dd3c373090cd667e083603e21f8dd
v: v3
---
 [refs]               |  2 +-
 trunk/kernel/sched.c | 11 +++++++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 29f72ab9def9..f0919f1c67a8 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 476d139c218e44e045e4bc6d4cc02b010b343939
+refs/heads/master: 77391d71681d05d2f4502f91ad62618522abf624
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 579da278e72f..6e452eb95ac3 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2030,6 +2030,12 @@ static runqueue_t *find_busiest_queue(struct sched_group *group)
 	return busiest;
 }
 
+/*
+ * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
+ * so long as it is large enough.
+ */
+#define MAX_PINNED_INTERVAL 512
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2042,7 +2048,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	struct sched_group *group;
 	runqueue_t *busiest;
 	unsigned long imbalance;
-	int nr_moved, all_pinned;
+	int nr_moved, all_pinned = 0;
 	int active_balance = 0;
 
 	spin_lock(&this_rq->lock);
@@ -2133,7 +2139,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 
 	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
-	if (sd->balance_interval < sd->max_interval)
+	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 
 	return 0;
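
Below is a minimal, self-contained sketch of the backoff behaviour the final hunk introduces: when load_balance() finds that every movable task was pinned to its CPU (all_pinned), the rebalance interval may keep doubling past the domain's normal max_interval, but only up to MAX_PINNED_INTERVAL. The struct, the function name and the driver in main() are invented for illustration; only the doubling condition and the names balance_interval, max_interval, all_pinned and MAX_PINNED_INTERVAL come from the patch itself.

#include <stdio.h>

/* Cap on the backoff when every candidate task was CPU-pinned
 * (mirrors the patch's MAX_PINNED_INTERVAL of 512). */
#define MAX_PINNED_INTERVAL 512

/* Simplified stand-in for the few sched_domain fields the hunk touches;
 * not the kernel's actual struct sched_domain. */
struct fake_domain {
	unsigned long balance_interval;	/* current rebalance interval */
	unsigned long max_interval;	/* normal upper bound for the interval */
};

/* Grow the interval the way the patched branch does: with all_pinned set
 * it may back off up to MAX_PINNED_INTERVAL, otherwise only up to the
 * domain's usual max_interval. */
static void tune_balance_interval(struct fake_domain *sd, int all_pinned)
{
	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
	    (sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;
}

int main(void)
{
	struct fake_domain sd = { .balance_interval = 64, .max_interval = 128 };

	/* With all_pinned set, the interval doubles past max_interval
	 * until it reaches MAX_PINNED_INTERVAL, then stops growing:
	 * 128, 256, 512, 512, 512. */
	for (int i = 0; i < 5; i++) {
		tune_balance_interval(&sd, 1);
		printf("interval = %lu\n", sd.balance_interval);
	}
	return 0;
}

Doubling with a separate, higher cap for the all-pinned case means a runqueue full of pinned tasks stops being rescanned at the usual short interval, while ordinary domains still respect max_interval.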