From 24813c909f98b298aed8bde7663cfe79ada9555b Mon Sep 17 00:00:00 2001
From: Gregory Haskins
Date: Fri, 25 Jan 2008 21:08:12 +0100
Subject: [PATCH]

--- yaml ---
r: 76137
b: refs/heads/master
c: a22d7fc187ed996b66d8439db27b2303f79a8e7b
h: refs/heads/master
i:
  76135: 998e08f4c327d3687326e31983133dbd57affce9
v: v3
---
 [refs]                  |  2 +-
 trunk/kernel/sched.c    |  2 ++
 trunk/kernel/sched_rt.c | 10 ++++++++--
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 874ff35eebf9..3b62cb9b62e8 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6e1254d2c41215da27025add8900ed187bca121d
+refs/heads/master: a22d7fc187ed996b66d8439db27b2303f79a8e7b
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 3344ba776b97..c591abd9ca38 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -346,6 +346,7 @@ struct rt_rq {
 	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
+	int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
+		rq->rt.overloaded = 0;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index a9d7d4408160..87d7b3ff3861 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
 