Skip to content

Commit

Permalink
sched: wake-balance fixes
Browse files Browse the repository at this point in the history
We have logic to detect whether the system has migratable tasks, but we are
not using it when deciding whether to push tasks away.  So we add support
for considering this new information.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Gregory Haskins authored and Ingo Molnar committed Jan 25, 2008
1 parent 6e1254d commit a22d7fc
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 2 deletions.
2 changes: 2 additions & 0 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -346,6 +346,7 @@ struct rt_rq {
unsigned long rt_nr_migratory;
/* highest queued rt task prio */
int highest_prio;
int overloaded;
};

/*
Expand Down Expand Up @@ -6770,6 +6771,7 @@ void __init sched_init(void)
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
rq->rt.highest_prio = MAX_RT_PRIO;
rq->rt.overloaded = 0;
#endif
atomic_set(&rq->nr_iowait, 0);

Expand Down
10 changes: 8 additions & 2 deletions kernel/sched_rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
}
static inline void rt_set_overload(struct rq *rq)
{
rq->rt.overloaded = 1;
cpu_set(rq->cpu, rt_overload_mask);
/*
* Make sure the mask is visible before we set
Expand All @@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
/* the order here really doesn't matter */
atomic_dec(&rto_count);
cpu_clear(rq->cpu, rt_overload_mask);
rq->rt.overloaded = 0;
}

static void update_rt_migration(struct rq *rq)
Expand Down Expand Up @@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)

assert_spin_locked(&rq->lock);

if (!rq->rt.overloaded)
return 0;

next_task = pick_next_highest_task_rt(rq, -1);
if (!next_task)
return 0;
Expand Down Expand Up @@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
* the lock was owned by prev, we need to release it
 * first via finish_lock_switch and then reacquire it here.
*/
if (unlikely(rq->rt.rt_nr_running > 1)) {
if (unlikely(rq->rt.overloaded)) {
spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
Expand All @@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
if (unlikely(rt_task(p)) &&
!task_running(rq, p) &&
(p->prio >= rq->curr->prio))
(p->prio >= rq->rt.highest_prio) &&
rq->rt.overloaded)
push_rt_tasks(rq);
}

Expand Down

0 comments on commit a22d7fc

Please sign in to comment.