sched: break out search for RT tasks
Isolate the search logic into a function so that it can be used later
in places other than find_lock_lowest_rq().

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
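
The shape of the refactor is easy to model outside the kernel: the search
becomes a function that takes no locks and returns a CPU id (or -1), and the
locking retry loop in find_lock_lowest_rq() becomes its caller, as the diff
below shows. Here is a minimal user-space sketch of that contract; struct
toy_rq, runqueues[], find_lowest_rq_sketch() and the priority values are
made-up stand-ins, not kernel API (in the kernel, a numerically larger prio
means less important work):

#include <stdio.h>

#define NR_CPUS		4
#define MAX_RT_PRIO	100

/* Toy stand-in for per-CPU runqueue state (hypothetical, not kernel API). */
struct toy_rq {
	int cpu;
	int highest_prio;	/* prio of the most important task queued here */
};

static struct toy_rq runqueues[NR_CPUS] = {
	{ 0, 50 }, { 1, MAX_RT_PRIO }, { 2, 70 }, { 3, 60 },
};

/* The broken-out search step: return a suitable CPU, or -1 if none. */
static int find_lowest_rq_sketch(int task_prio)
{
	struct toy_rq *lowest_rq = NULL;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct toy_rq *rq = &runqueues[cpu];

		/* A CPU with no RT task queued is always good enough. */
		if (rq->highest_prio >= MAX_RT_PRIO)
			return cpu;

		/* Otherwise prefer the CPU running the least important work. */
		if (rq->highest_prio > task_prio &&
		    (!lowest_rq || rq->highest_prio > lowest_rq->highest_prio))
			lowest_rq = rq;
	}

	return lowest_rq ? lowest_rq->cpu : -1;
}

int main(void)
{
	/* A task at RT prio 40 asks where it could run. */
	int cpu = find_lowest_rq_sketch(40);

	if (cpu < 0)
		printf("no suitable CPU\n");
	else
		printf("picked cpu %d\n", cpu);
	return 0;
}

Returning a plain CPU id rather than a locked runqueue keeps the search free
of the caller's locking protocol, which is what makes it reusable from places
other than find_lock_lowest_rq(), as the commit message anticipates.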
Gregory Haskins authored and Ingo Molnar committed Jan 25, 2008
1 parent e7693a3 commit 07b4032
kernel/sched_rt.c: 39 additions & 27 deletions
@@ -263,54 +263,66 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-	struct rq *lowest_rq = NULL;
 	int cpu;
-	int tries;
 	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+	struct rq *lowest_rq = NULL;
 
 	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-		/*
-		 * Scan each rq for the lowest prio.
-		 */
-		for_each_cpu_mask(cpu, *cpu_mask) {
-			struct rq *rq = &per_cpu(runqueues, cpu);
+	/*
+	 * Scan each rq for the lowest prio.
+	 */
+	for_each_cpu_mask(cpu, *cpu_mask) {
+		struct rq *rq = cpu_rq(cpu);
 
-			if (cpu == this_rq->cpu)
-				continue;
+		if (cpu == rq->cpu)
+			continue;
 
-			/* We look for lowest RT prio or non-rt CPU */
-			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-				lowest_rq = rq;
-				break;
-			}
+		/* We look for lowest RT prio or non-rt CPU */
+		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+			lowest_rq = rq;
+			break;
+		}
 
-			/* no locking for now */
-			if (rq->rt.highest_prio > task->prio &&
-			    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-				lowest_rq = rq;
-			}
+		/* no locking for now */
+		if (rq->rt.highest_prio > task->prio &&
+		    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+			lowest_rq = rq;
 		}
+	}
+
+	return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+				      struct rq *rq)
+{
+	struct rq *lowest_rq = NULL;
+	int cpu;
+	int tries;
 
-		if (!lowest_rq)
+	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+		cpu = find_lowest_rq(task);
+
+		if (cpu == -1)
 			break;
 
+		lowest_rq = cpu_rq(cpu);
+
 		/* if the prio of this runqueue changed, try again */
-		if (double_lock_balance(this_rq, lowest_rq)) {
+		if (double_lock_balance(rq, lowest_rq)) {
 			/*
 			 * We had to unlock the run queue. In
 			 * the mean time, task could have
 			 * migrated already or had its affinity changed.
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
-			if (unlikely(task_rq(task) != this_rq ||
+			if (unlikely(task_rq(task) != rq ||
 				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-				     task_running(this_rq, task) ||
+				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 				spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
