Skip to content

Commit

Permalink
sched,rt: Remove return value from pull_rt_task()
Browse files Browse the repository at this point in the history
In order to be able to use pull_rt_task() from a callback, we need to
do away with the return value.

Since the return value indicates if we should reschedule, do this
inside the function. Since not all callers currently do this, this can
increase the number of reschedules due to rt balancing.

Too many reschedules are not a correctness issue; too few are.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  • Loading branch information
Peter Zijlstra authored and Thomas Gleixner committed Jun 18, 2015
1 parent 4c9a4bc commit 8046d68
Showing 1 changed file with 11 additions and 11 deletions.
22 changes: 11 additions & 11 deletions kernel/sched/rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)

#ifdef CONFIG_SMP

static int pull_rt_task(struct rq *this_rq);
static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
Expand Down Expand Up @@ -415,9 +415,8 @@ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
return false;
}

static inline int pull_rt_task(struct rq *this_rq)
static inline void pull_rt_task(struct rq *this_rq)
{
return 0;
}

static inline void queue_push_tasks(struct rq *rq)
Expand Down Expand Up @@ -1955,14 +1954,15 @@ static void push_irq_work_func(struct irq_work *work)
}
#endif /* HAVE_RT_PUSH_IPI */

static int pull_rt_task(struct rq *this_rq)
static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, ret = 0, cpu;
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
struct task_struct *p;
struct rq *src_rq;

if (likely(!rt_overloaded(this_rq)))
return 0;
return;

/*
* Match the barrier from rt_set_overloaded; this guarantees that if we
Expand All @@ -1973,7 +1973,7 @@ static int pull_rt_task(struct rq *this_rq)
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return 0;
return;
}
#endif

Expand Down Expand Up @@ -2026,7 +2026,7 @@ static int pull_rt_task(struct rq *this_rq)
if (p->prio < src_rq->curr->prio)
goto skip;

ret = 1;
resched = true;

deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
Expand All @@ -2042,7 +2042,8 @@ static int pull_rt_task(struct rq *this_rq)
double_unlock_balance(this_rq, src_rq);
}

return ret;
if (resched)
resched_curr(this_rq);
}

/*
Expand Down Expand Up @@ -2138,8 +2139,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;

if (pull_rt_task(rq))
resched_curr(rq);
pull_rt_task(rq);
}

void __init init_sched_rt_class(void)
Expand Down

0 comments on commit 8046d68

Please sign in to comment.