sched: rt: move some code around
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Daniel K." <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jun 20, 2008
1 parent b79f383 commit eff6549
Showing 1 changed file with 57 additions and 62 deletions.
kernel/sched_rt.c
@@ -228,68 +228,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif
 
-#ifdef CONFIG_SMP
-static int do_balance_runtime(struct rt_rq *rt_rq);
-
-static int balance_runtime(struct rt_rq *rt_rq)
-{
-	int more = 0;
-
-	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
-	}
-
-	return more;
-}
-#else
-static inline int balance_runtime(struct rt_rq *rt_rq)
-{
-	return 0;
-}
-#endif
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
-{
-	int i, idle = 1;
-	cpumask_t span;
-
-	if (rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
-	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
-		int enqueue = 0;
-		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		spin_lock(&rq->lock);
-		if (rt_rq->rt_time) {
-			u64 runtime;
-
-			spin_lock(&rt_rq->rt_runtime_lock);
-			if (rt_rq->rt_throttled)
-				balance_runtime(rt_rq);
-			runtime = rt_rq->rt_runtime;
-			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
-			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-				rt_rq->rt_throttled = 0;
-				enqueue = 1;
-			}
-			if (rt_rq->rt_time || rt_rq->rt_nr_running)
-				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
-
-		if (enqueue)
-			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
-	}
-
-	return idle;
-}
-
 #ifdef CONFIG_SMP
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
@@ -425,8 +363,65 @@ static void enable_runtime(struct rq *rq)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+	int more = 0;
+
+	if (rt_rq->rt_time > rt_rq->rt_runtime) {
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		more = do_balance_runtime(rt_rq);
+		spin_lock(&rt_rq->rt_runtime_lock);
+	}
+
+	return more;
+}
+#else
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+	return 0;
+}
 #endif
 
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+	int i, idle = 1;
+	cpumask_t span;
+
+	if (rt_b->rt_runtime == RUNTIME_INF)
+		return 1;
+
+	span = sched_rt_period_mask();
+	for_each_cpu_mask(i, span) {
+		int enqueue = 0;
+		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		spin_lock(&rq->lock);
+		if (rt_rq->rt_time) {
+			u64 runtime;
+
+			spin_lock(&rt_rq->rt_runtime_lock);
+			if (rt_rq->rt_throttled)
+				balance_runtime(rt_rq);
+			runtime = rt_rq->rt_runtime;
+			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+				rt_rq->rt_throttled = 0;
+				enqueue = 1;
+			}
+			if (rt_rq->rt_time || rt_rq->rt_nr_running)
+				idle = 0;
+			spin_unlock(&rt_rq->rt_runtime_lock);
+		}
+
+		if (enqueue)
+			sched_rt_rq_enqueue(rt_rq);
+		spin_unlock(&rq->lock);
+	}
+
+	return idle;
+}
+
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
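The move itself changes no logic: balance_runtime() now sits below do_balance_runtime() inside the existing #ifdef CONFIG_SMP block, which lets the forward declaration of do_balance_runtime() go away, and do_sched_rt_period_timer() follows the block so it picks up either the real balance_runtime() or the UP stub. One detail in the moved code is the lock dance in balance_runtime(): it is entered with the local rt_runtime_lock held, but drops it around the call to do_balance_runtime(), which (in this kernel version, outside the hunks shown here) takes the bandwidth lock first and per-runqueue runtime locks second. The sketch below is a toy pthreads model of that pattern, not kernel code; every name in it is a hypothetical stand-in.

#include <pthread.h>
#include <stdio.h>

/*
 * Toy, self-contained model (not kernel code; all names hypothetical) of
 * the locking pattern in balance_runtime(): the caller holds its own
 * per-runqueue runtime lock, but the borrowing path takes the global
 * bandwidth lock first and per-runqueue locks second. Holding the local
 * lock across the call would invert that order, so it is dropped and
 * retaken around the call.
 */

struct toy_rq {
	pthread_mutex_t runtime_lock;
	long long rt_time;		/* runtime consumed this period */
	long long rt_runtime;		/* runtime budget per period */
};

static pthread_mutex_t bandwidth_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for do_balance_runtime(): bandwidth lock, then rq lock. */
static int do_borrow(struct toy_rq *rq)
{
	pthread_mutex_lock(&bandwidth_lock);
	pthread_mutex_lock(&rq->runtime_lock);
	rq->rt_runtime += 10;		/* pretend a neighbour lent us budget */
	pthread_mutex_unlock(&rq->runtime_lock);
	pthread_mutex_unlock(&bandwidth_lock);
	return 1;
}

/* Entered and exited with rq->runtime_lock held, like balance_runtime(). */
static int toy_balance(struct toy_rq *rq)
{
	int more = 0;

	if (rq->rt_time > rq->rt_runtime) {
		pthread_mutex_unlock(&rq->runtime_lock);  /* drop local lock */
		more = do_borrow(rq);	/* would self-deadlock if still held */
		pthread_mutex_lock(&rq->runtime_lock);	  /* retake on return */
	}
	return more;
}

int main(void)
{
	struct toy_rq rq = { PTHREAD_MUTEX_INITIALIZER, 120, 100 };

	pthread_mutex_lock(&rq.runtime_lock);
	printf("borrowed=%d, runtime now %lld\n",
	       toy_balance(&rq), rq.rt_runtime);
	pthread_mutex_unlock(&rq.runtime_lock);
	return 0;
}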

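Similarly, the heart of the moved do_sched_rt_period_timer() is the replenishment step: rt_time -= min(rt_time, overrun*runtime), followed by unthrottling once less than one period's budget remains owed. Below is a minimal standalone sketch of just that arithmetic, with made-up numbers, hypothetical names, and none of the locking or runqueue handling.

#include <stdio.h>

typedef unsigned long long u64;

/*
 * Toy, self-contained model (not kernel code; names hypothetical) of the
 * replenishment in do_sched_rt_period_timer(): each timer expiry covers
 * 'overrun' periods, pays off up to overrun*runtime of the accumulated
 * rt_time, and unthrottles once less than one period's budget is owed.
 */

struct toy_rt_rq {
	u64 rt_time;		/* runtime consumed so far */
	u64 rt_runtime;		/* budget per period */
	int rt_throttled;
};

static u64 min_u64(u64 a, u64 b)
{
	return a < b ? a : b;
}

/* One period-timer expiry; returns 1 once no runtime is owed. */
static int toy_period_tick(struct toy_rt_rq *rt_rq, int overrun)
{
	u64 runtime = rt_rq->rt_runtime;

	rt_rq->rt_time -= min_u64(rt_rq->rt_time, overrun * runtime);
	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime)
		rt_rq->rt_throttled = 0;	/* debt low enough: unthrottle */

	return rt_rq->rt_time == 0;
}

int main(void)
{
	/* Owes 2.5 periods' worth of budget and is throttled. */
	struct toy_rt_rq rt_rq = { 250, 100, 1 };
	int tick;

	for (tick = 1; rt_rq.rt_time; tick++) {
		toy_period_tick(&rt_rq, 1);
		printf("tick %d: rt_time=%llu throttled=%d\n",
		       tick, rt_rq.rt_time, rt_rq.rt_throttled);
	}
	return 0;
}

On the first expiry the toy queue still owes more than one period's budget and stays throttled; on the second its debt drops below runtime and it is unthrottled, mirroring the enqueue = 1 path in the patch.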