sched/rt: Move RT related code from sched/core.c to sched/rt.c
This helps make sched/core.c smaller and hopefully easier to understand and maintain.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170621182203.30626-3-nicolas.pitre@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Nicolas Pitre authored and Ingo Molnar committed Jun 23, 2017
1 parent 06a76fe commit 8887cd9
Showing 3 changed files with 315 additions and 315 deletions.
315 changes: 0 additions & 315 deletions kernel/sched/core.c
@@ -6224,321 +6224,6 @@ void sched_move_task(struct task_struct *tsk)

        task_rq_unlock(rq, tsk, &rf);
}
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
        struct task_struct *g, *p;

        /*
         * Autogroups do not have RT tasks; see autogroup_create().
         */
        if (task_group_is_autogroup(tg))
                return 0;

        for_each_process_thread(g, p) {
                if (rt_task(p) && task_group(p) == tg)
                        return 1;
        }

        return 0;
}

struct rt_schedulable_data {
        struct task_group *tg;
        u64 rt_period;
        u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
        struct rt_schedulable_data *d = data;
        struct task_group *child;
        unsigned long total, sum = 0;
        u64 period, runtime;

        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        runtime = tg->rt_bandwidth.rt_runtime;

        if (tg == d->tg) {
                period = d->rt_period;
                runtime = d->rt_runtime;
        }

        /*
         * Cannot have more runtime than the period.
         */
        if (runtime > period && runtime != RUNTIME_INF)
                return -EINVAL;

        /*
         * Ensure we don't starve existing RT tasks.
         */
        if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
                return -EBUSY;

        total = to_ratio(period, runtime);

        /*
         * Nobody can have more than the global setting allows.
         */
        if (total > to_ratio(global_rt_period(), global_rt_runtime()))
                return -EINVAL;

        /*
         * The sum of our children's runtime should not exceed our own.
         */
        list_for_each_entry_rcu(child, &tg->children, siblings) {
                period = ktime_to_ns(child->rt_bandwidth.rt_period);
                runtime = child->rt_bandwidth.rt_runtime;

                if (child == d->tg) {
                        period = d->rt_period;
                        runtime = d->rt_runtime;
                }

                sum += to_ratio(period, runtime);
        }

        if (sum > total)
                return -EINVAL;

        return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
        int ret;

        struct rt_schedulable_data data = {
                .tg = tg,
                .rt_period = period,
                .rt_runtime = runtime,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
        rcu_read_unlock();

        return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
                u64 rt_period, u64 rt_runtime)
{
        int i, err = 0;

        /*
         * Disallowing the root group RT runtime is BAD, it would disallow the
         * kernel creating (and or operating) RT threads.
         */
        if (tg == &root_task_group && rt_runtime == 0)
                return -EINVAL;

        /* No period doesn't make any sense. */
        if (rt_period == 0)
                return -EINVAL;

        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
        if (err)
                goto unlock;

        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;

        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);

        return err;
}

static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
        u64 rt_runtime, rt_period;

        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us < 0)
                rt_runtime = RUNTIME_INF;

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_runtime(struct task_group *tg)
{
        u64 rt_runtime_us;

        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
                return -1;

        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
        do_div(rt_runtime_us, NSEC_PER_USEC);
        return rt_runtime_us;
}

static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
        u64 rt_runtime, rt_period;

        rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_period(struct task_group *tg)
{
        u64 rt_period_us;

        rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
        do_div(rt_period_us, NSEC_PER_USEC);
        return rt_period_us;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int sched_rt_global_constraints(void)
{
        int ret = 0;

        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        ret = __rt_schedulable(NULL, 0, 0);
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);

        return ret;
}

static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
        /* Don't accept realtime tasks when there is no way for them to run */
        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
                return 0;

        return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
        unsigned long flags;
        int i;

        raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

        return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_rt_global_validate(void)
{
        if (sysctl_sched_rt_period <= 0)
                return -EINVAL;

        if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
                (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
                return -EINVAL;

        return 0;
}

static void sched_rt_do_global(void)
{
        def_rt_bandwidth.rt_runtime = global_rt_runtime();
        def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
}

int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int old_period, old_runtime;
        static DEFINE_MUTEX(mutex);
        int ret;

        mutex_lock(&mutex);
        old_period = sysctl_sched_rt_period;
        old_runtime = sysctl_sched_rt_runtime;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (!ret && write) {
                ret = sched_rt_global_validate();
                if (ret)
                        goto undo;

                ret = sched_dl_global_validate();
                if (ret)
                        goto undo;

                ret = sched_rt_global_constraints();
                if (ret)
                        goto undo;

                sched_rt_do_global();
                sched_dl_do_global();
        }
        if (0) {
undo:
                sysctl_sched_rt_period = old_period;
                sysctl_sched_rt_runtime = old_runtime;
        }
        mutex_unlock(&mutex);

        return ret;
}

int sched_rr_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret;
        static DEFINE_MUTEX(mutex);

        mutex_lock(&mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        /*
         * Make sure that internally we keep jiffies.
         * Also, writing zero resets the timeslice to default:
         */
        if (!ret && write) {
                sched_rr_timeslice =
                        sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
                        msecs_to_jiffies(sysctl_sched_rr_timeslice);
        }
        mutex_unlock(&mutex);
        return ret;
}

#ifdef CONFIG_CGROUP_SCHED

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
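
The heart of the removed admission-control logic is tg_rt_schedulable() above: a group's runtime/period ratio must fit within the global limit, and the sum of its children's ratios must not exceed its own. The following standalone C sketch (not part of the commit) illustrates that check; to_ratio() here is a simplified stand-in for the kernel's fixed-point helper, and struct group is a hypothetical reduction of struct task_group.

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT        20
#define RUNTIME_INF     ((uint64_t)~0ULL)

/* Simplified stand-in for the kernel's to_ratio(): runtime/period in fixed point. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        if (runtime == RUNTIME_INF)
                return 1ULL << BW_SHIFT;        /* treat "infinite" as 100% */
        return (runtime << BW_SHIFT) / period;  /* assumes period != 0 */
}

/* Hypothetical group node: period and runtime in nanoseconds, plus children. */
struct group {
        uint64_t rt_period;
        uint64_t rt_runtime;
        struct group *children;
        int nr_children;
};

/*
 * Admission check in the spirit of tg_rt_schedulable(): the group itself must
 * fit in the global budget and its children's ratios must not exceed its own.
 */
static int group_rt_schedulable(const struct group *tg,
                uint64_t global_period, uint64_t global_runtime)
{
        uint64_t total, sum = 0;
        int i;

        if (tg->rt_runtime > tg->rt_period && tg->rt_runtime != RUNTIME_INF)
                return -1;              /* more runtime than period */

        total = to_ratio(tg->rt_period, tg->rt_runtime);
        if (total > to_ratio(global_period, global_runtime))
                return -1;              /* exceeds the global setting */

        for (i = 0; i < tg->nr_children; i++)
                sum += to_ratio(tg->children[i].rt_period,
                                tg->children[i].rt_runtime);

        return sum > total ? -1 : 0;
}

int main(void)
{
        /* Example numbers: 1 s periods, child uses 0.3, parent capped at 0.4. */
        struct group child  = { 1000000000ULL, 300000000ULL, NULL, 0 };
        struct group parent = { 1000000000ULL, 400000000ULL, &child, 1 };

        printf("schedulable: %d\n",
               group_rt_schedulable(&parent, 1000000000ULL, 950000000ULL));
        return 0;
}

With these numbers the check passes against the 0.95 global limit; raising the child's runtime above the parent's would make it fail, which corresponds to the -EINVAL path in tg_rt_schedulable().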
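
The removed sched_rt_handler() and sched_rr_handler() are the proc handlers behind the global RT throttling and round-robin timeslice sysctls. As a rough illustration of how they are exercised from userspace (assuming root and the usual /proc/sys/kernel/ entries; the values written are examples only):

#include <stdio.h>

/*
 * Illustrative only: write example values to the RT scheduling sysctls
 * served by sched_rt_handler()/sched_rr_handler(). Requires root.
 */
static int write_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        /* 950 ms of RT runtime per 1 s period (the usual defaults). */
        write_sysctl("/proc/sys/kernel/sched_rt_period_us", "1000000");
        write_sysctl("/proc/sys/kernel/sched_rt_runtime_us", "950000");
        /* Per sched_rr_handler(), zero or negative resets the timeslice to its default. */
        write_sysctl("/proc/sys/kernel/sched_rr_timeslice_ms", "25");
        return 0;
}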
