Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:

 - two patches addressing the problem that, under certain conditions,
   the scheduler allows user space tasks to be scheduled on CPUs which
   are not yet fully booted, causing subtle and hard-to-debug issues

 - add a missing runqueue clock update in the deadline scheduler,
   whose absence triggers a warning under certain circumstances

 - fix a silly typo in the scheduler header file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/headers: Fix typo
  sched/deadline: Fix missing clock update
  sched/core: Require cpu_active() in select_task_rq(), for user tasks
  sched/core: Fix rules for running on online && !active CPUs
Linus Torvalds committed Jun 3, 2018
2 parents 26bdace + 595058b commit 874cd33
Showing 3 changed files with 35 additions and 18 deletions.
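[Editorial note, for context before the diff: during CPU bring-up, a CPU is marked online (it can execute code) before it is marked active (the scheduler may place arbitrary tasks on it), and the bug was that user space tasks could slip into that window. The stand-alone sketch below illustrates the rule the new is_cpu_allowed() helper enforces. It is only a sketch: cpu_online_sim(), cpu_active_sim(), and struct task are simplified stand-ins invented for the example, not kernel APIs, and the real helper additionally checks the task's cpus_allowed mask, which is omitted here. Only PF_KTHREAD (with its real value) and the shape of the logic come from the commit.]

/*
 * Toy illustration of the rule the patches enforce: per-CPU kthreads
 * may run on online-but-not-yet-active CPUs, everything else must
 * wait for the CPU to become active.
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_KTHREAD 0x00200000		/* I am a kernel thread */

struct task {
	unsigned int flags;
	int nr_cpus_allowed;
};

/* Pretend CPUs 0-3 have booted far enough to run code... */
static bool cpu_online_sim(int cpu) { return cpu >= 0 && cpu <= 3; }
/* ...but CPU 3 is not yet accepting arbitrary tasks. */
static bool cpu_active_sim(int cpu) { return cpu >= 0 && cpu <= 2; }

static bool is_per_cpu_kthread(const struct task *p)
{
	return (p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1;
}

/* Mirrors the shape of the new is_cpu_allowed() helper. */
static bool is_cpu_allowed_sim(const struct task *p, int cpu)
{
	if (is_per_cpu_kthread(p))
		return cpu_online_sim(cpu);	/* online is enough */

	return cpu_active_sim(cpu);		/* user tasks need active */
}

int main(void)
{
	struct task user    = { .flags = 0,          .nr_cpus_allowed = 4 };
	struct task kworker = { .flags = PF_KTHREAD, .nr_cpus_allowed = 1 };

	/* The half-booted CPU: per-CPU kthread yes, user task no. */
	printf("user task on CPU 3:       %s\n",
	       is_cpu_allowed_sim(&user, 3) ? "allowed" : "rejected");
	printf("per-CPU kthread on CPU 3: %s\n",
	       is_cpu_allowed_sim(&kworker, 3) ? "allowed" : "rejected");
	return 0;
}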
45 changes: 31 additions & 14 deletions kernel/sched/core.c
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
 *
@@ -938,16 +965,8 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
 {
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return rq;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return rq;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
+
 			goto out;
 		}
 
@@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_online(cpu)))
+	if (unlikely(!is_cpu_allowed(p, cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
6 changes: 3 additions & 3 deletions kernel/sched/deadline.c
@@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 
 	rq = task_rq_lock(p, &rf);
 
+	sched_clock_tick();
+	update_rq_clock(rq);
+
 	if (!dl_task(p) || p->state == TASK_DEAD) {
 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 
@@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	if (dl_se->dl_non_contending == 0)
 		goto unlock;
 
-	sched_clock_tick();
-	update_rq_clock(rq);
-
 	sub_running_bw(dl_se, &rq->dl);
 	dl_se->dl_non_contending = 0;
 unlock:
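[Editorial note: the deadline fix above is purely an ordering change; the sched_clock_tick()/update_rq_clock() pair moves up to right after task_rq_lock(), so the early !dl_task()/TASK_DEAD bandwidth-accounting path also sees a fresh runqueue clock instead of a stale one. Below is a stand-alone toy of that bug shape; all names (toy_rq, account_buggy, account_fixed, toy_update_clock) are made up for the example and are not kernel APIs.]

/*
 * Toy model of the deadline.c fix: a cached clock must be refreshed
 * immediately after "locking", because early-exit paths consume it too.
 */
#include <stdio.h>
#include <time.h>

struct toy_rq {
	long long clock;	/* cached timestamp, like rq->clock */
};

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void toy_update_clock(struct toy_rq *rq)
{
	rq->clock = now_ns();
}

/*
 * Buggy shape (pre-patch): the clock is refreshed only on the late,
 * "normal" path, so the early-exit accounting reads a stale value.
 */
static long long account_buggy(struct toy_rq *rq, int task_dead)
{
	if (task_dead)
		return rq->clock;	/* stale: nobody refreshed it */

	toy_update_clock(rq);
	return rq->clock;
}

/* Fixed shape (post-patch): refresh first, then branch. */
static long long account_fixed(struct toy_rq *rq, int task_dead)
{
	toy_update_clock(rq);		/* every path below sees a fresh clock */

	if (task_dead)
		return rq->clock;

	return rq->clock;
}

int main(void)
{
	struct toy_rq rq = { .clock = 0 };

	printf("buggy early exit reads: %lld\n", account_buggy(&rq, 1));
	printf("fixed early exit reads: %lld\n", account_fixed(&rq, 1));
	return 0;
}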
2 changes: 1 addition & 1 deletion kernel/sched/sched.h
@@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
 }
 
 /*
- * See rt task throttoling, which is the only time a skip
+ * See rt task throttling, which is the only time a skip
  * request is cancelled.
  */
 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
