Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc scheduler fixes from Ingo Molnar:

 - Fix potential deadlock under CONFIG_DEBUG_OBJECTS=y (see the lock-order sketch after the shortlog below)

 - PELT metrics update ordering fix

 - uclamp logic fix

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/uclamp: Fix incorrect condition
  sched/pelt: Fix update of blocked PELT ordering
  sched/core: Avoid spurious lock dependencies
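The first fix above guards against a lock-order inversion that only materializes with CONFIG_DEBUG_OBJECTS=y. As background, a minimal userspace analogy of that failure shape: this is illustrative pthread code, not kernel code, and every name in it is made up. Two paths that take the same pair of locks in opposite order can each end up blocked holding the lock the other needs.

/* Classic ABBA inversion: thread_a takes lock_a then lock_b, thread_b
 * takes lock_b then lock_a. Run concurrently, each may block holding
 * the lock the other needs -- this program can genuinely deadlock,
 * which is exactly the shape lockdep-style tooling reports.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *thread_a(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);	/* A -> B ordering */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *thread_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);	/* B -> A ordering: the inversion */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t ta, tb;

	pthread_create(&ta, NULL, thread_a, NULL);
	pthread_create(&tb, NULL, thread_b, NULL);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);
	printf("finished without deadlocking this time\n");
	return 0;
}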
Linus Torvalds committed Nov 17, 2019
2 parents 6b27354 + 6e1ff07 commit cbb104f
Showing 2 changed files with 23 additions and 11 deletions.
5 changes: 3 additions & 2 deletions kernel/sched/core.c
@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}
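Note on the hunk above: the old test was inverted, so the immediate dec/inc refresh ran for tasks with no active clamp accounting and was skipped for tasks that actually had a bucket accounted on this runqueue. A minimal standalone sketch of the corrected pattern, with simplified stand-in types rather than the kernel's uclamp structures:

/* Simplified stand-ins, NOT the kernel uclamp API: a clamp is only
 * re-accounted immediately when it is currently active on the rq;
 * an inactive task just picks up the new bucket at its next enqueue.
 */
struct clamp { int active; int bucket_id; };

static void clamp_refresh(struct clamp *c, int new_bucket,
			  void (*dec)(int), void (*inc)(int))
{
	if (c->active) {		/* the fix: this used to be "!c->active" */
		dec(c->bucket_id);	/* drop the stale bucket accounting */
		c->bucket_id = new_bucket;
		inc(c->bucket_id);	/* re-account the updated value */
	} else {
		c->bucket_id = new_bucket;	/* seen at the next enqueue */
	}
}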
@@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
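Note on the init_idle() hunk: __sched_fork() moves ahead of the pi_lock/rq->lock acquisition because, with CONFIG_DEBUG_OBJECTS=y, its hrtimer initialization can allocate, and allocating while holding those raw spinlocks records a spinlock-to-allocator lock dependency that lockdep can later combine into a deadlock report. A hedged userspace sketch of the resulting rule, with illustrative names only:

/* Sketch of the pattern: do anything that may allocate (and thus take
 * allocator-internal locks) BEFORE acquiring a spinlock, so no
 * spinlock -> allocator-lock dependency is ever recorded.
 * setup_object() and state_lock are illustrative, not kernel APIs.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t state_lock;

struct object { int ready; void *debug; };

static void setup_object(struct object *obj)
{
	obj->debug = malloc(64);	/* may take allocator locks */
}

static void publish_object(struct object *obj)
{
	setup_object(obj);		/* allocate before locking */

	pthread_spin_lock(&state_lock);
	obj->ready = 1;			/* only cheap updates under the lock */
	pthread_spin_unlock(&state_lock);
}

int main(void)
{
	struct object obj = { 0, NULL };

	pthread_spin_init(&state_lock, PTHREAD_PROCESS_PRIVATE);
	publish_object(&obj);
	free(obj.debug);
	pthread_spin_destroy(&state_lock);
	return 0;
}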
29 changes: 20 additions & 9 deletions kernel/sched/fair.c
@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
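Note on the fair.c hunks: both variants of update_blocked_averages() now refresh the RT, DL and IRQ averages before the CFS pass, because update_cfs_rq_load_avg() can end up in cpufreq_update_util(), which bases its frequency hint on all of those signals together; updating CFS first would let that hint read stale non-CFS values. A standalone sketch of the ordering invariant, with illustrative names rather than the kernel APIs:

/* Illustrative names only: the CFS update may fire a frequency-hint
 * callback that aggregates every signal, so all non-CFS signals must
 * already be fresh by the time the CFS update runs.
 */
#include <stdio.h>

struct load_signals { double rt, dl, irq, cfs; };

/* Stand-in for cpufreq_update_util(): consumes ALL the signals. */
static void frequency_hint(const struct load_signals *s)
{
	printf("total util = %.2f\n", s->rt + s->dl + s->irq + s->cfs);
}

static void update_cfs(struct load_signals *s, double sample)
{
	s->cfs = sample;
	frequency_hint(s);	/* also reads rt/dl/irq: they must be fresh */
}

static void update_blocked(struct load_signals *s)
{
	/* Non-CFS signals first, mirroring the fixed ordering... */
	s->rt = 0.10;
	s->dl = 0.05;
	s->irq = 0.02;

	/* ...then CFS last, so its callback sees a consistent view. */
	update_cfs(s, 0.40);
}

int main(void)
{
	struct load_signals s = { 0, 0, 0, 0 };

	update_blocked(&s);
	return 0;
}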
