Merge tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "A set of fixes for the scheduler:

   - Fix handling of throttled parents in enqueue_task_fair() completely.

     The recent fix overlooked a corner case where the first iteration
     terminates due to an entity already being on the runqueue which
     makes the list management incomplete and later triggers the
     assertion which checks for completeness.

   - Fix a similar problem in unthrottle_cfs_rq().

   - Show the correct uclamp values in procfs which prints the effective
     value twice instead of requested and effective"

* tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list
  sched/debug: Fix requested task uclamp values shown in procfs
  sched/fair: Fix enqueue_task_fair() warning some more
Linus Torvalds committed May 24, 2020
2 parents caffb99 + 39f23ce commit 9e61d12
Showing 2 changed files with 39 additions and 14 deletions.
4 changes: 2 additions & 2 deletions kernel/sched/debug.c
@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_est.enqueued);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
-	__PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
-	__PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
+	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
 	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
 	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
 #endif
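For context on the debug.c hunk above: a task's requested clamp and the clamp it effectively runs with can differ once task-group and system limits are applied, which is why printing the effective value twice hid the requested one. The snippet below is a minimal standalone sketch of that distinction only, not the kernel's uclamp implementation; effective_clamp() and the sample numbers are invented for illustration.

#include <stdio.h>

/* Toy stand-in for the idea behind uclamp_eff_value(): the effective
 * clamp cannot exceed an outer (group/system) limit.  Illustration only. */
static unsigned int effective_clamp(unsigned int requested, unsigned int limit)
{
	return requested < limit ? requested : limit;
}

int main(void)
{
	unsigned int requested_max = 1024;	/* what the task asked for */
	unsigned int group_limit = 512;		/* hypothetical outer limit */

	/* Requested and effective are reported separately; the formatting
	 * here is illustrative, not the real /proc/<pid>/sched layout. */
	printf("uclamp.max           : %u\n", requested_max);
	printf("effective uclamp.max : %u\n",
	       effective_clamp(requested_max, group_limit));
	return 0;
}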
49 changes: 37 additions & 12 deletions kernel/sched/fair.c
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	int enqueue = 1;
 	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+	}
 
+	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue) {
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-		} else {
-			update_load_avg(cfs_rq, se, 0);
-			se_update_runnable(se);
-		}
+
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		se_update_runnable(se);
 
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;
 
+
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
-	if (!se)
-		add_nr_running(rq, task_delta);
+	/* At this point se is NULL and we are at root level*/
+	add_nr_running(rq, task_delta);
+
+unthrottle_throttle:
 	/*
 	 * The cfs_rq_throttled() breaks in the above iteration can result in
 	 * incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		list_add_leaf_cfs_rq(cfs_rq);
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}
 
 	assert_list_leaf_cfs_rq(rq);
@@ -5479,6 +5497,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto enqueue_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
 enqueue_throttle:
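Both kernel/sched/fair.c changes follow the same shape: walk from the child group toward the root doing the per-level enqueue bookkeeping, bail out as soon as a throttled cfs_rq is reached (the goto unthrottle_throttle / goto enqueue_throttle paths), and make sure any cfs_rq that a throttled parent took off the leaf list is put back, so assert_list_leaf_cfs_rq() still holds. The program below is a self-contained toy model of that walk, bail-out and repair pattern only, not kernel code; the toy_* types and helpers, the simplified list bookkeeping, and the return convention of toy_add_leaf() are all invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a cfs_rq: a parent pointer plus two flags. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;
	bool throttled;
	bool on_leaf_list;
	const char *name;
};

/* Returns true when the group was already listed, so the caller can stop:
 * in this toy, once one level is linked its ancestors are linked too. */
static bool toy_add_leaf(struct toy_cfs_rq *cfs_rq)
{
	if (cfs_rq->on_leaf_list)
		return true;
	cfs_rq->on_leaf_list = true;
	printf("re-added %s to the leaf list\n", cfs_rq->name);
	return false;
}

static void toy_enqueue(struct toy_cfs_rq *cfs_rq)
{
	struct toy_cfs_rq *pos;

	/* First walk: do the per-level work, but stop early on a throttled
	 * ancestor -- the corner case that leaves the list incomplete. */
	for (pos = cfs_rq; pos; pos = pos->parent) {
		printf("enqueue work for %s\n", pos->name);
		if (pos->throttled)
			break;
	}

	/* Repair walk: put the partially processed path back on the list. */
	for (pos = cfs_rq; pos; pos = pos->parent) {
		if (toy_add_leaf(pos))
			break;
	}
}

int main(void)
{
	struct toy_cfs_rq root   = { NULL,    false, true,  "root"   };
	struct toy_cfs_rq parent = { &root,   true,  false, "parent" };	/* throttled */
	struct toy_cfs_rq child  = { &parent, false, false, "child"  };

	toy_enqueue(&child);
	return 0;
}

The real repair walk is the trailing for_each_sched_entity() loop in the hunks above; the toy only mimics its early-exit shape.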
