sched/fair: Snapshot the min_vruntime of CPUs on force idle
During force-idle, we end up doing cross-cpu comparison of vruntimes
during pick_next_task. If we simply compare (vruntime-min_vruntime)
across CPUs, and if each CPU has only one task, we will always
end up comparing 0 with 0 and picking just one of the tasks all the time.
This starves the task that was not picked. To fix this, take a snapshot
of the min_vruntime when entering force idle and use it for comparison.
This min_vruntime snapshot will only be used for cross-CPU vruntime
comparison, and nothing else.
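
As a rough standalone sketch of the problem and the fix (not kernel code;
the toy_rq/toy_task structures and the old_cmp/new_cmp helpers are made up
for illustration):

  /* Toy model: one runnable fair task per CPU. */
  struct toy_rq   { long long min_vruntime; long long min_vruntime_fi; };
  struct toy_task { long long vruntime; struct toy_rq *rq; };

  /*
   * Old scheme: normalize against the live min_vruntime. With a single
   * task per CPU, min_vruntime tracks that task's vruntime, so both
   * sides are ~0, the comparison always resolves the same way and the
   * losing task starves.
   */
  static int old_cmp(const struct toy_task *a, const struct toy_task *b)
  {
          long long va = a->vruntime - a->rq->min_vruntime;  /* ~0 */
          long long vb = b->vruntime - b->rq->min_vruntime;  /* ~0 */
          return va > vb;  /* true: a has lower priority than b */
  }

  /*
   * New scheme: normalize against min_vruntime_fi, snapshotted when the
   * core entered force idle. The task that keeps running moves past the
   * snapshot, its delta grows positive, and it eventually loses the
   * pick, letting the force-idled sibling run.
   */
  static int new_cmp(const struct toy_task *a, const struct toy_task *b)
  {
          long long delta = (a->vruntime - b->vruntime) +
                            (b->rq->min_vruntime_fi - a->rq->min_vruntime_fi);
          return delta > 0;
  }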

A note about the min_vruntime snapshot and force idling:

During selection:

  When we're not fi, we need to update the snapshot.
  When we're fi and we were not fi before, we must update the snapshot.
  When we're fi and we were already fi, we must not update the snapshot.

Which gives:

  fib     fi      update
  0       0       1
  0       1       1
  1       0       1
  1       1       0

Where:

  fi:  force-idled now
  fib: force-idled before

So the min_vruntime snapshot needs to be updated when: !(fib && fi).
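
In the pick_next_task() hunk below, that condition shows up directly as:

  if (!(fi_before && rq->core->core_forceidle))
          task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);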

Also, the cfs_prio_less() function needs to be aware of whether the
core is in force idle or not, since it will use this information to
decide whether to advance a cfs_rq's min_vruntime_fi in the hierarchy.
So pass this information along via pick_task() -> prio_less().

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.738542617@infradead.org
Joel Fernandes (Google) authored and Peter Zijlstra committed May 12, 2021
1 parent 7afbba1 commit c6047c2
Showing 3 changed files with 117 additions and 25 deletions.
59 changes: 34 additions & 25 deletions kernel/sched/core.c
@@ -111,7 +111,7 @@ static inline int __task_prio(struct task_struct *p)
*/

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b)
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{

int pa = __task_prio(a), pb = __task_prio(b);
@@ -125,19 +125,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b)
if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);

if (pa == MAX_RT_PRIO + MAX_NICE) { /* fair */
u64 vruntime = b->se.vruntime;

/*
* Normalize the vruntime if tasks are in different cpus.
*/
if (task_cpu(a) != task_cpu(b)) {
vruntime -= task_cfs_rq(b)->min_vruntime;
vruntime += task_cfs_rq(a)->min_vruntime;
}

return !((s64)(a->se.vruntime - vruntime) <= 0);
}
if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
return cfs_prio_less(a, b, in_fi);

return false;
}
@@ -151,7 +140,7 @@ static inline bool __sched_core_less(struct task_struct *a, struct task_struct *
return false;

/* flip prio, so high prio is leftmost */
if (prio_less(b, a))
if (prio_less(b, a, task_rq(a)->core->core_forceidle))
return true;

return false;
@@ -5350,7 +5339,7 @@ static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
* - Else returns idle_task.
*/
static struct task_struct *
pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max)
pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi)
{
struct task_struct *class_pick, *cookie_pick;
unsigned long cookie = rq->core->core_cookie;
@@ -5365,7 +5354,7 @@ pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *ma
* higher priority than max.
*/
if (max && class_pick->core_cookie &&
prio_less(class_pick, max))
prio_less(class_pick, max, in_fi))
return idle_sched_class.pick_task(rq);

return class_pick;
@@ -5384,19 +5373,22 @@ pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *ma
* the core (so far) and it must be selected, otherwise we must go with
* the cookie pick in order to satisfy the constraint.
*/
if (prio_less(cookie_pick, class_pick) &&
(!max || prio_less(max, class_pick)))
if (prio_less(cookie_pick, class_pick, in_fi) &&
(!max || prio_less(max, class_pick, in_fi)))
return class_pick;

return cookie_pick;
}

extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct task_struct *next, *max = NULL;
const struct sched_class *class;
const struct cpumask *smt_mask;
bool fi_before = false;
bool need_sync;
int i, j, cpu;

@@ -5478,9 +5470,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)

if (!next->core_cookie) {
rq->core_pick = NULL;
/*
* For robustness, update the min_vruntime_fi for
* unconstrained picks as well.
*/
WARN_ON_ONCE(fi_before);
task_vruntime_update(rq, next, false);
goto done;
}
need_sync = true;
}

for_each_cpu(i, smt_mask) {
@@ -5511,11 +5508,16 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* highest priority task already selected for this
* core.
*/
p = pick_task(rq_i, class, max);
p = pick_task(rq_i, class, max, fi_before);
if (!p)
continue;

rq_i->core_pick = p;
if (rq_i->idle == p && rq_i->nr_running) {
rq->core->core_forceidle = true;
if (!fi_before)
rq->core->core_forceidle_seq++;
}

/*
* If this new candidate is of higher priority than the
@@ -5534,6 +5536,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
max = p;

if (old_max) {
rq->core->core_forceidle = false;
for_each_cpu(j, smt_mask) {
if (j == i)
continue;
@@ -5574,10 +5577,16 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (!rq_i->core_pick)
continue;

if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
!rq_i->core->core_forceidle) {
rq_i->core->core_forceidle = true;
}
/*
* Update for new !FI->FI transitions, or if continuing to be in !FI:
* fi_before fi update?
* 0 0 1
* 0 1 1
* 1 0 1
* 1 1 0
*/
if (!(fi_before && rq->core->core_forceidle))
task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);

if (i == cpu) {
rq_i->core_pick = NULL;
75 changes: 75 additions & 0 deletions kernel/sched/fair.c
@@ -10801,6 +10801,81 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
resched_curr(rq);
}

/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);

if (forceidle) {
if (cfs_rq->forceidle_seq == fi_seq)
break;
cfs_rq->forceidle_seq = fi_seq;
}

cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
}
}

void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
{
struct sched_entity *se = &p->se;

if (p->sched_class != &fair_sched_class)
return;

se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}

bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{
struct rq *rq = task_rq(a);
struct sched_entity *sea = &a->se;
struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;

SCHED_WARN_ON(task_rq(b)->core != rq->core);

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Find an se in the hierarchy for tasks a and b, such that the se's
* are immediate siblings.
*/
while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
int sea_depth = sea->depth;
int seb_depth = seb->depth;

if (sea_depth >= seb_depth)
sea = parent_entity(sea);
if (sea_depth <= seb_depth)
seb = parent_entity(seb);
}

se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);

cfs_rqa = sea->cfs_rq;
cfs_rqb = seb->cfs_rq;
#else
cfs_rqa = &task_rq(a)->cfs;
cfs_rqb = &task_rq(b)->cfs;
#endif

/*
* Find delta after normalizing se's vruntime with its cfs_rq's
* min_vruntime_fi, which would have been updated in prior calls
* to se_fi_update().
*/
delta = (s64)(sea->vruntime - seb->vruntime) +
(s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);

return delta > 0;
}
#else
static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
#endif
8 changes: 8 additions & 0 deletions kernel/sched/sched.h
@@ -526,6 +526,11 @@ struct cfs_rq {

u64 exec_clock;
u64 min_vruntime;
#ifdef CONFIG_SCHED_CORE
unsigned int forceidle_seq;
u64 min_vruntime_fi;
#endif

#ifndef CONFIG_64BIT
u64 min_vruntime_copy;
#endif
@@ -1089,6 +1094,7 @@ struct rq {
unsigned int core_pick_seq;
unsigned long core_cookie;
unsigned char core_forceidle;
unsigned int core_forceidle_seq;
#endif
};

@@ -1162,6 +1168,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}

bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);

#else /* !CONFIG_SCHED_CORE */

static inline bool sched_core_enabled(struct rq *rq)
