sched/dl: Add dl_rq utilization tracking
Similarly to what happens with RT tasks, CFS tasks can be preempted by DL
tasks, and the CFS utilization might then no longer describe the real
utilization level.

The current DL bandwidth reflects the requirements needed to meet deadlines when
tasks are enqueued, but not the actual utilization of the DL sched class. Track
the DL class utilization so it can be used to estimate the overall system
utilization.
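
The idea can be illustrated with a small, self-contained userspace sketch. This is
not kernel code: names such as cpu_util_model and struct util_signals are made up
for illustration. It only shows why a per-class DL signal is useful: summing the
per-class utilization signals keeps the CPU utilization estimate honest (for a
consumer such as CPU frequency selection) even when DL work preempts CFS.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

struct util_signals {
	unsigned long cfs_util;	/* rq->cfs.avg.util_avg */
	unsigned long rt_util;	/* rq->avg_rt.util_avg */
	unsigned long dl_util;	/* rq->avg_dl.util_avg, the signal added by this patch */
};

/* Hypothetical helper: sum the per-class signals and clamp to capacity. */
static unsigned long cpu_util_model(const struct util_signals *s)
{
	unsigned long util = s->cfs_util + s->rt_util + s->dl_util;

	return util < SCHED_CAPACITY_SCALE ? util : SCHED_CAPACITY_SCALE;
}

int main(void)
{
	/* CFS looks almost idle because a DL task is preempting it... */
	struct util_signals s = { .cfs_util = 100, .rt_util = 0, .dl_util = 600 };

	/* ...but the combined estimate still reflects the real CPU load. */
	printf("estimated CPU utilization: %lu/%lu\n",
	       cpu_util_model(&s), SCHED_CAPACITY_SCALE);
	return 0;
}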

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-5-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Vincent Guittot authored and Ingo Molnar committed Jul 15, 2018
1 parent 3ae117c commit 3727e0e
Showing 5 changed files with 44 additions and 3 deletions.
6 changes: 6 additions & 0 deletions kernel/sched/deadline.c
@@ -16,6 +16,7 @@
  * Fabio Checconi <fchecconi@gmail.com>
  */
 #include "sched.h"
+#include "pelt.h"
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1761,13 +1762,17 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	deadline_queue_push_tasks(rq);
 
+	if (rq->curr->sched_class != &dl_sched_class)
+		update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
 	return p;
 }
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
@@ -1784,6 +1789,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
 	update_curr_dl(rq);
 
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
 	/*
 	 * Even when we have runtime, update_curr_dl() might have resulted in us
 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
11 changes: 8 additions & 3 deletions kernel/sched/fair.c
@@ -7290,11 +7290,14 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 	return false;
 }
 
-static inline bool rt_rq_has_blocked(struct rq *rq)
+static inline bool others_rqs_have_blocked(struct rq *rq)
 {
 	if (READ_ONCE(rq->avg_rt.util_avg))
 		return true;
 
+	if (READ_ONCE(rq->avg_dl.util_avg))
+		return true;
+
 	return false;
 }
 
@@ -7358,8 +7361,9 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
-	if (rt_rq_has_blocked(rq))
+	if (others_rqs_have_blocked(rq))
 		done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -7427,9 +7431,10 @@ static inline void update_blocked_averages(int cpu)
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (!cfs_rq_has_blocked(cfs_rq) && !rt_rq_has_blocked(rq))
+	if (!cfs_rq_has_blocked(cfs_rq) && !others_rqs_have_blocked(rq))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);
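
For the update_blocked_averages() changes above, a minimal userspace model (made-up
names, with halving as a crude stand-in for the real PELT decay) illustrates why the
helper was renamed: periodic decay of blocked utilization may only stop once every
class's residual signal, now including DL, has reached zero.

#include <stdbool.h>
#include <stdio.h>

struct rq_model {
	unsigned long cfs_util, rt_util, dl_util;
};

/* Mirrors the intent of others_rqs_have_blocked(): any non-CFS residual? */
static bool others_have_blocked_model(const struct rq_model *rq)
{
	return rq->rt_util || rq->dl_util;
}

/* Crude stand-in for PELT decay: roughly halve the signal each period. */
static void decay_model(unsigned long *util)
{
	*util /= 2;
}

int main(void)
{
	struct rq_model rq = { .cfs_util = 50, .rt_util = 0, .dl_util = 300 };
	int periods = 0;

	/* Keep decaying until no class reports blocked utilization. */
	while (rq.cfs_util || others_have_blocked_model(&rq)) {
		decay_model(&rq.cfs_util);
		decay_model(&rq.rt_util);
		decay_model(&rq.dl_util);
		periods++;
	}
	printf("decay finished after %d periods\n", periods);
	return 0;
}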
23 changes: 23 additions & 0 deletions kernel/sched/pelt.c
@@ -334,3 +334,26 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 
 	return 0;
 }
+
+/*
+ * dl_rq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+	if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+				running,
+				running,
+				running)) {
+
+		___update_load_avg(&rq->avg_dl, 1, 1);
+		return 1;
+	}
+
+	return 0;
+}
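
A rough userspace sketch of the signal that update_dl_rq_load_avg() maintains in
rq->avg_dl: a geometric series over ~1ms periods with a 32-period half-life, rising
towards SCHED_CAPACITY_SCALE while DL tasks run and decaying back once the rq goes
idle. Floating point is used here for clarity; the kernel's ___update_load_sum() and
___update_load_avg() use fixed-point arithmetic, so the numbers are only approximate.

#include <math.h>
#include <stdio.h>

#define PELT_HALFLIFE	32	/* periods after which a contribution halves */
#define CAPACITY	1024.0	/* SCHED_CAPACITY_SCALE */

int main(void)
{
	double y = pow(0.5, 1.0 / PELT_HALFLIFE);	/* per-period decay factor */
	double max_sum = 1.0 / (1.0 - y);		/* limit of the geometric series */
	double sum = 0.0;

	/* A DL task runs for 100 periods, then the rq stays idle for 100 more. */
	for (int p = 0; p < 200; p++) {
		int running = p < 100;

		sum = sum * y + (running ? 1.0 : 0.0);
		if (p == 99 || p == 199)
			printf("period %3d: util_avg ~= %4.0f/1024\n",
			       p, CAPACITY * sum / max_sum);
	}
	return 0;
}

Built with something like gcc pelt_sketch.c -lm, the modelled util_avg ends the running
phase at roughly 900/1024 and decays to roughly 100/1024 after the idle phase.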
6 changes: 6 additions & 0 deletions kernel/sched/pelt.h
@@ -4,6 +4,7 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
 int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
 int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
 /*
  * When a task is dequeued, its estimated utilization should not be update if
@@ -45,6 +46,11 @@ update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 	return 0;
 }
 
+static inline int
+update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+	return 0;
+}
 #endif
 
 
1 change: 1 addition & 0 deletions kernel/sched/sched.h
@@ -856,6 +856,7 @@ struct rq {
 	u64			rt_avg;
 	u64			age_stamp;
 	struct sched_avg	avg_rt;
+	struct sched_avg	avg_dl;
 	u64			idle_stamp;
 	u64			avg_idle;
 
