sched/pelt: Relax the sync of load_sum with load_avg
Similarly to util_avg and util_sum, don't sync load_sum with the low
bound of load_avg but only ensure that load_sum stays in the correct range.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lkml.kernel.org/r/20220111134659.24961-5-vincent.guittot@linaro.org
Vincent Guittot authored and Peter Zijlstra committed Jan 18, 2022
1 parent 95246d1 commit 2d02fa8
Showing 1 changed file with 22 additions and 14 deletions.
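
Background for the diff below: a PELT average is its sum divided by a divider, and get_pelt_divider() returns PELT_MIN_DIVIDER + avg->period_contrib with period_contrib < 1024, so the divider never drops below PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024, introduced earlier in this series). load_avg * PELT_MIN_DIVIDER is therefore the smallest load_sum consistent with a given load_avg. Before this patch, load_sum was overwritten with exactly that low bound whenever load_avg changed; after it, load_sum is updated on its own and merely clamped to the bound. A minimal userspace sketch of the before/after pattern (simplified types and an illustrative constant, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative constant: LOAD_AVG_MAX (47742) - 1024 in the kernel. */
#define PELT_MIN_DIVIDER 46718u

/* Old scheme: after touching the avg, overwrite the sum with the lowest
 * value consistent with it, discarding whatever the sum really held. */
static void sync_sum_to_low_bound(uint64_t *sum, unsigned long avg)
{
	*sum = (uint64_t)avg * PELT_MIN_DIVIDER;
}

/* New scheme: subtract the removed contribution from the sum itself and
 * only clamp so the sum never falls below the avg's low bound. */
static void relax_sum_sync(uint64_t *sum, unsigned long avg, uint64_t removed)
{
	*sum = (*sum > removed) ? *sum - removed : 0;	/* like sub_positive() */
	if (*sum < (uint64_t)avg * PELT_MIN_DIVIDER)
		*sum = (uint64_t)avg * PELT_MIN_DIVIDER;
}

int main(void)
{
	uint64_t old_sum = 250000, new_sum = 250000;
	unsigned long avg = 5;	/* avg left over after a removal */

	sync_sum_to_low_bound(&old_sum, avg);
	relax_sum_sync(&new_sum, avg, 10000);
	printf("old scheme: %llu, new scheme: %llu (low bound %llu)\n",
	       (unsigned long long)old_sum, (unsigned long long)new_sum,
	       (unsigned long long)((uint64_t)avg * PELT_MIN_DIVIDER));
	return 0;
}

With the old scheme the sum is pinned to the rounded-down avg and loses its accumulated precision; with the new one it keeps its real value as long as that value stays in range, which is the same reasoning the series already applied to util_sum and runnable_sum.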
kernel/sched/fair.c (36 changes: 22 additions & 14 deletions)
@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u32 divider = get_pelt_divider(&se->avg);
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
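
The dequeue_load_avg() change above stops rebuilding cfs_rq's load_sum from the rounded-down load_avg and instead subtracts the entity's actual contribution, se_weight(se) * se->avg.load_sum, before clamping. Both versions lean on sub_positive(), a fair.c helper that subtracts with underflow protection; a simplified userspace analogue (the real macro also goes through READ_ONCE()/WRITE_ONCE(), omitted here):

/* Simplified analogue of kernel/sched/fair.c's sub_positive(): subtract,
 * but clamp to zero when the unsigned subtraction would wrap around.
 * Usage: sub_positive(&sum, removed); */
#define sub_positive(ptr, val) do {			\
	__typeof__(*(ptr)) __val = (val);		\
	__typeof__(*(ptr)) __old = *(ptr);		\
	__typeof__(*(ptr)) __res = __old - __val;	\
	*(ptr) = (__res > __old) ? 0 : __res;		\
} while (0)

So even if the entity's tracked contribution slightly exceeds what the cfs_rq still carries, which rounding can produce, load_sum bottoms out at zero and the max_t() clamp then lifts it back to load_avg's low bound.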
@@ -3513,9 +3515,10 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 	unsigned long load_avg;
 	u64 load_sum = 0;
+	s64 delta_sum;
 	u32 divider;
 
 	if (!runnable_sum)
@@ -3542,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	 * assuming all tasks are equally runnable.
 	 */
 	if (scale_load_down(gcfs_rq->load.weight)) {
-		load_sum = div_s64(gcfs_rq->avg.load_sum,
+		load_sum = div_u64(gcfs_rq->avg.load_sum,
 			scale_load_down(gcfs_rq->load.weight));
 	}
 
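A small type fix rides along in the hunk above: div_s64() becomes div_u64(). gcfs_rq->avg.load_sum is a u64 and the divisor, a scaled-down weight, is positive, so the unsigned helper matches the operands. Userspace stand-ins for the two <linux/math64.h> helpers, only to show the distinction (the kernel versions additionally handle 32-bit targets):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for div_u64()/div_s64() from <linux/math64.h>. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

static int64_t div_s64(int64_t dividend, int32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* A u64 with the top bit set is misread as negative by the signed
	 * helper; not expected for load_sum in practice, but the unsigned
	 * helper is the one that matches the field's type. */
	uint64_t sum = UINT64_MAX - 41;

	printf("div_u64: %llu\n", (unsigned long long)div_u64(sum, 7));
	printf("div_s64: %lld\n", (long long)div_s64((int64_t)sum, 7));
	return 0;
}
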
@@ -3559,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
 	runnable_sum = max(runnable_sum, running_sum);
 
-	load_sum = (s64)se_weight(se) * runnable_sum;
-	load_avg = div_s64(load_sum, divider);
-
-	se->avg.load_sum = runnable_sum;
+	load_sum = se_weight(se) * runnable_sum;
+	load_avg = div_u64(load_sum, divider);
 
-	delta = load_avg - se->avg.load_avg;
-	if (!delta)
+	delta_avg = load_avg - se->avg.load_avg;
+	if (!delta_avg)
 		return;
 
-	se->avg.load_avg = load_avg;
+	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
 
-	add_positive(&cfs_rq->avg.load_avg, delta);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	se->avg.load_sum = runnable_sum;
+	se->avg.load_avg = load_avg;
+	add_positive(&cfs_rq->avg.load_avg, delta_avg);
+	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
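
The rewritten tail of update_tg_cfs_load() above is the heart of the patch: it now derives a signed sum delta (delta_sum) next to the avg delta (delta_avg), applies both to the parent cfs_rq, and clamps the parent's load_sum instead of regenerating it. A compressed sketch of that flow, with the se/cfs_rq structures flattened into one toy struct (a readability assumption, not the kernel's layout):

#include <stdint.h>
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718u	/* illustrative, as above */

struct pelt { unsigned long load_avg; uint64_t load_sum; };

/* Propagate a child's new runnable_sum to the parent: compute the entity's
 * new (sum, avg), diff against the old values, apply both deltas to the
 * parent, then clamp the parent's sum into its valid range. */
static void propagate_load(struct pelt *parent, struct pelt *se,
			   unsigned long weight, uint32_t divider,
			   uint64_t runnable_sum)
{
	uint64_t load_sum = weight * runnable_sum;
	unsigned long load_avg = load_sum / divider;
	long delta_avg = (long)load_avg - (long)se->load_avg;
	int64_t delta_sum = (int64_t)load_sum -
			    (int64_t)(weight * se->load_sum);

	if (!delta_avg)
		return;

	se->load_sum = runnable_sum;	/* old value already consumed above */
	se->load_avg = load_avg;

	parent->load_avg += delta_avg;	/* add_positive() guards underflow */
	parent->load_sum += delta_sum;
	if (parent->load_sum < (uint64_t)parent->load_avg * PELT_MIN_DIVIDER)
		parent->load_sum = (uint64_t)parent->load_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
	/* As in the kernel, the entity's load_avg is weight-scaled while
	 * its load_sum is not; the parent holds just this child's share. */
	struct pelt se = { 4096, 46718 };
	struct pelt parent = { 4096, 4096ull * 46718 };

	propagate_load(&parent, &se, 4096, 46718, 20000);
	printf("parent: avg=%lu sum=%llu\n", parent.load_avg,
	       (unsigned long long)parent.load_sum);
	return 0;
}

Keeping delta_sum signed (s64 in the kernel) matters: when the child group's share shrinks, the parent's load_sum must legitimately come down by the difference rather than being recomputed from the rounded avg.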
@@ -3687,7 +3693,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 		r = removed_load;
 		sub_positive(&sa->load_avg, r);
-		sa->load_sum = sa->load_avg * divider;
+		sub_positive(&sa->load_sum, r * divider);
+		/* See sa->util_sum below */
+		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
 
 		r = removed_util;
 		sub_positive(&sa->util_avg, r);
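
The removed-load path gets the same treatment: the aggregated removed_load r comes off load_avg, r * divider approximates the matching share of load_sum, and the clamp soaks up the rounding slack between the two. A worked example with made-up numbers in which the clamp actually fires:

#include <stdint.h>
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718u	/* illustrative, as above */

int main(void)
{
	/* Made-up numbers: load_avg 8 with load_sum sitting exactly at its
	 * low bound, while the current divider is a bit above the minimum. */
	unsigned long load_avg = 8;
	uint64_t load_sum = 8ull * PELT_MIN_DIVIDER;
	unsigned long removed_load = 7;	/* r */
	uint32_t divider = 46900;	/* PELT_MIN_DIVIDER + period_contrib */
	uint64_t low;

	load_avg -= removed_load;			/* sub_positive(&sa->load_avg, r) */
	load_sum -= (uint64_t)removed_load * divider;	/* sub_positive(&sa->load_sum, r * divider) */

	/* r * divider overshot the share the sum really held, so the clamp
	 * lifts load_sum back to the low bound of the remaining load_avg. */
	low = (uint64_t)load_avg * PELT_MIN_DIVIDER;
	if (load_sum < low)
		load_sum = low;

	printf("load_avg=%lu load_sum=%llu (low bound %llu)\n", load_avg,
	       (unsigned long long)load_sum, (unsigned long long)low);
	return 0;
}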
