sched: remove precise CPU load
CPU load calculations are statistical anyway, and there is little benefit
in recomputing the load precisely on every scheduling event. So remove this
code; that gets rid of a divide from the scheduler wakeup and context-switch
fastpath.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar committed Oct 15, 2007
1 parent 8ebc91d commit a25707f
Showing 3 changed files with 9 additions and 41 deletions.
42 changes: 7 additions & 35 deletions kernel/sched.c
@@ -1972,42 +1972,11 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-	u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load = total_load;
-	struct load_stat *ls = &this_rq->ls;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
-	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
-		goto do_avg;
-
-	/* Update delta_fair/delta_exec fields first */
-	update_curr_load(this_rq);
-
-	fair_delta64 = ls->delta_fair + 1;
-	ls->delta_fair = 0;
-
-	exec_delta64 = ls->delta_exec + 1;
-	ls->delta_exec = 0;
-
-	sample_interval64 = this_rq->clock - ls->load_update_last;
-	ls->load_update_last = this_rq->clock;
-
-	if ((s64)sample_interval64 < (s64)TICK_NSEC)
-		sample_interval64 = TICK_NSEC;
-
-	if (exec_delta64 > sample_interval64)
-		exec_delta64 = sample_interval64;
-
-	idle_delta64 = sample_interval64 - exec_delta64;
-
-	tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
-	tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
-
-	this_load = (unsigned long)tmp64;
-
-do_avg:
 
 	/* Update our load: */
 	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
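For context: the SCHED_FEAT_PRECISE_CPU_LOAD path removed above boils down to two 64-bit divides per load update. A rough user-space restatement of that computation (the function name is invented here, and plain '/' stands in for the kernel's div64_64()):

#include <stdint.h>

/*
 * Rough user-space restatement of what the removed precise-load path
 * computed; '/' stands in for div64_64() and the name is invented.
 */
static uint64_t precise_cpu_load(uint64_t exec_delta, uint64_t fair_delta,
				 uint64_t sample_interval, uint64_t load_scale)
{
	uint64_t tmp;

	/* scale the executed time by the fair-load ratio... */
	tmp = (load_scale * exec_delta) / fair_delta;
	/* ...then by the fraction of the sample interval spent executing */
	return (tmp * exec_delta) / sample_interval;
}

These per-event divides are what the changelog wants off the wakeup and context-switch fastpath.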
@@ -2017,7 +1986,13 @@ static void update_cpu_load(struct rq *this_rq)
 
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
-
+		/*
+		 * Round up the averaging division if load is increasing. This
+		 * prevents us from getting stuck on 9 if the load is 10, for
+		 * example.
+		 */
+		if (new_load > old_load)
+			new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
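To see what the new comment describes, here is a small stand-alone sketch (not part of the patch; decay() and the tick loop are invented for illustration) that runs the cpu_load[] averaging with and without the scale-1 round-up, feeding a constant load of 10 into the i = 1 slot:

#include <stdio.h>

/* One averaging step for slot i (scale == 2^i), optionally rounding up. */
static unsigned long decay(unsigned long old, unsigned long new,
			   int i, int round_up)
{
	unsigned long scale = 1UL << i;

	if (round_up && new > old)
		new += scale - 1;
	return (old * (scale - 1) + new) >> i;
}

int main(void)
{
	unsigned long plain = 0, rounded = 0;
	int tick;

	for (tick = 0; tick < 10; tick++) {
		plain = decay(plain, 10, 1, 0);
		rounded = decay(rounded, 10, 1, 1);
	}
	/* prints "plain=9 rounded=10": the plain average sticks at 9 */
	printf("plain=%lu rounded=%lu\n", plain, rounded);
	return 0;
}

Without the round-up, (9*1 + 10) >> 1 truncates back to 9 on every tick; with it, (9*1 + 11) >> 1 reaches 10.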
@@ -6484,7 +6459,6 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 
 void __init sched_init(void)
 {
-	u64 now = sched_clock();
 	int highest_cpu = 0;
 	int i, j;
 
@@ -6509,8 +6483,6 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 #endif
-		rq->ls.load_update_last = now;
-		rq->ls.load_update_start = now;
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
2 changes: 0 additions & 2 deletions kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_running);
 	SEQ_printf(m, " .%-30s: %lu\n", "load",
 		   rq->ls.load.weight);
-	P(ls.delta_fair);
-	P(ls.delta_exec);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
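Aside: P() here is sched_debug.c's local stringify-and-print helper. A generic, self-contained sketch of that pattern (the struct, values and format details are invented for illustration):

#include <stdio.h>

struct rq_sample {
	unsigned long nr_switches;
	unsigned long nr_load_updates;
};

/* Print "  .name : value" by stringifying the field name with '#'. */
#define P(x)	printf("  .%-30s: %lu\n", #x, (unsigned long)(rq.x))

int main(void)
{
	struct rq_sample rq = { .nr_switches = 42, .nr_load_updates = 7 };

	P(nr_switches);
	P(nr_load_updates);
	return 0;
}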
6 changes: 2 additions & 4 deletions kernel/sched_fair.c
@@ -94,16 +94,14 @@ enum {
 	SCHED_FEAT_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_SLEEPER_AVG		= 2,
 	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
-	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_SKIP_INITIAL		= 32,
+	SCHED_FEAT_START_DEBIT		= 8,
+	SCHED_FEAT_SKIP_INITIAL		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_SKIP_INITIAL	*0;
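The SCHED_FEAT_* values are power-of-two bits: the *1 / *0 multipliers keep each feature's default on/off choice readable, and a feature test is a bitwise AND, as in the sysctl_sched_features check deleted from update_cpu_load() above. A self-contained sketch mirroring the post-patch values (the main() harness is invented):

#include <stdio.h>

enum {
	SCHED_FEAT_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_SLEEPER_AVG		= 2,
	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
	SCHED_FEAT_START_DEBIT		= 8,
	SCHED_FEAT_SKIP_INITIAL		= 16,
};

/* '*' binds tighter than '|', so each term contributes either the bit or 0. */
static const unsigned int features =
		SCHED_FEAT_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_SLEEPER_AVG		*0 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_SKIP_INITIAL		*0;

int main(void)
{
	if (features & SCHED_FEAT_START_DEBIT)
		printf("START_DEBIT on (mask 0x%x)\n", features);
	return 0;
}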

