sched: Remove avg_wakeup
Testing the load which led to this heuristic (nfs4 kbuild) shows that it has
outlived its usefulness.  With intervening load balancing changes, I cannot
see any difference with/without, so recover those fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Mike Galbraith authored and Ingo Molnar committed Mar 11, 2010
1 parent 39c0cbe commit b42e0c4
Showing 5 changed files with 4 additions and 63 deletions.
3 changes: 0 additions & 3 deletions include/linux/sched.h
@@ -1185,9 +1185,6 @@ struct sched_entity {
 
         u64 nr_migrations;
 
-        u64 start_runtime;
-        u64 avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
         struct sched_statistics statistics;
 #endif
26 changes: 4 additions & 22 deletions kernel/sched.c
@@ -1880,27 +1880,18 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-        if (wakeup)
-                p->se.start_runtime = p->se.sum_exec_runtime;
-
         sched_info_queued(p);
         p->sched_class->enqueue_task(rq, p, wakeup, head);
         p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-        if (sleep) {
-                if (p->se.last_wakeup) {
-                        update_avg(&p->se.avg_overlap,
-                                   p->se.sum_exec_runtime - p->se.last_wakeup);
-                        p->se.last_wakeup = 0;
-                } else {
-                        update_avg(&p->se.avg_wakeup,
-                                   sysctl_sched_wakeup_granularity);
-                }
+        if (sleep && p->se.last_wakeup) {
+                update_avg(&p->se.avg_overlap,
+                           p->se.sum_exec_runtime - p->se.last_wakeup);
+                p->se.last_wakeup = 0;
         }
 
         sched_info_dequeued(p);
         p->sched_class->dequeue_task(rq, p, sleep);
         p->se.on_rq = 0;
@@ -2466,13 +2457,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
          */
         if (!in_interrupt()) {
                 struct sched_entity *se = &current->se;
-                u64 sample = se->sum_exec_runtime;
-
-                if (se->last_wakeup)
-                        sample -= se->last_wakeup;
-                else
-                        sample -= se->start_runtime;
-                update_avg(&se->avg_wakeup, sample);
 
                 se->last_wakeup = se->sum_exec_runtime;
         }
@@ -2540,8 +2524,6 @@ static void __sched_fork(struct task_struct *p)
         p->se.nr_migrations = 0;
         p->se.last_wakeup = 0;
         p->se.avg_overlap = 0;
-        p->se.start_runtime = 0;
-        p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
         memset(&p->se.statistics, 0, sizeof(p->se.statistics));
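
For readers without the tree handy: the update_avg() helper named in the first hunk header above is, at this point in kernel/sched.c, a simple exponential moving average with weight 1/8. A rough sketch, paraphrased for illustration and not part of this commit:

static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;       /* signed, so the average can also move down */

        *avg += diff >> 3;              /* step 1/8 of the way toward the new sample */
}

Both avg_overlap (kept) and avg_wakeup (removed here) were maintained with this same helper.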
1 change: 0 additions & 1 deletion kernel/sched_debug.c
@@ -408,7 +408,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         PN(se.vruntime);
         PN(se.sum_exec_runtime);
         PN(se.avg_overlap);
-        PN(se.avg_wakeup);
 
         nr_switches = p->nvcsw + p->nivcsw;
 
31 changes: 0 additions & 31 deletions kernel/sched_fair.c
@@ -1592,42 +1592,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-        u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-        u64 gran = 0;
-
-        if (this_run < expected_wakeup)
-                gran = expected_wakeup - this_run;
-
-        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
 static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
         unsigned long gran = sysctl_sched_wakeup_granularity;
 
-        if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-                gran = adaptive_gran(curr, se);
-
         /*
          * Since its curr running now, convert the gran from real-time
          * to virtual-time in his units.
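
To make the deleted heuristic concrete, here is a small standalone restatement of the adaptive_gran() arithmetic removed above, with example numbers plugged in. It is illustrative only: plain C with simplified types rather than kernel code, and the 5 ms wakeup granularity is just an assumed value for the example.

#include <stdint.h>
#include <stdio.h>

/* Standalone restatement of the deleted adaptive_gran() arithmetic. */
static uint64_t adaptive_gran_demo(uint64_t this_run, uint64_t avg_wakeup,
                                   unsigned int nr_running, uint64_t wakeup_gran_ns)
{
        uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
        uint64_t gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;

        /* clip to [0, wakeup_gran_ns], as the min_t(s64, ...) above did */
        return gran < wakeup_gran_ns ? gran : wakeup_gran_ns;
}

int main(void)
{
        /*
         * curr has run 1 ms in its current stint, the wakee's avg_wakeup is
         * 1 ms and there are two runnable tasks: expected_wakeup = 4 ms, so
         * the preemption granularity shrinks to 3 ms (under the 5 ms clip).
         */
        printf("%llu ns\n", (unsigned long long)
               adaptive_gran_demo(1000000ULL, 1000000ULL, 2, 5000000ULL));
        return 0;
}

The smaller avg_wakeup is, the smaller the granularity and the sooner the wakee preempts; that is the behaviour the commit message says no longer makes a measurable difference.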
6 changes: 0 additions & 6 deletions kernel/sched_features.h
@@ -30,12 +30,6 @@ SCHED_FEAT(START_DEBIT, 1)
  */
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 
-/*
- * Compute wakeup_gran based on task behaviour, clipped to
- * [0, sched_wakeup_gran_ns]
- */
-SCHED_FEAT(ADAPTIVE_GRAN, 1)
-
 /*
  * When converting the wakeup granularity to virtual time, do it such
  * that heavier tasks preempting a lighter task have an edge.
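
For context on what dropping the SCHED_FEAT() entry implies: kernel/sched.c includes sched_features.h more than once with different SCHED_FEAT() definitions, first to build an enum of bit positions and then to build the default feature mask that sched_feat() tests (and that CONFIG_SCHED_DEBUG exposes for runtime toggling via debugfs). The sketch below is a simplified reconstruction of that pattern, not a quote from the tree, so treat the exact macro bodies as an assumption.

/* First pass: turn each SCHED_FEAT(name, enabled) line into an enum constant. */
#define SCHED_FEAT(name, enabled)       __SCHED_FEAT_##name,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT

/* Second pass: OR the enabled features together into the default mask. */
#define SCHED_FEAT(name, enabled)       (1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
        0;
#undef SCHED_FEAT

/* sched_feat(ADAPTIVE_GRAN) in sched_fair.c tested the corresponding bit. */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

Removing the SCHED_FEAT(ADAPTIVE_GRAN, 1) line therefore drops both the default-on bit and its debugfs toggle along with the code that tested it.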
