Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history

```yaml
---
r: 191385
b: refs/heads/master
c: e12f31d
h: refs/heads/master
i:
  191383: 51febc5
v: v3
---
```
  • Loading branch information
Mike Galbraith authored and Ingo Molnar committed Mar 11, 2010
1 parent 72960a8 commit 384e1ca
Show file tree
Hide file tree
Showing 6 changed files with 1 addition and 72 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: b42e0c41a422a212ddea0666d5a3a0e3c35206db
refs/heads/master: e12f31d3e5d36328c7fbd0fce40a95e70b59152c
3 changes: 0 additions & 3 deletions trunk/include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1180,9 +1180,6 @@ struct sched_entity {
u64 vruntime;
u64 prev_sum_exec_runtime;

u64 last_wakeup;
u64 avg_overlap;

u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
Expand Down
33 changes: 0 additions & 33 deletions trunk/kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -1887,11 +1887,6 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)

static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
if (sleep && p->se.last_wakeup) {
update_avg(&p->se.avg_overlap,
p->se.sum_exec_runtime - p->se.last_wakeup);
p->se.last_wakeup = 0;
}
sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, sleep);
p->se.on_rq = 0;
Expand Down Expand Up @@ -2452,15 +2447,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
activate_task(rq, p, 1);
success = 1;

/*
* Only attribute actual wakeups done by this task.
*/
if (!in_interrupt()) {
struct sched_entity *se = &current->se;

se->last_wakeup = se->sum_exec_runtime;
}

out_running:
trace_sched_wakeup(rq, p, success);
check_preempt_curr(rq, p, wake_flags);
Expand Down Expand Up @@ -2522,8 +2508,6 @@ static void __sched_fork(struct task_struct *p)
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;

#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Expand Down Expand Up @@ -3594,23 +3578,6 @@ static inline void schedule_debug(struct task_struct *prev)

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->state == TASK_RUNNING) {
u64 runtime = prev->se.sum_exec_runtime;

runtime -= prev->se.prev_sum_exec_runtime;
runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);

/*
* In order to avoid avg_overlap growing stale when we are
* indeed overlapping and hence not getting put to sleep, grow
* the avg_overlap on preemption.
*
* We use the average preemption runtime because that
* correlates to the amount of cache footprint a task can
* build up.
*/
update_avg(&prev->se.avg_overlap, runtime);
}
prev->sched_class->put_prev_task(rq, prev);
}

Expand Down
1 change: 0 additions & 1 deletion trunk/kernel/sched_debug.c
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.exec_start);
PN(se.vruntime);
PN(se.sum_exec_runtime);
PN(se.avg_overlap);

nr_switches = p->nvcsw + p->nivcsw;

Expand Down
18 changes: 0 additions & 18 deletions trunk/kernel/sched_fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -1241,7 +1241,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
struct task_struct *curr = current;
unsigned long this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
Expand All @@ -1256,18 +1255,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
load = source_load(prev_cpu, idx);
this_load = target_load(this_cpu, idx);

if (sync) {
if (sched_feat(SYNC_LESS) &&
(curr->se.avg_overlap > sysctl_sched_migration_cost ||
p->se.avg_overlap > sysctl_sched_migration_cost))
sync = 0;
} else {
if (sched_feat(SYNC_MORE) &&
(curr->se.avg_overlap < sysctl_sched_migration_cost &&
p->se.avg_overlap < sysctl_sched_migration_cost))
sync = 1;
}

/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
Expand Down Expand Up @@ -1711,11 +1698,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (sched_feat(WAKEUP_SYNC) && sync)
goto preempt;

if (sched_feat(WAKEUP_OVERLAP) &&
se->avg_overlap < sysctl_sched_migration_cost &&
pse->avg_overlap < sysctl_sched_migration_cost)
goto preempt;

if (!sched_feat(WAKEUP_PREEMPT))
return;

Expand Down
16 changes: 0 additions & 16 deletions trunk/kernel/sched_features.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,12 +41,6 @@ SCHED_FEAT(ASYM_GRAN, 1)
*/
SCHED_FEAT(WAKEUP_SYNC, 0)

/*
* Wakeup preempt based on task behaviour. Tasks that do not overlap
* don't get preempted.
*/
SCHED_FEAT(WAKEUP_OVERLAP, 0)

/*
* Use the SYNC wakeup hint, pipes and the likes use this to indicate
* the remote end is likely to consume the data we just wrote, and
Expand All @@ -63,16 +57,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
*/
SCHED_FEAT(AFFINE_WAKEUPS, 1)

/*
* Weaken SYNC hint based on overlap
*/
SCHED_FEAT(SYNC_LESS, 1)

/*
* Add SYNC hint based on overlap
*/
SCHED_FEAT(SYNC_MORE, 0)

/*
* Prefer to schedule the task we woke last (assuming it failed
* wakeup-preemption), since its likely going to consume data we
Expand Down

0 comments on commit 384e1ca

Please sign in to comment.