From 4cc67df68d73d9387834c3e28e8e37a4e705afe9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 5 Sep 2007 14:32:49 +0200
Subject: [PATCH]

--- yaml ---
r: 64723
b: refs/heads/master
c: a206c07213cf6372289f189c3774c4c3255a7ae1
h: refs/heads/master
i:
  64721: 6b65f6affb15b7949fad1c1765c696f65c87b61b
  64719: 59a842b9cebf22899c54546b2a75ee6100f7b979
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/sched.c      |  1 -
 trunk/kernel/sched_fair.c | 10 +++++-----
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index db85567bf408..6321c42a33dd 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a0dc72601d48b171b4870dfdd0824901a2b2b1a9
+refs/heads/master: a206c07213cf6372289f189c3774c4c3255a7ae1
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index c8759ec6d8a9..97986f1f0be8 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 810b52d994e0..bac2aff8273c 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
+
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 	delta_fair = se->wait_runtime - prev_runtime;
 
 	/*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-		cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
 	}
 	__dequeue_entity(cfs_rq, se);
@@ -1121,10 +1123,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
 		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-	}
 
 	__enqueue_entity(cfs_rq, se);
 }
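
Note for readers: the patch above centralizes the cfs_rq-wide wait_runtime
bookkeeping so it happens at exactly two points, enqueue and dequeue, instead
of being open-coded at each call site (set_load_weight(), __enqueue_sleeper(),
dequeue_entity(), task_new_fair()). The user-space sketch below models that
invariant; it is illustrative only, not kernel code, and the struct layouts
plus the 10 ms granularity value are assumptions made for this example.

/*
 * Illustrative sketch: cfs_rq->wait_runtime should equal the sum of
 * se->wait_runtime over all queued entities, maintained only at the
 * on_rq transitions.
 */
#include <stdio.h>

struct sched_entity { long wait_runtime; int on_rq; };
struct cfs_rq       { long wait_runtime; unsigned int nr_running; };

/* single accounting point on enqueue, as in the patched __enqueue_entity() */
static void enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->nr_running++;
	se->on_rq = 1;
	cfs_rq->wait_runtime += se->wait_runtime;  /* schedstat_add(...) */
}

/* single accounting point on dequeue, as in the patched __dequeue_entity() */
static void dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->nr_running--;
	se->on_rq = 0;
	cfs_rq->wait_runtime -= se->wait_runtime;  /* schedstat_add(..., -...) */
}

int main(void)
{
	struct cfs_rq rq = { 0, 0 };
	struct sched_entity se = { 0, 0 };
	long granularity = 10000000;  /* assumed 10 ms in ns, for illustration */

	/* START_DEBIT: a new task starts about -granularity/2 in the hole */
	se.wait_runtime = -(granularity / 2);

	enqueue(&rq, &se);   /* aggregate picks up the debit automatically */
	printf("after enqueue: rq.wait_runtime = %ld\n", rq.wait_runtime);

	dequeue(&rq, &se);   /* and drops it again, with no extra call site */
	printf("after dequeue: rq.wait_runtime = %ld\n", rq.wait_runtime);
	return 0;
}

Keeping the aggregate update next to the on_rq transitions is what lets the
task_new_fair() and dequeue_entity() hunks simply delete their hand-rolled
accounting; changes to se->wait_runtime while an entity is queued already go
through the add_wait_runtime() helpers.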