Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 269143
b: refs/heads/master
c: 953bfcd
h: refs/heads/master
i:
  269141: fa7c5d6
  269139: eaf8770
  269135: fedc087
v: v3
  • Loading branch information
Paul Turner authored and Ingo Molnar committed Aug 14, 2011
1 parent 9c9951c commit 9920b2a
Show file tree
Hide file tree
Showing 5 changed files with 15 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5710f15b52664ae0bfa60a66d75464769d297b2b
refs/heads/master: 953bfcd10e6f3697233e8e5128c611d275da39c1
6 changes: 2 additions & 4 deletions trunk/kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ struct task_group root_task_group;
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
unsigned long nr_running;
unsigned long nr_running, h_nr_running;

u64 exec_clock;
u64 min_vruntime;
Expand Down Expand Up @@ -1802,7 +1802,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int flags)
rq->nr_uninterruptible--;

enqueue_task(rq, p, flags);
inc_nr_running(rq);
}

/*
Expand All @@ -1814,7 +1813,6 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
rq->nr_uninterruptible++;

dequeue_task(rq, p, flags);
dec_nr_running(rq);
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
Expand Down Expand Up @@ -4258,7 +4256,7 @@ pick_next_task(struct rq *rq)
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
if (likely(rq->nr_running == rq->cfs.nr_running)) {
if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
p = fair_sched_class.pick_next_task(rq);
if (likely(p))
return p;
Expand Down
6 changes: 6 additions & 0 deletions trunk/kernel/sched_fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -1310,16 +1310,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, flags);
cfs_rq->h_nr_running++;
flags = ENQUEUE_WAKEUP;
}

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running++;

update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
}

inc_nr_running(rq);
hrtick_update(rq);
}

Expand All @@ -1339,6 +1342,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
cfs_rq->h_nr_running--;

/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
Expand All @@ -1358,11 +1362,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;

update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
}

dec_nr_running(rq);
hrtick_update(rq);
}

Expand Down
5 changes: 4 additions & 1 deletion trunk/kernel/sched_rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -936,6 +936,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)

if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);

inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
Expand All @@ -946,6 +948,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dequeue_rt_entity(rt_se);

dequeue_pushable_task(rq, p);

dec_nr_running(rq);
}

/*
Expand Down Expand Up @@ -1841,4 +1845,3 @@ static void print_rt_stats(struct seq_file *m, int cpu)
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */

2 changes: 2 additions & 0 deletions trunk/kernel/sched_stoptask.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,13 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
/*
 * Enqueue hook for the stop-task scheduling class.
 *
 * The visible body does nothing with @p or @flags; the only work is
 * bumping rq->nr_running. Per this commit, nr_running accounting moved
 * out of activate_task() and into each class's enqueue path, so even a
 * class with no queue bookkeeping of its own must do this increment.
 * NOTE(review): assumes the stop class keeps no runqueue list to link
 * @p into — confirm against the rest of sched_stoptask.c.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
inc_nr_running(rq);
}

/*
 * Dequeue hook for the stop-task scheduling class.
 *
 * Mirror of enqueue_task_stop(): @p and @flags are unused here; the
 * only bookkeeping is decrementing rq->nr_running, which this commit
 * makes a per-class responsibility (removed from deactivate_task()).
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
dec_nr_running(rq);
}

static void yield_task_stop(struct rq *rq)
Expand Down

0 comments on commit 9920b2a

Please sign in to comment.