Commit e7abb87

---
r: 223426
b: refs/heads/master
c: f26f9af
h: refs/heads/master
v: v3
Mike Galbraith authored and Ingo Molnar committed Dec 8, 2010
1 parent ba6cc33 commit e7abb87
Showing 3 changed files with 16 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0f004f5a696a9434b7214d0d3cbd0525ee77d428
+refs/heads/master: f26f9aff6aaf67e9a430d16c266f91b13a5bff64
1 change: 1 addition & 0 deletions trunk/kernel/fork.c
@@ -273,6 +273,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)

 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
+	clear_tsk_need_resched(tsk);
 	stackend = end_of_stack(tsk);
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */

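In dup_task_struct() the child's thread_info is copied wholesale from the parent via setup_thread_stack(), so, as I read this hunk, the added clear_tsk_need_resched() call ensures a freshly duplicated task never starts out with a stale TIF_NEED_RESCHED flag inherited from its parent.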
26 changes: 14 additions & 12 deletions trunk/kernel/sched.c
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);

 inline void update_rq_clock(struct rq *rq)
 {
-	if (!rq->skip_clock_update) {
-		int cpu = cpu_of(rq);
-		u64 irq_time;
+	int cpu = cpu_of(rq);
+	u64 irq_time;
 
-		rq->clock = sched_clock_cpu(cpu);
-		irq_time = irq_time_cpu(cpu);
-		if (rq->clock - irq_time > rq->clock_task)
-			rq->clock_task = rq->clock - irq_time;
+	if (rq->skip_clock_update)
+		return;
 
-		sched_irq_time_avg_update(rq, irq_time);
-	}
+	rq->clock = sched_clock_cpu(cpu);
+	irq_time = irq_time_cpu(cpu);
+	if (rq->clock - irq_time > rq->clock_task)
+		rq->clock_task = rq->clock - irq_time;
+
+	sched_irq_time_avg_update(rq, irq_time);
 }
 
 /*
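For readability, this is how update_rq_clock() reads once the hunk above is applied — a straight reassembly of the added lines, not an independent implementation:

/* Post-patch update_rq_clock(), reassembled from the hunk above. */
inline void update_rq_clock(struct rq *rq)
{
	int cpu = cpu_of(rq);
	u64 irq_time;

	/* A pending back-to-back update lets us skip the clock read. */
	if (rq->skip_clock_update)
		return;

	rq->clock = sched_clock_cpu(cpu);
	irq_time = irq_time_cpu(cpu);
	if (rq->clock - irq_time > rq->clock_task)
		rq->clock_task = rq->clock - irq_time;

	sched_irq_time_avg_update(rq, irq_time);
}

The restructure converts the nested if into an early return, so the function body reads straight through.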
@@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule.  In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (test_tsk_need_resched(rq->curr))
+	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }

@@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->se.on_rq)
 		update_rq_clock(rq);
-	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }

@@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void)
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
-	clear_tsk_need_resched(prev);
 
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void)

 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
+	clear_tsk_need_resched(prev);
+	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
@@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void)
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
+		WARN_ON_ONCE(test_tsk_need_resched(next));
 
 		context_switch(rq, prev, next); /* unlocks the rq */
 		/*
