Commit e3d5815

---
r: 158334
b: refs/heads/master
c: da19ab5
h: refs/heads/master
v: v3
Steven Rostedt authored and Ingo Molnar committed Aug 2, 2009
1 parent ec78e82 commit e3d5815
Showing 2 changed files with 28 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e709715915d69b6a929d77e7652c9c3fea61c317
+refs/heads/master: da19ab510343c6496fe8b8f890091296032025c9
38 changes: 27 additions & 11 deletions trunk/kernel/sched.c
@@ -2839,14 +2839,14 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static int finish_task_switch(struct rq *rq, struct task_struct *prev)
         __releases(rq->lock)
 {
         struct mm_struct *mm = rq->prev_mm;
         long prev_state;
-#ifdef CONFIG_SMP
         int post_schedule = 0;
 
+#ifdef CONFIG_SMP
         if (current->sched_class->needs_post_schedule)
                 post_schedule = current->sched_class->needs_post_schedule(rq);
 #endif
@@ -2868,10 +2868,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         finish_arch_switch(prev);
         perf_counter_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
-#ifdef CONFIG_SMP
-        if (post_schedule)
-                current->sched_class->post_schedule(rq);
-#endif
 
         fire_sched_in_preempt_notifiers(current);
         if (mm)
@@ -2884,6 +2880,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
                 kprobe_flush_task(prev);
                 put_task_struct(prev);
         }
+
+        return post_schedule;
 }
 
 /**
@@ -2894,8 +2892,15 @@ asmlinkage void schedule_tail(struct task_struct *prev)
         __releases(rq->lock)
 {
         struct rq *rq = this_rq();
+        int post_schedule;
+
+        post_schedule = finish_task_switch(rq, prev);
+
+#ifdef CONFIG_SMP
+        if (post_schedule)
+                current->sched_class->post_schedule(rq);
+#endif
 
-        finish_task_switch(rq, prev);
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
         /* In this case, finish_task_switch does not reenable preemption */
         preempt_enable();
@@ -2908,7 +2913,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline void
+static inline int
 context_switch(struct rq *rq, struct task_struct *prev,
                struct task_struct *next)
 {
@@ -2955,7 +2960,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
          * CPUs since it called schedule(), thus the 'rq' on its stack
          * frame will be invalid.
          */
-        finish_task_switch(this_rq(), prev);
+        return finish_task_switch(this_rq(), prev);
 }
 
 /*
@@ -5366,6 +5371,7 @@ asmlinkage void __sched schedule(void)
 {
         struct task_struct *prev, *next;
         unsigned long *switch_count;
+        int post_schedule = 0;
         struct rq *rq;
         int cpu;

@@ -5416,15 +5422,25 @@ asmlinkage void __sched schedule(void)
                 rq->curr = next;
                 ++*switch_count;
 
-                context_switch(rq, prev, next); /* unlocks the rq */
+                post_schedule = context_switch(rq, prev, next); /* unlocks the rq */
                 /*
                  * the context switch might have flipped the stack from under
                  * us, hence refresh the local variables.
                  */
                 cpu = smp_processor_id();
                 rq = cpu_rq(cpu);
-        } else
+        } else {
+#ifdef CONFIG_SMP
+                if (current->sched_class->needs_post_schedule)
+                        post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
                 spin_unlock_irq(&rq->lock);
+        }
 
+#ifdef CONFIG_SMP
+        if (post_schedule)
+                current->sched_class->post_schedule(rq);
+#endif
+
         if (unlikely(reacquire_kernel_lock(current) < 0))
                 goto need_resched_nonpreemptible;
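For readers who want the net effect of the patch outside the diff: below is a minimal, self-contained C sketch of the control flow this commit sets up. It is user-space demo code, not kernel code; the hook names needs_post_schedule and post_schedule mirror the kernel's sched_class callbacks, while demo_class, the stub bodies, and main() are invented stand-ins for illustration.

#include <stdio.h>

struct rq { int cpu; };

struct sched_class {
        int  (*needs_post_schedule)(struct rq *rq);
        void (*post_schedule)(struct rq *rq);
};

/* Invented stand-ins for a scheduling class's hooks. */
static int rt_needs_post_schedule(struct rq *rq)
{
        (void)rq;
        return 1;       /* pretend a task is queued to be pushed */
}

static void rt_post_schedule(struct rq *rq)
{
        printf("post_schedule: pushing tasks from cpu %d\n", rq->cpu);
}

static const struct sched_class demo_class = {
        .needs_post_schedule = rt_needs_post_schedule,
        .post_schedule       = rt_post_schedule,
};

/* As in the patched finish_task_switch(): sample the hook while the
 * runqueue state is still consistent, but only report the result
 * instead of invoking the callback here. */
static int finish_task_switch(struct rq *rq, const struct sched_class *class)
{
        int post_schedule = 0;

        if (class->needs_post_schedule)
                post_schedule = class->needs_post_schedule(rq);

        /* ... release rq->lock, drop the old mm, etc. ... */
        return post_schedule;
}

/* As in the patched schedule_tail() and schedule(): the caller that
 * completes the task switch acts on the returned flag afterwards. */
static void schedule_tail(struct rq *rq, const struct sched_class *class)
{
        int post_schedule = finish_task_switch(rq, class);

        if (post_schedule)
                class->post_schedule(rq);
}

int main(void)
{
        struct rq rq = { .cpu = 0 };

        schedule_tail(&rq, &demo_class);
        return 0;
}

The design point the diff makes in three places at once: the decision ("does this class need post-schedule work?") is captured before the runqueue lock is dropped, and the work itself runs after, in schedule_tail(), in schedule() after context_switch() returns, and on schedule()'s no-switch path as well. The sketch compresses that into a single caller.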
