diff --git a/[refs] b/[refs]
index a96c8d3766c1..581d2e7efadd 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4653f803e6e0d970ffeac0efd2c01743eb6c5228
+refs/heads/master: 57fdc26d4a734a3e00c6b2fc0e1e40ff8da4dc31
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 0c59da7e3120..e3f3c10f7033 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1249,6 +1249,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (unlikely(se == pse))
 		return;
 
+	cfs_rq_of(pse)->next = pse;
+
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
 	 * wake up path.
@@ -1256,8 +1258,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (test_tsk_need_resched(curr))
 		return;
 
-	cfs_rq_of(pse)->next = pse;
-
 	/*
 	 * Batch tasks do not preempt (their preemption is driven by
 	 * the tick):