diff --git a/[refs] b/[refs]
index baac30895d49..837290825c5f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2e8f7a3128bb8fac8351a994f1fc325717899308
+refs/heads/master: 3a5f5e488ceee9e08df3dff3f01b12fafc9e7e68
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index d714611f1691..e9a0b61f12ab 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -1788,7 +1788,15 @@ context_switch(struct rq *rq, struct task_struct *prev,
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
 	}
+	/*
+	 * Since the runqueue lock will be released by the next
+	 * task (which is an invalid locking op but in the case
+	 * of the scheduler it's an obvious special-case), so we
+	 * do an early lockdep release here:
+	 */
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+#endif
 
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
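
For context: the early spin_release() above only keeps lockdep's bookkeeping balanced because the task that finishes the context switch re-acquires the dependency map before it really drops rq->lock. The following is a hedged sketch of that counterpart, modelled on the finish_lock_switch() helper found in kernel/sched.c of this era; the exact body in the tree this patch applies to may differ.

/*
 * Sketch (not verbatim from this tree): the side of the task that
 * finishes the switch and actually unlocks the runqueue lock.
 */
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * The runqueue lock gets 'carried over' from prev into current,
	 * so tell lockdep that current now holds it; this pairs with the
	 * early spin_release() done in context_switch() above, and
	 * balances the release that spin_unlock_irq() will do next:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	spin_unlock_irq(&rq->lock);
}

The design point is that the lock is taken by one task and released by another, which lockdep would normally flag; transferring the dep_map ownership across the switch (release early in prev, acquire in next) makes the scheduler's special case look consistent to the lock validator. Architectures that define __ARCH_WANT_UNLOCKED_CTXSW run the context switch with the runqueue lock already dropped, which is why the new #ifndef guard skips the early release there.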