diff --git a/[refs] b/[refs]
index 5a192d05925b..c734ae60695f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5c45bf279d378d436ce45825c0f136696c7b6109
+refs/heads/master: 9fea80e4d984d411aa188baa31225c273ebe0fe6
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 54fa282657cc..19c0d5d16fef 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -359,7 +359,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 	__acquires(rq->lock)
{
 	struct runqueue *rq;
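
For reference: the only change in the sched.c hunk above is dropping the "inline"
keyword from task_rq_lock(); the __acquires(rq->lock) line is pre-existing context.
__acquires() is a sparse annotation (a no-op for gcc) telling the checker, run via
"make C=1", that the function returns with the named lock held. Below is a minimal
sketch of how such an annotation is normally paired with a __releases() counterpart
so sparse can check that locking stays balanced. The struct and function names are
hypothetical and not taken from this diff.

#include <linux/spinlock.h>

/* Hypothetical example, not from this commit. */
struct foo {
	spinlock_t lock;
	int value;
};

/* Returns with f->lock held; sparse records the acquire. */
static void foo_lock(struct foo *f)
	__acquires(f->lock)
{
	spin_lock(&f->lock);
}

/* Drops the lock taken by foo_lock(); sparse records the release. */
static void foo_unlock(struct foo *f)
	__releases(f->lock)
{
	spin_unlock(&f->lock);
}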