From d1a01775643dce8291a43e1b33ef2543f98a8e3e Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 27 Jun 2006 02:54:42 -0700
Subject: [PATCH]

--- yaml ---
r: 30802
b: refs/heads/master
c: 9fea80e4d984d411aa188baa31225c273ebe0fe6
h: refs/heads/master
v: v3
---
 [refs]               | 2 +-
 trunk/kernel/sched.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 5a192d05925b..c734ae60695f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5c45bf279d378d436ce45825c0f136696c7b6109
+refs/heads/master: 9fea80e4d984d411aa188baa31225c273ebe0fe6
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 54fa282657cc..19c0d5d16fef 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -359,7 +359,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
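
For context, the hunk only removes the `inline` qualifier from task_rq_lock(); the function body is unchanged by this patch. A sketch of what that body looked like in 2.6-era kernels follows, assuming the classic lock-and-revalidate pattern the surrounding comment alludes to (the loop body here is reconstructed for illustration, not part of the diff):

```c
static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct runqueue *rq;

repeat_lock_task:
	/* Disable local interrupts, saving prior state in *flags. */
	local_irq_save(*flags);
	/* Lookup is safe without disabling preemption: if the task
	 * migrates after this read, the recheck below catches it. */
	rq = task_rq(p);
	spin_lock(&rq->lock);
	if (unlikely(rq != task_rq(p))) {
		/* Task migrated between lookup and lock; retry. */
		spin_unlock_irqrestore(&rq->lock, *flags);
		goto repeat_lock_task;
	}
	return rq;
}
```

The likely motivation for un-inlining is code size: task_rq_lock() has many call sites in sched.c, so a single out-of-line copy trades one call instruction per site for a noticeably smaller text segment, while the retry loop's cost already dwarfs any call overhead.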