From b10e2efeb1b030dfcc5dfd908c11939a8051d7f7 Mon Sep 17 00:00:00 2001
From: Roel Kluin
Date: Tue, 13 May 2008 23:44:11 +0200
Subject: [PATCH]

--- yaml ---
r: 97425
b: refs/heads/master
c: 3f33a7ce9567ded582af1ab71f9802165fe12f09
h: refs/heads/master
i:
  97423: a6f1f5d9145d3bab599d052baeefefe343e07384
v: v3
---
 [refs]               | 2 +-
 trunk/kernel/sched.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 68066afdfb1c..e51bf02a8277 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f9305d4a0968201b2818dbed0dc8cb0d4ee7aeb3
+refs/heads/master: 3f33a7ce9567ded582af1ab71f9802165fe12f09
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 4aac8aa16037..97017356669a 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
@@ -4433,7 +4433,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
 		__schedule_bug(prev);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
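
For context on both hunks: unlikely(x) in the kernel expands to the GCC hint
__builtin_expect(!!(x), 0), so a single unlikely() around the combined
condition carries essentially the same branch-prediction information as
annotating each comparison separately, and reads more cleanly. Below is a
minimal, standalone sketch of the resulting pattern; the sketch_unlikely
macro, the SKETCH_* constants and rt_policy_sketch() are illustrative
stand-ins, not the kernel's own definitions.

#include <stdio.h>

/* Stand-in for the kernel's unlikely(): hint that x is usually false. */
#define sketch_unlikely(x) __builtin_expect(!!(x), 0)

/* Illustrative policy values (SCHED_FIFO and SCHED_RR are 1 and 2 in the kernel). */
enum { SKETCH_NORMAL = 0, SKETCH_FIFO = 1, SKETCH_RR = 2 };

/* Mirrors the patched rt_policy(): one hint around the whole test. */
static inline int rt_policy_sketch(int policy)
{
	if (sketch_unlikely(policy == SKETCH_FIFO || policy == SKETCH_RR))
		return 1;
	return 0;
}

int main(void)
{
	printf("FIFO: %d, RR: %d, NORMAL: %d\n",
	       rt_policy_sketch(SKETCH_FIFO),
	       rt_policy_sketch(SKETCH_RR),
	       rt_policy_sketch(SKETCH_NORMAL));
	return 0;
}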