From dbdca65167d633495493bed10de89cf6f1f9c01c Mon Sep 17 00:00:00 2001
From: Gregory Haskins
Date: Mon, 29 Dec 2008 09:39:50 -0500
Subject: [PATCH]

---
yaml
---
r: 135673
b: refs/heads/master
c: 74ab8e4f6412c0b2d730fe5de28dc21de8b92c01
h: refs/heads/master
i:
  135671: ac47e730d3b9c3c933001e60f6fdece4db2d7df9
v: v3
---
 [refs]                  |  2 +-
 trunk/kernel/sched_rt.c | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index b56daa430611..3bc27afbdae1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a8728944efe23417e38bf22063f06d9d8ee21d59
+refs/heads/master: 74ab8e4f6412c0b2d730fe5de28dc21de8b92c01
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index f8fb3edadcaa..d047f288c411 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -1218,6 +1218,18 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;
 
 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if it's going logically lower, we do not care.
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
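
For readers looking at the hunk outside of kernel context: the whole optimization is the single comparison src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr, where a numerically lower prio value means a higher priority, so the test skips any source runqueue whose second-best task could not preempt what we are already running. The sketch below is a user-space illustration of that comparison only; fake_rq, fake_rt_prio and can_skip_pull are made-up stand-ins for this note, not the kernel's struct rq / struct rt_rq or any real scheduler API.

/*
 * Minimal user-space sketch of the early-skip test added in the patch.
 * The structs here are simplified stand-ins, not the kernel's; they
 * only mirror the two fields the new test reads.  As in the kernel's
 * internal prio values, lower number == higher priority.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rt_prio {
	int curr;	/* prio of the highest-prio queued task */
	int next;	/* prio of the next-highest queued task */
};

struct fake_rq {
	struct fake_rt_prio highest_prio;
};

/*
 * Return true when the pull loop may skip src_rq entirely: the best
 * task src_rq could hand over (its "next" task) is not strictly
 * higher priority than what this_rq already has, so taking
 * src_rq->lock would be wasted work.
 */
static bool can_skip_pull(const struct fake_rq *src_rq,
			  const struct fake_rq *this_rq)
{
	return src_rq->highest_prio.next >= this_rq->highest_prio.curr;
}

int main(void)
{
	struct fake_rq this_rq  = { .highest_prio = { .curr = 50, .next = 60 } };
	struct fake_rq busy_src = { .highest_prio = { .curr = 10, .next = 40 } };
	struct fake_rq idle_src = { .highest_prio = { .curr = 10, .next = 70 } };

	/* 40 < 50: busy_src's next task beats ours, so we must examine it. */
	printf("busy_src skippable? %d\n", can_skip_pull(&busy_src, &this_rq));

	/* 70 >= 50: idle_src has nothing better to offer, skip its lock. */
	printf("idle_src skippable? %d\n", can_skip_pull(&idle_src, &this_rq));
	return 0;
}

As the added comment argues, the unlocked read is tolerable precisely because the test is only an optimization: if src_rq's next-highest priority is about to rise, src_rq will push that task out itself, and if it is about to fall, skipping the pull loses nothing.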