From 4d4740da1a16a402fe616a861570f0ded0865f65 Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi
Date: Tue, 1 Mar 2011 16:28:21 -0800
Subject: [PATCH]

--- yaml ---
r: 234566
b: refs/heads/master
c: 6d1cafd8b56ea726c10a5a104de57cc3ed8fa953
h: refs/heads/master
v: v3
---
 [refs]                    | 2 +-
 trunk/kernel/sched.c      | 9 ++++++++-
 trunk/kernel/sched_fair.c | 4 ----
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index b55c5a3a0c48..d0183141a911 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c02aa73b1d18e43cfd79c2f193b225e84ca497c8
+refs/heads/master: 6d1cafd8b56ea726c10a5a104de57cc3ed8fa953
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index f3030709d826..61452e86c73b 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -5522,8 +5522,15 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 		goto out;
 
 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
-	if (yielded)
+	if (yielded) {
 		schedstat_inc(rq, yld_count);
+		/*
+		 * Make p's CPU reschedule; pick_next_entity takes care of
+		 * fairness.
+		 */
+		if (preempt && rq != p_rq)
+			resched_task(p_rq->curr);
+	}
 
 out:
 	double_rq_unlock(rq, p_rq);
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 1438e13cf8be..3f7ec9e27ee1 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1987,10 +1987,6 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
 	/* Tell the scheduler that we'd really like pse to run next. */
 	set_next_buddy(se);
 
-	/* Make p's CPU reschedule; pick_next_entity takes care of fairness. */
-	if (preempt)
-		resched_task(rq->curr);
-
 	yield_task_fair(rq);
 
 	return true;