From 8b680d31440f914ce670db10b5b98a6680a0d9e3 Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Tue, 22 Nov 2011 15:21:26 +0100
Subject: [PATCH]

--- yaml ---
r: 277383
b: refs/heads/master
c: 916671c08b7808aebec87cc56c85788e665b3c6b
h: refs/heads/master
i:
  277381: f30bdbd035507c07fdd9b88e1f0a9173814ed392
  277379: 840f86e21c5cbe13d8b71f0151295ff0e9ab3ef7
  277375: b37a1d9e32de8fccb3d021c4ac83f9c53b596259
v: v3
---

 [refs]                    | 2 +-
 trunk/kernel/sched/core.c | 7 +++++++
 trunk/kernel/sched/fair.c | 6 ++++++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index ccbfe9e63629..03fa78011a5f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 76854c7e8f3f4172fef091e78d88b3b751463ac6
+refs/heads/master: 916671c08b7808aebec87cc56c85788e665b3c6b
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index ca8fd44145ac..db313c33af29 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -4547,6 +4547,13 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 		 */
 		if (preempt && rq != p_rq)
 			resched_task(p_rq->curr);
+	} else {
+		/*
+		 * We might have set it in task_yield_fair(), but are
+		 * not going to schedule(), so don't want to skip
+		 * the next update.
+		 */
+		rq->skip_clock_update = 0;
 	}
 
 out:
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 8e534a05e3ed..81ccb811afb4 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -3075,6 +3075,12 @@ static void yield_task_fair(struct rq *rq)
 		 * Update run-time statistics of the 'current'.
 		 */
 		update_curr(cfs_rq);
+		/*
+		 * Tell update_rq_clock() that we've just updated,
+		 * so we don't do microscopic update in schedule()
+		 * and double the fastpath cost.
+		 */
+		rq->skip_clock_update = 1;
 	}
 
 	set_skip_buddy(se);
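
Note: the following is a minimal, stand-alone user-space sketch of the idea the patch relies on, not kernel code. It models the skip_clock_update pattern: yield updates the runqueue clock and marks it as fresh so the immediately following schedule() fastpath can skip a redundant, microscopic clock update; if no schedule() will follow, the flag must be cleared so the next real update is not lost. All names here (toy_rq, toy_yield, toy_schedule, ...) are hypothetical and only illustrate the pattern; the real update_rq_clock()/__schedule() paths handle the flag with more detail.

/* Build with: cc -o toy toy.c */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

struct toy_rq {
	unsigned long long clock_ns;	/* cached runqueue clock */
	int skip_clock_update;		/* 1: next clock update is redundant */
};

static unsigned long long toy_read_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Analogue of update_rq_clock(): do nothing if marked as freshly updated. */
static void toy_update_clock(struct toy_rq *rq)
{
	if (rq->skip_clock_update) {
		rq->skip_clock_update = 0;
		return;
	}
	rq->clock_ns = toy_read_clock();
}

/* Analogue of yield_task_fair(): stats were just updated, record that fact. */
static void toy_yield(struct toy_rq *rq)
{
	rq->clock_ns = toy_read_clock();	/* stands in for update_curr() */
	rq->skip_clock_update = 1;
}

/* Analogue of schedule()'s fastpath: the clock update may be skipped. */
static void toy_schedule(struct toy_rq *rq)
{
	toy_update_clock(rq);
	printf("clock: %llu ns\n", rq->clock_ns);
}

int main(void)
{
	struct toy_rq rq = { 0, 0 };

	toy_yield(&rq);		/* sets the skip flag */
	toy_schedule(&rq);	/* reuses the clock cached at yield time */
	toy_schedule(&rq);	/* subsequent updates read the clock again */
	return 0;
}

In this toy model, the first schedule() after the yield reuses the clock value cached by the yield, while the second reads the clock anew; the else-branch added to yield_to() in the patch corresponds to clearing the flag when it turns out no schedule() will consume it.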