From 6530f732939f55f07f0fc9c7e64957b4e71aa774 Mon Sep 17 00:00:00 2001
From: Markus Metzger
Date: Fri, 3 Apr 2009 16:43:34 +0200
Subject: [PATCH] --- yaml ---

r: 146121
b: refs/heads/master
c: a26b89f05d194413c7238e0bea071054f6b5d3c8
h: refs/heads/master
i:
  146119: 3b80db9c4c78ff7540db3fe71290f84111113a4c
v: v3
---
 [refs]                      |  2 +-
 trunk/include/linux/sched.h |  2 ++
 trunk/kernel/sched.c        | 43 +++++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 8d0fb3538854..9983f5384aad 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cac94f979326212831c0ea44ed9ea1622b4f4e93
+refs/heads/master: a26b89f05d194413c7238e0bea071054f6b5d3c8
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index b94f3541f67b..a5b9a83065fa 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -1993,8 +1993,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 6cc1fd5d5072..f91bc8141dc3 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2002,6 +2002,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	return 1;
 }
 
+/*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
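
Usage sketch (illustrative, not part of the patch): the hunks above add
the helper but no caller. A minimal, hypothetical example of the
intended calling pattern follows; tracer_detach_task() and tracer_ctx
are invented names, and only wait_task_context_switch() itself comes
from this patch.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Hypothetical caller: free per-task state that the scheduler may
 * still reference mid-switch, but only after @child has completed
 * at least one full context switch.
 */
static void tracer_detach_task(struct task_struct *child, void *tracer_ctx)
{
	/* The helper's contract: @child must not be the calling task. */
	BUG_ON(child == current);

	wait_task_context_switch(child);

	/* At least one switch has fully completed; the state is unused. */
	kfree(tracer_ctx);
}

Note that nvcsw/nivcsw are incremented before the switch itself, so the
helper waits for two observed increments (or for the task to be seen
not running) before returning; a caller like the one above may
therefore block for up to two context switches of @child.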