From 6835d8a7bc2865835d3100336a5c2f94c02e9b85 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Mon, 10 Sep 2012 10:03:44 -0700
Subject: [PATCH]

--- yaml ---
r: 322614
b: refs/heads/master
c: ee378aa49b594da9bda6a2c768cc5b2ad585f911
h: refs/heads/master
v: v3
---
 [refs]                   |  2 +-
 trunk/kernel/workqueue.c | 37 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index d6f1f797a79d..ee2ec7edb385 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 552a37e9360a293cd20e7f8ff1fb326a244c5f1e
+refs/heads/master: ee378aa49b594da9bda6a2c768cc5b2ad585f911
diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c
index 383548ed0b54..1e1373bcb3e3 100644
--- a/trunk/kernel/workqueue.c
+++ b/trunk/kernel/workqueue.c
@@ -1825,10 +1825,45 @@ static bool manage_workers(struct worker *worker)
 	struct worker_pool *pool = worker->pool;
 	bool ret = false;
 
-	if (!mutex_trylock(&pool->manager_mutex))
+	if (pool->flags & POOL_MANAGING_WORKERS)
 		return ret;
 
 	pool->flags |= POOL_MANAGING_WORKERS;
+
+	/*
+	 * To simplify both worker management and CPU hotplug, hold off
+	 * management while hotplug is in progress. CPU hotplug path can't
+	 * grab %POOL_MANAGING_WORKERS to achieve this because that can
+	 * lead to idle worker depletion (all become busy thinking someone
+	 * else is managing) which in turn can result in deadlock under
+	 * extreme circumstances. Use @pool->manager_mutex to synchronize
+	 * manager against CPU hotplug.
+	 *
+	 * manager_mutex would always be free unless CPU hotplug is in
+	 * progress. trylock first without dropping @gcwq->lock.
+	 */
+	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+		spin_unlock_irq(&pool->gcwq->lock);
+		mutex_lock(&pool->manager_mutex);
+		/*
+		 * CPU hotplug could have happened while we were waiting
+		 * for manager_mutex. Hotplug itself can't handle us
+		 * because manager isn't either on idle or busy list, and
+		 * @gcwq's state and ours could have deviated.
+		 *
+		 * As hotplug is now excluded via manager_mutex, we can
+		 * simply try to bind. It will succeed or fail depending
+		 * on @gcwq's current state. Try it and adjust
+		 * %WORKER_UNBOUND accordingly.
+		 */
+		if (worker_maybe_bind_and_lock(worker))
+			worker->flags &= ~WORKER_UNBOUND;
+		else
+			worker->flags |= WORKER_UNBOUND;
+
+		ret = true;
+	}
+
 	pool->flags &= ~POOL_MANAGE_WORKERS;
 
 	/*
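
The hunk above uses a common kernel locking pattern: opportunistically trylock a
sleeping lock while still holding a spinlock, and only on failure drop the
spinlock, block on the mutex, and re-validate any state that may have changed
while sleeping. What follows is a minimal user-space sketch of that pattern
using pthreads, not kernel code; every name in it (pool, pool_lock,
manager_lock, maybe_bind_and_lock) is an illustrative stand-in, not the
workqueue API.

/*
 * Sketch of the trylock-then-block pattern from the hunk above.
 * pool_lock plays the role of gcwq->lock, manager_lock the role of
 * manager_mutex, and unbound the role of WORKER_UNBOUND.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_spinlock_t pool_lock;   /* stands in for gcwq->lock */
	pthread_mutex_t manager_lock;   /* stands in for manager_mutex */
	bool unbound;                   /* stands in for WORKER_UNBOUND */
};

/*
 * Stand-in for worker_maybe_bind_and_lock(): re-takes pool_lock and
 * reports whether (re)binding succeeded.  Always succeeds here.
 */
static bool maybe_bind_and_lock(struct pool *p)
{
	pthread_spin_lock(&p->pool_lock);
	return true;
}

/*
 * Called with pool_lock held; returns with pool_lock and manager_lock
 * both held.  Returns true if it had to sleep for manager_lock.
 */
static bool grab_manager(struct pool *p)
{
	bool slept = false;

	/* Fast path: manager_lock is free unless "hotplug" holds it. */
	if (pthread_mutex_trylock(&p->manager_lock) != 0) {
		/*
		 * Slow path: we cannot block on a mutex while holding a
		 * spinlock, so drop the spinlock before sleeping.
		 */
		pthread_spin_unlock(&p->pool_lock);
		pthread_mutex_lock(&p->manager_lock);

		/*
		 * State may have deviated while we slept; re-check and
		 * adjust, mirroring the WORKER_UNBOUND fixup above.
		 */
		p->unbound = !maybe_bind_and_lock(p);
		slept = true;
	}
	return slept;
}

int main(void)
{
	struct pool p = { .unbound = false };

	pthread_spin_init(&p.pool_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&p.manager_lock, NULL);

	pthread_spin_lock(&p.pool_lock);
	printf("slept waiting for manager_lock: %d\n", grab_manager(&p));
	pthread_mutex_unlock(&p.manager_lock);
	pthread_spin_unlock(&p.pool_lock);
	return 0;
}

The constraint mirrored here is the reason for the patch's two-step approach:
a manager blocking on manager_mutex must first release the pool's spinlock, and
once it has slept, everything it knew about the pool (here, the unbound flag)
is stale and must be re-derived after the locks are reacquired.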