workqueue: move pwq_pool_locking outside of get/put_unbound_pool()
The scheduled NUMA affinity support for unbound workqueues will need to
walk the workqueues list and perform pool-related operations on each workqueue.

Move wq_pool_mutex locking out of get/put_unbound_pool() to their
callers so that pool operations can be performed while walking the
workqueues list, which is also protected by wq_pool_mutex.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tejun Heo committed Apr 1, 2013
1 parent 4862125, commit a892cac
1 changed file: kernel/workqueue.c (22 additions, 14 deletions)
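The change is easier to see as a pattern: the pool helpers stop taking wq_pool_mutex themselves and instead assert that the caller already holds it, which lets a caller keep the mutex across a whole workqueues-list walk plus the per-pool operations. The userspace sketch below (not part of the commit) illustrates that pattern; pool_list_lock, release_pool(), reap_idle_pools() and the pool struct are invented stand-ins for wq_pool_mutex, put_unbound_pool() and the unbound-pool machinery, and a plain assert() stands in for lockdep_assert_held().

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for wq_pool_mutex: protects the pool list and pool refcounts. */
static pthread_mutex_t pool_list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool pool_list_locked;		/* poor man's lockdep_assert_held() */

struct pool {
	int refcnt;
	bool idle;
	struct pool *next;
};

static struct pool *pool_list;		/* walked only under pool_list_lock */

static void lock_pools(void)
{
	pthread_mutex_lock(&pool_list_lock);
	pool_list_locked = true;
}

static void unlock_pools(void)
{
	pool_list_locked = false;
	pthread_mutex_unlock(&pool_list_lock);
}

/*
 * Shaped like the reworked put_unbound_pool(): it no longer takes or drops
 * the lock itself, it only checks that the caller already holds it.
 */
static void release_pool(struct pool *pool)
{
	assert(pool_list_locked);	/* kernel: lockdep_assert_held(&wq_pool_mutex) */

	if (--pool->refcnt)
		return;
	printf("destroying pool %p\n", (void *)pool);
	/* unhashing and freeing would happen here */
}

/*
 * Caller-side pattern the commit enables: take the lock once, walk the
 * list, and operate on each pool without dropping and retaking the lock.
 */
static void reap_idle_pools(void)
{
	struct pool *pool, *next;

	lock_pools();
	for (pool = pool_list; pool; pool = next) {
		next = pool->next;	/* read before the pool may be released */
		if (pool->idle)
			release_pool(pool);
	}
	unlock_pools();
}

int main(void)
{
	struct pool p = { .refcnt = 1, .idle = true, .next = NULL };

	pool_list = &p;
	reap_idle_pools();
	return 0;
}

In the kernel itself, lockdep_assert_held() provides the same caller-holds-the-lock check at runtime when lockdep is enabled and compiles away otherwise.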
@@ -3395,31 +3395,28 @@ static void rcu_free_pool(struct rcu_head *rcu)
  * safe manner. get_unbound_pool() calls this function on its failure path
  * and this function should be able to release pools which went through,
  * successfully or not, init_worker_pool().
+ *
+ * Should be called with wq_pool_mutex held.
  */
 static void put_unbound_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	mutex_lock(&wq_pool_mutex);
-	if (--pool->refcnt) {
-		mutex_unlock(&wq_pool_mutex);
+	lockdep_assert_held(&wq_pool_mutex);
+
+	if (--pool->refcnt)
 		return;
-	}
 
 	/* sanity checks */
 	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
-	    WARN_ON(!list_empty(&pool->worklist))) {
-		mutex_unlock(&wq_pool_mutex);
+	    WARN_ON(!list_empty(&pool->worklist)))
 		return;
-	}
 
 	/* release id and unhash */
 	if (pool->id >= 0)
 		idr_remove(&worker_pool_idr, pool->id);
 	hash_del(&pool->hash_node);
 
-	mutex_unlock(&wq_pool_mutex);
-
 	/*
 	 * Become the manager and destroy all workers. Grabbing
 	 * manager_arb prevents @pool's workers from blocking on
@@ -3453,13 +3450,15 @@ static void put_unbound_pool(struct worker_pool *pool)
  * reference count and return it. If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
  * create a new one. On failure, returns NULL.
+ *
+ * Should be called with wq_pool_mutex held.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
 
-	mutex_lock(&wq_pool_mutex);
+	lockdep_assert_held(&wq_pool_mutex);
 
 	/* do we already have a matching pool? */
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
@@ -3490,10 +3489,8 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
 out_unlock:
-	mutex_unlock(&wq_pool_mutex);
 	return pool;
 fail:
-	mutex_unlock(&wq_pool_mutex);
 	if (pool)
 		put_unbound_pool(pool);
 	return NULL;
@@ -3530,7 +3527,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	is_last = list_empty(&wq->pwqs);
 	mutex_unlock(&wq->mutex);
 
+	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
+	mutex_unlock(&wq_pool_mutex);
+
 	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
 
 	/*
@@ -3654,13 +3654,21 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	copy_workqueue_attrs(new_attrs, attrs);
 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
 
+	mutex_lock(&wq_pool_mutex);
+
 	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
-	if (!pwq)
+	if (!pwq) {
+		mutex_unlock(&wq_pool_mutex);
 		goto enomem;
+	}
 
 	pool = get_unbound_pool(new_attrs);
-	if (!pool)
+	if (!pool) {
+		mutex_unlock(&wq_pool_mutex);
 		goto enomem;
+	}
+
+	mutex_unlock(&wq_pool_mutex);
 
 	init_and_link_pwq(pwq, wq, pool, &last_pwq);
 	if (last_pwq) {
