workqueue: better define locking rules around worker creation / destruction

When a manager creates or destroys workers, the operations are always
done with the manager_mutex held; however, initial worker creation and
worker destruction during pool release don't grab the mutex.  They are
still correct, as initial worker creation doesn't require
synchronization and grabbing manager_arb provides enough exclusion for
the pool release path.

Still, let's make everyone follow the same rules, both for consistency
and so that lockdep annotations can be added.

Update create_and_start_worker() and put_unbound_pool() to grab
manager_mutex around thread creation and destruction respectively and
add lockdep assertions to create_worker() and destroy_worker().

This patch doesn't introduce any visible behavior changes.
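
As a rough, informal sketch (not part of the patch itself), this is the
rule the new assertions encode.  example_manage_workers() is a
hypothetical caller inside kernel/workqueue.c; the struct, field, and
function names otherwise come from the diff below:

	/*
	 * Hypothetical caller: any path that creates or destroys workers
	 * is expected to do so under pool->manager_mutex.
	 */
	static void example_manage_workers(struct worker_pool *pool)
	{
		mutex_lock(&pool->manager_mutex);

		/*
		 * create_worker() now begins with
		 * lockdep_assert_held(&pool->manager_mutex), so calling it
		 * outside this critical section produces a lockdep warning
		 * (when lockdep is enabled) instead of silently racing.
		 */
		if (!create_worker(pool))
			pr_warn("example: worker creation failed\n");

		mutex_unlock(&pool->manager_mutex);
	}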

Signed-off-by: Tejun Heo <tj@kernel.org>
Tejun Heo committed Mar 14, 2013
1 parent ebf44d1 commit cd54968
Showing 1 changed file with 12 additions and 1 deletion.
13 changes: 12 additions & 1 deletion kernel/workqueue.c
@@ -1715,6 +1715,8 @@ static struct worker *create_worker(struct worker_pool *pool)
 	struct worker *worker = NULL;
 	int id = -1;
 
+	lockdep_assert_held(&pool->manager_mutex);
+
 	spin_lock_irq(&pool->lock);
 	while (ida_get_new(&pool->worker_ida, &id)) {
 		spin_unlock_irq(&pool->lock);
@@ -1796,19 +1798,23 @@ static void start_worker(struct worker *worker)
  * create_and_start_worker - create and start a worker for a pool
  * @pool: the target pool
  *
- * Create and start a new worker for @pool.
+ * Grab the managership of @pool and create and start a new worker for it.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
 	struct worker *worker;
 
+	mutex_lock(&pool->manager_mutex);
+
 	worker = create_worker(pool);
 	if (worker) {
 		spin_lock_irq(&pool->lock);
 		start_worker(worker);
 		spin_unlock_irq(&pool->lock);
 	}
 
+	mutex_unlock(&pool->manager_mutex);
+
 	return worker ? 0 : -ENOMEM;
 }

@@ -1826,6 +1832,9 @@ static void destroy_worker(struct worker *worker)
 	struct worker_pool *pool = worker->pool;
 	int id = worker->id;
 
+	lockdep_assert_held(&pool->manager_mutex);
+	lockdep_assert_held(&pool->lock);
+
 	/* sanity check frenzy */
 	if (WARN_ON(worker->current_work) ||
 	    WARN_ON(!list_empty(&worker->scheduled)))
@@ -3531,13 +3540,15 @@ static void put_unbound_pool(struct worker_pool *pool)
 	 * manager_mutex.
 	 */
 	mutex_lock(&pool->manager_arb);
+	mutex_lock(&pool->manager_mutex);
 	spin_lock_irq(&pool->lock);
 
 	while ((worker = first_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 
 	spin_unlock_irq(&pool->lock);
+	mutex_unlock(&pool->manager_mutex);
 	mutex_unlock(&pool->manager_arb);
 
 	/* shut down the timers */
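For reference, the lock nesting that put_unbound_pool() ends up with
after this change can be summarized as follows; this is only a reading
aid that restates the ordering visible in the last hunk:

	/*
	 * Unbound-pool release path after this patch:
	 *
	 *   mutex_lock(&pool->manager_arb);     exclude concurrent managers
	 *   mutex_lock(&pool->manager_mutex);   worker creation/destruction
	 *   spin_lock_irq(&pool->lock);         protects the worker lists
	 *   ... destroy_worker() on each remaining worker ...
	 *   spin_unlock_irq(&pool->lock);
	 *   mutex_unlock(&pool->manager_mutex);
	 *   mutex_unlock(&pool->manager_arb);
	 *
	 * destroy_worker() now asserts that both manager_mutex and
	 * pool->lock are held.
	 */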
