workqueue: simplify CPU hotplug code
With trustee gone, CPU hotplug code can be simplified.

* gcwq_claim/release_management() now also grab and release the gcwq
  lock, respectively, and have gained _and_lock and _and_unlock
  suffixes (see the before/after sketch below).

* All CPU hotplug logic used to be implemented in
  workqueue_cpu_callback(), which was called from
  workqueue_cpu_up/down_callback() so that each path ran at the
  correct notifier priority.  That made sense while the up and down
  paths shared a lot of logic, which is no longer true.  Remove
  workqueue_cpu_callback() and move all hotplug logic into the two
  actual callbacks.

This patch doesn't make any functional changes.
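
As a result of the first change, a hotplug path that previously did

	gcwq_claim_management(gcwq);
	spin_lock_irq(&gcwq->lock);
	/* ... manipulate the gcwq with the lock held ... */
	spin_unlock_irq(&gcwq->lock);
	gcwq_release_management(gcwq);

now collapses to the following (a condensed sketch of the call sites
in the diff below; pool manager_mutexes are taken first, then
gcwq->lock):

	gcwq_claim_management_and_lock(gcwq);
	/* ... manipulate the gcwq with the lock held ... */
	gcwq_release_management_and_unlock(gcwq);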

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Tejun Heo committed Jul 17, 2012
1 parent 628c78e commit 8db25e7
1 changed file: kernel/workqueue.c (25 additions, 54 deletions)
@@ -3358,19 +3358,21 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management(struct global_cwq *gcwq)
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
 	for_each_worker_pool(pool, gcwq)
 		mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+	spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management(struct global_cwq *gcwq)
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
+	spin_unlock_irq(&gcwq->lock);
 	for_each_worker_pool(pool, gcwq)
 		mutex_unlock(&pool->manager_mutex);
 }
@@ -3385,8 +3387,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	BUG_ON(gcwq->cpu != smp_processor_id());
 
-	gcwq_claim_management(gcwq);
-	spin_lock_irq(&gcwq->lock);
+	gcwq_claim_management_and_lock(gcwq);
 
 	/*
 	 * We've claimed all manager positions.  Make all workers unbound
@@ -3403,8 +3404,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
-	spin_unlock_irq(&gcwq->lock);
-	gcwq_release_management(gcwq);
+	gcwq_release_management_and_unlock(gcwq);
 
 	/*
	 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3428,26 +3428,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
 		atomic_set(get_pool_nr_running(pool), 0);
 }
 
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action,
-					    void *hcpu)
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+					       unsigned long action,
+					       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	struct worker_pool *pool;
-	struct work_struct unbind_work;
-	unsigned long flags;
-
-	action &= ~CPU_TASKS_FROZEN;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-		schedule_work_on(cpu, &unbind_work);
-		flush_work(&unbind_work);
-		break;
 
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
@@ -3463,45 +3456,16 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			start_worker(worker);
 			spin_unlock_irq(&gcwq->lock);
 		}
-	}
-
-	/* some are called w/ irq disabled, don't disturb irq status */
-	spin_lock_irqsave(&gcwq->lock, flags);
+		break;
 
-	switch (action) {
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		spin_unlock_irq(&gcwq->lock);
-		gcwq_claim_management(gcwq);
-		spin_lock_irq(&gcwq->lock);
-
+		gcwq_claim_management_and_lock(gcwq);
 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
 		rebind_workers(gcwq);
-
-		gcwq_release_management(gcwq);
+		gcwq_release_management_and_unlock(gcwq);
 		break;
 	}
-
-	spin_unlock_irqrestore(&gcwq->lock, flags);
-
-	return notifier_from_errno(0);
-}
-
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
-	}
 	return NOTIFY_OK;
 }

@@ -3513,9 +3477,16 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
+	unsigned int cpu = (unsigned long)hcpu;
+	struct work_struct unbind_work;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
+		/* unbinding should happen on the local CPU */
+		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+		schedule_work_on(cpu, &unbind_work);
+		flush_work(&unbind_work);
+		break;
 	}
 	return NOTIFY_OK;
 }
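The comment added to workqueue_cpu_up_callback() above refers to
notifier registration priority.  For context, a sketch of how the two
callbacks are registered from init_workqueues() in kernel/workqueue.c
(not part of this diff; shown only to illustrate the priority split):

	static int __init init_workqueues(void)
	{
		/* up callback: high priority, runs before normal notifiers */
		cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
		/* down callback: low priority, runs after normal notifiers */
		hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
		/* ... remainder of workqueue initialization ... */
	}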
