From a7406b9539ff8f152464ee48be6a9a3a6121d6f5 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Tue, 29 Jul 2008 22:33:49 -0700
Subject: [PATCH]

--- yaml ---
r: 107030
b: refs/heads/master
c: 6af8bf3d86d55c98af6e453cb920ddc30867e5c7
h: refs/heads/master
v: v3
---
 [refs]                   |  2 +-
 trunk/kernel/workqueue.c | 13 ++++++++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index c69c335eea0e..6b2deac6e4da 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dbacefc9c4f6bd365243db379473ab7041656d90
+refs/heads/master: 6af8bf3d86d55c98af6e453cb920ddc30867e5c7
diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c
index ec7e4f62aaff..4a26a1382df0 100644
--- a/trunk/kernel/workqueue.c
+++ b/trunk/kernel/workqueue.c
@@ -830,10 +830,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		start_workqueue_thread(cwq, -1);
 	} else {
 		cpu_maps_update_begin();
+		/*
+		 * We must place this wq on list even if the code below fails.
+		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+		 * destroy_workqueue() takes the lock, in that case we leak
+		 * cwq[cpu]->thread.
+		 */
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
-
+		/*
+		 * We must initialize cwqs for each possible cpu even if we
+		 * are going to call destroy_workqueue() finally. Otherwise
+		 * cpu_up() can hit the uninitialized cwq once we drop the
+		 * lock.
+		 */
 		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
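
The two comments this patch adds encode a single ordering rule in __create_workqueue_key(): publish the workqueue on the global list before initializing its per-CPU entries, and initialize every possible entry even when heading for a failure path, because the CPU-hotplug callbacks walk that list and touch those entries. The sketch below is a minimal userspace analogue of that rule, not kernel code: fake_wq, registry, observer(), and slot_ready are hypothetical stand-ins, and the kernel's two locking levels (cpu_maps_update_begin() and workqueue_lock) are collapsed into one mutex.

/*
 * Userspace analogue of the ordering rule documented above.
 * Build with: cc -pthread sketch.c
 *
 * observer() stands in for a CPU-hotplug callback: it walks the
 * global registry and touches every per-slot entry of every object
 * it finds, so an object must never be reachable with slots left
 * uninitialized.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4                  /* stands in for the possible-CPU map */

struct fake_wq {
	struct fake_wq *next;     /* linkage on the global registry */
	int slot_ready[NSLOTS];   /* stands in for cwq initialization */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_wq *registry;  /* stands in for the workqueues list */

/* Walk the registry and touch every slot, like a hotplug callback. */
static void *observer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&registry_lock);
	for (struct fake_wq *wq = registry; wq; wq = wq->next)
		for (int i = 0; i < NSLOTS; i++)
			if (!wq->slot_ready[i])
				/* in the kernel this would be a use of garbage */
				fprintf(stderr, "uninitialized slot %d\n", i);
	pthread_mutex_unlock(&registry_lock);
	return NULL;
}

static struct fake_wq *create_fake_wq(void)
{
	struct fake_wq *wq = calloc(1, sizeof(*wq));
	if (!wq)
		return NULL;

	pthread_mutex_lock(&registry_lock);
	/*
	 * Publish first, as the patch's first comment requires: once
	 * the object is findable, teardown paths can always reach it
	 * and nothing is leaked.
	 */
	wq->next = registry;
	registry = wq;
	/*
	 * Then initialize *every* slot before the object becomes
	 * observable without the lock, as the second comment requires,
	 * even if the caller may destroy the object on a later error.
	 */
	for (int i = 0; i < NSLOTS; i++)
		wq->slot_ready[i] = 1;
	pthread_mutex_unlock(&registry_lock);
	return wq;
}

int main(void)
{
	pthread_t tid;
	struct fake_wq *wq = create_fake_wq();

	if (!wq)
		return 1;
	if (pthread_create(&tid, NULL, observer, NULL) != 0) {
		free(wq);
		return 1;
	}
	pthread_join(tid, NULL);
	free(wq);
	return 0;
}

In the kernel function itself the for_each_possible_cpu() loop runs with hotplug excluded by cpu_maps_update_begin(); the window the second comment warns about opens after cpu_maps_update_done(), when a cpu_up() callback can find the wq on the list and reach a cwq that was never initialized.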