From effef18e33f2b93d0c869cceede3ea2f3a019af5 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Sun, 25 Jun 2006 05:49:10 -0700
Subject: [PATCH]

--- yaml ---
r: 29801
b: refs/heads/master
c: fc75cdfa5b43ac4d3232b490800cd35063adafd3
h: refs/heads/master
i:
  29799: 49bdde7dd6145e67ed538f25ff3e4ce9b6a44683
v: v3
---
 [refs]                    | 2 +-
 trunk/kernel/sched.c      | 2 ++
 trunk/kernel/softirq.c    | 2 ++
 trunk/kernel/softlockup.c | 2 ++
 trunk/kernel/workqueue.c  | 2 ++
 5 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index b6d74264b689..b484270c3d23 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 04a3446c90137a473837288b04b517b19dc67a0d
+refs/heads/master: fc75cdfa5b43ac4d3232b490800cd35063adafd3
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index f8d540b324ca..f06d059edef5 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/trunk/kernel/softirq.c
+++ b/trunk/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/trunk/kernel/softlockup.c b/trunk/kernel/softlockup.c
index 2c1be1163edc..b5c3b94e01ce 100644
--- a/trunk/kernel/softlockup.c
+++ b/trunk/kernel/softlockup.c
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c
index f869aff6bc0c..565cf7a1febd 100644
--- a/trunk/kernel/workqueue.c
+++ b/trunk/kernel/workqueue.c
@@ -590,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));