From 13f6e11a3cb607b2ae307ec96134d6693b0d715a Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sat, 15 Aug 2009 09:53:48 -0700
Subject: [PATCH]

--- yaml ---
r: 158150
b: refs/heads/master
c: 2e597558086dec36d5c33521a36e0f6b1bc3f3a7
h: refs/heads/master
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/rcupdate.c   | 16 +++++++++++++++-
 trunk/kernel/rcupreempt.c | 25 ++-----------------------
 trunk/kernel/rcutree.c    | 17 +++++------------
 4 files changed, 23 insertions(+), 37 deletions(-)

diff --git a/[refs] b/[refs]
index a434349b6377..b16b71f797de 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 799e64f05f4bfaad2bb3165cab95c8c992a1c296
+refs/heads/master: 2e597558086dec36d5c33521a36e0f6b1bc3f3a7
diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c
index eae29c25fb14..8df115600c2d 100644
--- a/trunk/kernel/rcupdate.c
+++ b/trunk/kernel/rcupdate.c
@@ -217,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
 	wake_up(&rcu_migrate_wq);
 }
 
+extern int rcu_cpu_notify(struct notifier_block *self,
+			  unsigned long action, void *hcpu);
+
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
+	rcu_cpu_notify(self, action, hcpu);
 	if (action == CPU_DYING) {
 		/*
 		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -244,8 +248,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
 void rcu_scheduler_starting(void)
diff --git a/trunk/kernel/rcupreempt.c b/trunk/kernel/rcupreempt.c
index beb0e659adcc..9b87f5134ed7 100644
--- a/trunk/kernel/rcupreempt.c
+++ b/trunk/kernel/rcupreempt.c
@@ -1417,8 +1417,8 @@ int rcu_pending(int cpu)
 	return 0;
 }
 
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+			     unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1439,10 +1439,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call = rcu_cpu_notify,
-};
-
 void __init __rcu_init(void)
 {
 	int cpu;
@@ -1471,23 +1467,6 @@ void __init __rcu_init(void)
 		rdp->waitschedtail = &rdp->waitschedlist;
 		rdp->rcu_sched_sleeping = 0;
 	}
-	register_cpu_notifier(&rcu_nb);
-
-	/*
-	 * We don't need protection against CPU-Hotplug here
-	 * since
-	 * a) If a CPU comes online while we are iterating over the
-	 *    cpu_online_mask below, we would only end up making a
-	 *    duplicate call to rcu_online_cpu() which sets the corresponding
-	 *    CPU's mask in the rcu_cpu_online_map.
-	 *
-	 * b) A CPU cannot go offline at this point in time since the user
-	 *    does not have access to the sysfs interface, nor do we
-	 *    suspend the system.
-	 */
-	for_each_online_cpu(cpu)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
-
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c
index f3e43274ed53..75762cddbe03 100644
--- a/trunk/kernel/rcutree.c
+++ b/trunk/kernel/rcutree.c
@@ -1132,6 +1132,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(rdp->beenonline == 0);
+
 	/*
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
@@ -1416,14 +1418,13 @@ static void __cpuinit rcu_online_cpu(int cpu)
 {
 	rcu_init_percpu_data(cpu, &rcu_state);
 	rcu_init_percpu_data(cpu, &rcu_bh_state);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 /*
  * Handle CPU online/offline notifcation events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+			     unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1532,10 +1533,6 @@ do { \
 	} \
 } while (0)
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call = rcu_cpu_notify,
-};
-
 void __init __rcu_init(void)
 {
 	int i;			/* All used by RCU_DATA_PTR_INIT(). */
@@ -1554,11 +1551,7 @@ void __init __rcu_init(void)
 	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
 	for_each_possible_cpu(i)
 		rcu_boot_init_percpu_data(i, &rcu_bh_state);
-
-	for_each_online_cpu(i)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 module_param(blimit, int, 0);
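
Editor's sketch (not part of the patch): the diff above removes the per-implementation notifier registration (the rcu_nb notifier_block and register_cpu_notifier() calls in rcupreempt.c and rcutree.c) and instead has the common rcu_barrier_cpu_hotplug() in rcupdate.c forward every hotplug event to the implementation's now-extern rcu_cpu_notify(), with rcu_init() replaying CPU_UP_PREPARE for the CPUs already online at boot. The userspace C model below illustrates only that call flow; the notifier type, the action values, and the online-CPU loop are simplified stand-ins for the kernel's cpu_notifier() and cpumask machinery, not the real interfaces.

/* Userspace model of the post-patch notification flow; not kernel code. */
#include <stdio.h>

#define CPU_UP_PREPARE	0x03	/* stand-in value for the kernel action */
#define NOTIFY_OK	0x01

struct notifier_block;		/* opaque in this model */

/* Implementation-specific handler (rcutree.c/rcupreempt.c), now non-static. */
int rcu_cpu_notify(struct notifier_block *self, unsigned long action,
		   void *hcpu)
{
	long cpu = (long)hcpu;

	if (action == CPU_UP_PREPARE)
		printf("rcu_cpu_notify: set up per-CPU data for CPU %ld\n", cpu);
	return NOTIFY_OK;
}

/* Common handler (rcupdate.c): forwards first, then does rcu_barrier work. */
int rcu_barrier_cpu_hotplug(struct notifier_block *self, unsigned long action,
			    void *hcpu)
{
	rcu_cpu_notify(self, action, hcpu);
	/* CPU_DYING handling for rcu_barrier() elided. */
	return NOTIFY_OK;
}

int main(void)
{
	long i, nr_online = 4;	/* stand-in for for_each_online_cpu() */

	/*
	 * rcu_init(): single registration site (elided here), then replay
	 * CPU_UP_PREPARE for CPUs that came up before the notifier existed.
	 */
	for (i = 0; i < nr_online; i++)
		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)i);
	return 0;
}

As the diff also shows, open_softirq(RCU_SOFTIRQ, rcu_process_callbacks) moves from the per-CPU online path into __rcu_init(), and __rcu_process_callbacks() gains WARN_ON_ONCE(rdp->beenonline == 0), which fires if RCU callbacks are processed on a CPU that never went through the CPU_UP_PREPARE path.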