From 37da69a5b6cf89a1ecab7648f8f9e61c5da5e9bc Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 25 Oct 2010 07:39:22 -0700
Subject: [PATCH]

--- yaml ---
r: 223802
b: refs/heads/master
c: db3a8920995484e5e9a0abaf3bad2c7311b163db
h: refs/heads/master
v: v3
---
 [refs]                        |  2 +-
 trunk/kernel/rcutree_plugin.h | 18 ++++++++++++++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index 1acd278cdcb3..13cf54124a9e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2d999e03b7c8305b4385dd20992e4ed3e827177b
+refs/heads/master: db3a8920995484e5e9a0abaf3bad2c7311b163db
diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h
index 643c8f650dd0..c22c4ef2a0d0 100644
--- a/trunk/kernel/rcutree_plugin.h
+++ b/trunk/kernel/rcutree_plugin.h
@@ -1041,6 +1041,8 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
+	if (cpumask_first(cpu_online_mask) == smp_processor_id())
+		atomic_inc(&synchronize_sched_expedited_count);
	return 0;
 }
 
@@ -1053,13 +1055,26 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  * Note that it is illegal to call this function while holding any
  * lock that is acquired by a CPU-hotplug notifier.  Failing to
  * observe this restriction will result in deadlock.
+ *
+ * The synchronize_sched_expedited_cpu_stop() function is called
+ * in stop-CPU context, but in order to keep overhead down to a dull
+ * roar, we don't force this function to wait for its counterparts
+ * on other CPUs.  One instance of this function will increment the
+ * synchronize_sched_expedited_count variable per call to
+ * try_stop_cpus(), but there is no guarantee what order this instance
+ * will occur in.  The worst case is that it is last on one call
+ * to try_stop_cpus(), and the first on the next call.  This means
+ * that piggybacking requires that synchronize_sched_expedited_count
+ * be incremented by 3: this guarantees that the piggybacking
+ * task has waited through an entire cycle of context switches,
+ * even in the worst case.
  */
 void synchronize_sched_expedited(void)
 {
	int snap, trycount = 0;
 
	smp_mb(); /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	snap = atomic_read(&synchronize_sched_expedited_count) + 2;
	get_online_cpus();
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
@@ -1077,7 +1092,6 @@ void synchronize_sched_expedited(void)
		}
		get_online_cpus();
	}
-	atomic_inc(&synchronize_sched_expedited_count);
	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
	put_online_cpus();
 }
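
Note (illustration, not part of the patch): below is a minimal user-space
sketch of the counter/snapshot piggybacking scheme described in the new
comment block above. The helper names run_one_expedited_cycle() and
piggyback_possible() are hypothetical stand-ins, not kernel APIs; C11
atomics merely model atomic_t, and the real code drives the cycle through
try_stop_cpus() rather than a direct function call.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bumped once per completed expedited cycle (models the kernel's atomic_t). */
static atomic_int expedited_count;

/* Stand-in for one full try_stop_cpus()-driven expedited grace period. */
static void run_one_expedited_cycle(void)
{
	/* ... force every CPU through a context switch ... */
	atomic_fetch_add(&expedited_count, 1);
}

/*
 * True if cycles run by others already cover this waiter.  The snapshot
 * is "count + 2", so this only succeeds once the counter has advanced by
 * at least 3 from the initial read: the increment for a given cycle may
 * land last in one cycle and first in the next, so only three increments
 * guarantee that one complete cycle began and ended after we started.
 */
static bool piggyback_possible(int snap)
{
	return atomic_load(&expedited_count) - snap > 0;
}

int main(void)
{
	int snap = atomic_load(&expedited_count) + 2;

	if (!piggyback_possible(snap))
		run_one_expedited_cycle();	/* no one to piggyback on; do it ourselves */

	printf("expedited_count = %d\n", atomic_load(&expedited_count));
	return 0;
}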