Commit 37da69a
---
yaml
---
r: 223802
b: refs/heads/master
c: db3a892
h: refs/heads/master
v: v3
Paul E. McKenney committed Nov 30, 2010
1 parent ea6a0a5 commit 37da69a
Showing 2 changed files with 17 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2d999e03b7c8305b4385dd20992e4ed3e827177b
+refs/heads/master: db3a8920995484e5e9a0abaf3bad2c7311b163db
18 changes: 16 additions & 2 deletions trunk/kernel/rcutree_plugin.h
@@ -1041,6 +1041,8 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 	 * robustness against future implementation changes.
 	 */
 	smp_mb(); /* See above comment block. */
+	if (cpumask_first(cpu_online_mask) == smp_processor_id())
+		atomic_inc(&synchronize_sched_expedited_count);
 	return 0;
 }

@@ -1053,13 +1055,26 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  * Note that it is illegal to call this function while holding any
  * lock that is acquired by a CPU-hotplug notifier.  Failing to
  * observe this restriction will result in deadlock.
+ *
+ * The synchronize_sched_expedited_cpu_stop() function is called
+ * in stop-CPU context, but in order to keep overhead down to a dull
+ * roar, we don't force this function to wait for its counterparts
+ * on other CPUs.  One instance of this function will increment the
+ * synchronize_sched_expedited_count variable per call to
+ * try_stop_cpus(), but there is no guarantee what order this instance
+ * will occur in.  The worst case is that it is last on one call
+ * to try_stop_cpus(), and the first on the next call.  This means
+ * that piggybacking requires that synchronize_sched_expedited_count
+ * be incremented by 3: this guarantees that the piggybacking
+ * task has waited through an entire cycle of context switches,
+ * even in the worst case.
  */
 void synchronize_sched_expedited(void)
 {
 	int snap, trycount = 0;
 
 	smp_mb(); /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	snap = atomic_read(&synchronize_sched_expedited_count) + 2;
 	get_online_cpus();
 	while (try_stop_cpus(cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
@@ -1077,7 +1092,6 @@ void synchronize_sched_expedited(void)
 		}
 		get_online_cpus();
 	}
-	atomic_inc(&synchronize_sched_expedited_count);
 	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
 	put_online_cpus();
 }
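As a stand-alone illustration of the counter-snapshot piggybacking scheme described in the comment block above, here is a minimal sketch in plain C11 atomics, outside the kernel. The names (expedited_count, try_run_cycle, synchronize_expedited_sketch) and the use of <stdatomic.h> are hypothetical scaffolding, not part of the patch; only the snapshot-plus-2 / advance-by-3 arithmetic mirrors the code in the diff.

	/* piggyback_sketch.c -- illustrative only, not kernel code. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int expedited_count; /* +1 per completed cycle */

	/* Hypothetical stand-in for a successful try_stop_cpus() cycle. */
	static bool try_run_cycle(void)
	{
		/* The real work would go here; on success the cycle is
		 * counted exactly once, as the first-CPU stopper does
		 * in the patch above. */
		atomic_fetch_add(&expedited_count, 1);
		return true;
	}

	static void synchronize_expedited_sketch(void)
	{
		/*
		 * Snapshot the counter plus 2.  The "count - snap > 0"
		 * test below only becomes true once the counter has
		 * advanced by at least 3 past the value read here,
		 * which is what the patch's comment argues is needed
		 * to prove a full cycle ran entirely after this point,
		 * even when increments straddle cycle boundaries.
		 */
		int snap = atomic_load(&expedited_count) + 2;

		while (!try_run_cycle()) {
			if (atomic_load(&expedited_count) - snap > 0)
				return; /* piggyback on another task's cycle */
			/* otherwise back off and retry, as the patch
			 * does with udelay()/synchronize_sched(). */
		}
	}

	int main(void)
	{
		synchronize_expedited_sketch();
		printf("cycles completed: %d\n",
		       atomic_load(&expedited_count));
		return 0;
	}

The design point the sketch isolates: a single increment past the snapshot could be the tail of a cycle already in flight when the snapshot was taken, and a second could belong to a cycle straddling it, so per the patch's comment only an advance of 3 proves a complete cycle began and ended after the caller started watching.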
