rcu: Move synchronize_sched_expedited() state to rcu_state
Tracing (debugfs) of expedited RCU primitives is required, which in turn
requires that the relevant data be located where the tracing code can find
it, not in its current static global variables in kernel/rcutree.c.
This commit therefore moves sync_sched_expedited_started and
sync_sched_expedited_done to the rcu_state structure, as fields
->expedited_start and ->expedited_done, respectively.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney authored and committed on Nov 8, 2012
1 parent 1924bcb commit 40694d6
Showing 2 changed files with 12 additions and 11 deletions.
kernel/rcutree.c: 20 changes (9 additions, 11 deletions)

@@ -2249,9 +2249,6 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static atomic_long_t sync_sched_expedited_started = ATOMIC_LONG_INIT(0);
-static atomic_long_t sync_sched_expedited_done = ATOMIC_LONG_INIT(0);
-
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
         /*
@@ -2310,6 +2307,7 @@ void synchronize_sched_expedited(void)
 {
         long firstsnap, s, snap;
         int trycount = 0;
+        struct rcu_state *rsp = &rcu_sched_state;
 
         /*
          * If we are in danger of counter wrap, just do synchronize_sched().
@@ -2319,8 +2317,8 @@ void synchronize_sched_expedited(void)
          * counter wrap on a 32-bit system.  Quite a few more CPUs would of
          * course be required on a 64-bit system.
          */
-        if (ULONG_CMP_GE((ulong)atomic_read(&sync_sched_expedited_started),
-                         (ulong)atomic_read(&sync_sched_expedited_done) +
+        if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+                         (ulong)atomic_long_read(&rsp->expedited_done) +
                          ULONG_MAX / 8)) {
                 synchronize_sched();
                 return;
@@ -2330,7 +2328,7 @@ void synchronize_sched_expedited(void)
          * Take a ticket.  Note that atomic_inc_return() implies a
          * full memory barrier.
          */
-        snap = atomic_long_inc_return(&sync_sched_expedited_started);
+        snap = atomic_long_inc_return(&rsp->expedited_start);
         firstsnap = snap;
         get_online_cpus();
         WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2345,7 +2343,7 @@ void synchronize_sched_expedited(void)
                 put_online_cpus();
 
                 /* Check to see if someone else did our work for us. */
-                s = atomic_long_read(&sync_sched_expedited_done);
+                s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                         smp_mb(); /* ensure test happens before caller kfree */
                         return;
@@ -2360,7 +2358,7 @@ void synchronize_sched_expedited(void)
                 }
 
                 /* Recheck to see if someone else did our work for us. */
-                s = atomic_long_read(&sync_sched_expedited_done);
+                s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                         smp_mb(); /* ensure test happens before caller kfree */
                         return;
@@ -2374,7 +2372,7 @@ void synchronize_sched_expedited(void)
                  * period works for us.
                  */
                 get_online_cpus();
-                snap = atomic_long_read(&sync_sched_expedited_started);
+                snap = atomic_long_read(&rsp->expedited_start);
                 smp_mb(); /* ensure read is before try_stop_cpus(). */
         }
 
@@ -2385,12 +2383,12 @@ void synchronize_sched_expedited(void)
          * than we did already did their update.
          */
         do {
-                s = atomic_long_read(&sync_sched_expedited_done);
+                s = atomic_long_read(&rsp->expedited_done);
                 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
                         smp_mb(); /* ensure test happens before caller kfree */
                         break;
                 }
-        } while (atomic_long_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
 
         put_online_cpus();
 }
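
The function above is a ticket-based batching scheme: each caller takes a ticket from the start counter, and a completed grace period advances the done counter past every ticket it covers, so concurrent callers can detect that somebody else's grace period already did their work. Below is a minimal user-space sketch of that pattern, using C11 atomics in place of the kernel's atomic_long_t API; all names here are illustrative, not kernel code.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins for rsp->expedited_start and ->expedited_done. */
static atomic_ulong expedited_start;    /* Starting ticket. */
static atomic_ulong expedited_done;     /* Done ticket. */

/* Wrap-tolerant comparison, modeled on the kernel's ULONG_CMP_GE(). */
static bool ulong_cmp_ge(unsigned long a, unsigned long b)
{
        return a - b <= ULONG_MAX / 2;
}

static void expedited_sketch(void)
{
        unsigned long s, snap;

        /* Take a ticket; atomic_fetch_add() returns the old value. */
        snap = atomic_fetch_add(&expedited_start, 1) + 1;

        /* If a grace period already covered our ticket, we are done. */
        if (ulong_cmp_ge(atomic_load(&expedited_done), snap))
                return;

        /* ... force a grace period here ... */

        /* Advance the done ticket, but never move it backwards. */
        do {
                s = atomic_load(&expedited_done);
                if (ulong_cmp_ge(s, snap))
                        break;  /* Somebody advanced past our ticket. */
        } while (!atomic_compare_exchange_strong(&expedited_done, &s, snap));
}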
kernel/rcutree.h: 3 changes (3 additions, 0 deletions)

@@ -404,6 +404,9 @@ struct rcu_state {
                                                 /*  _rcu_barrier(). */
         /* End of fields guarded by barrier_mutex. */
 
+        atomic_long_t expedited_start;          /* Starting ticket. */
+        atomic_long_t expedited_done;           /* Done ticket. */
+
         unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                 /*  force_quiescent_state(). */
         unsigned long n_force_qs;               /* Number of calls to */
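
The payoff of the move is visible here: once the counters live in struct rcu_state, tracing code that already holds an rsp pointer can report them alongside the other per-flavor state. A hypothetical debugfs-style read-out is sketched below; show_expedited() is invented for illustration, assumes rcutree's context for rcu_sched_state, and is not the actual tracing code that later consumed these fields.

/* Hypothetical tracing hook, not actual kernel tracing code. */
static int show_expedited(struct seq_file *m, void *unused)
{
        struct rcu_state *rsp = &rcu_sched_state;

        seq_printf(m, "expedited_start: %ld expedited_done: %ld\n",
                   atomic_long_read(&rsp->expedited_start),
                   atomic_long_read(&rsp->expedited_done));
        return 0;
}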
