
Commit 996ffe8

---
r: 338817
b: refs/heads/master
c: 1924bcb
h: refs/heads/master
i:
  338815: 8e200f4
v: v3
Paul E. McKenney authored and Paul E. McKenney committed Nov 8, 2012
1 parent 3110066 commit 996ffe8
Showing 2 changed files with 45 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7b2e6011f150c42235c4a541d20cf6891afe878a
+refs/heads/master: 1924bcb0259711eea98491a7942d1ffbf677e114
62 changes: 44 additions & 18 deletions trunk/kernel/rcutree.c
@@ -2249,8 +2249,8 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+static atomic_long_t sync_sched_expedited_started = ATOMIC_LONG_INIT(0);
+static atomic_long_t sync_sched_expedited_done = ATOMIC_LONG_INIT(0);
 
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
@@ -2308,10 +2308,30 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
-	int firstsnap, s, snap, trycount = 0;
+	long firstsnap, s, snap;
+	int trycount = 0;
 
-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+	/*
+	 * If we are in danger of counter wrap, just do synchronize_sched().
+	 * By allowing sync_sched_expedited_started to advance no more than
+	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+	 * that more than 3.5 billion CPUs would be required to force a
+	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
+	 * course be required on a 64-bit system.
+	 */
+	if (ULONG_CMP_GE((ulong)atomic_read(&sync_sched_expedited_started),
+			 (ulong)atomic_read(&sync_sched_expedited_done) +
+			 ULONG_MAX / 8)) {
+		synchronize_sched();
+		return;
+	}
+
+	/*
+	 * Take a ticket.  Note that atomic_inc_return() implies a
+	 * full memory barrier.
+	 */
+	snap = atomic_long_inc_return(&sync_sched_expedited_started);
+	firstsnap = snap;
 	get_online_cpus();
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
@@ -2324,6 +2344,13 @@ void synchronize_sched_expedited(void)
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
 
+		/* Check to see if someone else did our work for us. */
+		s = atomic_long_read(&sync_sched_expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+
 		/* No joy, try again later.  Or just synchronize_sched(). */
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
@@ -2332,39 +2359,38 @@ void synchronize_sched_expedited(void)
 			return;
 		}
 
-		/* Check to see if someone else did our work for us. */
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+		/* Recheck to see if someone else did our work for us. */
+		s = atomic_long_read(&sync_sched_expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			smp_mb(); /* ensure test happens before caller kfree */
 			return;
 		}
 
 		/*
 		 * Refetching sync_sched_expedited_started allows later
-		 * callers to piggyback on our grace period.  We subtract
-		 * 1 to get the same token that the last incrementer got.
-		 * We retry after they started, so our grace period works
-		 * for them, and they started after our first try, so their
-		 * grace period works for us.
+		 * callers to piggyback on our grace period.  We retry
+		 * after they started, so our grace period works for them,
+		 * and they started after our first try, so their grace
+		 * period works for us.
 		 */
 		get_online_cpus();
-		snap = atomic_read(&sync_sched_expedited_started);
+		snap = atomic_long_read(&sync_sched_expedited_started);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
 
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
 	 * period.  Update the counter, but only if our work is still
 	 * relevant -- which it won't be if someone who started later
-	 * than we did beat us to the punch.
+	 * than we did already did their update.
 	 */
 	do {
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+		s = atomic_long_read(&sync_sched_expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			smp_mb(); /* ensure test happens before caller kfree */
 			break;
 		}
-	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+	} while (atomic_long_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
 
 	put_online_cpus();
 }
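
A quick aside on the arithmetic in the new comment above: with ULONG_MAX equal to 2^32 - 1 on a 32-bit system, the guard lets sync_sched_expedited_started run at most ULONG_MAX/8 (about 536 million) ahead of sync_sched_expedited_done, so forcing a wrap would take roughly ULONG_MAX - ULONG_MAX/8 additional in-flight callers, that is, more than 3.5 billion CPUs, each holding at most one outstanding ticket. A minimal user-space check of those figures, assuming a 32-bit unsigned long; this is an illustration, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long long ulong_max32 = 0xffffffffULL;		/* 2^32 - 1 */
	unsigned long long guard = ulong_max32 / 8;		/* wrap-guard threshold */
	unsigned long long cpus_to_wrap = ulong_max32 - guard;	/* in-flight callers needed */

	/*
	 * Each CPU can hold at most one outstanding ticket, so forcing
	 * "started" to wrap past "done" would need roughly this many CPUs
	 * inside synchronize_sched_expedited() at the same time.
	 */
	printf("guard threshold:     %llu (~536 million)\n", guard);
	printf("CPUs needed to wrap: %llu (~3.76 billion)\n", cpus_to_wrap);
	return 0;
}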

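For readers unfamiliar with the ticket scheme this patch hardens, the following is a simplified user-space sketch of the started/done counter protocol, written with C11 atomics. The helpers expedited_work() and slow_path() are hypothetical stand-ins for try_stop_cpus() and synchronize_sched(), and ULONG_CMP_GE() mirrors the kernel's wrap-tolerant comparison; this sketches the counter protocol under those assumptions rather than reproducing the kernel implementation.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Wrap-tolerant "a >= b", mirroring the kernel's ULONG_CMP_GE(). */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

static atomic_ulong started;	/* tickets handed out */
static atomic_ulong done;	/* highest ticket whose grace period completed */

/* Hypothetical stand-in for try_stop_cpus(); false plays the role of -EAGAIN. */
static bool expedited_work(void)
{
	return true;		/* always succeeds in this sketch */
}

/* Hypothetical stand-in for the synchronize_sched() fallback. */
static void slow_path(void)
{
}

static void expedited(void)
{
	unsigned long firstsnap, snap, s;

	/* Counter-wrap guard, as added by the patch above. */
	if (ULONG_CMP_GE(atomic_load(&started),
			 atomic_load(&done) + ULONG_MAX / 8)) {
		slow_path();
		return;
	}

	/* Take a ticket; our grace period must cover everything before it. */
	snap = atomic_fetch_add(&started, 1) + 1;
	firstsnap = snap;

	while (!expedited_work()) {
		/* Did a concurrent caller's grace period already cover us? */
		s = atomic_load(&done);
		if (ULONG_CMP_GE(s, firstsnap))
			return;

		/*
		 * Refetch "started" so that callers who arrived after our
		 * first attempt can piggyback on our eventual success.
		 */
		snap = atomic_load(&started);
	}

	/* Advance "done" to our snapshot unless a later caller already did. */
	s = atomic_load(&done);
	while (!ULONG_CMP_GE(s, snap) &&
	       !atomic_compare_exchange_weak(&done, &s, snap))
		;	/* a failed CAS reloads s */
}

int main(void)
{
	expedited();
	printf("done = %lu\n", atomic_load(&done));
	return 0;
}

In the kernel code, the explicit memory barriers around these reads and the cmpxchg ensure that a caller who observes an advanced "done" counter also observes the effects of the grace period it covers; the sketch relies on the default sequentially consistent ordering of C11 atomics, which is at least that strong.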