Commit 6196e8b

---
r: 338834
b: refs/heads/master
c: 4d9a5d4
h: refs/heads/master
v: v3
Frederic Weisbecker authored and Paul E. McKenney committed Oct 23, 2012
1 parent 426059c commit 6196e8b
Showing 10 changed files with 207 additions and 256 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7bd8f2a74bcbd39f4277766f4d49441c45dd10a0
+refs/heads/master: 4d9a5d4319e22670ec6d6227e12b54f361c46d0f
trunk/arch/um/drivers/mconsole_kern.c (2 changes: 1 addition & 1 deletion)
@@ -648,7 +648,7 @@ static void stack_proc(void *arg)
         struct task_struct *from = current, *to = arg;
 
         to->thread.saved_task = from;
-        rcu_switch(from, to);
+        rcu_user_hooks_switch(from, to);
         switch_to(from, to, from);
 }
 
trunk/include/linux/rcupdate.h (2 changes: 2 additions & 0 deletions)
@@ -204,6 +204,8 @@ static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_enter_after_irq(void) { }
 static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+                                         struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
 extern void exit_rcu(void);
trunk/include/linux/sched.h (8 changes: 0 additions & 8 deletions)
@@ -1844,14 +1844,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
-static inline void rcu_switch(struct task_struct *prev,
-                              struct task_struct *next)
-{
-#ifdef CONFIG_RCU_USER_QS
-        rcu_user_hooks_switch(prev, next);
-#endif
-}
-
 static inline void tsk_restore_flags(struct task_struct *task,
                                      unsigned long orig_flags, unsigned long flags)
 {
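Taken together, the three hunks above retire the rcu_switch() wrapper: its one caller, stack_proc() in mconsole_kern.c, now invokes rcu_user_hooks_switch() directly, and the new inline stub in rcupdate.h makes that call vanish when CONFIG_RCU_USER_QS is off, so the #ifdef no longer needs to live at the call site. A minimal standalone sketch of the stub-versus-hook pattern (the struct, the function bodies, and the printf are stand-ins for illustration, not kernel code):

#include <stdio.h>

struct task_struct { int pid; };

#ifdef CONFIG_RCU_USER_QS
/* Real hook: tell RCU which task the CPU is switching to. */
static inline void rcu_user_hooks_switch(struct task_struct *prev,
                                         struct task_struct *next)
{
        printf("rcu: hooks switch %d -> %d\n", prev->pid, next->pid);
}
#else
/* Stub: with the option off, the call compiles away entirely. */
static inline void rcu_user_hooks_switch(struct task_struct *prev,
                                         struct task_struct *next) { }
#endif

static void context_switch(struct task_struct *prev, struct task_struct *next)
{
        rcu_user_hooks_switch(prev, next);      /* direct call, no wrapper */
        /* ...the architecture's switch_to() would run here... */
}

int main(void)
{
        struct task_struct a = { .pid = 1 }, b = { .pid = 2 };

        context_switch(&a, &b);
        return 0;
}

Built with -DCONFIG_RCU_USER_QS the hook body runs; built without it, the empty inline compiles to nothing, which is exactly what the stub buys over an #ifdef in every caller.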
trunk/kernel/rcutorture.c (9 changes: 1 addition & 8 deletions)
@@ -1502,7 +1502,6 @@ rcu_torture_onoff(void *arg)
         unsigned long delta;
         int maxcpu = -1;
         DEFINE_RCU_RANDOM(rand);
-        int ret;
         unsigned long starttime;
 
         VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
@@ -1523,13 +1522,7 @@ rcu_torture_onoff(void *arg)
                                     torture_type, cpu);
                 starttime = jiffies;
                 n_offline_attempts++;
-                ret = cpu_down(cpu);
-                if (ret) {
-                        if (verbose)
-                                pr_alert("%s" TORTURE_FLAG
-                                         "rcu_torture_onoff task: offline %d failed: errno %d\n",
-                                         torture_type, cpu, ret);
-                } else {
+                if (cpu_down(cpu) == 0) {
                         if (verbose)
                                 pr_alert("%s" TORTURE_FLAG
                                          "rcu_torture_onoff task: offlined %d\n",
trunk/kernel/rcutree.c (98 changes: 31 additions & 67 deletions)
@@ -68,9 +68,9 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
         .level = { &sname##_state.node[0] }, \
         .call = cr, \
         .fqs_state = RCU_GP_IDLE, \
-        .gpnum = 0UL - 300UL, \
-        .completed = 0UL - 300UL, \
-        .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
+        .gpnum = -300, \
+        .completed = -300, \
+        .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
         .orphan_nxttail = &sname##_state.orphan_nxtlist, \
         .orphan_donetail = &sname##_state.orphan_donelist, \
         .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
@@ -1573,16 +1573,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Send the specified CPU's RCU callbacks to the orphanage.  The
  * specified CPU must be offline, and the caller must hold the
- * ->orphan_lock.
+ * ->onofflock.
  */
 static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                           struct rcu_node *rnp, struct rcu_data *rdp)
 {
         /*
          * Orphan the callbacks.  First adjust the counts.  This is safe
-         * because _rcu_barrier() excludes CPU-hotplug operations, so it
-         * cannot be running now.  Thus no memory barrier is required.
+         * because ->onofflock excludes _rcu_barrier()'s adoption of
+         * the callbacks, thus no memory barrier is required.
          */
         if (rdp->nxtlist != NULL) {
                 rsp->qlen_lazy += rdp->qlen_lazy;
@@ -1623,7 +1623,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 
 /*
  * Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage.  The caller must hold the ->orphan_lock.
+ * orphanage.  The caller must hold the ->onofflock.
  */
 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 {
@@ -1702,7 +1702,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
         /* Exclude any attempts to start a new grace period. */
         mutex_lock(&rsp->onoff_mutex);
-        raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
+        raw_spin_lock_irqsave(&rsp->onofflock, flags);
 
         /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
         rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
@@ -1729,10 +1729,10 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
         /*
          * We still hold the leaf rcu_node structure lock here, and
          * irqs are still disabled.  The reason for this subterfuge is
-         * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
+         * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
          * held leads to deadlock.
          */
-        raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
+        raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
         rnp = rdp->mynode;
         if (need_report & RCU_OFL_TASKS_NORM_GP)
                 rcu_report_unblock_qs_rnp(rnp, flags);
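The four hunks above restore ->onofflock as the lock that serializes CPU-offline callback orphaning against _rcu_barrier()'s adoption pass; per the updated comment, holding it is what makes the queue-count adjustments safe without an extra memory barrier. The core of rcu_send_cbs_to_orphanage() is an O(1) splice of the dead CPU's singly linked callback list onto a global orphanage. A simplified userspace sketch of that splice (the cblist type, the pthread mutex, and the names are stand-ins, not the kernel's data structures):

#include <pthread.h>
#include <stddef.h>

struct cb {
        struct cb *next;
};

struct cblist {
        struct cb *head;
        struct cb **tail;       /* &head when empty, else &last->next */
};

static struct cblist orphanage = { NULL, &orphanage.head };
static pthread_mutex_t onofflock = PTHREAD_MUTEX_INITIALIZER;

/* Splice a dead CPU's entire callback list onto the orphanage in O(1).
 * Analogy: rcu_send_cbs_to_orphanage() runs with ->onofflock held,
 * which also excludes _rcu_barrier()'s adoption of these callbacks. */
static void send_cbs_to_orphanage(struct cblist *cpu)
{
        pthread_mutex_lock(&onofflock);
        if (cpu->head) {
                *orphanage.tail = cpu->head;    /* attach at the tail */
                orphanage.tail = cpu->tail;
                cpu->head = NULL;               /* dead CPU owns nothing now */
                cpu->tail = &cpu->head;
        }
        pthread_mutex_unlock(&onofflock);
}

int main(void)
{
        struct cb c1 = { NULL }, c2 = { NULL };
        struct cblist cpu = { &c1, &c2.next };

        c1.next = &c2;
        send_cbs_to_orphanage(&cpu);
        return orphanage.head == &c1 ? 0 : 1;
}

Because the tail field always points at the last ->next pointer (or at head when the list is empty), the whole list moves without being walked.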
@@ -2249,6 +2249,9 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
         /*
@@ -2305,32 +2308,10 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
-        long firstsnap, s, snap;
-        int trycount = 0;
-        struct rcu_state *rsp = &rcu_sched_state;
+        int firstsnap, s, snap, trycount = 0;
 
-        /*
-         * If we are in danger of counter wrap, just do synchronize_sched().
-         * By allowing sync_sched_expedited_started to advance no more than
-         * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
-         * that more than 3.5 billion CPUs would be required to force a
-         * counter wrap on a 32-bit system.  Quite a few more CPUs would of
-         * course be required on a 64-bit system.
-         */
-        if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
-                         (ulong)atomic_long_read(&rsp->expedited_done) +
-                         ULONG_MAX / 8)) {
-                synchronize_sched();
-                atomic_long_inc(&rsp->expedited_wrap);
-                return;
-        }
-
-        /*
-         * Take a ticket.  Note that atomic_inc_return() implies a
-         * full memory barrier.
-         */
-        snap = atomic_long_inc_return(&rsp->expedited_start);
-        firstsnap = snap;
+        /* Note that atomic_inc_return() implies full memory barrier. */
+        firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
         get_online_cpus();
         WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
@@ -2342,65 +2323,48 @@ void synchronize_sched_expedited(void)
                              synchronize_sched_expedited_cpu_stop,
                              NULL) == -EAGAIN) {
                 put_online_cpus();
-                atomic_long_inc(&rsp->expedited_tryfail);
 
-                /* Check to see if someone else did our work for us. */
-                s = atomic_long_read(&rsp->expedited_done);
-                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                        /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
-                        atomic_long_inc(&rsp->expedited_workdone1);
-                        return;
-                }
-
                 /* No joy, try again later.  Or just synchronize_sched(). */
                 if (trycount++ < 10) {
                         udelay(trycount * num_online_cpus());
                 } else {
                         synchronize_sched();
-                        atomic_long_inc(&rsp->expedited_normal);
                         return;
                 }
 
-                /* Recheck to see if someone else did our work for us. */
-                s = atomic_long_read(&rsp->expedited_done);
-                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                        /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
-                        atomic_long_inc(&rsp->expedited_workdone2);
+                /* Check to see if someone else did our work for us. */
+                s = atomic_read(&sync_sched_expedited_done);
+                if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+                        smp_mb(); /* ensure test happens before caller kfree */
                         return;
                 }
 
                 /*
                  * Refetching sync_sched_expedited_started allows later
-                 * callers to piggyback on our grace period.  We retry
-                 * after they started, so our grace period works for them,
-                 * and they started after our first try, so their grace
-                 * period works for us.
+                 * callers to piggyback on our grace period.  We subtract
+                 * 1 to get the same token that the last incrementer got.
+                 * We retry after they started, so our grace period works
+                 * for them, and they started after our first try, so their
+                 * grace period works for us.
                  */
                 get_online_cpus();
-                snap = atomic_long_read(&rsp->expedited_start);
+                snap = atomic_read(&sync_sched_expedited_started);
                 smp_mb(); /* ensure read is before try_stop_cpus(). */
         }
-        atomic_long_inc(&rsp->expedited_stoppedcpus);
 
         /*
          * Everyone up to our most recent fetch is covered by our grace
          * period.  Update the counter, but only if our work is still
          * relevant -- which it won't be if someone who started later
-         * than we did already did their update.
+         * than we did beat us to the punch.
          */
         do {
-                atomic_long_inc(&rsp->expedited_done_tries);
-                s = atomic_long_read(&rsp->expedited_done);
-                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-                        /* ensure test happens before caller kfree */
-                        smp_mb__before_atomic_inc(); /* ^^^ */
-                        atomic_long_inc(&rsp->expedited_done_lost);
+                s = atomic_read(&sync_sched_expedited_done);
+                if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+                        smp_mb(); /* ensure test happens before caller kfree */
                         break;
                 }
-        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
-        atomic_long_inc(&rsp->expedited_done_exit);
+        } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
 
         put_online_cpus();
 }
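The restored synchronize_sched_expedited() is a ticket protocol: each caller atomically takes a ticket from sync_sched_expedited_started, and whoever completes a grace period publishes its snapshot into sync_sched_expedited_done, so concurrent callers whose tickets are already covered can return without redoing the work. A userspace model of that handshake using C11 atomics (the names and the plain >= comparison are simplifications; the kernel uses atomic_t plus the wrap-safe UINT_CMP_GE, and it retries around try_stop_cpus(), which this sketch omits):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int expedited_started = 0;  /* ticket counter */
static atomic_int expedited_done = 0;     /* highest ticket known complete */

static void force_grace_period(void)
{
        /* Stand-in for the stop_machine-based forcing of context switches. */
}

static void expedited_sync(void)
{
        /* Take a ticket; the atomic RMW implies a full memory barrier. */
        int snap = atomic_fetch_add(&expedited_started, 1) + 1;

        /* Did someone whose grace period covers our ticket already finish? */
        if (atomic_load(&expedited_done) >= snap)
                return;         /* piggyback on their grace period */

        force_grace_period();

        /* Publish our ticket unless a later caller beat us to the punch. */
        int s = atomic_load(&expedited_done);
        while (s < snap &&
               !atomic_compare_exchange_weak(&expedited_done, &s, snap))
                ;       /* on failure, s is reloaded with the current value */
}

int main(void)
{
        expedited_sync();
        printf("done ticket: %d\n", atomic_load(&expedited_done));
        return 0;
}

The compare-and-swap loop only ever moves expedited_done forward, so a caller that loses the race has necessarily been covered by the winner's later snapshot and can simply return.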
trunk/kernel/rcutree.h (19 changes: 4 additions & 15 deletions)
@@ -383,8 +383,9 @@ struct rcu_state {
 
         /* End of fields guarded by root rcu_node's lock. */
 
-        raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
-                                                /* Protect following fields. */
+        raw_spinlock_t onofflock ____cacheline_internodealigned_in_smp;
+                                                /* exclude on/offline and */
+                                                /* starting new GP. */
         struct rcu_head *orphan_nxtlist;        /* Orphaned callbacks that */
                                                 /* need a grace period. */
         struct rcu_head **orphan_nxttail;       /* Tail of above. */
@@ -393,7 +394,7 @@ struct rcu_state {
         struct rcu_head **orphan_donetail;      /* Tail of above. */
         long qlen_lazy;                         /* Number of lazy callbacks. */
         long qlen;                              /* Total number of callbacks. */
-        /* End of fields guarded by orphan_lock. */
+        /* End of fields guarded by onofflock. */
 
         struct mutex onoff_mutex;               /* Coordinate hotplug & GPs. */
 
@@ -404,18 +405,6 @@ struct rcu_state {
                                                 /* _rcu_barrier(). */
         /* End of fields guarded by barrier_mutex. */
 
-        atomic_long_t expedited_start;          /* Starting ticket. */
-        atomic_long_t expedited_done;           /* Done ticket. */
-        atomic_long_t expedited_wrap;           /* # near-wrap incidents. */
-        atomic_long_t expedited_tryfail;        /* # acquisition failures. */
-        atomic_long_t expedited_workdone1;      /* # done by others #1. */
-        atomic_long_t expedited_workdone2;      /* # done by others #2. */
-        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
-        atomic_long_t expedited_stoppedcpus;    /* # successful stop_cpus. */
-        atomic_long_t expedited_done_tries;     /* # tries to update _done. */
-        atomic_long_t expedited_done_lost;      /* # times beaten to _done. */
-        atomic_long_t expedited_done_exit;      /* # times exited _done loop. */
-
         unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                 /* force_quiescent_state(). */
         unsigned long n_force_qs;               /* Number of calls to */
trunk/kernel/rcutree_plugin.h (3 changes: 1 addition & 2 deletions)
@@ -757,8 +757,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * grace period for the specified rcu_node structure.  If there are no such
  * tasks, report it up the rcu_node hierarchy.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
- * CPU hotplug operations.
+ * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
  */
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
