Merge branches 'doc.2013.09.25b' and 'fixes.2013.09.23b' into HEAD
doc.2013.09.25b: Topic branch for documentation updates.
fixes.2013.09.23b: Topic branch for miscellaneous fixes.
Paul E. McKenney committed Oct 15, 2013
2 parents 4b0d3f0 + 5d5a080 commit 460aeba
Showing 7 changed files with 119 additions and 51 deletions.
23 changes: 21 additions & 2 deletions include/linux/rculist.h
@@ -18,6 +18,21 @@
 * be used anywhere you would want to use a list_empty_rcu().
 */

+/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+	ACCESS_ONCE(list->next) = list;
+	ACCESS_ONCE(list->prev) = list;
+}
+
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
	if (list_empty(list))
		return;

-	/* "first" and "last" tracking list, so initialize it. */
+	/*
+	 * "first" and "last" tracking list, so initialize it.  RCU readers
+	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+	 * instead of INIT_LIST_HEAD().
+	 */

-	INIT_LIST_HEAD(list);
+	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
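For context, here is a sketch of the writer-side pattern the new primitive supports (the list, mutex, and function names are illustrative, not part of this commit). Because readers may traverse the list under rcu_read_lock() while it is being emptied, each re-initializing pointer update must be a single untorn store, which is what the ACCESS_ONCE()-based INIT_LIST_HEAD_RCU() provides:

	static LIST_HEAD(mylist);		/* assumed visible to RCU readers */
	static DEFINE_MUTEX(mylist_mutex);	/* assumed writer-side lock */

	static void drain_mylist(struct list_head *drain)
	{
		mutex_lock(&mylist_mutex);	/* sleepable lock: sync below blocks */
		/*
		 * list_splice_init_rcu() re-initializes mylist with
		 * INIT_LIST_HEAD_RCU(), waits for pre-existing readers via
		 * the synchronize_rcu() callback, and only then splices the
		 * old elements onto drain.
		 */
		list_splice_init_rcu(&mylist, drain, synchronize_rcu);
		mutex_unlock(&mylist_mutex);
	}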
7 changes: 7 additions & 0 deletions kernel/rcu.h
@@ -122,4 +122,11 @@ int rcu_jiffies_till_stall_check(void);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)

#endif /* __LINUX_RCU_H */
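As a usage sketch, a call site simply wraps each string literal in TPS(), which registers the literal with the tracing system so that a binary trace records a pointer that perf or trace-cmd can later resolve back to text (this mirrors the call sites converted elsewhere in this commit):

	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));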
2 changes: 1 addition & 1 deletion kernel/rcupdate.c
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
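Making the variable static is safe because module_param() generates its registration code in the same translation unit, referring to the variable by name; external access is governed only by the sysfs mode (0644 above). A minimal sketch of the same pattern, with hypothetical names:

	#include <linux/moduleparam.h>

	/* A file-scope static works: module_param() expands in this file. */
	static int my_timeout __read_mostly = 21;
	module_param(my_timeout, int, 0644);	/* world-readable, root-writable */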
17 changes: 10 additions & 7 deletions kernel/rcutiny.c
@@ -35,6 +35,7 @@
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
+#include <linux/ftrace_event.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
@@ -58,16 +59,17 @@ static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick("--=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
-	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
+	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
+				    rcu_dynticks_nesting, newval));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

-		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -120,15 +122,15 @@ EXPORT_SYMBOL_GPL(rcu_irq_exit);
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick("++=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
-	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

-		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -304,7 +306,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
-	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+	RCU_TRACE(trace_rcu_batch_end(rcp->name,
+				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
97 changes: 65 additions & 32 deletions kernel/rcutree.c
@@ -61,13 +61,6 @@

#include "rcu.h"

/*
* Strings used in tracepoints need to be exported via the
* tracing system such that tools like perf and trace-cmd can
* translate the string address pointers to actual text.
*/
#define TPS(x) tracepoint_string(x)

/* Data structures. */

static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
@@ -222,7 +215,7 @@ void rcu_note_context_switch(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +364,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
{
	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
@@ -407,7 +401,7 @@ static void rcu_eqs_enter(bool user)
	long long oldval;
	struct rcu_dynticks *rdtp;

-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
@@ -435,7 +429,7 @@ void rcu_idle_enter(void)

	local_irq_save(flags);
	rcu_eqs_enter(false);
-	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +472,7 @@ void rcu_irq_exit(void)
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +502,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
@@ -528,7 +523,7 @@ static void rcu_eqs_exit(bool user)
	struct rcu_dynticks *rdtp;
	long long oldval;

-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +550,7 @@ void rcu_idle_exit(void)

	local_irq_save(flags);
	rcu_eqs_exit(false);
-	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +594,7 @@ void rcu_irq_enter(void)
	long long oldval;

	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +615,7 @@
 */
void rcu_nmi_enter(void)
{
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 &&
	    (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +637,7 @@
 */
void rcu_nmi_exit(void)
{
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 ||
	    --rdtp->dynticks_nmi_nesting != 0)
@@ -665,7 +660,7 @@ int rcu_is_cpu_idle(void)
	int ret;

	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
	preempt_enable();
	return ret;
}
@@ -703,7 +698,7 @@ bool rcu_lockdep_current_cpu_online(void)
	if (in_nmi())
		return 1;
	preempt_disable();
-	rdp = &__get_cpu_var(rcu_sched_data);
+	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rnp->qsmaskinit) ||
	      !rcu_scheduler_fully_active;
@@ -723,7 +718,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
-	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
@@ -802,8 +797,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
unsigned long j = ACCESS_ONCE(jiffies);

rsp->gp_start = j;
smp_wmb(); /* Record start time before stall time. */
rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
}

/*
@@ -932,17 +930,48 @@ static void print_cpu_stall(struct rcu_state *rsp)

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
+	unsigned long completed;
+	unsigned long gpnum;
+	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

-	if (rcu_cpu_stall_suppress)
+	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
		return;
	j = ACCESS_ONCE(jiffies);

+	/*
+	 * Lots of memory barriers to reject false positives.
+	 *
+	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+	 * then rsp->gp_start, and finally rsp->completed.  These values
+	 * are updated in the opposite order with memory barriers (or
+	 * equivalent) during grace-period initialization and cleanup.
+	 * Now, a false positive can occur if we get a new value of
+	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
+	 * the memory barriers, the only way that this can happen is if one
+	 * grace period ends and another starts between these two fetches.
+	 * Detect this by comparing rsp->completed with the previous fetch
+	 * from rsp->gpnum.
+	 *
+	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+	 * and rsp->gp_start suffice to forestall false positives.
+	 */
+	gpnum = ACCESS_ONCE(rsp->gpnum);
+	smp_rmb(); /* Pick up ->gpnum first... */
+	js = ACCESS_ONCE(rsp->jiffies_stall);
+	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+	gps = ACCESS_ONCE(rsp->gp_start);
+	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+	completed = ACCESS_ONCE(rsp->completed);
+	if (ULONG_CMP_GE(completed, gpnum) ||
+	    ULONG_CMP_LT(j, js) ||
+	    ULONG_CMP_GE(gps, js))
+		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);
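The protocol the comment above describes is the usual paired-barrier publication pattern: the update side (record_gp_stall_check_time() and rcu_gp_init() in this commit) writes ->gp_start and then ->jiffies_stall separated by smp_wmb(), while the stall check reads them back in the opposite order separated by smp_rmb(). A stripped-down, two-variable illustration of the pairing (not the actual RCU code):

	static unsigned long gp_start;		/* written first */
	static unsigned long jiffies_stall;	/* written second */

	static void note_gp_start(unsigned long j, unsigned long timeout)
	{
		gp_start = j;
		smp_wmb();	/* Order ->gp_start write before ->jiffies_stall. */
		jiffies_stall = j + timeout;
	}

	static bool stall_timed_out(unsigned long j)
	{
		unsigned long js, gps;

		js = ACCESS_ONCE(jiffies_stall);	/* second-written value first... */
		smp_rmb();	/* ...pairs with the smp_wmb() in note_gp_start()... */
		gps = ACCESS_ONCE(gp_start);		/* ...then the first-written value. */
		return ULONG_CMP_GE(j, js) && ULONG_CMP_LT(gps, js);
	}

If the reader observes the newly written jiffies_stall, the barrier pairing guarantees that it also observes the gp_start written before it, so a fresh stall deadline is never compared against a stale start time. The one remaining race, where one grace period ends and another begins between the two fetches, is exactly what the ->completed/->gpnum comparison in check_cpu_stall() catches.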
@@ -1315,9 +1344,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
	}

	/* Advance to a new grace period and initialize state. */
+	record_gp_stall_check_time(rsp);
+	smp_wmb(); /* Record GP times before starting GP. */
	rsp->gpnum++;
	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	record_gp_stall_check_time(rsp);
	raw_spin_unlock_irq(&rnp->lock);

/* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1396,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
/*
 * Do one round of quiescent-state forcing.
 */
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
	int fqs_state = fqs_state_in;
	bool isidle = false;
@@ -1452,7 +1482,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
	rdp = this_cpu_ptr(rsp->rda);
	rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
	if (cpu_needs_another_gp(rsp, rdp))
-		rsp->gp_flags = 1;
+		rsp->gp_flags = RCU_GP_FLAG_INIT;
	raw_spin_unlock_irq(&rnp->lock);
}

@@ -2725,10 +2755,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (rdp->qlen != rdp->qlen_lazy)
+		if (!rdp->nxtlist)
+			continue;
+		hc = true;
+		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
			al = false;
-		if (rdp->nxtlist)
-			hc = true;
+			break;
+		}
	}
	if (all_lazy)
		*all_lazy = al;
@@ -3295,8 +3328,8 @@ void __init rcu_init(void)

	rcu_bootup_announce();
	rcu_init_geometry();
-	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
