Commit fec8384
---
yaml
---
r: 302738
b: refs/heads/master
c: c57afe8
h: refs/heads/master
v: v3
Paul E. McKenney authored and committed on Apr 25, 2012
1 parent 8a525c4 commit fec8384
Showing 4 changed files with 57 additions and 5 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2ee3dc80660ac8285a37e662fd91b2e45c46f06a
+refs/heads/master: c57afe80db4e169135eb675acc2d241e26cc064e
2 changes: 2 additions & 0 deletions trunk/kernel/rcutree.c
@@ -1829,6 +1829,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->qlen++;
 	if (lazy)
 		rdp->qlen_lazy++;
+	else
+		rcu_idle_count_callbacks_posted();
 
 	if (__is_kfree_rcu_offset((unsigned long)func))
 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
1 change: 1 addition & 0 deletions trunk/kernel/rcutree.h
@@ -471,6 +471,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu);
 static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
+static void rcu_idle_count_callbacks_posted(void);
 static void print_cpu_stall_info_begin(void);
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
57 changes: 53 additions & 4 deletions trunk/kernel/rcutree_plugin.h
@@ -1938,6 +1938,14 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1981,6 +1989,10 @@ static void rcu_prepare_for_idle(int cpu)
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
+static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
+static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
 
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -1993,6 +2005,8 @@ static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
  */
 int rcu_needs_cpu(int cpu)
 {
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	per_cpu(rcu_idle_first_pass, cpu) = 1;
 	/* If no callbacks, RCU doesn't need the CPU. */
 	if (!rcu_cpu_has_callbacks(cpu))
 		return 0;
@@ -2095,6 +2109,26 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+	/*
+	 * If this is an idle re-entry, for example, due to use of
+	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+	 * loop, then don't take any state-machine actions, unless the
+	 * momentary exit from idle queued additional non-lazy callbacks.
+	 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+	 * pending.
+	 */
+	if (!per_cpu(rcu_idle_first_pass, cpu) &&
+	    (per_cpu(rcu_nonlazy_posted, cpu) ==
+	     per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+		if (rcu_cpu_has_callbacks(cpu))
+			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+				  per_cpu(rcu_idle_gp_timer_expires, cpu));
+		return;
+	}
+	per_cpu(rcu_idle_first_pass, cpu) = 0;
+	per_cpu(rcu_nonlazy_posted_snap, cpu) =
+		per_cpu(rcu_nonlazy_posted, cpu) - 1;
+
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
@@ -2127,11 +2161,15 @@ static void rcu_prepare_for_idle(int cpu)
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
 		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-				  jiffies + RCU_IDLE_GP_DELAY);
+			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+				jiffies + RCU_IDLE_GP_DELAY;
 		else
-			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-				  jiffies + RCU_IDLE_LAZY_GP_DELAY);
+			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+				jiffies + RCU_IDLE_LAZY_GP_DELAY;
+		mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+			  per_cpu(rcu_idle_gp_timer_expires, cpu));
+		per_cpu(rcu_nonlazy_posted_snap, cpu) =
+			per_cpu(rcu_nonlazy_posted, cpu);
 		return; /* Nothing more to do immediately. */
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
@@ -2171,6 +2209,17 @@ static void rcu_prepare_for_idle(int cpu)
 	trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of callbacks posted so that rcu_prepare_for_idle()
+ * can detect when something out of the idle loop posts a callback.
+ * Of course, it had better do so either from a trace event designed to
+ * be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+	__this_cpu_add(rcu_nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO

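For orientation only, below is a minimal single-CPU userspace sketch (not kernel code) of the guard these hunks add: a per-CPU first-pass flag plus a running count of non-lazy callback postings and its snapshot, used to tell a genuine idle entry, or an idle re-entry that queued new non-lazy callbacks, apart from a momentary exit and re-entry that posted nothing. The names enter_idle(), prepare_for_idle(), and count_callback_posted() are invented stand-ins for rcu_needs_cpu(), rcu_prepare_for_idle(), and rcu_idle_count_callbacks_posted(), and the snapshot handling is simplified: the actual patch offsets the snapshot by one inside rcu_prepare_for_idle() and resamples it when arming the timer.

#include <stdbool.h>
#include <stdio.h>

/* Single-CPU stand-ins for the per-CPU variables added by this commit. */
static bool idle_first_pass;
static unsigned long nonlazy_posted;
static unsigned long nonlazy_posted_snap;

/* Rough analogue of rcu_idle_count_callbacks_posted(). */
static void count_callback_posted(void)
{
	nonlazy_posted++;
}

/* Rough analogue of the flag set in rcu_needs_cpu(): a new idle sojourn begins. */
static void enter_idle(void)
{
	idle_first_pass = true;
}

/* Rough analogue of the re-entry check at the top of rcu_prepare_for_idle(). */
static void prepare_for_idle(void)
{
	if (!idle_first_pass && nonlazy_posted == nonlazy_posted_snap) {
		printf("idle re-entry, no new non-lazy callbacks: just repost the timer\n");
		return;
	}
	idle_first_pass = false;
	nonlazy_posted_snap = nonlazy_posted;
	printf("first pass or new non-lazy callbacks: run the idle-entry state machine\n");
}

int main(void)
{
	enter_idle();
	prepare_for_idle();	/* first pass: full state machine */
	prepare_for_idle();	/* re-entry, nothing new posted: timer repost only */
	count_callback_posted();
	prepare_for_idle();	/* re-entry, but a non-lazy callback arrived: state machine again */
	return 0;
}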