Commit d9f1bb6

rcu: Make rcu_read_lock_sched_held() take boot time into account
Before the scheduler starts, all tasks are non-preemptible by
definition. So, during that time, rcu_read_lock_sched_held()
needs to always return "true".  This patch makes that be so.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267135607-7056-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Paul E. McKenney authored and Ingo Molnar committed Feb 26, 2010
1 parent 056ba4a commit d9f1bb6
Showing 5 changed files with 21 additions and 25 deletions.
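As the commit message argues, preemption cannot occur before the scheduler starts, so every context is effectively an RCU-sched read-side critical section during that window. The old predicate consulted only lockdep and preempt_count(), both of which can legitimately report "not held" in early boot, so lockdep-based RCU checks could splat on perfectly safe accesses. A minimal user-space model of the change (the harness and parameter names are illustrative stand-ins for the kernel state, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/*
 * Model of rcu_read_lock_sched_held(): the parameters stand in for
 * lock_is_held(&rcu_sched_lock_map), preempt_count(), and the
 * rcu_scheduler_active flag this patch introduces.
 */
static bool held_old(bool lockdep_opinion, int preempt_count)
{
	return lockdep_opinion || preempt_count != 0;
}

static bool held_new(bool lockdep_opinion, int preempt_count,
		     int rcu_scheduler_active)
{
	return lockdep_opinion || preempt_count != 0 || !rcu_scheduler_active;
}

int main(void)
{
	/* Early boot: lockdep has no record of the lock, preempt_count()
	 * is zero, and rcu_scheduler_active is still 0. */
	printf("old, at boot: %d  (false alarm)\n", held_old(false, 0));
	printf("new, at boot: %d  (correctly held)\n", held_new(false, 0, 0));

	/* After rcu_scheduler_starting(), the same inputs once again
	 * indicate an unprotected access. */
	printf("new, after boot: %d\n", held_new(false, 0, 1));
	return 0;
}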
4 changes: 3 additions & 1 deletion include/linux/rcupdate.h

@@ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern int rcu_scheduler_active;
+extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -140,7 +142,7 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0;
+	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
 }
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
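For context on how the predicate is consumed: rcu_read_lock_sched_held() is meant to be quoted in debug checks such as rcu_dereference_check() from the same lockdep-RCU patch series. A hypothetical caller (gp and its foo pointer are invented names for illustration) might look like the following; before this patch, reaching such code during early boot would have produced a spurious complaint:

	/* Hypothetical example; "gp" and "foo" are illustrative names. */
	struct foo *f;

	f = rcu_dereference_check(gp->foo, rcu_read_lock_sched_held());
	/*
	 * With lockdep-based RCU checking enabled, the second argument
	 * must evaluate true, or a warning is emitted.  The new
	 * !rcu_scheduler_active term keeps it true throughout early
	 * boot, when preemption cannot yet occur.
	 */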
4 changes: 0 additions & 4 deletions include/linux/rcutiny.h

@@ -105,10 +105,6 @@ static inline void rcu_exit_nohz(void)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
-static inline void rcu_scheduler_starting(void)
-{
-}
-
 static inline void exit_rcu(void)
 {
 }
1 change: 0 additions & 1 deletion include/linux/rcutree.h

@@ -35,7 +35,6 @@ struct notifier_block;
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern int rcu_needs_cpu(int cpu);
-extern void rcu_scheduler_starting(void);
 extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
18 changes: 18 additions & 0 deletions kernel/rcupdate.c

@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -62,6 +63,23 @@ struct lockdep_map rcu_sched_lock_map =
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 #endif
 
+int rcu_scheduler_active __read_mostly;
+
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
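Note that this is pure code motion plus the new flag: the function and its comment move here from kernel/rcutree.c (below) so that a single definition serves both TREE and TINY RCU, which is also why the empty rcutiny.h stub above goes away. The <linux/kernel_stat.h> include follows the function because nr_context_switches() is declared there. The two WARN_ON()s capture the invariant that justifies the boot-time shortcut: with only one CPU online and no context switches yet, nothing can have been preempted. A sketch of the intended ordering (the exact call site is an assumption; the comment above promises only "towards the end of the scheduler's initialization process"):

	/* Sketch, not the actual scheduler-init code. */
	void __init scheduler_init_tail_sketch(void)
	{
		/* ... runqueues and the idle task are set up ... */

		/* Still one CPU and zero context switches, so the
		 * WARN_ON()s stay quiet; from here on,
		 * rcu_read_lock_sched_held() stops returning true
		 * unconditionally. */
		rcu_scheduler_starting();
	}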
19 changes: 0 additions & 19 deletions kernel/rcutree.c

@@ -46,7 +46,6 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
-#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -81,9 +80,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-static int rcu_scheduler_active __read_mostly;
-
-
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -1565,21 +1561,6 @@ static int rcu_needs_cpu_quick_check(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
-/*
- * This function is invoked towards the end of the scheduler's initialization
- * process.  Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system).  After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.
- */
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
-
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
