---
r: 323583
b: refs/heads/master
c: adf5091
h: refs/heads/master
i:
  323581: 27720c4
  323579: 8f7960e
  323575: d503282
  323567: 1f62189
  323551: 0b6670a
  323519: c226b24
  323455: 6846110
  323327: cf682e1
  323071: a69bee5
  322559: 0be7cf4
  321535: 695fbec
  319487: d09cb98
v: v3
Frederic Weisbecker committed Sep 26, 2012
1 parent 200d930 commit 375e9e5
Showing 3 changed files with 104 additions and 35 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: a9b86fab4b0a36fc4cd2712a07259c2c0e769742
refs/heads/master: adf5091e6ccaa02905e7a28f9ff44f46c7f4c230
2 changes: 2 additions & 0 deletions trunk/include/linux/rcupdate.h
@@ -191,6 +191,8 @@ extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_user_enter(void);
extern void rcu_user_exit(void);
extern void exit_rcu(void);

/**
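
Note: the two declarations added above only export the new API; this commit itself wires in no callers (presumably later patches in the series hook them into the kernel entry/exit paths). A minimal caller-side sketch under that assumption — the arch hook names below are purely illustrative, only rcu_user_enter() and rcu_user_exit() come from this patch:

#include <linux/rcupdate.h>

/* assumed hook: run when a syscall or exception enters the kernel from userspace */
static inline void arch_enter_from_user(void)
{
        rcu_user_exit();        /* RCU read-side critical sections allowed again */
}

/* assumed hook: run right before resuming userspace */
static inline void arch_return_to_user(void)
{
        rcu_user_enter();       /* no RCU usage permitted until the next rcu_user_exit() */
}
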
135 changes: 101 additions & 34 deletions trunk/kernel/rcutree.c
@@ -322,16 +322,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
}

/*
* rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
* rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
*
* If the new value of the ->dynticks_nesting counter now is zero,
* we really have entered idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
{
trace_rcu_dyntick("Start", oldval, 0);
if (!is_idle_task(current)) {
if (!is_idle_task(current) && !user) {
struct task_struct *idle = idle_task(smp_processor_id());

trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +349,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);

/*
* The idle task is not permitted to enter the idle loop while
* It is illegal to enter an extended quiescent state while
* in an RCU read-side critical section.
*/
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,19 +360,11 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
"Illegal idle entry in RCU-sched read-side critical section.");
}

/**
* rcu_idle_enter - inform RCU that current CPU is entering idle
*
* Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
* We crowbar the ->dynticks_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
/*
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*/
void rcu_idle_enter(void)
static void rcu_eqs_enter(bool user)
{
unsigned long flags;
long long oldval;
@@ -385,11 +378,53 @@ void rcu_idle_enter(void)
rdtp->dynticks_nesting = 0;
else
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
rcu_idle_enter_common(rdtp, oldval);
rcu_eqs_enter_common(rdtp, oldval, user);
local_irq_restore(flags);
}

/**
* rcu_idle_enter - inform RCU that current CPU is entering idle
*
* Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
* We crowbar the ->dynticks_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
*/
void rcu_idle_enter(void)
{
rcu_eqs_enter(0);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/**
* rcu_user_enter - inform RCU that we are resuming userspace.
*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_user_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*/
void rcu_user_enter(void)
{
/*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
* This would mess up the dynticks_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
if (in_interrupt())
return;

rcu_eqs_enter(1);
}


/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
@@ -420,18 +455,19 @@ void rcu_irq_exit(void)
if (rdtp->dynticks_nesting)
trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
else
rcu_idle_enter_common(rdtp, oldval);
rcu_eqs_enter_common(rdtp, oldval, 1);
local_irq_restore(flags);
}

/*
* rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
* rcu_eqs_exit_common - current CPU moving away from extended quiescent state
*
* If the new value of the ->dynticks_nesting counter was previously zero,
* we really have exited idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
{
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
@@ -440,7 +476,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
if (!is_idle_task(current)) {
if (!is_idle_task(current) && !user) {
struct task_struct *idle = idle_task(smp_processor_id());

trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,18 +488,11 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
}
}

/**
* rcu_idle_exit - inform RCU that current CPU is leaving idle
*
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
* We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
* allow for the possibility of usermode upcalls messing up our count
* of interrupt nesting level during the busy period that is just
* now starting.
/*
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*/
void rcu_idle_exit(void)
static void rcu_eqs_exit(bool user)
{
unsigned long flags;
struct rcu_dynticks *rdtp;
@@ -477,11 +506,49 @@ void rcu_idle_exit(void)
rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
else
rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_idle_exit_common(rdtp, oldval);
rcu_eqs_exit_common(rdtp, oldval, user);
local_irq_restore(flags);
}

/**
* rcu_idle_exit - inform RCU that current CPU is leaving idle
*
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
* We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
* allow for the possibility of usermode upcalls messing up our count
* of interrupt nesting level during the busy period that is just
* now starting.
*/
void rcu_idle_exit(void)
{
rcu_eqs_exit(0);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/**
* rcu_user_exit - inform RCU that we are exiting userspace.
*
* Exit RCU idle mode while entering the kernel, which may run an RCU
* read-side critical section at any time.
*/
void rcu_user_exit(void)
{
/*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
* This would mess up the dynticks_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
if (in_interrupt())
return;

rcu_eqs_exit(1);
}

/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
@@ -515,7 +582,7 @@ void rcu_irq_enter(void)
if (oldval)
trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
else
rcu_idle_exit_common(rdtp, oldval);
rcu_eqs_exit_common(rdtp, oldval, 1);
local_irq_restore(flags);
}

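
The in_interrupt() early return in rcu_user_enter() and rcu_user_exit() is worth a concrete illustration. A hedged walk-through of the exception-in-irq scenario named in their comments, assuming userspace was running (so dynticks_nesting was 0) when the irq arrived; the numbers are simplified and ignore the DYNTICK_TASK_* encoding:

/*
 * Without the in_interrupt() check:
 *
 *   rcu_irq_enter();      // nesting 0 -> 1, RCU starts watching again
 *     rcu_user_exit();    //   exception entry: would crowbar nesting upwards
 *     rcu_user_enter();   //   exception exit: would crowbar nesting back to 0
 *   rcu_irq_exit();       // nesting 0 -> -1, the count is now unbalanced
 *
 * With the check, both rcu_user_*() calls return immediately, the
 * rcu_irq_*() pair alone keeps the count balanced, and it already
 * protects any RCU use inside the exception handler.
 */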
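
For readers who want to poke at the nesting semantics outside the kernel, here is a small userspace toy model. It is an editor's sketch, not the kernel's implementation: it ignores the atomic ->dynticks counter and the DYNTICK_TASK_* constants, and simply treats nesting == 0 as "in an extended quiescent state".

/* toy_eqs.c - simplified model of RCU extended-quiescent-state nesting.
 * Build and run: cc -o toy_eqs toy_eqs.c && ./toy_eqs
 */
#include <stdbool.h>
#include <stdio.h>

static long long nesting = 1;   /* > 0: kernel running, RCU must watch this CPU */

static bool in_eqs(void)
{
        return nesting == 0;    /* 0: idle or userspace, RCU may ignore the CPU */
}

static void report(const char *what)
{
        printf("%-12s nesting=%lld in_eqs=%d\n", what, nesting, in_eqs());
}

static void user_enter(void)    /* models rcu_user_enter()/rcu_eqs_enter() */
{
        nesting = 0;            /* crowbar to zero: really in userspace now */
        report("user_enter");
}

static void user_exit(void)     /* models rcu_user_exit()/rcu_eqs_exit() */
{
        nesting = 1;
        report("user_exit");
}

static void irq_enter(void)     /* models rcu_irq_enter() */
{
        nesting++;
        report("irq_enter");
}

static void irq_exit(void)      /* models rcu_irq_exit() */
{
        nesting--;
        report("irq_exit");
}

int main(void)
{
        user_enter();   /* resume userspace: extended quiescent state */
        irq_enter();    /* an irq interrupts userspace: RCU watches again */
        irq_exit();     /* irq done: back to the quiescent state */
        user_exit();    /* syscall or exception: RCU watches again */
        return 0;
}

Running it shows the CPU leaving the quiescent state only for the duration of the irq and returning to it afterwards, which is the bookkeeping rcu_irq_enter()/rcu_irq_exit() perform on top of rcu_user_enter()/rcu_user_exit().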
