
Commit dac79bc

---
r: 312198
b: refs/heads/master
c: bf1304e
h: refs/heads/master
v: v3
Paul E. McKenney authored and Paul E. McKenney committed Jul 2, 2012
1 parent a0ed695 commit dac79bc
Showing 9 changed files with 206 additions and 159 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: cfca927972e31a5b3da49bf641c525732ff3c357
+refs/heads/master: bf1304e9cd755be7814ac8365834b7a1d0f06b58
4 changes: 2 additions & 2 deletions trunk/include/linux/init_task.h
@@ -168,8 +168,8 @@ extern struct cred init_cred;
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
-	RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
-	RCU_POINTER_INITIALIZER(cred, &init_cred), \
+	RCU_INIT_POINTER(.real_cred, &init_cred), \
+	RCU_INIT_POINTER(.cred, &init_cred), \
.comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
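The init_task.h hunk above works because, as of this commit (see the rcupdate.h hunks below), RCU_INIT_POINTER(p, v) expands to a bare assignment rather than a do/while(0) statement, so passing a designated-initializer-style `.field` as `p` yields a valid GCC-style initializer. A minimal sketch of the idiom; `struct ctx`, `cfg`, and `init_cfg` are illustrative names, not from the tree:

```c
#include <linux/rcupdate.h>

struct cfg_data { int x; };
static struct cfg_data init_cfg;

struct ctx {
	struct cfg_data __rcu *cfg;
	int id;
};

/* RCU_INIT_POINTER(.cfg, &init_cfg) expands to
 * ".cfg = (typeof(*&init_cfg) __force __rcu *)(&init_cfg)",
 * i.e. an ordinary designated initializer with the sparse
 * address-space cast applied. */
static struct ctx init_ctx = {
	RCU_INIT_POINTER(.cfg, &init_cfg),
	.id = 0,
};
```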
4 changes: 1 addition & 3 deletions trunk/include/linux/key.h
@@ -303,9 +303,7 @@ static inline bool key_is_instantiated(const struct key *key)
rwsem_is_locked(&((struct key *)(KEY))->sem)))

#define rcu_assign_keypointer(KEY, PAYLOAD) \
-	do { \
-		rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
-	} while (0)
+	(rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))

#ifdef CONFIG_SYSCTL
extern ctl_table key_sysctls[];
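Dropping the do/while(0) wrapper turns rcu_assign_keypointer() into an expression, in line with the __rcu_assign_pointer() change below that lets rcu_assign_pointer() return the value assigned. A toy user-space illustration (not kernel code) of the difference between the two macro shapes:

```c
/* A do/while(0) macro is a statement and cannot appear inside an
 * expression; a parenthesized assignment is an expression and
 * yields the assigned value. */
#include <stdio.h>

#define SET_STMT(p, v)  do { (p) = (v); } while (0)
#define SET_EXPR(p, v)  ((p) = (v))

int main(void)
{
	int x, y;

	SET_STMT(x, 1);            /* fine as a statement */
	/* y = SET_STMT(x, 2);      would not compile: not an expression */
	y = SET_EXPR(x, 2);        /* OK: the assignment yields 2 */
	printf("%d %d\n", x, y);   /* prints "2 2" */
	return 0;
}
```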
48 changes: 30 additions & 18 deletions trunk/include/linux/rcupdate.h
@@ -147,7 +147,6 @@ extern void synchronize_sched(void);

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
-extern void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
@@ -256,10 +255,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -271,6 +266,15 @@ static inline bool rcu_lockdep_current_cpu_online(void)

#ifdef CONFIG_DEBUG_LOCK_ALLOC

+#ifdef CONFIG_PROVE_RCU
+extern int rcu_is_cpu_idle(void);
+#else /* !CONFIG_PROVE_RCU */
+static inline int rcu_is_cpu_idle(void)
+{
+	return 0;
+}
+#endif /* else !CONFIG_PROVE_RCU */

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
@@ -509,10 +513,10 @@ static inline void rcu_preempt_sleep_check(void)
(_________p1); \
})
#define __rcu_assign_pointer(p, v, space) \
-	do { \
+	({ \
smp_wmb(); \
(p) = (typeof(*v) __force space *)(v); \
-	} while (0)
+	})


/**
@@ -847,7 +851,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*
* Assigns the specified value to the specified RCU-protected
* pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.
+ * any prior initialization. Returns the value assigned.
*
* Inserts memory barriers on architectures that require them
* (which is most of them), and also prevents the compiler from
@@ -899,17 +903,25 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* the reader-accessible portions of the linked structure.
*/
#define RCU_INIT_POINTER(p, v) \
-	do { \
-		p = (typeof(*v) __force __rcu *)(v); \
-	} while (0)
+	p = (typeof(*v) __force __rcu *)(v)

-/**
- * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
- *
- * GCC-style initialization for an RCU-protected pointer in a structure field.
- */
-#define RCU_POINTER_INITIALIZER(p, v) \
-	.p = (typeof(*v) __force __rcu *)(v)
+static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
+{
+	return offset < 4096;
+}
+
+static __always_inline
+void __kfree_rcu(struct rcu_head *head, unsigned long offset)
+{
+	typedef void (*rcu_callback)(struct rcu_head *);
+
+	BUILD_BUG_ON(!__builtin_constant_p(offset));
+
+	/* See the kfree_rcu() header comment. */
+	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+
+	kfree_call_rcu(head, (rcu_callback)offset);
+}

/*
* Does the specified offset indicate that the corresponding rcu_head
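The restored __kfree_rcu() machinery encodes the byte offset of the rcu_head inside the enclosing structure as a fake callback pointer; __is_kfree_rcu_offset() relies on the assumption that no real function is linked below address 4096, so the RCU core can tell offsets from genuine callbacks and simply kfree() the enclosing object after a grace period. A sketch of the caller-side pattern this supports; struct foo and its fields are illustrative, not from the commit:

```c
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* must lie within the first 4096 bytes */
};

static void release_foo(struct foo *p)
{
	/* Expands (via __kfree_rcu()) to passing offsetof(struct foo, rcu)
	 * as the "callback"; no callback function need be written. */
	kfree_rcu(p, rcu);
}
```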
44 changes: 0 additions & 44 deletions trunk/kernel/rcupdate.c
@@ -53,50 +53,6 @@

#ifdef CONFIG_PREEMPT_RCU

-/*
- * Preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier(); /* critical section after entry code. */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
- * Preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1) {
-		--t->rcu_read_lock_nesting;
-	} else {
-		barrier(); /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
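The deleted __rcu_read_unlock() above (reinstated in rcutiny_plugin.h later in this commit) uses INT_MIN as a sentinel nesting value: during outermost-unlock processing, a nested reader entered from an interrupt handler increments from INT_MIN and still sees a negative, nonzero count, so it can never mistake itself for the outermost unlock. A toy user-space model of that protocol, with a plain int standing in for the task_struct field:

```c
#include <limits.h>
#include <stdio.h>

static int nesting;	/* stands in for t->rcu_read_lock_nesting */

static void toy_read_lock(void)
{
	nesting++;
}

static void toy_read_unlock(void)
{
	if (nesting != 1) {
		--nesting;		/* nested case: just count down */
	} else {
		nesting = INT_MIN;	/* flag "outermost unlock running" */
		/* Cleanup runs here: a toy_read_lock()/toy_read_unlock()
		 * pair from an interrupt would move the count to
		 * INT_MIN + 1 and back, never touching 0 or 1. */
		nesting = 0;
	}
}

int main(void)
{
	toy_read_lock();
	toy_read_lock();	/* nested reader */
	toy_read_unlock();	/* nesting: 2 -> 1 */
	toy_read_unlock();	/* outermost: 1 -> INT_MIN -> 0 */
	printf("final nesting = %d\n", nesting);	/* prints 0 */
	return 0;
}
```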
4 changes: 2 additions & 2 deletions trunk/kernel/rcutiny.c
@@ -172,7 +172,7 @@ void rcu_irq_enter(void)
local_irq_restore(flags);
}

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_PROVE_RCU

/*
* Test whether RCU thinks that the current CPU is idle.
@@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* #ifdef CONFIG_PROVE_RCU */

/*
* Test whether the current CPU was interrupted from idle. Nested
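Narrowing rcu_is_cpu_idle() to CONFIG_PROVE_RCU matches its one remaining role here: letting lockdep-RCU reject read-side critical sections entered from the idle loop, where RCU is not watching the CPU. Roughly how a PROVE_RCU-era holder check consumes it; a simplified sketch, not the tree's exact code:

```c
static inline int sketch_rcu_read_lock_held(void)
{
	if (rcu_is_cpu_idle())
		return 0;	/* idle CPUs can have no legal RCU readers */
	return lock_is_held(&rcu_lock_map);
}
```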
47 changes: 46 additions & 1 deletion trunk/kernel/rcutiny_plugin.h
@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
RCU_TRACE(.rcb.name = "rcu_preempt")
};

+static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

@@ -525,12 +526,24 @@ void rcu_preempt_note_context_switch(void)
local_irq_restore(flags);
}

+/*
+ * Tiny-preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+	current->rcu_read_lock_nesting++;
+	barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -613,6 +626,38 @@ void rcu_read_unlock_special(struct task_struct *t)
local_irq_restore(flags);
}

+/*
+ * Tiny-preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+	struct task_struct *t = current;
+
+	barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier(); /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier(); /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
+#ifdef CONFIG_PROVE_LOCKING
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
* Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the rcu_preempt_ctrlblk structure, which is
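For reference, the reader-side pattern that the relocated __rcu_read_lock()/__rcu_read_unlock() pair implements underneath rcu_read_lock()/rcu_read_unlock(); gp and struct foo are illustrative names, not from the commit:

```c
#include <linux/rcupdate.h>

struct foo { int a; };
static struct foo __rcu *gp;

static int read_a(void)
{
	struct foo *p;
	int ret;

	rcu_read_lock();		/* bumps ->rcu_read_lock_nesting */
	p = rcu_dereference(gp);	/* fetch with dependency ordering */
	ret = p ? p->a : -1;
	rcu_read_unlock();		/* outermost unlock may invoke
					 * rcu_read_unlock_special() */
	return ret;
}
```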