---
r: 349807
b: refs/heads/master
c: dc35c89
h: refs/heads/master
i:
  349805: ca81ffe
  349803: 5acca18
  349799: 61e95dc
  349791: c3890cf
v: v3
Paul E. McKenney committed Jan 8, 2013
1 parent 60b0de4 commit 68cb040
Showing 9 changed files with 251 additions and 166 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 4eacdf18374e5d7d21a728b46dfec269ac8ef55c
+refs/heads/master: dc35c8934eba959b690921615fcd987e8bc17e4a
2 changes: 1 addition & 1 deletion trunk/include/linux/rcupdate.h
@@ -749,7 +749,7 @@ static inline void rcu_preempt_sleep_check(void)
* preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
* in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
* be preempted, but explicit blocking is illegal. Finally, in preemptible
-* RCU implementations in real-time (with -rt patchset) kernel builds,
+* RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
* RCU read-side critical sections may be preempted and they may also
* block, but only when acquiring spinlocks that are subject to priority
* inheritance.
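To make the rule in the comment above concrete, here is a minimal kernel-style sketch (hypothetical code, not part of this commit; struct cfg and read_threshold() are invented for illustration): under preemptible RCU the section between rcu_read_lock() and rcu_read_unlock() may be preempted, but it must never block, except on CONFIG_PREEMPT_RT where priority-inheritance spinlocks are the one sanctioned exception.

#include <linux/rcupdate.h>

struct cfg { int threshold; };            /* hypothetical RCU-protected data */
static struct cfg __rcu *global_cfg;

static int read_threshold(void)
{
        struct cfg *c;
        int val = -1;

        rcu_read_lock();                  /* begin read-side critical section */
        c = rcu_dereference(global_cfg);  /* fetch the protected pointer */
        if (c)
                val = c->threshold;
        /* Blocking here (mutex_lock(), kmalloc(GFP_KERNEL), ...) would be
         * illegal except on CONFIG_PREEMPT_RT with PI spinlocks. */
        rcu_read_unlock();                /* end critical section */
        return val;
}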
6 changes: 3 additions & 3 deletions trunk/include/trace/events/rcu.h
@@ -393,15 +393,15 @@ TRACE_EVENT(rcu_kfree_callback,
*/
TRACE_EVENT(rcu_batch_start,

-TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
+TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),

TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

TP_STRUCT__entry(
__field(char *, rcuname)
__field(long, qlen_lazy)
__field(long, qlen)
-__field(long, blimit)
+__field(int, blimit)
),

TP_fast_assign(
@@ -411,7 +411,7 @@ TRACE_EVENT(rcu_batch_start,
__entry->blimit = blimit;
),

TP_printk("%s CBs=%ld/%ld bl=%ld",
TP_printk("%s CBs=%ld/%ld bl=%d",
__entry->rcuname, __entry->qlen_lazy, __entry->qlen,
__entry->blimit)
);
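The blimit hunks above come in threes because a TRACE_EVENT() repeats each argument's type in three places that must stay in sync: the TP_PROTO() prototype, the ring-buffer layout declared with __field(), and the format specifier in TP_printk(). A hypothetical sketch of the pattern (demo_batch is invented, and a real definition also needs the usual trace-header boilerplate around it):

TRACE_EVENT(demo_batch,

        TP_PROTO(char *name, long qlen, int limit),

        TP_ARGS(name, qlen, limit),

        TP_STRUCT__entry(
                __field(char *, name)   /* types here must match TP_PROTO */
                __field(long, qlen)
                __field(int, limit)
        ),

        TP_fast_assign(
                __entry->name = name;
                __entry->qlen = qlen;
                __entry->limit = limit;
        ),

        /* ...and the format must match the fields: %ld for long, %d for int */
        TP_printk("%s qlen=%ld limit=%d",
                  __entry->name, __entry->qlen, __entry->limit)
);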
75 changes: 10 additions & 65 deletions trunk/kernel/context_tracking.c
@@ -1,19 +1,3 @@
-/*
-* Context tracking: Probe on high level context boundaries such as kernel
-* and userspace. This includes syscalls and exceptions entry/exit.
-*
-* This is used by RCU to remove its dependency on the timer tick while a CPU
-* runs in userspace.
-*
-* Started by Frederic Weisbecker:
-*
-* Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
-*
-* Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
-* Steven Rostedt, Peter Zijlstra for suggestions and improvements.
-*
-*/
-
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
@@ -22,8 +6,8 @@

struct context_tracking {
/*
-* When active is false, probes are unset in order
-* to minimize overhead: TIF flags are cleared
+* When active is false, hooks are not set to
+* minimize overhead: TIF flags are cleared
* and calls to user_enter/exit are ignored. This
* may be further optimized using static keys.
*/
@@ -40,15 +24,6 @@ static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#endif
};

-/**
-* user_enter - Inform the context tracking that the CPU is going to
-* enter userspace mode.
-*
-* This function must be called right before we switch from the kernel
-* to userspace, when it's guaranteed the remaining kernel instructions
-* to execute won't use any RCU read side critical section because this
-* function sets RCU in extended quiescent state.
-*/
void user_enter(void)
{
unsigned long flags;
@@ -64,70 +39,40 @@ void user_enter(void)
if (in_interrupt())
return;

-/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);

local_irq_save(flags);
if (__this_cpu_read(context_tracking.active) &&
__this_cpu_read(context_tracking.state) != IN_USER) {
__this_cpu_write(context_tracking.state, IN_USER);
-/*
-* At this stage, only low level arch entry code remains and
-* then we'll run in userspace. We can assume there won't be
-* any RCU read-side critical section until the next call to
-* user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
-* on the tick.
-*/
rcu_user_enter();
}
local_irq_restore(flags);
}


-/**
-* user_exit - Inform the context tracking that the CPU is
-* exiting userspace mode and entering the kernel.
-*
-* This function must be called after we entered the kernel from userspace
-* before any use of RCU read side critical section. This potentially include
-* any high level kernel code like syscalls, exceptions, signal handling, etc...
-*
-* This call supports re-entrancy. This way it can be called from any exception
-* handler without needing to know if we came from userspace or not.
-*/
void user_exit(void)
{
unsigned long flags;

+/*
+* Some contexts may involve an exception occuring in an irq,
+* leading to that nesting:
+* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+* This would mess up the dyntick_nesting count though. And rcu_irq_*()
+* helpers are enough to protect RCU uses inside the exception. So
+* just return immediately if we detect we are in an IRQ.
+*/
if (in_interrupt())
return;

local_irq_save(flags);
if (__this_cpu_read(context_tracking.state) == IN_USER) {
__this_cpu_write(context_tracking.state, IN_KERNEL);
-/*
-* We are going to run code that may use RCU. Inform
-* RCU core about that (ie: we may need the tick again).
-*/
rcu_user_exit();
}
local_irq_restore(flags);
}


-/**
-* context_tracking_task_switch - context switch the syscall callbacks
-* @prev: the task that is being switched out
-* @next: the task that is being switched in
-*
-* The context tracking uses the syscall slow path to implement its user-kernel
-* boundaries probes on syscalls. This way it doesn't impact the syscall fast
-* path on CPUs that don't do context tracking.
-*
-* But we need to clear the flag on the previous task because it may later
-* migrate to some CPU that doesn't do the context tracking. As such the TIF
-* flag may not be desired there.
-*/
void context_tracking_task_switch(struct task_struct *prev,
struct task_struct *next)
{
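Even with the comment blocks stripped out, the state machine in this file stays small: a per-CPU flag flips between IN_KERNEL and IN_USER, transitions are ignored from IRQ context, and rcu_user_enter()/rcu_user_exit() tell RCU when the tick can be dropped. The following is a standalone userspace model of that logic, for illustration only, assuming a single CPU and substituting puts() for the real RCU calls:

#include <stdbool.h>
#include <stdio.h>

enum ctx_state { IN_KERNEL, IN_USER };

struct context_tracking {
        bool active;               /* is tracking enabled on this CPU? */
        enum ctx_state state;
};

static struct context_tracking ct = { .active = true, .state = IN_KERNEL };
static bool in_irq_model;          /* stands in for in_interrupt() */

static void model_user_enter(void)
{
        if (in_irq_model)
                return;            /* IRQs use rcu_irq_enter/exit instead */
        if (ct.active && ct.state != IN_USER) {
                ct.state = IN_USER;
                puts("rcu_user_enter(): RCU no longer needs the tick");
        }
}

static void model_user_exit(void)
{
        if (in_irq_model)
                return;            /* re-entrancy guard for exceptions in IRQs */
        if (ct.state == IN_USER) {
                ct.state = IN_KERNEL;
                puts("rcu_user_exit(): kernel code may use RCU again");
        }
}

int main(void)
{
        model_user_enter();        /* syscall return path: kernel -> user */
        model_user_exit();         /* exception entry: user -> kernel */
        model_user_exit();         /* nested call is a harmless no-op */
        return 0;
}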
2 changes: 1 addition & 1 deletion trunk/kernel/rcutiny.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);
* interrupts don't count, we must be running at the first interrupt
* level.
*/
-static int rcu_is_cpu_rrupt_from_idle(void)
+int rcu_is_cpu_rrupt_from_idle(void)
{
return rcu_dynticks_nesting <= 1;
}
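The "<= 1" test reads naturally once the accounting is in view: the nesting counter sits at its baseline while the CPU idles and is incremented on every interrupt entry, so the first interrupt taken from idle sees 1 and nested interrupts see more. A standalone sketch of that idea follows (a simplification: the real rcu_dynticks_nesting also encodes task-level state, which this model ignores):

#include <assert.h>

static long nesting;               /* 0 while the modeled CPU is idle */

static void irq_enter_model(void) { nesting++; }
static void irq_exit_model(void)  { nesting--; }

static int rrupt_from_idle_model(void)
{
        return nesting <= 1;       /* true only at the first interrupt level */
}

int main(void)
{
        irq_enter_model();                    /* interrupt arrives while idle */
        assert(rrupt_from_idle_model());
        irq_enter_model();                    /* a nested interrupt */
        assert(!rrupt_from_idle_model());
        irq_exit_model();
        irq_exit_model();
        return 0;
}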
2 changes: 1 addition & 1 deletion trunk/kernel/rcutorture.c
@@ -1749,7 +1749,7 @@ static int rcu_torture_barrier_init(void)
barrier_cbs_wq =
kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
GFP_KERNEL);
-if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
+if (barrier_cbs_tasks == NULL || barrier_cbs_wq == 0)
return -ENOMEM;
for (i = 0; i < n_barrier_cbs; i++) {
init_waitqueue_head(&barrier_cbs_wq[i]);
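A side note on the changed test: barrier_cbs_wq == 0, !barrier_cbs_wq, and barrier_cbs_wq == NULL all compile to the same pointer check, so this hunk is stylistic rather than functional. For context, a hypothetical helper showing the allocate-then-initialize pattern this code uses (alloc_cb_waitqueues is invented for illustration):

#include <linux/slab.h>
#include <linux/wait.h>

/* Hypothetical helper following the rcu_torture_barrier_init() pattern. */
static wait_queue_head_t *alloc_cb_waitqueues(int n)
{
        wait_queue_head_t *wq;
        int i;

        wq = kzalloc(n * sizeof(wq[0]), GFP_KERNEL);
        if (!wq)                   /* kzalloc() returns NULL on failure */
                return NULL;

        for (i = 0; i < n; i++)
                init_waitqueue_head(&wq[i]);
        return wq;
}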