Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 121269
b: refs/heads/master
c: 380c4b1
h: refs/heads/master
i:
  121267: 4c59f80
v: v3
  • Loading branch information
Frederic Weisbecker authored and Ingo Molnar committed Dec 8, 2008
1 parent 1fc064c commit 22cccd5
Show file tree
Hide file tree
Showing 6 changed files with 27 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 8e1b82e0866befaa0b2920be296c6e4c3fc7f422
refs/heads/master: 380c4b1411ccd6885f92b2c8ceb08433a720f44e
5 changes: 4 additions & 1 deletion trunk/arch/x86/kernel/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -476,7 +476,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
&return_to_handler;

/* Nmi's are currently unsupported */
if (atomic_read(&in_nmi))
if (unlikely(atomic_read(&in_nmi)))
return;

if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;

/*
Expand Down
13 changes: 13 additions & 0 deletions trunk/include/linux/ftrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -401,6 +401,16 @@ static inline int task_curr_ret_stack(struct task_struct *t)
{
return t->curr_ret_stack;
}

/*
 * Temporarily suppress function-graph tracing for the current task.
 * Uses a per-task atomic counter, so calls nest: tracing stays paused
 * until a matching unpause_graph_tracing() brings the count back to 0.
 */
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
}

/*
 * Re-enable function-graph tracing for the current task by decrementing
 * the per-task pause counter. Must pair with a prior pause_graph_tracing();
 * tracing resumes only once the counter returns to 0.
 */
static inline void unpause_graph_tracing(void)
{
atomic_dec(&current->tracing_graph_pause);
}
#else

#define __notrace_funcgraph
Expand All @@ -412,6 +422,9 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif

#ifdef CONFIG_TRACING
Expand Down
2 changes: 2 additions & 0 deletions trunk/include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1379,6 +1379,8 @@ struct task_struct {
* because of depth overrun.
*/
atomic_t trace_overrun;
/* Pause counter for function-graph tracing of this task */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
Expand Down
2 changes: 2 additions & 0 deletions trunk/kernel/trace/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
/* Make sure IRQs see the -1 first: */
barrier();
t->ret_stack = ret_stack_list[start++];
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
}
} while_each_thread(g, t);
Expand Down Expand Up @@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
if (!t->ret_stack)
return;
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
} else
t->ret_stack = NULL;
Expand Down
18 changes: 5 additions & 13 deletions trunk/kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void)

int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
/*
* Raw Spinlock because a normal spinlock would be traced here
* and append an irrelevant couple spin_lock_irqsave/
* spin_unlock_irqrestore traced by ftrace around this
* TRACE_PRINTK trace.
*/
static raw_spinlock_t trace_buf_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static DEFINE_SPINLOCK(trace_buf_lock);
static char trace_buf[TRACE_BUF_SIZE];

struct ring_buffer_event *event;
Expand All @@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
if (unlikely(atomic_read(&data->disabled)))
goto out;

local_irq_save(flags);
__raw_spin_lock(&trace_buf_lock);
pause_graph_tracing();
spin_lock_irqsave(&trace_buf_lock, irq_flags);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

len = min(len, TRACE_BUF_SIZE-1);
Expand All @@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

out_unlock:
__raw_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);

spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
unpause_graph_tracing();
out:
preempt_enable_notrace();

Expand Down

0 comments on commit 22cccd5

Please sign in to comment.