Commit 4565b63

---
r: 121271
b: refs/heads/master
c: 361b73d
h: refs/heads/master
i:
  121269: 22cccd5
  121267: 4c59f80
  121263: 221c779
v: v3
Lai Jiangshan authored and Ingo Molnar committed Dec 8, 2008
1 parent 559fd77 commit 4565b63
Showing 14 changed files with 48 additions and 91 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: e726f5f91effd8944c76475a2688093a03ba0d10
refs/heads/master: 361b73d5c34f59c3fd107bb9dbe7a1fbff2c2517
6 changes: 6 additions & 0 deletions trunk/arch/x86/kernel/Makefile
@@ -14,6 +14,12 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif

ifdef CONFIG_FUNCTION_GRAPH_TRACER
# Don't trace __switch_to() but let it for function tracer
CFLAGS_REMOVE_process_32.o = -pg
CFLAGS_REMOVE_process_64.o = -pg
endif

#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
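The CFLAGS_REMOVE_<file>.o = -pg pattern in the hunk above opts individual objects out of mcount instrumentation: with -pg, GCC emits a profiling call at every function entry, which ftrace then patches at runtime. A minimal sketch of what the compiler effectively generates (illustrative only; the real mcount calling convention is arch-specific):

extern void mcount(void);

void example_function(void)
{
	mcount();	/* profiling hook inserted by gcc -pg; ftrace hooks this */
	/* ... original function body ... */
}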
5 changes: 1 addition & 4 deletions trunk/arch/x86/kernel/ftrace.c
@@ -476,10 +476,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
&return_to_handler;

/* Nmi's are currently unsupported */
if (unlikely(atomic_read(&in_nmi)))
return;

if (unlikely(atomic_read(&current->tracing_graph_pause)))
if (atomic_read(&in_nmi))
return;

/*
4 changes: 1 addition & 3 deletions trunk/arch/x86/kernel/process_32.c
@@ -38,7 +38,6 @@
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/dmi.h>
#include <linux/ftrace.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -549,8 +548,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
4 changes: 1 addition & 3 deletions trunk/arch/x86/kernel/process_64.c
@@ -39,7 +39,6 @@
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
@@ -552,9 +551,8 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
* - could test fs/gs bitsliced
*
* Kprobes not supported here. Set the probe on schedule instead.
* Function graph tracer not supported too.
*/
__notrace_funcgraph struct task_struct *
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
24 changes: 0 additions & 24 deletions trunk/include/linux/ftrace.h
@@ -369,14 +369,6 @@ struct ftrace_graph_ret {
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
* Sometimes we don't want to trace a function with the function
* graph tracer but we want them to keep traced by the usual function
* tracer if the function graph tracer is not configured.
*/
#define __notrace_funcgraph notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
/* Type of the callback handlers for tracing function graph*/
@@ -401,30 +393,14 @@ static inline int task_curr_ret_stack(struct task_struct *t)
{
return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
atomic_dec(&current->tracing_graph_pause);
}
#else

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif

#ifdef CONFIG_TRACING
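The ftrace.h hunks above delete the __notrace_funcgraph annotation and the pause/unpause helpers. Before this change they were used roughly as in the following sketch, reassembled from the removed lines (not a new API):

/* Annotation: skip the graph tracer, stay visible to the function tracer */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p);

/* Helpers: bracket code that must not be graph-traced */
pause_graph_tracing();
/* ... code that could recurse into the tracer, e.g. trace_vprintk() ... */
unpause_graph_tracing();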
10 changes: 6 additions & 4 deletions trunk/include/linux/ring_buffer.h
@@ -28,17 +28,19 @@ struct ring_buffer_event {
* size = 8 bytes
*
* @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
* array[0] = tv_nsec
* array[1] = tv_sec
* array[0] = tv_nsec
* array[1..2] = tv_sec
* size = 16 bytes
*
* @RINGBUF_TYPE_DATA: Data record
* If len is zero:
* array[0] holds the actual length
* array[1..(length+3)/4-1] holds data
* array[1..(length+3)/4] holds data
* size = 4 + 4 + length (bytes)
* else
* length = len << 2
* array[0..(length+3)/4] holds data
* array[0..(length+3)/4-1] holds data
* size = 4 + length (bytes)
*/
enum ring_buffer_type {
RINGBUF_TYPE_PADDING,
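The corrected comment pins down how a data event's total size is derived from its header. A self-contained sketch of that arithmetic (data_event_size is a hypothetical helper for illustration, not part of the kernel API), assuming the documented 4-byte header word:

#include <stdio.h>

/* Size of a RINGBUF_TYPE_DATA event per the layout documented above:
 * a non-zero len field counts the payload in 4-byte words; len == 0
 * means array[0] carries the byte length and the data follows it. */
static unsigned int data_event_size(unsigned int len, unsigned int array0)
{
	if (len)
		return 4 + (len << 2);	/* header word + payload words */
	return 4 + 4 + array0;		/* header + length word + data bytes */
}

int main(void)
{
	printf("%u\n", data_event_size(2, 0));	/* 12: 8 bytes of payload */
	printf("%u\n", data_event_size(0, 100));	/* 108: extended record */
	return 0;
}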
2 changes: 0 additions & 2 deletions trunk/include/linux/sched.h
@@ -1379,8 +1379,6 @@ struct task_struct {
* because of depth overrun.
*/
atomic_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
4 changes: 4 additions & 0 deletions trunk/kernel/Makefile
@@ -21,6 +21,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_sched.o = -pg
endif
ifdef CONFIG_FUNCTION_GRAPH_TRACER
CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
endif

obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
5 changes: 2 additions & 3 deletions trunk/kernel/extable.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/sections.h>

@@ -41,7 +40,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
return e;
}

__notrace_funcgraph int core_kernel_text(unsigned long addr)
int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr <= (unsigned long)_etext)
@@ -54,7 +53,7 @@ __notrace_funcgraph int core_kernel_text(unsigned long addr)
return 0;
}

__notrace_funcgraph int __kernel_text_address(unsigned long addr)
int __kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
2 changes: 1 addition & 1 deletion trunk/kernel/module.c
@@ -2704,7 +2704,7 @@ int is_module_address(unsigned long addr)


/* Is this a valid kernel address? */
__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
struct module *__module_text_address(unsigned long addr)
{
struct module *mod;

2 changes: 0 additions & 2 deletions trunk/kernel/trace/ftrace.c
@@ -1998,7 +1998,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
/* Make sure IRQs see the -1 first: */
barrier();
t->ret_stack = ret_stack_list[start++];
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
}
} while_each_thread(g, t);
@@ -2078,7 +2077,6 @@ void ftrace_graph_init_task(struct task_struct *t)
if (!t->ret_stack)
return;
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
} else
t->ret_stack = NULL;
36 changes: 21 additions & 15 deletions trunk/kernel/trace/trace.c
@@ -44,14 +44,13 @@
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

/*
* We need to change this state when a selftest is running.
/* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
* insertions into the ring-buffer such as ftrace_printk could occurred
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
static atomic_t tracing_selftest_running = ATOMIC_INIT(0);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
@@ -575,8 +574,6 @@ int register_tracer(struct tracer *type)
unlock_kernel();
mutex_lock(&trace_types_lock);

tracing_selftest_running = true;

for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
@@ -601,6 +598,7 @@ int register_tracer(struct tracer *type)
struct trace_array *tr = &global_trace;
int i;

atomic_set(&tracing_selftest_running, 1);
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
@@ -615,6 +613,7 @@ int register_tracer(struct tracer *type)
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
atomic_set(&tracing_selftest_running, 0);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
@@ -636,7 +635,6 @@ int register_tracer(struct tracer *type)
max_tracer_type_len = len;

out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
lock_kernel();

@@ -3590,17 +3588,24 @@ static __init int tracer_init_debugfs(void)

int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
static DEFINE_SPINLOCK(trace_buf_lock);
/*
* Raw Spinlock because a normal spinlock would be traced here
* and append an irrelevant couple spin_lock_irqsave/
* spin_unlock_irqrestore traced by ftrace around this
* TRACE_PRINTK trace.
*/
static raw_spinlock_t trace_buf_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];

struct ring_buffer_event *event;
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
int cpu, len = 0, size, pc;
struct print_entry *entry;
unsigned long irq_flags;
unsigned long flags, irq_flags;
int cpu, len = 0, size, pc;

if (tracing_disabled || tracing_selftest_running)
if (tracing_disabled || atomic_read(&tracing_selftest_running))
return 0;

pc = preempt_count();
@@ -3611,8 +3616,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
if (unlikely(atomic_read(&data->disabled)))
goto out;

pause_graph_tracing();
spin_lock_irqsave(&trace_buf_lock, irq_flags);
local_irq_save(flags);
__raw_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

len = min(len, TRACE_BUF_SIZE-1);
@@ -3623,7 +3628,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, irq_flags, pc);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_PRINT;
entry->ip = ip;
entry->depth = depth;
@@ -3633,8 +3638,9 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

out_unlock:
spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
unpause_graph_tracing();
__raw_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);

out:
preempt_enable_notrace();

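The locking change in trace_vprintk() is the core of this file's diff: a raw spinlock is invisible to the tracer, so taking it here no longer injects an irrelevant spin_lock_irqsave/spin_unlock_irqrestore pair into the trace, and IRQs are disabled explicitly since raw locks do not do that themselves. Condensed from the hunks above (a sketch, not the complete function):

static raw_spinlock_t trace_buf_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

unsigned long flags;

local_irq_save(flags);		/* raw locks don't disable IRQs themselves */
__raw_spin_lock(&trace_buf_lock);
/* ... vsnprintf() into the shared static buffer, reserve and commit
 * the ring-buffer event ... */
__raw_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);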
33 changes: 4 additions & 29 deletions trunk/kernel/trace/trace_functions_graph.c
@@ -570,36 +570,11 @@ print_graph_function(struct trace_iterator *iter)
}
}

static void print_graph_headers(struct seq_file *s)
{
/* 1st line */
seq_printf(s, "# ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, "CPU ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, "TASK/PID ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
seq_printf(s, "OVERHEAD/");
seq_printf(s, "DURATION FUNCTION CALLS\n");

/* 2nd line */
seq_printf(s, "# ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
seq_printf(s, "| ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, "| | ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
seq_printf(s, "| ");
seq_printf(s, "| | | | |\n");
} else
seq_printf(s, " | | | | |\n");
}
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.print_header = print_graph_headers,
.name = "function_graph",
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.flags = &tracer_flags,
};

