From 4565b63481d80fb282e47a495a1ffa9403142fda Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Mon, 8 Dec 2008 10:58:08 +0800
Subject: [PATCH]

--- yaml ---
r: 121271
b: refs/heads/master
c: 361b73d5c34f59c3fd107bb9dbe7a1fbff2c2517
h: refs/heads/master
i:
  121269: 22cccd56f1cd9373b3dc5d72815d10f31cbf7193
  121267: 4c59f80e4f28dae36d6cda00000cd97e4e88c8ea
  121263: 221c77988341077cc837225eb29e41c600417d72
v: v3
---
 [refs]                                     |  2 +-
 trunk/arch/x86/kernel/Makefile             |  6 ++++
 trunk/arch/x86/kernel/ftrace.c             |  5 +--
 trunk/arch/x86/kernel/process_32.c         |  4 +--
 trunk/arch/x86/kernel/process_64.c         |  4 +--
 trunk/include/linux/ftrace.h               | 24 ---------------
 trunk/include/linux/ring_buffer.h          | 10 +++---
 trunk/include/linux/sched.h                |  2 --
 trunk/kernel/Makefile                      |  4 +++
 trunk/kernel/extable.c                     |  5 ++-
 trunk/kernel/module.c                      |  2 +-
 trunk/kernel/trace/ftrace.c                |  2 --
 trunk/kernel/trace/trace.c                 | 36 +++++++++++++---------
 trunk/kernel/trace/trace_functions_graph.c | 33 +++-----------------
 14 files changed, 48 insertions(+), 91 deletions(-)

diff --git a/[refs] b/[refs]
index 3504f58fc67d..4a056dc4ac76 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e726f5f91effd8944c76475a2688093a03ba0d10
+refs/heads/master: 361b73d5c34f59c3fd107bb9dbe7a1fbff2c2517
diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile
index 1cad9318d217..a3049da61985 100644
--- a/trunk/arch/x86/kernel/Makefile
+++ b/trunk/arch/x86/kernel/Makefile
@@ -14,6 +14,12 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# Don't trace __switch_to() with the graph tracer, but keep it traceable by the function tracer
+CFLAGS_REMOVE_process_32.o = -pg
+CFLAGS_REMOVE_process_64.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c
index 1b43086b097a..f98c4076a170 100644
--- a/trunk/arch/x86/kernel/ftrace.c
+++ b/trunk/arch/x86/kernel/ftrace.c
@@ -476,10 +476,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 				&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (unlikely(atomic_read(&in_nmi)))
-		return;
-
-	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+	if (atomic_read(&in_nmi))
 		return;
 
 	/*
diff --git a/trunk/arch/x86/kernel/process_32.c b/trunk/arch/x86/kernel/process_32.c
index 24c2276aa453..0a1302fe6d45 100644
--- a/trunk/arch/x86/kernel/process_32.c
+++ b/trunk/arch/x86/kernel/process_32.c
@@ -38,7 +38,6 @@
 #include
 #include
 #include
-#include <linux/ftrace.h>
 #include
 #include
 
@@ -549,8 +548,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
-__notrace_funcgraph struct task_struct *
-__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 			     *next = &next_p->thread;
diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c
index fbb321d53d34..c958120fb1b6 100644
--- a/trunk/arch/x86/kernel/process_64.c
+++ b/trunk/arch/x86/kernel/process_64.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include <linux/ftrace.h>
 #include
 #include
 
@@ -552,9 +551,8 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
  *  - could test fs/gs bitsliced
  *
  * Kprobes not supported here. Set the probe on schedule instead.
- * Function graph tracer not supported too.
  */
-__notrace_funcgraph struct task_struct *
+struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread;
diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h
index 11cac81eed08..b9b4d0a22d10 100644
--- a/trunk/include/linux/ftrace.h
+++ b/trunk/include/linux/ftrace.h
@@ -369,14 +369,6 @@ struct ftrace_graph_ret {
 };
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/*
- * Sometimes we don't want to trace a function with the function
- * graph tracer but we want them to keep traced by the usual function
- * tracer if the function graph tracer is not configured.
- */
-#define __notrace_funcgraph	notrace
-
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of the callback handlers for tracing function graph*/
@@ -401,20 +393,7 @@ static inline int task_curr_ret_stack(struct task_struct *t)
 {
 	return t->curr_ret_stack;
 }
-
-static inline void pause_graph_tracing(void)
-{
-	atomic_inc(&current->tracing_graph_pause);
-}
-
-static inline void unpause_graph_tracing(void)
-{
-	atomic_dec(&current->tracing_graph_pause);
-}
 #else
-
-#define __notrace_funcgraph
-
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
@@ -422,9 +401,6 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
 }
-
-static inline void pause_graph_tracing(void) { }
-static inline void unpause_graph_tracing(void) { }
 #endif
 
 #ifdef CONFIG_TRACING
diff --git a/trunk/include/linux/ring_buffer.h b/trunk/include/linux/ring_buffer.h
index 1a350a847edd..d363467c8f13 100644
--- a/trunk/include/linux/ring_buffer.h
+++ b/trunk/include/linux/ring_buffer.h
@@ -28,17 +28,19 @@ struct ring_buffer_event {
  *				 size = 8 bytes
  *
  * @RINGBUF_TYPE_TIME_STAMP:	Sync time stamp with external clock
- *				 array[0] = tv_nsec
- *				 array[1] = tv_sec
+ *				 array[0]    = tv_nsec
+ *				 array[1..2] = tv_sec
  *				 size = 16 bytes
  *
  * @RINGBUF_TYPE_DATA:		Data record
 *				 If len is zero:
 *				  array[0] holds the actual length
-*				  array[1..(length+3)/4-1] holds data
+*				  array[1..(length+3)/4] holds data
+*				  size = 4 + 4 + length (bytes)
 *				 else
 *				  length = len << 2
-*				  array[0..(length+3)/4] holds data
+*				  array[0..(length+3)/4-1] holds data
+*				  size = 4 + length (bytes)
  */
 enum ring_buffer_type {
 	RINGBUF_TYPE_PADDING,
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 4b81fc5f7731..4c152e0acc9e 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -1379,8 +1379,6 @@ struct task_struct {
 	 * because of depth overrun.
 	 */
 	atomic_t trace_overrun;
-	/* Pause for the tracing */
-	atomic_t tracing_graph_pause;
 #endif
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile
index 19fad003b19d..703cf3b7389c 100644
--- a/trunk/kernel/Makefile
+++ b/trunk/kernel/Makefile
@@ -21,6 +21,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
+CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
+endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
diff --git a/trunk/kernel/extable.c b/trunk/kernel/extable.c
index feb0317cf09a..a26cb2e17023 100644
--- a/trunk/kernel/extable.c
+++ b/trunk/kernel/extable.c
@@ -17,7 +17,6 @@
 */
 #include
 #include
-#include <linux/ftrace.h>
 #include
 #include
 
@@ -41,7 +40,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-__notrace_funcgraph int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr <= (unsigned long)_etext)
@@ -54,7 +53,7 @@ __notrace_funcgraph int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
-__notrace_funcgraph int __kernel_text_address(unsigned long addr)
+int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
 		return 1;
diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c
index dd2a54155b54..89bcf7c1327d 100644
--- a/trunk/kernel/module.c
+++ b/trunk/kernel/module.c
@@ -2704,7 +2704,7 @@ int is_module_address(unsigned long addr)
 
 
 /* Is this a valid kernel address? */
-__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
+struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c
index a12f80efceaa..2971fe48f55e 100644
--- a/trunk/kernel/trace/ftrace.c
+++ b/trunk/kernel/trace/ftrace.c
@@ -1998,7 +1998,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 			/* Make sure IRQs see the -1 first: */
 			barrier();
 			t->ret_stack = ret_stack_list[start++];
-			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
@@ -2078,7 +2077,6 @@ void ftrace_graph_init_task(struct task_struct *t)
 		if (!t->ret_stack)
 			return;
 		t->curr_ret_stack = -1;
-		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 	} else
 		t->ret_stack = NULL;
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index 8ebe0070c47a..7a93c663e52a 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -44,14 +44,13 @@ unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 
 unsigned long __read_mostly	tracing_thresh;
 
-/*
- * We need to change this state when a selftest is running.
+/* We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
  * insertions into the ring-buffer such as ftrace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
-static bool __read_mostly tracing_selftest_running;
+static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
 
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
@@ -575,8 +574,6 @@ int register_tracer(struct tracer *type)
 	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
-	tracing_selftest_running = true;
-
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
@@ -601,6 +598,7 @@ int register_tracer(struct tracer *type)
 		struct trace_array *tr = &global_trace;
 		int i;
 
+		atomic_set(&tracing_selftest_running, 1);
 		/*
 		 * Run a selftest on this tracer.
 		 * Here we reset the trace buffer, and set the current
@@ -615,6 +613,7 @@ int register_tracer(struct tracer *type)
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
+		atomic_set(&tracing_selftest_running, 0);
 		/* the test is responsible for resetting too */
 		current_trace = saved_tracer;
 		if (ret) {
@@ -636,7 +635,6 @@ int register_tracer(struct tracer *type)
 		max_tracer_type_len = len;
 
 out:
-	tracing_selftest_running = false;
 	mutex_unlock(&trace_types_lock);
 	lock_kernel();
 
@@ -3590,17 +3588,24 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	static DEFINE_SPINLOCK(trace_buf_lock);
+	/*
+	 * Use a raw spinlock: a normal spinlock would itself be traced
+	 * here, and every ftrace_printk record would carry a spurious
+	 * spin_lock_irqsave/spin_unlock_irqrestore pair around the
+	 * actual TRACE_PRINT entry.
+	 */
+	static raw_spinlock_t trace_buf_lock =
+		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
-	unsigned long irq_flags;
+	unsigned long flags, irq_flags;
+	int cpu, len = 0, size, pc;
 
-	if (tracing_disabled || tracing_selftest_running)
+	if (tracing_disabled || atomic_read(&tracing_selftest_running))
 		return 0;
 
 	pc = preempt_count();
@@ -3611,8 +3616,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	pause_graph_tracing();
-	spin_lock_irqsave(&trace_buf_lock, irq_flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3623,7 +3628,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_PRINT;
 	entry->ip = ip;
 	entry->depth = depth;
@@ -3633,8 +3638,9 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
 out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
-	unpause_graph_tracing();
+	__raw_spin_unlock(&trace_buf_lock);
+	local_irq_restore(flags);
+
 out:
 	preempt_enable_notrace();
diff --git a/trunk/kernel/trace/trace_functions_graph.c b/trunk/kernel/trace/trace_functions_graph.c
index af60eef4cbcc..32b7fb9a19df 100644
--- a/trunk/kernel/trace/trace_functions_graph.c
+++ b/trunk/kernel/trace/trace_functions_graph.c
@@ -570,36 +570,11 @@ print_graph_function(struct trace_iterator *iter)
 	}
 }
 
-static void print_graph_headers(struct seq_file *s)
-{
-	/* 1st line */
-	seq_printf(s, "# ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-		seq_printf(s, "CPU ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "TASK/PID ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
-		seq_printf(s, "OVERHEAD/");
-	seq_printf(s, "DURATION FUNCTION CALLS\n");
-
-	/* 2nd line */
-	seq_printf(s, "# ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
-		seq_printf(s, "| ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
-		seq_printf(s, "| | ");
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
-		seq_printf(s, "| ");
-		seq_printf(s, "| | | | |\n");
-	} else
-		seq_printf(s, " | | | | |\n");
-}
 
 static struct tracer graph_trace __read_mostly = {
-	.name	      = "function_graph",
-	.init	      = graph_trace_init,
-	.reset	      = graph_trace_reset,
-	.print_line   = print_graph_function,
-	.print_header = print_graph_headers,
+	.name		= "function_graph",
+	.init		= graph_trace_init,
+	.reset		= graph_trace_reset,
+	.print_line	= print_graph_function,
 	.flags		= &tracer_flags,
 };
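Note on the ring_buffer.h hunk: the corrected comment pins down the size of a data event. Every event starts with a 4-byte header; when the header's len field is non-zero the payload length is len << 2 and the record occupies 4 + length bytes, while a zero len field means the real length is spilled into array[0], costing one extra 32-bit word (4 + 4 + length bytes). Below is a minimal userspace sketch of that arithmetic; the function and variable names are invented for illustration and the kernel's real structures are not reproduced.

#include <stdio.h>

/*
 * Sketch of the RINGBUF_TYPE_DATA sizing rules from the corrected
 * ring_buffer.h comment. The 4-byte event header comes from that
 * comment; everything else here is a hypothetical stand-in.
 */
static unsigned int data_event_size(unsigned int len_field,
				    unsigned int length)
{
	if (len_field)				/* length fits in the header */
		return 4 + (len_field << 2);	/* size = 4 + length */
	return 4 + 4 + length;	/* header + array[0] length word + data */
}

int main(void)
{
	/* 12-byte payload encoded inline: len = 12 >> 2 = 3 -> 16 bytes */
	printf("inline:   %u bytes\n", data_event_size(3, 12));
	/* 100-byte payload, explicit length in array[0] -> 108 bytes */
	printf("explicit: %u bytes\n", data_event_size(0, 100));
	return 0;
}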
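Note on the trace.c selftest change: tracing_selftest_running becomes an atomic_t that is set only around the type->selftest() call, and trace_vprintk() returns early while it is set, so a concurrent ftrace_printk() cannot insert entries while the selftest is counting them. A standalone C11 sketch of that guard pattern follows; all names are hypothetical, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int selftest_running;	/* mirrors tracing_selftest_running */
static int entries;			/* stand-in for the ring buffer */

/* Writer path: refuse to insert records while a selftest is counting. */
static bool trace_write(const char *msg)
{
	if (atomic_load(&selftest_running))
		return false;
	entries++;
	printf("recorded: %s\n", msg);
	return true;
}

/* Selftest: raise the flag, count entries undisturbed, clear the flag. */
static int run_selftest(void)
{
	atomic_store(&selftest_running, 1);
	int before = entries;
	/* ... the selftest inserts and counts its own entries here ... */
	int counted = entries - before;
	atomic_store(&selftest_running, 0);
	return counted;
}

int main(void)
{
	trace_write("before selftest");	/* recorded */
	run_selftest();			/* concurrent writes suppressed */
	trace_write("after selftest");	/* recorded again */
	return 0;
}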
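Note on the trace_vprintk() locking change: spin_lock_irqsave() is itself visible to the tracer, so taking it inside the trace-write path would wrap every record in a spurious lock/unlock pair; local_irq_save() plus __raw_spin_lock() keeps the buffer serialized without generating events. The userspace illustration below simulates that recursion problem with a fake trace hook standing in for ftrace instrumentation; all names are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag buf_lock = ATOMIC_FLAG_INIT;

/* Stand-in for ftrace instrumentation: each call is one trace record. */
static void trace_hook(const char *fn)
{
	printf("trace record: %s\n", fn);
}

/* "Normal" lock: instrumented, so taking it pollutes the trace. */
static void traced_lock(void)
{
	trace_hook(__func__);
	while (atomic_flag_test_and_set_explicit(&buf_lock,
						 memory_order_acquire))
		;	/* spin */
}

/* "Raw" lock: the same spin loop, invisible to the tracer. */
static void raw_lock(void)
{
	while (atomic_flag_test_and_set_explicit(&buf_lock,
						 memory_order_acquire))
		;	/* spin */
}

static void buf_unlock(void)
{
	atomic_flag_clear_explicit(&buf_lock, memory_order_release);
}

int main(void)
{
	traced_lock();			/* emits a bogus extra record */
	printf("trace record: payload A\n");
	buf_unlock();

	raw_lock();			/* emits nothing */
	printf("trace record: payload B\n");
	buf_unlock();
	return 0;
}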