From dd30dc755c1d69bc2453aabcdd1148ba53105241 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 31 Oct 2008 09:36:38 -0400
Subject: [PATCH]

--- yaml ---
r: 121068
b: refs/heads/master
c: 7e5e26a3d8ac4bcadb380073dc9604c07a9a6198
h: refs/heads/master
v: v3
---
 [refs]                                  |   2 +-
 trunk/arch/arm/include/asm/ftrace.h     |   5 -
 trunk/arch/powerpc/include/asm/ftrace.h |   5 -
 trunk/arch/sh/include/asm/ftrace.h      |   5 -
 trunk/arch/sparc/include/asm/ftrace.h   |   5 -
 trunk/arch/x86/Kconfig                  |   1 -
 trunk/arch/x86/include/asm/ftrace.h     |  16 --
 trunk/arch/x86/kernel/entry_32.S        |   6 -
 trunk/arch/x86/kernel/entry_64.S        |   5 -
 trunk/drivers/char/sysrq.c              |  18 +--
 trunk/include/linux/ftrace.h            |  64 +------
 trunk/include/linux/hardirq.h           |   2 +-
 trunk/init/main.c                       |   4 +-
 trunk/kernel/sysctl.c                   |   2 +-
 trunk/kernel/trace/Kconfig              |   7 -
 trunk/kernel/trace/ftrace.c             |  47 ++----
 trunk/kernel/trace/ring_buffer.c        |  58 +++----
 trunk/kernel/trace/trace.c              | 195 ++++++------------------
 trunk/kernel/trace/trace.h              |  55 +------
 trunk/kernel/trace/trace_boot.c         |  36 ++---
 trunk/kernel/trace/trace_functions.c    |   6 -
 trunk/kernel/trace/trace_irqsoff.c      |  41 +----
 trunk/kernel/trace/trace_sched_switch.c |  50 +++---
 trunk/kernel/trace/trace_sched_wakeup.c |  52 ++-----
 trunk/kernel/trace/trace_stack.c        |   8 +-
 25 files changed, 159 insertions(+), 536 deletions(-)

diff --git a/[refs] b/[refs]
index a3cd25e59a7b..f005bd926bf0 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3e03fb7f1da2e691644526c0d6df42d778716349
+refs/heads/master: 7e5e26a3d8ac4bcadb380073dc9604c07a9a6198
diff --git a/trunk/arch/arm/include/asm/ftrace.h b/trunk/arch/arm/include/asm/ftrace.h
index 3f3a1d1508ea..39c8bc1a006a 100644
--- a/trunk/arch/arm/include/asm/ftrace.h
+++ b/trunk/arch/arm/include/asm/ftrace.h
@@ -1,11 +1,6 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
-#ifndef __ASSEMBLY__
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR ((long)(mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
diff --git a/trunk/arch/powerpc/include/asm/ftrace.h b/trunk/arch/powerpc/include/asm/ftrace.h
index 1cd72700fbc0..b298f7a631e6 100644
--- a/trunk/arch/powerpc/include/asm/ftrace.h
+++ b/trunk/arch/powerpc/include/asm/ftrace.h
@@ -1,11 +1,6 @@
 #ifndef _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 
-#ifndef __ASSEMBLY__
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR ((long)(_mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
diff --git a/trunk/arch/sh/include/asm/ftrace.h b/trunk/arch/sh/include/asm/ftrace.h
index 31ada0370cb6..3aed362c9463 100644
--- a/trunk/arch/sh/include/asm/ftrace.h
+++ b/trunk/arch/sh/include/asm/ftrace.h
@@ -1,11 +1,6 @@
 #ifndef __ASM_SH_FTRACE_H
 #define __ASM_SH_FTRACE_H
 
-#ifndef __ASSEMBLY__
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
-#endif
-
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 #endif
diff --git a/trunk/arch/sparc/include/asm/ftrace.h b/trunk/arch/sparc/include/asm/ftrace.h
index 62055ac0496e..d27716cd38c1 100644
--- a/trunk/arch/sparc/include/asm/ftrace.h
+++ b/trunk/arch/sparc/include/asm/ftrace.h
@@ -1,11 +1,6 @@
 #ifndef _ASM_SPARC64_FTRACE
 #define _ASM_SPARC64_FTRACE
 
-#ifndef __ASSEMBLY__
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
-#endif
-
 #ifdef CONFIG_MCOUNT
 #define MCOUNT_ADDR ((long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index d09e812c6223..6f20718d3156 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -29,7 +29,6 @@ config X86 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER - select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) select HAVE_ARCH_KGDB if !X86_VOYAGER select HAVE_ARCH_TRACEHOOK diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index a23468194b8c..f8173ed1c970 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -17,23 +17,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) */ return addr - 1; } - -#ifdef CONFIG_DYNAMIC_FTRACE -extern void ftrace_nmi_enter(void); -extern void ftrace_nmi_exit(void); -#else -static inline void ftrace_nmi_enter(void) { } -static inline void ftrace_nmi_exit(void) { } -#endif #endif /* __ASSEMBLY__ */ - -#else /* CONFIG_FUNCTION_TRACER */ - -#ifndef __ASSEMBLY__ -static inline void ftrace_nmi_enter(void) { } -static inline void ftrace_nmi_exit(void) { } -#endif - #endif /* CONFIG_FUNCTION_TRACER */ #endif /* _ASM_X86_FTRACE_H */ diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S index 9134de814c97..28b597ef9ca1 100644 --- a/trunk/arch/x86/kernel/entry_32.S +++ b/trunk/arch/x86/kernel/entry_32.S @@ -1157,9 +1157,6 @@ ENTRY(mcount) END(mcount) ENTRY(ftrace_caller) - cmpl $0, function_trace_stop - jne ftrace_stub - pushl %eax pushl %ecx pushl %edx @@ -1183,9 +1180,6 @@ END(ftrace_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) - cmpl $0, function_trace_stop - jne ftrace_stub - cmpl $ftrace_stub, ftrace_trace_function jnz trace .globl ftrace_stub diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index 08aa6b10933c..b86f332c96a6 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -68,8 +68,6 @@ ENTRY(mcount) END(mcount) ENTRY(ftrace_caller) - cmpl $0, function_trace_stop - jne ftrace_stub /* taken from glibc */ subq $0x38, %rsp @@ -105,9 +103,6 @@ END(ftrace_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) - cmpl $0, function_trace_stop - jne ftrace_stub - cmpq $ftrace_stub, ftrace_trace_function jnz trace .globl ftrace_stub diff --git a/trunk/drivers/char/sysrq.c b/trunk/drivers/char/sysrq.c index 94966edfb44d..ce0d9da52a8a 100644 --- a/trunk/drivers/char/sysrq.c +++ b/trunk/drivers/char/sysrq.c @@ -274,22 +274,6 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; -#ifdef CONFIG_TRACING -#include - -static void sysrq_ftrace_dump(int key, struct tty_struct *tty) -{ - ftrace_dump(); -} -static struct sysrq_key_op sysrq_ftrace_dump_op = { - .handler = sysrq_ftrace_dump, - .help_msg = "dumpZ-ftrace-buffer", - .action_msg = "Dump ftrace buffer", - .enable_mask = SYSRQ_ENABLE_DUMP, -}; -#else -#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0) -#endif static void sysrq_handle_showmem(int key, struct tty_struct *tty) { @@ -422,7 +406,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { NULL, /* x */ /* y: May be registered on sparc64 for global register dump */ NULL, /* y */ - &sysrq_ftrace_dump_op, /* z */ + NULL /* z */ }; /* key2index calculation, -1 on invalid index */ diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 7a75fc6d41f4..0ad1b48aea69 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -23,34 +23,6 @@ struct ftrace_ops { struct ftrace_ops *next; }; -extern int function_trace_stop; - -/** - * ftrace_stop - stop function tracer. - * - * A quick way to stop the function tracer. Note this an on off switch, - * it is not something that is recursive like preempt_disable. - * This does not disable the calling of mcount, it only stops the - * calling of functions from mcount. - */ -static inline void ftrace_stop(void) -{ - function_trace_stop = 1; -} - -/** - * ftrace_start - start the function tracer. - * - * This function is the inverse of ftrace_stop. This does not enable - * the function tracing if the function tracer is disabled. This only - * sets the function tracer flag to continue calling the functions - * from mcount. - */ -static inline void ftrace_start(void) -{ - function_trace_stop = 0; -} - /* * The ftrace_ops must be a static and should also * be read_mostly. 
These functions do modify read_mostly variables @@ -69,12 +41,9 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1); # define unregister_ftrace_function(ops) do { } while (0) # define clear_ftrace_function(ops) do { } while (0) static inline void ftrace_kill(void) { } -static inline void ftrace_stop(void) { } -static inline void ftrace_start(void) { } #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE - enum { FTRACE_FL_FREE = (1 << 0), FTRACE_FL_FAILED = (1 << 1), @@ -135,6 +104,8 @@ extern void ftrace_release(void *start, unsigned long size); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); +extern void ftrace_nmi_enter(void); +extern void ftrace_nmi_exit(void); #else # define skip_trace(ip) ({ 0; }) @@ -143,6 +114,8 @@ extern void ftrace_enable_daemon(void); # define ftrace_disable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0) static inline void ftrace_release(void *start, unsigned long size) { } +static inline void ftrace_nmi_enter(void) { } +static inline void ftrace_nmi_exit(void) { } #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ @@ -216,9 +189,6 @@ static inline void __ftrace_enabled_restore(int enabled) #ifdef CONFIG_TRACING extern int ftrace_dump_on_oops; -extern void tracing_start(void); -extern void tracing_stop(void); - extern void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); @@ -249,8 +219,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } static inline int ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))); -static inline void tracing_start(void) { } -static inline void tracing_stop(void) { } static inline int ftrace_printk(const char *fmt, ...) { @@ -269,11 +237,6 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } #endif -/* - * Structure which defines the trace of an initcall. - * You don't have to fill the func field since it is - * only used internally by the tracer. - */ struct boot_trace { pid_t caller; char func[KSYM_NAME_LEN]; @@ -284,28 +247,13 @@ struct boot_trace { }; #ifdef CONFIG_BOOT_TRACER -/* Append the trace on the ring-buffer */ extern void trace_boot(struct boot_trace *it, initcall_t fn); - -/* Tells the tracer that smp_pre_initcall is finished. - * So we can start the tracing - */ extern void start_boot_trace(void); - -/* Resume the tracing of other necessary events - * such as sched switches - */ -extern void enable_boot_trace(void); - -/* Suspend this tracing. Actually, only sched_switches tracing have - * to be suspended. Initcalls doesn't need it.) 
- */ -extern void disable_boot_trace(void); +extern void stop_boot_trace(void); #else static inline void trace_boot(struct boot_trace *it, initcall_t fn) { } static inline void start_boot_trace(void) { } -static inline void enable_boot_trace(void) { } -static inline void disable_boot_trace(void) { } +static inline void stop_boot_trace(void) { } #endif diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h index 0087cb43becf..ffc16ab5a878 100644 --- a/trunk/include/linux/hardirq.h +++ b/trunk/include/linux/hardirq.h @@ -4,8 +4,8 @@ #include #include #include +#include #include -#include #include /* diff --git a/trunk/init/main.c b/trunk/init/main.c index 4b03cd5656ca..7e117a231af1 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -711,7 +711,6 @@ int do_one_initcall(initcall_t fn) it.caller = task_pid_nr(current); printk("calling %pF @ %i\n", fn, it.caller); it.calltime = ktime_get(); - enable_boot_trace(); } it.result = fn(); @@ -723,7 +722,6 @@ int do_one_initcall(initcall_t fn) printk("initcall %pF returned %d after %Ld usecs\n", fn, it.result, it.duration); trace_boot(&it, fn); - disable_boot_trace(); } msgbuf[0] = 0; @@ -884,7 +882,7 @@ static int __init kernel_init(void * unused) * we're essentially up and running. Get rid of the * initmem segments and start the user-mode stuff.. */ - + stop_boot_trace(); init_post(); return 0; } diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 65d4a9ba79e4..6b6b727258b5 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -487,7 +487,7 @@ static struct ctl_table kern_table[] = { #ifdef CONFIG_TRACING { .ctl_name = CTL_UNNUMBERED, - .procname = "ftrace_dump_on_oops", + .procname = "ftrace_dump_on_opps", .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, diff --git a/trunk/kernel/trace/Kconfig b/trunk/kernel/trace/Kconfig index fc4febc3334a..33dbefd471e8 100644 --- a/trunk/kernel/trace/Kconfig +++ b/trunk/kernel/trace/Kconfig @@ -9,13 +9,6 @@ config NOP_TRACER config HAVE_FUNCTION_TRACER bool -config HAVE_FUNCTION_TRACE_MCOUNT_TEST - bool - help - This gets selected when the arch tests the function_trace_stop - variable at the mcount call site. Otherwise, this variable - is tested by the called function. - config HAVE_DYNAMIC_FTRACE bool diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index 896c71f0f4c4..4a39d24568c8 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -47,9 +47,6 @@ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; -/* Quick disabling of function tracer. */ -int function_trace_stop; - /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. @@ -66,7 +63,6 @@ static struct ftrace_ops ftrace_list_end __read_mostly = static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; -ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) { @@ -92,23 +88,8 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) void clear_ftrace_function(void) { ftrace_trace_function = ftrace_stub; - __ftrace_trace_function = ftrace_stub; } -#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST -/* - * For those archs that do not test ftrace_trace_stop in their - * mcount call site, we need to do it from C. 
- */ -static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) -{ - if (function_trace_stop) - return; - - __ftrace_trace_function(ip, parent_ip); -} -#endif - static int __register_ftrace_function(struct ftrace_ops *ops) { /* should not be called from interrupt context */ @@ -129,18 +110,10 @@ static int __register_ftrace_function(struct ftrace_ops *ops) * For one func, simply call it directly. * For more than one func, call the chain. */ -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST if (ops->next == &ftrace_list_end) ftrace_trace_function = ops->func; else ftrace_trace_function = ftrace_list_func; -#else - if (ops->next == &ftrace_list_end) - __ftrace_trace_function = ops->func; - else - __ftrace_trace_function = ftrace_list_func; - ftrace_trace_function = ftrace_test_stop_func; -#endif } spin_unlock(&ftrace_lock); @@ -553,7 +526,7 @@ static void ftrace_run_update_code(int command) } static ftrace_func_t saved_ftrace_func; -static int ftrace_start_up; +static int ftrace_start; static DEFINE_MUTEX(ftrace_start_lock); static void ftrace_startup(void) @@ -564,8 +537,8 @@ static void ftrace_startup(void) return; mutex_lock(&ftrace_start_lock); - ftrace_start_up++; - if (ftrace_start_up == 1) + ftrace_start++; + if (ftrace_start == 1) command |= FTRACE_ENABLE_CALLS; if (saved_ftrace_func != ftrace_trace_function) { @@ -589,8 +562,8 @@ static void ftrace_shutdown(void) return; mutex_lock(&ftrace_start_lock); - ftrace_start_up--; - if (!ftrace_start_up) + ftrace_start--; + if (!ftrace_start) command |= FTRACE_DISABLE_CALLS; if (saved_ftrace_func != ftrace_trace_function) { @@ -616,8 +589,8 @@ static void ftrace_startup_sysctl(void) mutex_lock(&ftrace_start_lock); /* Force update next time */ saved_ftrace_func = NULL; - /* ftrace_start_up is true if we want ftrace running */ - if (ftrace_start_up) + /* ftrace_start is true if we want ftrace running */ + if (ftrace_start) command |= FTRACE_ENABLE_CALLS; ftrace_run_update_code(command); @@ -632,8 +605,8 @@ static void ftrace_shutdown_sysctl(void) return; mutex_lock(&ftrace_start_lock); - /* ftrace_start_up is true if ftrace is running */ - if (ftrace_start_up) + /* ftrace_start is true if ftrace is running */ + if (ftrace_start) command |= FTRACE_DISABLE_CALLS; ftrace_run_update_code(command); @@ -1213,7 +1186,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) mutex_lock(&ftrace_sysctl_lock); mutex_lock(&ftrace_start_lock); - if (iter->filtered && ftrace_start_up && ftrace_enabled) + if (iter->filtered && ftrace_start && ftrace_enabled) ftrace_run_update_code(FTRACE_ENABLE_CALLS); mutex_unlock(&ftrace_start_lock); mutex_unlock(&ftrace_sysctl_lock); diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c index a2dea5008826..cedf4e268285 100644 --- a/trunk/kernel/trace/ring_buffer.c +++ b/trunk/kernel/trace/ring_buffer.c @@ -16,8 +16,6 @@ #include #include -#include "trace.h" - /* Up this if you want to test the TIME_EXTENTS and normalization */ #define DEBUG_SHIFT 0 @@ -154,7 +152,7 @@ static inline int test_time_stamp(u64 delta) struct ring_buffer_per_cpu { int cpu; struct ring_buffer *buffer; - raw_spinlock_t lock; + spinlock_t lock; struct lock_class_key lock_key; struct list_head pages; struct buffer_page *head_page; /* read from head */ @@ -291,7 +289,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->cpu = cpu; cpu_buffer->buffer = buffer; - cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + spin_lock_init(&cpu_buffer->lock); 
INIT_LIST_HEAD(&cpu_buffer->pages); page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), @@ -854,8 +852,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, if (write > BUF_PAGE_SIZE) { struct buffer_page *next_page = tail_page; - local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); rb_inc_page(cpu_buffer, &next_page); @@ -931,8 +928,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, rb_set_commit_to_write(cpu_buffer); } - __raw_spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); /* fail and let the caller try again */ return ERR_PTR(-EAGAIN); @@ -955,8 +951,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, return event; out_unlock: - __raw_spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); return NULL; } @@ -1127,7 +1122,8 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, return NULL; /* If we are tracing schedule, we don't want to recurse */ - resched = ftrace_preempt_disable(); + resched = need_resched(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); @@ -1158,7 +1154,10 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, return event; out: - ftrace_preempt_enable(resched); + if (resched) + preempt_enable_notrace(); + else + preempt_enable_notrace(); return NULL; } @@ -1200,9 +1199,12 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, /* * Only the last preempt count needs to restore preemption. */ - if (preempt_count() == 1) - ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); - else + if (preempt_count() == 1) { + if (per_cpu(rb_need_resched, cpu)) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); + } else preempt_enable_no_resched_notrace(); return 0; @@ -1235,7 +1237,8 @@ int ring_buffer_write(struct ring_buffer *buffer, if (atomic_read(&buffer->record_disabled)) return -EBUSY; - resched = ftrace_preempt_disable(); + resched = need_resched(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); @@ -1261,7 +1264,10 @@ int ring_buffer_write(struct ring_buffer *buffer, ret = 0; out: - ftrace_preempt_enable(resched); + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); return ret; } @@ -1527,8 +1533,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) struct buffer_page *reader = NULL; unsigned long flags; - local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); again: reader = cpu_buffer->reader_page; @@ -1578,8 +1583,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) goto again; out: - __raw_spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); return reader; } @@ -1820,11 +1824,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) atomic_inc(&cpu_buffer->record_disabled); synchronize_sched(); - local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); ring_buffer_iter_reset(iter); - __raw_spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); return iter; } @@ -1910,13 +1912,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (!cpu_isset(cpu, buffer->cpumask)) return; - local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + spin_lock_irqsave(&cpu_buffer->lock, flags); rb_reset_cpu(cpu_buffer); 
- __raw_spin_unlock(&cpu_buffer->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&cpu_buffer->lock, flags); } /** diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index ff1e9ed9b587..e4c40c868d67 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -43,15 +43,6 @@ unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; unsigned long __read_mostly tracing_thresh; - -/* - * Kill all tracing for good (never come back). - * It is initialized to 1 but will turn to zero if the initialization - * of the tracer is successful. But that is the only place that sets - * this back to zero. - */ -int tracing_disabled = 1; - static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); static inline void ftrace_disable_cpu(void) @@ -71,6 +62,8 @@ static cpumask_t __read_mostly tracing_buffer_mask; #define for_each_tracing_cpu(cpu) \ for_each_cpu_mask(cpu, tracing_buffer_mask) +static int tracing_disabled = 1; + /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * @@ -150,19 +143,6 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data); /* tracer_enabled is used to toggle activation of a tracer */ static int tracer_enabled = 1; -/** - * tracing_is_enabled - return tracer_enabled status - * - * This function is used by other tracers to know the status - * of the tracer_enabled flag. Tracers may use this function - * to know if it should enable their features when starting - * up. See irqsoff tracer for an example (start_irqsoff_tracer). - */ -int tracing_is_enabled(void) -{ - return tracer_enabled; -} - /* function tracing enabled */ int ftrace_function_enabled; @@ -264,7 +244,6 @@ static const char *trace_options[] = { "stacktrace", "sched-tree", "ftrace_printk", - "ftrace_preempt", NULL }; @@ -633,76 +612,6 @@ static void trace_init_cmdlines(void) cmdline_idx = 0; } -static int trace_stop_count; -static DEFINE_SPINLOCK(tracing_start_lock); - -/** - * tracing_start - quick start of the tracer - * - * If tracing is enabled but was stopped by tracing_stop, - * this will start the tracer back up. - */ -void tracing_start(void) -{ - struct ring_buffer *buffer; - unsigned long flags; - - if (tracing_disabled) - return; - - spin_lock_irqsave(&tracing_start_lock, flags); - if (--trace_stop_count) - goto out; - - if (trace_stop_count < 0) { - /* Someone screwed up their debugging */ - WARN_ON_ONCE(1); - trace_stop_count = 0; - goto out; - } - - - buffer = global_trace.buffer; - if (buffer) - ring_buffer_record_enable(buffer); - - buffer = max_tr.buffer; - if (buffer) - ring_buffer_record_enable(buffer); - - ftrace_start(); - out: - spin_unlock_irqrestore(&tracing_start_lock, flags); -} - -/** - * tracing_stop - quick stop of the tracer - * - * Light weight way to stop tracing. Use in conjunction with - * tracing_start. 
- */ -void tracing_stop(void) -{ - struct ring_buffer *buffer; - unsigned long flags; - - ftrace_stop(); - spin_lock_irqsave(&tracing_start_lock, flags); - if (trace_stop_count++) - goto out; - - buffer = global_trace.buffer; - if (buffer) - ring_buffer_record_disable(buffer); - - buffer = max_tr.buffer; - if (buffer) - ring_buffer_record_disable(buffer); - - out: - spin_unlock_irqrestore(&tracing_start_lock, flags); -} - void trace_stop_cmdline_recording(void); static void trace_save_cmdline(struct task_struct *tsk) @@ -982,7 +891,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) #ifdef CONFIG_FUNCTION_TRACER static void -function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) +function_trace_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = &global_trace; struct trace_array_cpu *data; @@ -995,7 +904,8 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) return; pc = preempt_count(); - resched = ftrace_preempt_disable(); + resched = need_resched(); + preempt_disable_notrace(); local_save_flags(flags); cpu = raw_smp_processor_id(); data = tr->data[cpu]; @@ -1005,38 +915,10 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) trace_function(tr, data, ip, parent_ip, flags, pc); atomic_dec(&data->disabled); - ftrace_preempt_enable(resched); -} - -static void -function_trace_call(unsigned long ip, unsigned long parent_ip) -{ - struct trace_array *tr = &global_trace; - struct trace_array_cpu *data; - unsigned long flags; - long disabled; - int cpu; - int pc; - - if (unlikely(!ftrace_function_enabled)) - return; - - /* - * Need to use raw, since this must be called before the - * recursive protection is performed. - */ - raw_local_irq_save(flags); - cpu = raw_smp_processor_id(); - data = tr->data[cpu]; - disabled = atomic_inc_return(&data->disabled); - - if (likely(disabled == 1)) { - pc = preempt_count(); - trace_function(tr, data, ip, parent_ip, flags, pc); - } - - atomic_dec(&data->disabled); - raw_local_irq_restore(flags); + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = @@ -1047,14 +929,9 @@ static struct ftrace_ops trace_ops __read_mostly = void tracing_start_function_trace(void) { ftrace_function_enabled = 0; - - if (trace_flags & TRACE_ITER_PREEMPTONLY) - trace_ops.func = function_trace_call_preempt_only; - else - trace_ops.func = function_trace_call; - register_ftrace_function(&trace_ops); - ftrace_function_enabled = 1; + if (tracer_enabled) + ftrace_function_enabled = 1; } void tracing_stop_function_trace(void) @@ -1201,6 +1078,10 @@ static void *s_start(struct seq_file *m, loff_t *pos) atomic_inc(&trace_record_cmdline_disabled); + /* let the tracer grab locks here if needed */ + if (current_trace->start) + current_trace->start(iter); + if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; @@ -1227,7 +1108,14 @@ static void *s_start(struct seq_file *m, loff_t *pos) static void s_stop(struct seq_file *m, void *p) { + struct trace_iterator *iter = m->private; + atomic_dec(&trace_record_cmdline_disabled); + + /* let the tracer release locks here if needed */ + if (current_trace && current_trace == iter->trace && iter->trace->stop) + iter->trace->stop(iter); + mutex_unlock(&trace_types_lock); } @@ -2057,7 +1945,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) m->private = iter; /* stop the trace while dumping */ - tracing_stop(); + if 
(iter->tr->ctrl) { + tracer_enabled = 0; + ftrace_function_enabled = 0; + } if (iter->trace && iter->trace->open) iter->trace->open(iter); @@ -2102,7 +1993,14 @@ int tracing_release(struct inode *inode, struct file *file) iter->trace->close(iter); /* reenable tracing if it was previously enabled */ - tracing_start(); + if (iter->tr->ctrl) { + tracer_enabled = 1; + /* + * It is safe to enable function tracing even if it + * isn't used + */ + ftrace_function_enabled = 1; + } mutex_unlock(&trace_types_lock); seq_release(inode, file); @@ -2440,10 +2338,11 @@ static ssize_t tracing_ctrl_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { + struct trace_array *tr = filp->private_data; char buf[64]; int r; - r = sprintf(buf, "%u\n", tracer_enabled); + r = sprintf(buf, "%ld\n", tr->ctrl); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } @@ -2471,18 +2370,16 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, val = !!val; mutex_lock(&trace_types_lock); - if (tracer_enabled ^ val) { - if (val) { + if (tr->ctrl ^ val) { + if (val) tracer_enabled = 1; - if (current_trace->start) - current_trace->start(tr); - tracing_start(); - } else { + else tracer_enabled = 0; - tracing_stop(); - if (current_trace->stop) - current_trace->stop(tr); - } + + tr->ctrl = val; + + if (current_trace && current_trace->ctrl_update) + current_trace->ctrl_update(tr); } mutex_unlock(&trace_types_lock); @@ -3354,8 +3251,6 @@ __init static int tracer_alloc_buffers(void) register_tracer(&nop_trace); #ifdef CONFIG_BOOT_TRACER - /* We don't want to launch sched_switch tracer yet */ - global_trace.ctrl = 0; register_tracer(&boot_tracer); current_trace = &boot_tracer; current_trace->init(&global_trace); @@ -3364,7 +3259,7 @@ __init static int tracer_alloc_buffers(void) #endif /* All seems OK, enable tracing */ - global_trace.ctrl = 1; + global_trace.ctrl = tracer_enabled; tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 3422489fad5e..8465ad052707 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -49,7 +49,6 @@ struct ftrace_entry { unsigned long parent_ip; }; extern struct tracer boot_tracer; -extern struct tracer sched_switch_trace; /* Used by the boot tracer */ /* * Context switch trace entry - which task (and prio) we switched from/to: @@ -237,11 +236,11 @@ struct tracer { const char *name; void (*init)(struct trace_array *tr); void (*reset)(struct trace_array *tr); - void (*start)(struct trace_array *tr); - void (*stop)(struct trace_array *tr); void (*open)(struct trace_iterator *iter); void (*pipe_open)(struct trace_iterator *iter); void (*close)(struct trace_iterator *iter); + void (*start)(struct trace_iterator *iter); + void (*stop)(struct trace_iterator *iter); ssize_t (*read)(struct trace_iterator *iter, struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos); @@ -282,7 +281,6 @@ struct trace_iterator { long idx; }; -int tracing_is_enabled(void); void trace_wake_up(void); void tracing_reset(struct trace_array *tr, int cpu); int tracing_open_generic(struct inode *inode, struct file *filp); @@ -417,57 +415,8 @@ enum trace_iterator_flags { TRACE_ITER_STACKTRACE = 0x100, TRACE_ITER_SCHED_TREE = 0x200, TRACE_ITER_PRINTK = 0x400, - TRACE_ITER_PREEMPTONLY = 0x800, }; extern struct tracer nop_trace; -/** - * ftrace_preempt_disable - disable preemption scheduler safe - * - * When tracing can happen inside the scheduler, there exists - * cases that the tracing 
might happen before the need_resched - * flag is checked. If this happens and the tracer calls - * preempt_enable (after a disable), a schedule might take place - * causing an infinite recursion. - * - * To prevent this, we read the need_recshed flag before - * disabling preemption. When we want to enable preemption we - * check the flag, if it is set, then we call preempt_enable_no_resched. - * Otherwise, we call preempt_enable. - * - * The rational for doing the above is that if need resched is set - * and we have yet to reschedule, we are either in an atomic location - * (where we do not need to check for scheduling) or we are inside - * the scheduler and do not want to resched. - */ -static inline int ftrace_preempt_disable(void) -{ - int resched; - - resched = need_resched(); - preempt_disable_notrace(); - - return resched; -} - -/** - * ftrace_preempt_enable - enable preemption scheduler safe - * @resched: the return value from ftrace_preempt_disable - * - * This is a scheduler safe way to enable preemption and not miss - * any preemption checks. The disabled saved the state of preemption. - * If resched is set, then we were either inside an atomic or - * are inside the scheduler (we would have already scheduled - * otherwise). In this case, we do not want to call normal - * preempt_enable, but preempt_enable_no_resched instead. - */ -static inline void ftrace_preempt_enable(int resched) -{ - if (resched) - preempt_enable_no_resched_notrace(); - else - preempt_enable_notrace(); -} - #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/trunk/kernel/trace/trace_boot.c b/trunk/kernel/trace/trace_boot.c index bd5046c9deb7..d0a5e50eeff2 100644 --- a/trunk/kernel/trace/trace_boot.c +++ b/trunk/kernel/trace/trace_boot.c @@ -13,33 +13,23 @@ #include "trace.h" static struct trace_array *boot_trace; -static bool pre_initcalls_finished; +static int trace_boot_enabled; -/* Tells the boot tracer that the pre_smp_initcalls are finished. - * So we are ready . - * It doesn't enable sched events tracing however. - * You have to call enable_boot_trace to do so. 
- */ -void start_boot_trace(void) -{ - pre_initcalls_finished = true; -} -void enable_boot_trace(void) +/* Should be started after do_pre_smp_initcalls() in init/main.c */ +void start_boot_trace(void) { - if (pre_initcalls_finished) - tracing_start_cmdline_record(); + trace_boot_enabled = 1; } -void disable_boot_trace(void) +void stop_boot_trace(void) { - if (pre_initcalls_finished) - tracing_stop_cmdline_record(); + trace_boot_enabled = 0; } -static void reset_boot_trace(struct trace_array *tr) +void reset_boot_trace(struct trace_array *tr) { - sched_switch_trace.reset(tr); + stop_boot_trace(); } static void boot_trace_init(struct trace_array *tr) @@ -47,18 +37,18 @@ static void boot_trace_init(struct trace_array *tr) int cpu; boot_trace = tr; + trace_boot_enabled = 0; + for_each_cpu_mask(cpu, cpu_possible_map) tracing_reset(tr, cpu); - - sched_switch_trace.init(tr); } static void boot_trace_ctrl_update(struct trace_array *tr) { if (tr->ctrl) - enable_boot_trace(); + start_boot_trace(); else - disable_boot_trace(); + stop_boot_trace(); } static enum print_line_t initcall_print_line(struct trace_iterator *iter) @@ -109,7 +99,7 @@ void trace_boot(struct boot_trace *it, initcall_t fn) unsigned long irq_flags; struct trace_array *tr = boot_trace; - if (!pre_initcalls_finished) + if (!trace_boot_enabled) return; /* Get its name now since this function could diff --git a/trunk/kernel/trace/trace_functions.c b/trunk/kernel/trace/trace_functions.c index 9f1b0de71284..0f85a64003d3 100644 --- a/trunk/kernel/trace/trace_functions.c +++ b/trunk/kernel/trace/trace_functions.c @@ -62,17 +62,11 @@ static void function_trace_ctrl_update(struct trace_array *tr) stop_function_trace(tr); } -static void function_trace_start(struct trace_array *tr) -{ - function_reset(tr); -} - static struct tracer function_trace __read_mostly = { .name = "function", .init = function_trace_init, .reset = function_trace_reset, - .start = function_trace_start, .ctrl_update = function_trace_ctrl_update, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_function, diff --git a/trunk/kernel/trace/trace_irqsoff.c b/trunk/kernel/trace/trace_irqsoff.c index a87a20fa3fc6..9c74071c10e0 100644 --- a/trunk/kernel/trace/trace_irqsoff.c +++ b/trunk/kernel/trace/trace_irqsoff.c @@ -353,28 +353,15 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) } #endif /* CONFIG_PREEMPT_TRACER */ -/* - * save_tracer_enabled is used to save the state of the tracer_enabled - * variable when we disable it when we open a trace output file. 
- */ -static int save_tracer_enabled; - static void start_irqsoff_tracer(struct trace_array *tr) { register_ftrace_function(&trace_ops); - if (tracing_is_enabled()) { - tracer_enabled = 1; - save_tracer_enabled = 1; - } else { - tracer_enabled = 0; - save_tracer_enabled = 0; - } + tracer_enabled = 1; } static void stop_irqsoff_tracer(struct trace_array *tr) { tracer_enabled = 0; - save_tracer_enabled = 0; unregister_ftrace_function(&trace_ops); } @@ -402,29 +389,17 @@ static void irqsoff_tracer_ctrl_update(struct trace_array *tr) stop_irqsoff_tracer(tr); } -static void irqsoff_tracer_start(struct trace_array *tr) -{ - irqsoff_tracer_reset(tr); - tracer_enabled = 1; - save_tracer_enabled = 1; -} - -static void irqsoff_tracer_stop(struct trace_array *tr) -{ - tracer_enabled = 0; - save_tracer_enabled = 0; -} - static void irqsoff_tracer_open(struct trace_iterator *iter) { /* stop the trace while dumping */ - tracer_enabled = 0; + if (iter->tr->ctrl) + stop_irqsoff_tracer(iter->tr); } static void irqsoff_tracer_close(struct trace_iterator *iter) { - /* restart tracing */ - tracer_enabled = save_tracer_enabled; + if (iter->tr->ctrl) + start_irqsoff_tracer(iter->tr); } #ifdef CONFIG_IRQSOFF_TRACER @@ -439,8 +414,6 @@ static struct tracer irqsoff_tracer __read_mostly = .name = "irqsoff", .init = irqsoff_tracer_init, .reset = irqsoff_tracer_reset, - .start = irqsoff_tracer_start, - .stop = irqsoff_tracer_stop, .open = irqsoff_tracer_open, .close = irqsoff_tracer_close, .ctrl_update = irqsoff_tracer_ctrl_update, @@ -467,8 +440,6 @@ static struct tracer preemptoff_tracer __read_mostly = .name = "preemptoff", .init = preemptoff_tracer_init, .reset = irqsoff_tracer_reset, - .start = irqsoff_tracer_start, - .stop = irqsoff_tracer_stop, .open = irqsoff_tracer_open, .close = irqsoff_tracer_close, .ctrl_update = irqsoff_tracer_ctrl_update, @@ -497,8 +468,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly = .name = "preemptirqsoff", .init = preemptirqsoff_tracer_init, .reset = irqsoff_tracer_reset, - .start = irqsoff_tracer_start, - .stop = irqsoff_tracer_stop, .open = irqsoff_tracer_open, .close = irqsoff_tracer_close, .ctrl_update = irqsoff_tracer_ctrl_update, diff --git a/trunk/kernel/trace/trace_sched_switch.c b/trunk/kernel/trace/trace_sched_switch.c index 91c699be8c87..b8f56beb1a62 100644 --- a/trunk/kernel/trace/trace_sched_switch.c +++ b/trunk/kernel/trace/trace_sched_switch.c @@ -16,8 +16,7 @@ static struct trace_array *ctx_trace; static int __read_mostly tracer_enabled; -static int sched_ref; -static DEFINE_MUTEX(sched_register_mutex); +static atomic_t sched_ref; static void probe_sched_switch(struct rq *__rq, struct task_struct *prev, @@ -28,7 +27,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, int cpu; int pc; - if (!sched_ref) + if (!atomic_read(&sched_ref)) return; tracing_record_cmdline(prev); @@ -124,22 +123,20 @@ static void tracing_sched_unregister(void) static void tracing_start_sched_switch(void) { - mutex_lock(&sched_register_mutex); - if (!(sched_ref++)) { - tracer_enabled = 1; + long ref; + + ref = atomic_inc_return(&sched_ref); + if (ref == 1) tracing_sched_register(); - } - mutex_unlock(&sched_register_mutex); } static void tracing_stop_sched_switch(void) { - mutex_lock(&sched_register_mutex); - if (!(--sched_ref)) { + long ref; + + ref = atomic_dec_and_test(&sched_ref); + if (ref) tracing_sched_unregister(); - tracer_enabled = 0; - } - mutex_unlock(&sched_register_mutex); } void tracing_start_cmdline_record(void) @@ -156,10 +153,12 @@ static void 
start_sched_trace(struct trace_array *tr) { sched_switch_reset(tr); tracing_start_cmdline_record(); + tracer_enabled = 1; } static void stop_sched_trace(struct trace_array *tr) { + tracer_enabled = 0; tracing_stop_cmdline_record(); } @@ -173,7 +172,7 @@ static void sched_switch_trace_init(struct trace_array *tr) static void sched_switch_trace_reset(struct trace_array *tr) { - if (tr->ctrl && sched_ref) + if (tr->ctrl) stop_sched_trace(tr); } @@ -186,24 +185,11 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr) stop_sched_trace(tr); } -static void sched_switch_trace_start(struct trace_array *tr) -{ - sched_switch_reset(tr); - tracing_start_sched_switch(); -} - -static void sched_switch_trace_stop(struct trace_array *tr) -{ - tracing_stop_sched_switch(); -} - -struct tracer sched_switch_trace __read_mostly = +static struct tracer sched_switch_trace __read_mostly = { .name = "sched_switch", .init = sched_switch_trace_init, .reset = sched_switch_trace_reset, - .start = sched_switch_trace_start, - .stop = sched_switch_trace_stop, .ctrl_update = sched_switch_trace_ctrl_update, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_sched_switch, @@ -212,6 +198,14 @@ struct tracer sched_switch_trace __read_mostly = __init static int init_sched_switch_trace(void) { + int ret = 0; + + if (atomic_read(&sched_ref)) + ret = tracing_sched_register(); + if (ret) { + pr_info("error registering scheduler trace\n"); + return ret; + } return register_tracer(&sched_switch_trace); } device_initcall(init_sched_switch_trace); diff --git a/trunk/kernel/trace/trace_sched_wakeup.c b/trunk/kernel/trace/trace_sched_wakeup.c index 240577bc8ba5..3ae93f16b565 100644 --- a/trunk/kernel/trace/trace_sched_wakeup.c +++ b/trunk/kernel/trace/trace_sched_wakeup.c @@ -50,7 +50,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) return; pc = preempt_count(); - resched = ftrace_preempt_disable(); + resched = need_resched(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; @@ -80,7 +81,15 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) out: atomic_dec(&data->disabled); - ftrace_preempt_enable(resched); + /* + * To prevent recursion from the scheduler, if the + * resched flag was set before we entered, then + * don't reschedule. + */ + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = @@ -262,12 +271,6 @@ probe_wakeup(struct rq *rq, struct task_struct *p) atomic_dec(&wakeup_trace->data[cpu]->disabled); } -/* - * save_tracer_enabled is used to save the state of the tracer_enabled - * variable when we disable it when we open a trace output file. 
- */ -static int save_tracer_enabled; - static void start_wakeup_tracer(struct trace_array *tr) { int ret; @@ -306,13 +309,7 @@ static void start_wakeup_tracer(struct trace_array *tr) register_ftrace_function(&trace_ops); - if (tracing_is_enabled()) { - tracer_enabled = 1; - save_tracer_enabled = 1; - } else { - tracer_enabled = 0; - save_tracer_enabled = 0; - } + tracer_enabled = 1; return; fail_deprobe_wake_new: @@ -324,7 +321,6 @@ static void start_wakeup_tracer(struct trace_array *tr) static void stop_wakeup_tracer(struct trace_array *tr) { tracer_enabled = 0; - save_tracer_enabled = 0; unregister_ftrace_function(&trace_ops); unregister_trace_sched_switch(probe_wakeup_sched_switch); unregister_trace_sched_wakeup_new(probe_wakeup); @@ -356,32 +352,18 @@ static void wakeup_tracer_ctrl_update(struct trace_array *tr) stop_wakeup_tracer(tr); } -static void wakeup_tracer_start(struct trace_array *tr) -{ - wakeup_reset(tr); - tracer_enabled = 1; - save_tracer_enabled = 1; -} - -static void wakeup_tracer_stop(struct trace_array *tr) -{ - tracer_enabled = 0; - save_tracer_enabled = 0; -} - static void wakeup_tracer_open(struct trace_iterator *iter) { /* stop the trace while dumping */ - tracer_enabled = 0; + if (iter->tr->ctrl) + stop_wakeup_tracer(iter->tr); } static void wakeup_tracer_close(struct trace_iterator *iter) { /* forget about any processes we were recording */ - if (save_tracer_enabled) { - wakeup_reset(iter->tr); - tracer_enabled = 1; - } + if (iter->tr->ctrl) + start_wakeup_tracer(iter->tr); } static struct tracer wakeup_tracer __read_mostly = @@ -389,8 +371,6 @@ static struct tracer wakeup_tracer __read_mostly = .name = "wakeup", .init = wakeup_tracer_init, .reset = wakeup_tracer_reset, - .start = wakeup_tracer_start, - .stop = wakeup_tracer_stop, .open = wakeup_tracer_open, .close = wakeup_tracer_close, .ctrl_update = wakeup_tracer_ctrl_update, diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c index d39e8b7de6a2..be682b62fe58 100644 --- a/trunk/kernel/trace/trace_stack.c +++ b/trunk/kernel/trace/trace_stack.c @@ -107,7 +107,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) if (unlikely(!ftrace_enabled || stack_trace_disabled)) return; - resched = ftrace_preempt_disable(); + resched = need_resched(); + preempt_disable_notrace(); cpu = raw_smp_processor_id(); /* no atomic needed, we only modify this variable by this cpu */ @@ -119,7 +120,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) out: per_cpu(trace_active, cpu)--; /* prevent recursion in schedule */ - ftrace_preempt_enable(resched); + if (resched) + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly =