diff --git a/[refs] b/[refs]
index 4bdf43c77ad3..9598d0e3209d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: faec2ec505d397e9426754722b6e80d519c4938f
+refs/heads/master: 1fd8f2a3f9a91b287a876cef830b21baafc8a799
diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h
index b295d3106bfe..469ceb3e85ba 100644
--- a/trunk/include/linux/ftrace.h
+++ b/trunk/include/linux/ftrace.h
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include
 
 #ifdef CONFIG_FUNCTION_TRACER
 
@@ -392,49 +391,4 @@ static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 #endif
 
-#ifdef CONFIG_TRACING
-#include
-
-/* flags for current->trace */
-enum {
-	TSK_TRACE_FL_TRACE_BIT	= 0,
-	TSK_TRACE_FL_GRAPH_BIT	= 1,
-};
-enum {
-	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
-	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
-	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
-	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
-	return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
-	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
-	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
-	return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
-#endif /* CONFIG_TRACING */
-
 #endif /* _LINUX_FTRACE_H */
diff --git a/trunk/include/linux/pid.h b/trunk/include/linux/pid.h
index bb206c56d1f0..d7e98ff8021e 100644
--- a/trunk/include/linux/pid.h
+++ b/trunk/include/linux/pid.h
@@ -147,9 +147,9 @@ pid_t pid_vnr(struct pid *pid);
 #define do_each_pid_task(pid, type, task)				\
 	do {								\
 		struct hlist_node *pos___;				\
-		if ((pid) != NULL)					\
+		if (pid != NULL)					\
 			hlist_for_each_entry_rcu((task), pos___,	\
-				&(pid)->tasks[type], pids[type].node) {
+				&pid->tasks[type], pids[type].node) {
 
 /*
  * Both old and new leaders may be attached to
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 4c152e0acc9e..2d0a93c31228 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -1380,10 +1380,6 @@ struct task_struct {
 	 */
 	atomic_t trace_overrun;
 #endif
-#ifdef CONFIG_TRACING
-	/* state flags for use by tracers */
-	unsigned long trace;
-#endif
 };
 
 /*
diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c
index d2b156538162..65b9e863056b 100644
--- a/trunk/kernel/trace/ftrace.c
+++ b/trunk/kernel/trace/ftrace.c
@@ -47,9 +47,8 @@ int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = (struct pid *)1;
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
 
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -91,7 +90,7 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
 {
-	if (!test_tsk_trace_trace(current))
+	if (current->pid != ftrace_pid_trace)
 		return;
 
 	ftrace_pid_function(ip, parent_ip);
@@ -154,7 +153,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	else
 		func = ftrace_list_func;
 
-	if (ftrace_pid_trace) {
+	if (ftrace_pid_trace >= 0) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	}
@@ -210,7 +209,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_list->next == &ftrace_list_end) {
 		ftrace_func_t func = ftrace_list->func;
 
-		if (ftrace_pid_trace) {
+		if (ftrace_pid_trace >= 0) {
 			set_ftrace_pid_function(func);
 			func = ftrace_pid_func;
 		}
@@ -240,7 +239,7 @@ static void ftrace_update_pid_func(void)
 
 	func = ftrace_trace_function;
 
-	if (ftrace_pid_trace) {
+	if (ftrace_pid_trace >= 0) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	} else {
@@ -1321,230 +1320,6 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static DEFINE_MUTEX(graph_lock);
-
-int ftrace_graph_count;
-unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
-
-static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	unsigned long *array = m->private;
-	int index = *pos;
-
-	(*pos)++;
-
-	if (index >= ftrace_graph_count)
-		return NULL;
-
-	return &array[index];
-}
-
-static void *g_start(struct seq_file *m, loff_t *pos)
-{
-	void *p = NULL;
-
-	mutex_lock(&graph_lock);
-
-	p = g_next(m, p, pos);
-
-	return p;
-}
-
-static void g_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&graph_lock);
-}
-
-static int g_show(struct seq_file *m, void *v)
-{
-	unsigned long *ptr = v;
-	char str[KSYM_SYMBOL_LEN];
-
-	if (!ptr)
-		return 0;
-
-	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
-	seq_printf(m, "%s\n", str);
-
-	return 0;
-}
-
-static struct seq_operations ftrace_graph_seq_ops = {
-	.start = g_start,
-	.next = g_next,
-	.stop = g_stop,
-	.show = g_show,
-};
-
-static int
-ftrace_graph_open(struct inode *inode, struct file *file)
-{
-	int ret = 0;
-
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
-	mutex_lock(&graph_lock);
-	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
-		ftrace_graph_count = 0;
-		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
-	}
-
-	if (file->f_mode & FMODE_READ) {
-		ret = seq_open(file, &ftrace_graph_seq_ops);
-		if (!ret) {
-			struct seq_file *m = file->private_data;
-			m->private = ftrace_graph_funcs;
-		}
-	} else
-		file->private_data = ftrace_graph_funcs;
-	mutex_unlock(&graph_lock);
-
-	return ret;
-}
-
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
-{
-	if (file->f_mode & FMODE_READ)
-		return seq_read(file, ubuf, cnt, ppos);
-	else
-		return -EPERM;
-}
-
-static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
-{
-	char str[KSYM_SYMBOL_LEN];
-	struct dyn_ftrace *rec;
-	struct ftrace_page *pg;
-	int found = 0;
-	int i, j;
-
-	if (ftrace_disabled)
-		return -ENODEV;
-
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
-
-	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		for (i = 0; i < pg->index; i++) {
-			rec = &pg->records[i];
-
-			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
-				continue;
-
-			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-			if (strcmp(str, buffer) == 0) {
-				found = 1;
-				for (j = 0; j < idx; j++)
-					if (array[j] == rec->ip) {
-						found = 0;
-						break;
-					}
-				if (found)
-					array[idx] = rec->ip;
-				break;
-			}
-		}
-	}
-	spin_unlock(&ftrace_lock);
-
-	return found ? 0 : -EINVAL;
-}
-
-static ssize_t
-ftrace_graph_write(struct file *file, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	unsigned char buffer[FTRACE_BUFF_MAX+1];
-	unsigned long *array;
-	size_t read = 0;
-	ssize_t ret;
-	int index = 0;
-	char ch;
-
-	if (!cnt || cnt < 0)
-		return 0;
-
-	mutex_lock(&graph_lock);
-
-	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (file->f_mode & FMODE_READ) {
-		struct seq_file *m = file->private_data;
-		array = m->private;
-	} else
-		array = file->private_data;
-
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		goto out;
-	read++;
-	cnt--;
-
-	/* skip white space */
-	while (cnt && isspace(ch)) {
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			goto out;
-		read++;
-		cnt--;
-	}
-
-	if (isspace(ch)) {
-		*ppos += read;
-		ret = read;
-		goto out;
-	}
-
-	while (cnt && !isspace(ch)) {
-		if (index < FTRACE_BUFF_MAX)
-			buffer[index++] = ch;
-		else {
-			ret = -EINVAL;
-			goto out;
-		}
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			goto out;
-		read++;
-		cnt--;
-	}
-	buffer[index] = 0;
-
-	/* we allow only one at a time */
-	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
-	if (ret)
-		goto out;
-
-	ftrace_graph_count++;
-
-	file->f_pos += read;
-
-	ret = read;
- out:
-	mutex_unlock(&graph_lock);
-
-	return ret;
-}
-
-static const struct file_operations ftrace_graph_fops = {
-	.open = ftrace_graph_open,
-	.read = ftrace_graph_read,
-	.write = ftrace_graph_write,
-};
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 	struct dentry *entry;
@@ -1572,15 +1347,6 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
-				    NULL,
-				    &ftrace_graph_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'set_graph_function' entry\n");
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 	return 0;
 }
 
@@ -1685,84 +1451,18 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
 	char buf[64];
 	int r;
 
-	if (ftrace_pid_trace == ftrace_swapper_pid)
-		r = sprintf(buf, "swapper tasks\n");
-	else if (ftrace_pid_trace)
-		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
 	else
 		r = sprintf(buf, "no pid\n");
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static void clear_ftrace_swapper(void)
-{
-	struct task_struct *p;
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		p = idle_task(cpu);
-		clear_tsk_trace_trace(p);
-	}
-	put_online_cpus();
-}
-
-static void set_ftrace_swapper(void)
-{
-	struct task_struct *p;
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		p = idle_task(cpu);
-		set_tsk_trace_trace(p);
-	}
-	put_online_cpus();
-}
-
-static void clear_ftrace_pid(struct pid *pid)
-{
-	struct task_struct *p;
-
-	do_each_pid_task(pid, PIDTYPE_PID, p) {
-		clear_tsk_trace_trace(p);
-	} while_each_pid_task(pid, PIDTYPE_PID, p);
-	put_pid(pid);
-}
-
-static void set_ftrace_pid(struct pid *pid)
-{
-	struct task_struct *p;
-
-	do_each_pid_task(pid, PIDTYPE_PID, p) {
-		set_tsk_trace_trace(p);
-	} while_each_pid_task(pid, PIDTYPE_PID, p);
-}
-
-static void clear_ftrace_pid_task(struct pid **pid)
-{
-	if (*pid == ftrace_swapper_pid)
-		clear_ftrace_swapper();
-	else
-		clear_ftrace_pid(*pid);
-
-	*pid = NULL;
-}
-
-static void set_ftrace_pid_task(struct pid *pid)
-{
-	if (pid == ftrace_swapper_pid)
-		set_ftrace_swapper();
-	else
-		set_ftrace_pid(pid);
-}
-
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	struct pid *pid;
 	char buf[64];
 	long val;
 	int ret;
@@ -1780,37 +1480,18 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	mutex_lock(&ftrace_start_lock);
-	if (val < 0) {
+	if (ret < 0) {
 		/* disable pid tracing */
-		if (!ftrace_pid_trace)
+		if (ftrace_pid_trace < 0)
 			goto out;
-
-		clear_ftrace_pid_task(&ftrace_pid_trace);
+		ftrace_pid_trace = -1;
 
 	} else {
-		/* swapper task is special */
-		if (!val) {
-			pid = ftrace_swapper_pid;
-			if (pid == ftrace_pid_trace)
-				goto out;
-		} else {
-			pid = find_get_pid(val);
-			if (pid == ftrace_pid_trace) {
-				put_pid(pid);
-				goto out;
-			}
-		}
-
-		if (ftrace_pid_trace)
-			clear_ftrace_pid_task(&ftrace_pid_trace);
-
-		if (!pid)
+		if (ftrace_pid_trace == val)
 			goto out;
 
-		ftrace_pid_trace = pid;
-
-		set_ftrace_pid_task(ftrace_pid_trace);
+		ftrace_pid_trace = val;
 	}
 
 	/* update the function call */
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index 1bd9574404e5..1ca74c0cee6a 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -1209,12 +1209,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -1223,9 +1217,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 		pc = preempt_count();
 		__trace_graph_entry(tr, data, trace, flags, pc);
 	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 
@@ -1249,8 +1240,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, data, trace, flags, pc);
 	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
@@ -3346,7 +3335,7 @@ static int mark_printk(const char *fmt, ...)
 	int ret;
 	va_list args;
 	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
+	ret = trace_vprintk(0, -1, fmt, args);
 	va_end(args);
 	return ret;
 }
@@ -3575,9 +3564,16 @@ static __init int tracer_init_debugfs(void)
 	return 0;
 }
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	static DEFINE_SPINLOCK(trace_buf_lock);
+	/*
+	 * Raw Spinlock because a normal spinlock would be traced here
+	 * and append an irrelevant couple spin_lock_irqsave/
+	 * spin_unlock_irqrestore traced by ftrace around this
+	 * TRACE_PRINTK trace.
+	 */
+	static raw_spinlock_t trace_buf_lock =
+				(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
@@ -3598,7 +3594,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	spin_lock_irqsave(&trace_buf_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3612,13 +3609,15 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type			= TRACE_PRINT;
 	entry->ip			= ip;
+	entry->depth			= depth;
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
 out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, flags);
+	__raw_spin_unlock(&trace_buf_lock);
+	local_irq_restore(flags);
 
 out:
 	preempt_enable_notrace();
@@ -3636,7 +3635,13 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
 		return 0;
 
 	va_start(ap, fmt);
-	ret = trace_vprintk(ip, fmt, ap);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ret = trace_vprintk(ip, current->curr_ret_stack, fmt, ap);
+#else
+	ret = trace_vprintk(ip, -1, fmt, ap);
+#endif
+
 	va_end(ap);
 	return ret;
 }
diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h
index b4b7b735184d..fce98898205a 100644
--- a/trunk/kernel/trace/trace.h
+++ b/trunk/kernel/trace/trace.h
@@ -117,6 +117,7 @@ struct userstack_entry {
 
 struct print_entry {
 	struct trace_entry	ent;
 	unsigned long		ip;
+	int			depth;
 	char			buf[];
 };
 
@@ -498,62 +499,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
 				 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
-extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+extern int
+trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
 extern unsigned long trace_flags;
 
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern enum print_line_t print_graph_function(struct trace_iterator *iter);
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-/* TODO: make this variable */
-#define FTRACE_GRAPH_MAX_FUNCS		32
-extern int ftrace_graph_count;
-extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
-
-static inline int ftrace_graph_addr(unsigned long addr)
-{
-	int i;
-
-	if (!ftrace_graph_count || test_tsk_trace_graph(current))
-		return 1;
-
-	for (i = 0; i < ftrace_graph_count; i++) {
-		if (addr == ftrace_graph_funcs[i])
-			return 1;
-	}
-
-	return 0;
-}
 #else
-static inline int ftrace_trace_addr(unsigned long addr)
-{
-	return 1;
-}
-static inline int ftrace_graph_addr(unsigned long addr)
-{
-	return 1;
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-extern struct pid *ftrace_pid_trace;
-
-static inline int ftrace_trace_task(struct task_struct *task)
-{
-	if (ftrace_pid_trace)
-		return 1;
-
-	return test_tsk_trace_trace(task);
-}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit
diff --git a/trunk/kernel/trace/trace_functions_graph.c b/trunk/kernel/trace/trace_functions_graph.c
index c66578f2fdc2..32b7fb9a19df 100644
--- a/trunk/kernel/trace/trace_functions_graph.c
+++ b/trunk/kernel/trace/trace_functions_graph.c
@@ -173,7 +173,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
 	 */
 	ret = trace_seq_printf(s,
-		"\n ------------------------------------------\n |");
+		" ------------------------------------------\n");
 	if (!ret)
 		TRACE_TYPE_PARTIAL_LINE;
 
@@ -477,6 +477,71 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t
+print_graph_comment(struct print_entry *trace, struct trace_seq *s,
+		   struct trace_entry *ent, struct trace_iterator *iter)
+{
+	int i;
+	int ret;
+
+	/* Pid */
+	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Cpu */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+		ret = print_graph_cpu(s, iter->cpu);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* Proc */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+		ret = print_graph_proc(s, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+
+		ret = trace_seq_printf(s, " | ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* No overhead */
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		ret = trace_seq_printf(s, " ");
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	/* No time */
+	ret = trace_seq_printf(s, " | ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Indentation */
+	if (trace->depth > 0)
+		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+			ret = trace_seq_printf(s, " ");
+			if (!ret)
+				return TRACE_TYPE_PARTIAL_LINE;
+		}
+
+	/* The comment */
+	ret = trace_seq_printf(s, "/* %s", trace->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (ent->flags & TRACE_FLAG_CONT)
+		trace_seq_print_cont(s, iter);
+
+	ret = trace_seq_printf(s, " */\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
@@ -495,6 +560,11 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter->cpu);
 	}
+	case TRACE_PRINT: {
+		struct print_entry *field;
+		trace_assign_type(field, entry);
+		return print_graph_comment(field, s, entry, iter);
+	}
 	default:
 		return TRACE_TYPE_UNHANDLED;
 	}
diff --git a/trunk/kernel/trace/trace_mmiotrace.c b/trunk/kernel/trace/trace_mmiotrace.c
index 2a98a206acc2..2fb6da6523b3 100644
--- a/trunk/kernel/trace/trace_mmiotrace.c
+++ b/trunk/kernel/trace/trace_mmiotrace.c
@@ -366,5 +366,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 
 int mmio_trace_printk(const char *fmt, va_list args)
 {
-	return trace_vprintk(0, fmt, args);
+	return trace_vprintk(0, -1, fmt, args);
 }