diff --git a/[refs] b/[refs] index 1a385df2d3ff..3f1f9b48a3fd 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: b478b782e110fdb4135caa3062b6d687e989d994 +refs/heads/master: f58ba100678f421bdcb000a3c71793f432dfab93 diff --git a/trunk/Makefile b/trunk/Makefile index 46c04c546ee2..69b8091bfed1 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 29 -EXTRAVERSION = -rc8 +EXTRAVERSION = -rc7 NAME = Erotic Pickled Herring # *DOCUMENTATION* diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index db24c2278be0..bd2c6511c887 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -28,6 +28,13 @@ #endif +/* FIXME: I don't want to stay hardcoded */ +#ifdef CONFIG_X86_64 +# define FTRACE_SYSCALL_MAX 296 +#else +# define FTRACE_SYSCALL_MAX 333 +#endif + #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_ADDR ((long)(mcount)) #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c index a85da1764b1c..1d0d7f42efe3 100644 --- a/trunk/arch/x86/kernel/ftrace.c +++ b/trunk/arch/x86/kernel/ftrace.c @@ -453,3 +453,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; +extern unsigned long *sys_call_table; + +static struct syscall_metadata **syscalls_metadata; + +static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + if (start->name && !strcmp(start->name, str)) + return start; + } + return NULL; +} + +struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +void arch_init_ftrace_syscalls(void) +{ + int i; + struct syscall_metadata *meta; + unsigned long **psys_syscall_table = &sys_call_table; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto end; + + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * + FTRACE_SYSCALL_MAX, GFP_KERNEL); + if (!syscalls_metadata) { + WARN_ON(1); + return; + } + + for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { + meta = find_syscall_meta(psys_syscall_table[i]); + syscalls_metadata[i] = meta; + } + return; + + /* Paranoid: avoid overflow */ +end: + atomic_dec(&refs); +} +#endif diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 9d598bbf28a6..c146c1021a29 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -145,6 +145,7 @@ enum { }; struct dyn_ftrace { + struct list_head list; unsigned long ip; /* address of mcount call-site */ unsigned long flags; struct dyn_arch_ftrace arch; diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index 9b7e9d743476..472f11765f60 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -258,11 +258,6 @@ enum NR_SOFTIRQS }; -/* map softirq index to softirq name. update 'softirq_to_name' in - * kernel/softirq.c when adding a new softirq. 
- */ -extern char *softirq_to_name[NR_SOFTIRQS]; - /* softirq mask and active fields moved to irq_cpustat_t in * asm/hardirq.h to get better cache usage. KAO */ diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index 1daca3b062bb..7742798c9208 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -452,45 +452,31 @@ do { \ #define trace_printk(fmt, args...) \ do { \ - __trace_printk_check_format(fmt, ##args); \ - if (__builtin_constant_p(fmt)) { \ - static const char *trace_printk_fmt \ - __attribute__((section("__trace_printk_fmt"))) = \ - __builtin_constant_p(fmt) ? fmt : NULL; \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))); \ + \ + if (!trace_printk_fmt) \ + trace_printk_fmt = fmt; \ \ - __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ - } else \ - __trace_printk(_THIS_IP_, fmt, ##args); \ + __trace_printk_check_format(fmt, ##args); \ + __trace_printk(_THIS_IP_, trace_printk_fmt, ##args); \ } while (0) -extern int -__trace_bprintk(unsigned long ip, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); - extern int __trace_printk(unsigned long ip, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); -/* - * The double __builtin_constant_p is because gcc will give us an error - * if we try to allocate the static variable to fmt if it is not a - * constant. Even with the outer if statement. - */ #define ftrace_vprintk(fmt, vargs) \ do { \ - if (__builtin_constant_p(fmt)) { \ - static const char *trace_printk_fmt \ - __attribute__((section("__trace_printk_fmt"))) = \ - __builtin_constant_p(fmt) ? fmt : NULL; \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))); \ + \ + if (!trace_printk_fmt) \ + trace_printk_fmt = fmt; \ \ - __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ - } else \ - __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ + __ftrace_vprintk(_THIS_IP_, trace_printk_fmt, vargs); \ } while (0) -extern int -__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); - extern int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); diff --git a/trunk/include/trace/irq_event_types.h b/trunk/include/trace/irq_event_types.h index 85964ebd47ec..214bb928fe9e 100644 --- a/trunk/include/trace/irq_event_types.h +++ b/trunk/include/trace/irq_event_types.h @@ -40,16 +40,4 @@ TRACE_EVENT(irq_handler_exit, __entry->irq, __entry->ret ? 
"handled" : "unhandled") ); -TRACE_FORMAT(softirq_entry, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), - TP_ARGS(h, vec), - TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) - ); - -TRACE_FORMAT(softirq_exit, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), - TP_ARGS(h, vec), - TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) - ); - #undef TRACE_SYSTEM diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index 65ff3e3961b4..7571bcb71be4 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -24,7 +24,6 @@ #include #include #include -#include #include /* @@ -54,11 +53,6 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -char *softirq_to_name[NR_SOFTIRQS] = { - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", - "TASKLET", "SCHED", "HRTIMER", "RCU" -}; - /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency @@ -186,9 +180,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); */ #define MAX_SOFTIRQ_RESTART 10 -DEFINE_TRACE(softirq_entry); -DEFINE_TRACE(softirq_exit); - asmlinkage void __do_softirq(void) { struct softirq_action *h; @@ -215,14 +206,12 @@ asmlinkage void __do_softirq(void) if (pending & 1) { int prev_count = preempt_count(); - trace_softirq_entry(h, softirq_vec); h->action(h); - trace_softirq_exit(h, softirq_vec); + if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %td %s %p" + printk(KERN_ERR "huh, entered softirq %td %p" "with preempt_count %08x," " exited with %08x?\n", h - softirq_vec, - softirq_to_name[h - softirq_vec], h->action, prev_count, preempt_count()); preempt_count() = prev_count; } diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index 90d5729afeff..d33d306bdcf4 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -272,7 +272,7 @@ enum { static int ftrace_filtered; -static struct dyn_ftrace *ftrace_new_addrs; +static LIST_HEAD(ftrace_new_addrs); static DEFINE_MUTEX(ftrace_regex_lock); @@ -356,8 +356,7 @@ void ftrace_release(void *start, unsigned long size) mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { - if ((rec->ip >= s) && (rec->ip < e) && - !(rec->flags & FTRACE_FL_FREE)) + if ((rec->ip >= s) && (rec->ip < e)) ftrace_free_rec(rec); } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); @@ -409,8 +408,8 @@ ftrace_record_ip(unsigned long ip) return NULL; rec->ip = ip; - rec->flags = (unsigned long)ftrace_new_addrs; - ftrace_new_addrs = rec; + + list_add(&rec->list, &ftrace_new_addrs); return rec; } @@ -532,12 +531,11 @@ static void ftrace_replace_code(int enable) do_for_each_ftrace_rec(pg, rec) { /* - * Skip over free records, records that have - * failed and not converted. + * Skip over free records and records that have + * failed. 
*/ if (rec->flags & FTRACE_FL_FREE || - rec->flags & FTRACE_FL_FAILED || - rec->flags & FTRACE_FL_CONVERTED) + rec->flags & FTRACE_FL_FAILED) continue; /* ignore updates to this record's mcount site */ @@ -549,7 +547,7 @@ static void ftrace_replace_code(int enable) } failed = __ftrace_replace_code(rec, enable); - if (failed) { + if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { rec->flags |= FTRACE_FL_FAILED; if ((system_state == SYSTEM_BOOTING) || !core_kernel_text(rec->ip)) { @@ -716,21 +714,19 @@ unsigned long ftrace_update_tot_cnt; static int ftrace_update_code(struct module *mod) { - struct dyn_ftrace *p; + struct dyn_ftrace *p, *t; cycle_t start, stop; start = ftrace_now(raw_smp_processor_id()); ftrace_update_cnt = 0; - while (ftrace_new_addrs) { + list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { /* If something went wrong, bail without enabling anything */ if (unlikely(ftrace_disabled)) return -1; - p = ftrace_new_addrs; - ftrace_new_addrs = (struct dyn_ftrace *)p->flags; - p->flags = 0L; + list_del_init(&p->list); /* convert record (i.e, patch mcount-call with NOP) */ if (ftrace_code_disable(mod, p)) { @@ -1122,6 +1118,16 @@ ftrace_notrace_open(struct inode *inode, struct file *file) return ftrace_regex_open(inode, file, 0); } +static ssize_t +ftrace_regex_read(struct file *file, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + if (file->f_mode & FMODE_READ) + return seq_read(file, ubuf, cnt, ppos); + else + return -EPERM; +} + static loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin) { @@ -1874,7 +1880,7 @@ static const struct file_operations ftrace_failures_fops = { static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, - .read = seq_read, + .read = ftrace_regex_read, .write = ftrace_filter_write, .llseek = ftrace_regex_lseek, .release = ftrace_filter_release, @@ -1882,7 +1888,7 @@ static const struct file_operations ftrace_filter_fops = { static const struct file_operations ftrace_notrace_fops = { .open = ftrace_notrace_open, - .read = seq_read, + .read = ftrace_regex_read, .write = ftrace_notrace_write, .llseek = ftrace_regex_lseek, .release = ftrace_notrace_release, @@ -1984,6 +1990,16 @@ ftrace_graph_open(struct inode *inode, struct file *file) return ret; } +static ssize_t +ftrace_graph_read(struct file *file, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + if (file->f_mode & FMODE_READ) + return seq_read(file, ubuf, cnt, ppos); + else + return -EPERM; +} + static int ftrace_set_func(unsigned long *array, int *idx, char *buffer) { @@ -2114,7 +2130,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, static const struct file_operations ftrace_graph_fops = { .open = ftrace_graph_open, - .read = seq_read, + .read = ftrace_graph_read, .write = ftrace_graph_write, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c index 58128ad2fde0..178858492a89 100644 --- a/trunk/kernel/trace/ring_buffer.c +++ b/trunk/kernel/trace/ring_buffer.c @@ -16,79 +16,10 @@ #include #include #include -#include #include #include "trace.h" -/* - * The ring buffer is made up of a list of pages. A separate list of pages is - * allocated for each CPU. A writer may only write to a buffer that is - * associated with the CPU it is currently executing on. A reader may read - * from any per cpu buffer. - * - * The reader is special. For each per cpu buffer, the reader has its own - * reader page. 
When a reader has read the entire reader page, this reader - * page is swapped with another page in the ring buffer. - * - * Now, as long as the writer is off the reader page, the reader can do what - * ever it wants with that page. The writer will never write to that page - * again (as long as it is out of the ring buffer). - * - * Here's some silly ASCII art. - * - * +------+ - * |reader| RING BUFFER - * |page | - * +------+ +---+ +---+ +---+ - * | |-->| |-->| | - * +---+ +---+ +---+ - * ^ | - * | | - * +---------------+ - * - * - * +------+ - * |reader| RING BUFFER - * |page |------------------v - * +------+ +---+ +---+ +---+ - * | |-->| |-->| | - * +---+ +---+ +---+ - * ^ | - * | | - * +---------------+ - * - * - * +------+ - * |reader| RING BUFFER - * |page |------------------v - * +------+ +---+ +---+ +---+ - * ^ | |-->| |-->| | - * | +---+ +---+ +---+ - * | | - * | | - * +------------------------------+ - * - * - * +------+ - * |buffer| RING BUFFER - * |page |------------------v - * +------+ +---+ +---+ +---+ - * ^ | | | |-->| | - * | New +---+ +---+ +---+ - * | Reader------^ | - * | page | - * +------------------------------+ - * - * - * After we make this swap, the reader can hand this page off to the splice - * code and be done with it. It can even allocate a new page if it needs to - * and swap that into the ring buffer. - * - * We will be using cmpxchg soon to make all this lockless. - * - */ - /* * A fast way to enable or disable all ring buffers is to * call tracing_on or tracing_off. Turning off the ring buffers @@ -370,10 +301,6 @@ struct ring_buffer { struct mutex mutex; struct ring_buffer_per_cpu **buffers; - -#ifdef CONFIG_HOTPLUG_CPU - struct notifier_block cpu_notify; -#endif }; struct ring_buffer_iter { @@ -532,11 +459,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) */ extern int ring_buffer_page_too_big(void); -#ifdef CONFIG_HOTPLUG_CPU -static int __cpuinit rb_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu); -#endif - /** * ring_buffer_alloc - allocate a new ring_buffer * @size: the size in bytes per cpu that is needed. 
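The large comment deleted from ring_buffer.c above described the reader-page scheme: the reader owns one spare page outside the ring and swaps it with a ring page, after which the writer never touches the detached page again. A minimal userspace sketch of that swap idea follows; demo_page, demo_ring and reader_swap are simplified illustrative names, and the singly linked, non-circular page list is an assumption for the demo, not the kernel's actual ring buffer layout.

/*
 * Hedged sketch of the reader-page swap documented in the comment
 * block removed above. Not the kernel implementation: the real ring
 * is circular and lockless concerns are ignored here.
 */
#include <stdio.h>

struct demo_page {
	struct demo_page *next;
	int data;			/* stand-in for buffered events */
};

struct demo_ring {
	struct demo_page *head;		/* oldest page, next to be read */
	struct demo_page *tail;		/* newest page, writer appends here */
};

/*
 * Swap the reader's private spare page for the ring's head page.
 * The full head page is detached and returned to the reader; the
 * empty spare page takes its place in the ring.
 */
static struct demo_page *reader_swap(struct demo_ring *ring,
				     struct demo_page *spare)
{
	struct demo_page *head = ring->head;

	spare->next = head->next;
	ring->head = spare;
	if (ring->tail == head)
		ring->tail = spare;
	head->next = NULL;		/* now private to the reader */
	return head;
}

int main(void)
{
	struct demo_page pages[3] = {
		{ &pages[1], 1 }, { &pages[2], 2 }, { NULL, 3 }
	};
	struct demo_ring ring = { &pages[0], &pages[2] };
	struct demo_page spare = { NULL, 0 };
	struct demo_page *got = reader_swap(&ring, &spare);

	printf("reader got page with data %d; new head holds %d\n",
	       got->data, ring.head->data);
	return 0;
}

Once the swap is done the reader can consume, splice, or free the detached page at leisure, since the writer only ever appends at the tail: that isolation is exactly the property the deleted comment was documenting.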
@@ -574,8 +496,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) if (buffer->pages == 1) buffer->pages++; - get_online_cpus(); - cpumask_copy(buffer->cpumask, cpu_online_mask); + cpumask_copy(buffer->cpumask, cpu_possible_mask); buffer->cpus = nr_cpu_ids; bsize = sizeof(void *) * nr_cpu_ids; @@ -591,13 +512,6 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) goto fail_free_buffers; } -#ifdef CONFIG_HOTPLUG_CPU - buffer->cpu_notify.notifier_call = rb_cpu_notify; - buffer->cpu_notify.priority = 0; - register_cpu_notifier(&buffer->cpu_notify); -#endif - - put_online_cpus(); mutex_init(&buffer->mutex); return buffer; @@ -611,7 +525,6 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) fail_free_cpumask: free_cpumask_var(buffer->cpumask); - put_online_cpus(); fail_free_buffer: kfree(buffer); @@ -628,17 +541,9 @@ ring_buffer_free(struct ring_buffer *buffer) { int cpu; - get_online_cpus(); - -#ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&buffer->cpu_notify); -#endif - for_each_buffer_cpu(buffer, cpu) rb_free_cpu_buffer(buffer->buffers[cpu]); - put_online_cpus(); - free_cpumask_var(buffer->cpumask); kfree(buffer); @@ -744,15 +649,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) return size; mutex_lock(&buffer->mutex); - get_online_cpus(); nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); if (size < buffer_size) { /* easy case, just free pages */ - if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) - goto out_fail; + if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { + mutex_unlock(&buffer->mutex); + return -1; + } rm_pages = buffer->pages - nr_pages; @@ -771,8 +677,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) * add these pages to the cpu_buffers. Otherwise we just free * them all and return -ENOMEM; */ - if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) - goto out_fail; + if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { + mutex_unlock(&buffer->mutex); + return -1; + } new_pages = nr_pages - buffer->pages; @@ -797,12 +705,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) rb_insert_pages(cpu_buffer, &pages, new_pages); } - if (RB_WARN_ON(buffer, !list_empty(&pages))) - goto out_fail; + if (RB_WARN_ON(buffer, !list_empty(&pages))) { + mutex_unlock(&buffer->mutex); + return -1; + } out: buffer->pages = nr_pages; - put_online_cpus(); mutex_unlock(&buffer->mutex); return size; @@ -812,18 +721,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) list_del_init(&bpage->list); free_buffer_page(bpage); } - put_online_cpus(); mutex_unlock(&buffer->mutex); return -ENOMEM; - - /* - * Something went totally wrong, and we are too paranoid - * to even clean up the mess. 
- */ - out_fail: - put_online_cpus(); - mutex_unlock(&buffer->mutex); - return -1; } EXPORT_SYMBOL_GPL(ring_buffer_resize); @@ -1665,15 +1564,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; - unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; - ret = cpu_buffer->entries; - - return ret; + return cpu_buffer->entries; } EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); @@ -1685,15 +1581,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; - unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; - ret = cpu_buffer->overrun; - - return ret; + return cpu_buffer->overrun; } EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); @@ -1770,14 +1663,9 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) */ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) { - struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; unsigned long flags; - if (!iter) - return; - - cpu_buffer = iter->cpu_buffer; - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); rb_iter_reset(iter); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); @@ -2012,6 +1900,9 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) struct buffer_page *reader; int nr_loops = 0; + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return NULL; + cpu_buffer = buffer->buffers[cpu]; again: @@ -2140,9 +2031,6 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) struct ring_buffer_event *event; unsigned long flags; - if (!cpumask_test_cpu(cpu, buffer->cpumask)) - return NULL; - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); event = rb_buffer_peek(buffer, cpu, ts); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); @@ -2183,30 +2071,23 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) struct ring_buffer_event * ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) { - struct ring_buffer_per_cpu *cpu_buffer; - struct ring_buffer_event *event = NULL; + struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_event *event; unsigned long flags; - /* might be called in atomic */ - preempt_disable(); - if (!cpumask_test_cpu(cpu, buffer->cpumask)) - goto out; + return NULL; - cpu_buffer = buffer->buffers[cpu]; spin_lock_irqsave(&cpu_buffer->reader_lock, flags); event = rb_buffer_peek(buffer, cpu, ts); if (!event) - goto out_unlock; + goto out; rb_advance_reader(cpu_buffer); - out_unlock: - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - out: - preempt_enable(); + spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return event; } @@ -2387,7 +2268,6 @@ int ring_buffer_empty(struct ring_buffer *buffer) if (!rb_per_cpu_empty(cpu_buffer)) return 0; } - return 1; } EXPORT_SYMBOL_GPL(ring_buffer_empty); @@ -2400,16 +2280,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty); int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; - int ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 1; cpu_buffer = buffer->buffers[cpu]; - ret = rb_per_cpu_empty(cpu_buffer); - - - return ret; + return rb_per_cpu_empty(cpu_buffer); } EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); @@ -2428,35 +2304,32 @@ int ring_buffer_swap_cpu(struct ring_buffer 
*buffer_a, { struct ring_buffer_per_cpu *cpu_buffer_a; struct ring_buffer_per_cpu *cpu_buffer_b; - int ret = -EINVAL; if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || !cpumask_test_cpu(cpu, buffer_b->cpumask)) - goto out; + return -EINVAL; /* At least make sure the two buffers are somewhat the same */ if (buffer_a->pages != buffer_b->pages) - goto out; - - ret = -EAGAIN; + return -EINVAL; if (ring_buffer_flags != RB_BUFFERS_ON) - goto out; + return -EAGAIN; if (atomic_read(&buffer_a->record_disabled)) - goto out; + return -EAGAIN; if (atomic_read(&buffer_b->record_disabled)) - goto out; + return -EAGAIN; cpu_buffer_a = buffer_a->buffers[cpu]; cpu_buffer_b = buffer_b->buffers[cpu]; if (atomic_read(&cpu_buffer_a->record_disabled)) - goto out; + return -EAGAIN; if (atomic_read(&cpu_buffer_b->record_disabled)) - goto out; + return -EAGAIN; /* * We can't do a synchronize_sched here because this @@ -2476,9 +2349,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, atomic_dec(&cpu_buffer_a->record_disabled); atomic_dec(&cpu_buffer_b->record_disabled); - ret = 0; -out: - return ret; + return 0; } EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); @@ -2593,30 +2464,27 @@ int ring_buffer_read_page(struct ring_buffer *buffer, u64 save_timestamp; int ret = -1; - if (!cpumask_test_cpu(cpu, buffer->cpumask)) - goto out; - /* * If len is not big enough to hold the page header, then * we can not copy anything. */ if (len <= BUF_PAGE_HDR_SIZE) - goto out; + return -1; len -= BUF_PAGE_HDR_SIZE; if (!data_page) - goto out; + return -1; bpage = *data_page; if (!bpage) - goto out; + return -1; spin_lock_irqsave(&cpu_buffer->reader_lock, flags); reader = rb_get_reader_page(cpu_buffer); if (!reader) - goto out_unlock; + goto out; event = rb_reader_event(cpu_buffer); @@ -2638,7 +2506,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, unsigned int size; if (full) - goto out_unlock; + goto out; if (len > (commit - read)) len = (commit - read); @@ -2646,7 +2514,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, size = rb_event_length(event); if (len < size) - goto out_unlock; + goto out; /* save the current timestamp, since the user will need it */ save_timestamp = cpu_buffer->read_stamp; @@ -2685,10 +2553,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, } ret = read; - out_unlock: + out: spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - out: return ret; } @@ -2762,42 +2629,3 @@ static __init int rb_init_debugfs(void) } fs_initcall(rb_init_debugfs); - -#ifdef CONFIG_HOTPLUG_CPU -static int __cpuinit rb_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - struct ring_buffer *buffer = - container_of(self, struct ring_buffer, cpu_notify); - long cpu = (long)hcpu; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - if (cpu_isset(cpu, *buffer->cpumask)) - return NOTIFY_OK; - - buffer->buffers[cpu] = - rb_allocate_cpu_buffer(buffer, cpu); - if (!buffer->buffers[cpu]) { - WARN(1, "failed to allocate ring buffer on CPU %ld\n", - cpu); - return NOTIFY_OK; - } - smp_wmb(); - cpu_set(cpu, *buffer->cpumask); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - /* - * Do nothing. - * If we were to free the buffer, then the user would - * lose any trace that was in the buffer. 
- */ - break; - default: - break; - } - return NOTIFY_OK; -} -#endif diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index efe3202c0209..5c9c6d907054 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -44,12 +44,6 @@ unsigned long __read_mostly tracing_max_latency; unsigned long __read_mostly tracing_thresh; -/* - * On boot up, the ring buffer is set to the minimum size, so that - * we do not waste memory on systems that are not using tracing. - */ -static int ring_buffer_expanded; - /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the @@ -134,8 +128,6 @@ static int __init set_ftrace(char *str) { strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; - /* We are using ftrace early, expand it */ - ring_buffer_expanded = 1; return 1; } __setup("ftrace=", set_ftrace); @@ -1179,10 +1171,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace) /** - * trace_vbprintk - write binary msg to tracing buffer + * trace_vprintk - write binary msg to tracing buffer * */ -int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args) +int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) { static raw_spinlock_t trace_buf_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; @@ -1191,7 +1183,7 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args) struct ring_buffer_event *event; struct trace_array *tr = &global_trace; struct trace_array_cpu *data; - struct bprint_entry *entry; + struct print_entry *entry; unsigned long flags; int resched; int cpu, len = 0, size, pc; @@ -1219,7 +1211,7 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args) goto out_unlock; size = sizeof(*entry) + sizeof(u32) * len; - event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc); + event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc); if (!event) goto out_unlock; entry = ring_buffer_event_data(event); @@ -1240,60 +1232,6 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args) return len; } -EXPORT_SYMBOL_GPL(trace_vbprintk); - -int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) -{ - static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; - static char trace_buf[TRACE_BUF_SIZE]; - - struct ring_buffer_event *event; - struct trace_array *tr = &global_trace; - struct trace_array_cpu *data; - int cpu, len = 0, size, pc; - struct print_entry *entry; - unsigned long irq_flags; - - if (tracing_disabled || tracing_selftest_running) - return 0; - - pc = preempt_count(); - preempt_disable_notrace(); - cpu = raw_smp_processor_id(); - data = tr->data[cpu]; - - if (unlikely(atomic_read(&data->disabled))) - goto out; - - pause_graph_tracing(); - raw_local_irq_save(irq_flags); - __raw_spin_lock(&trace_buf_lock); - len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); - - len = min(len, TRACE_BUF_SIZE-1); - trace_buf[len] = 0; - - size = sizeof(*entry) + len + 1; - event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); - if (!event) - goto out_unlock; - entry = ring_buffer_event_data(event); - entry->ip = ip; - entry->depth = depth; - - memcpy(&entry->buf, trace_buf, len); - entry->buf[len] = 0; - ring_buffer_unlock_commit(tr->buffer, event); - - out_unlock: - __raw_spin_unlock(&trace_buf_lock); - raw_local_irq_restore(irq_flags); - unpause_graph_tracing(); - out: - preempt_enable_notrace(); - - 
return len; -} EXPORT_SYMBOL_GPL(trace_vprintk); enum trace_file_type { @@ -1682,22 +1620,6 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) return TRACE_TYPE_HANDLED; } -static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter) -{ - struct trace_seq *s = &iter->seq; - struct trace_entry *entry = iter->ent; - struct bprint_entry *field; - int ret; - - trace_assign_type(field, entry); - - ret = trace_seq_bprintf(s, field->fmt, field->buf); - if (!ret) - return TRACE_TYPE_PARTIAL_LINE; - - return TRACE_TYPE_HANDLED; -} - static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; @@ -1707,7 +1629,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) trace_assign_type(field, entry); - ret = trace_seq_printf(s, "%s", field->buf); + ret = trace_seq_bprintf(s, field->fmt, field->buf); if (!ret) return TRACE_TYPE_PARTIAL_LINE; @@ -1736,19 +1658,6 @@ static int trace_empty(struct trace_iterator *iter) { int cpu; - /* If we are looking at one CPU buffer, only check that one */ - if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { - cpu = iter->cpu_file; - if (iter->buffer_iter[cpu]) { - if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) - return 0; - } else { - if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) - return 0; - } - return 1; - } - for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) { if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) @@ -1772,11 +1681,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) return ret; } - if (iter->ent->type == TRACE_BPRINT && - trace_flags & TRACE_ITER_PRINTK && - trace_flags & TRACE_ITER_PRINTK_MSGONLY) - return print_bprintk_msg_only(iter); - if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) @@ -1880,11 +1784,17 @@ __tracing_open(struct inode *inode, struct file *file) iter->buffer_iter[cpu] = ring_buffer_read_start(iter->tr->buffer, cpu); + + if (!iter->buffer_iter[cpu]) + goto fail_buffer; } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_start(iter->tr->buffer, cpu); + + if (!iter->buffer_iter[cpu]) + goto fail; } /* TODO stop tracer */ @@ -2405,75 +2315,6 @@ int tracer_init(struct tracer *t, struct trace_array *tr) return t->init(tr); } -static int tracing_resize_ring_buffer(unsigned long size) -{ - int ret; - - /* - * If kernel or user changes the size of the ring buffer - * we use the size that was given, and we can forget about - * expanding it later. - */ - ring_buffer_expanded = 1; - - ret = ring_buffer_resize(global_trace.buffer, size); - if (ret < 0) - return ret; - - ret = ring_buffer_resize(max_tr.buffer, size); - if (ret < 0) { - int r; - - r = ring_buffer_resize(global_trace.buffer, - global_trace.entries); - if (r < 0) { - /* - * AARGH! We are left with different - * size max buffer!!!! - * The max buffer is our "snapshot" buffer. - * When a tracer needs a snapshot (one of the - * latency tracers), it swaps the max buffer - * with the saved snap shot. We succeeded to - * update the size of the main buffer, but failed to - * update the size of the max buffer. But when we tried - * to reset the main buffer to the original size, we - * failed there too. This is very unlikely to - * happen, but if it does, warn and kill all - * tracing. 
- */ - WARN_ON(1); - tracing_disabled = 1; - } - return ret; - } - - global_trace.entries = size; - - return ret; -} - -/** - * tracing_update_buffers - used by tracing facility to expand ring buffers - * - * To save on memory when the tracing is never used on a system with it - * configured in. The ring buffers are set to a minimum size. But once - * a user starts to use the tracing facility, then they need to grow - * to their default size. - * - * This function is to be called when a tracer is about to be used. - */ -int tracing_update_buffers(void) -{ - int ret = 0; - - mutex_lock(&trace_types_lock); - if (!ring_buffer_expanded) - ret = tracing_resize_ring_buffer(trace_buf_size); - mutex_unlock(&trace_types_lock); - - return ret; -} - struct trace_option_dentry; static struct trace_option_dentry * @@ -2490,14 +2331,6 @@ static int tracing_set_tracer(const char *buf) int ret = 0; mutex_lock(&trace_types_lock); - - if (!ring_buffer_expanded) { - ret = tracing_resize_ring_buffer(trace_buf_size); - if (ret < 0) - return ret; - ret = 0; - } - for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; @@ -3023,18 +2856,10 @@ tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; - char buf[96]; + char buf[64]; int r; - mutex_lock(&trace_types_lock); - if (!ring_buffer_expanded) - r = sprintf(buf, "%lu (expanded: %lu)\n", - tr->entries >> 10, - trace_buf_size >> 10); - else - r = sprintf(buf, "%lu\n", tr->entries >> 10); - mutex_unlock(&trace_types_lock); - + r = sprintf(buf, "%lu\n", tr->entries >> 10); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } @@ -3078,11 +2903,28 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, val <<= 10; if (val != global_trace.entries) { - ret = tracing_resize_ring_buffer(val); + ret = ring_buffer_resize(global_trace.buffer, val); if (ret < 0) { cnt = ret; goto out; } + + ret = ring_buffer_resize(max_tr.buffer, val); + if (ret < 0) { + int r; + cnt = ret; + r = ring_buffer_resize(global_trace.buffer, + global_trace.entries); + if (r < 0) { + /* AARGH! We are left with different + * size max buffer!!!! 
*/ + WARN_ON(1); + tracing_disabled = 1; + } + goto out; + } + + global_trace.entries = val; } filp->f_pos += cnt; @@ -3543,11 +3385,6 @@ static void tracing_init_debugfs_percpu(long cpu) (void *) cpu, &tracing_fops); if (!entry) pr_warning("Could not create debugfs 'trace' entry\n"); - - entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu, - (void *) cpu, &tracing_buffers_fops); - if (!entry) - pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n"); } #ifdef CONFIG_FTRACE_SELFTEST @@ -3831,6 +3668,7 @@ static __init void create_trace_options_dir(void) static __init int tracer_init_debugfs(void) { struct dentry *d_tracer; + struct dentry *buffers; struct dentry *entry; int cpu; @@ -3903,6 +3741,26 @@ static __init int tracer_init_debugfs(void) pr_warning("Could not create debugfs " "'trace_marker' entry\n"); + buffers = debugfs_create_dir("binary_buffers", d_tracer); + + if (!buffers) + pr_warning("Could not create buffers directory\n"); + else { + int cpu; + char buf[64]; + + for_each_tracing_cpu(cpu) { + sprintf(buf, "%d", cpu); + + entry = debugfs_create_file(buf, 0444, buffers, + (void *)(long)cpu, + &tracing_buffers_fops); + if (!entry) + pr_warning("Could not create debugfs buffers " + "'%s' entry\n", buf); + } + } + #ifdef CONFIG_DYNAMIC_FTRACE entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, @@ -4058,7 +3916,6 @@ void ftrace_dump(void) __init static int tracer_alloc_buffers(void) { struct trace_array_cpu *data; - int ring_buf_size; int i; int ret = -ENOMEM; @@ -4071,18 +3928,12 @@ __init static int tracer_alloc_buffers(void) if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) goto out_free_tracing_cpumask; - /* To save memory, keep the ring buffer size to its minimum */ - if (ring_buffer_expanded) - ring_buf_size = trace_buf_size; - else - ring_buf_size = 1; - cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(tracing_cpumask, cpu_all_mask); cpumask_clear(tracing_reader_cpumask); /* TODO: make the number of buffers hot pluggable with CPUS */ - global_trace.buffer = ring_buffer_alloc(ring_buf_size, + global_trace.buffer = ring_buffer_alloc(trace_buf_size, TRACE_BUFFER_FLAGS); if (!global_trace.buffer) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); @@ -4093,7 +3944,7 @@ __init static int tracer_alloc_buffers(void) #ifdef CONFIG_TRACER_MAX_TRACE - max_tr.buffer = ring_buffer_alloc(ring_buf_size, + max_tr.buffer = ring_buffer_alloc(trace_buf_size, TRACE_BUFFER_FLAGS); if (!max_tr.buffer) { printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 56ce34d90b03..3d49daae47dc 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -20,7 +20,6 @@ enum trace_type { TRACE_WAKE, TRACE_STACK, TRACE_PRINT, - TRACE_BPRINT, TRACE_SPECIAL, TRACE_MMIO_RW, TRACE_MMIO_MAP, @@ -120,7 +119,7 @@ struct userstack_entry { /* * trace_printk entry: */ -struct bprint_entry { +struct print_entry { struct trace_entry ent; unsigned long ip; int depth; @@ -128,13 +127,6 @@ struct bprint_entry { u32 buf[]; }; -struct print_entry { - struct trace_entry ent; - unsigned long ip; - int depth; - char buf[]; -}; - #define TRACE_OLD_SIZE 88 struct trace_field_cont { @@ -296,7 +288,6 @@ extern void __ftrace_bad_type(void); IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ - IF_ASSIGN(var, ent, 
struct bprint_entry, TRACE_BPRINT); \ IF_ASSIGN(var, ent, struct special_entry, 0); \ IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ TRACE_MMIO_RW); \ @@ -581,8 +572,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace, extern void *head_page(struct trace_array_cpu *data); extern long ns2usecs(cycle_t nsec); extern int -trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args); -extern int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); extern unsigned long trace_flags; @@ -750,9 +739,6 @@ static inline void trace_branch_disable(void) } #endif /* CONFIG_BRANCH_TRACER */ -/* set ring buffers to default size if not already done so */ -int tracing_update_buffers(void); - /* trace event type bit fields, not numeric */ enum { TRACE_EVENT_TYPE_PRINTF = 1, @@ -775,26 +761,4 @@ void event_trace_printk(unsigned long ip, const char *fmt, ...); extern struct ftrace_event_call __start_ftrace_events[]; extern struct ftrace_event_call __stop_ftrace_events[]; -extern const char *__start___trace_bprintk_fmt[]; -extern const char *__stop___trace_bprintk_fmt[]; - -/* - * The double __builtin_constant_p is because gcc will give us an error - * if we try to allocate the static variable to fmt if it is not a - * constant. Even with the outer if statement optimizing out. - */ -#define event_trace_printk(ip, fmt, args...) \ -do { \ - __trace_printk_check_format(fmt, ##args); \ - tracing_record_cmdline(current); \ - if (__builtin_constant_p(fmt)) { \ - static const char *trace_printk_fmt \ - __attribute__((section("__trace_printk_fmt"))) = \ - __builtin_constant_p(fmt) ? fmt : NULL; \ - \ - __trace_bprintk(ip, trace_printk_fmt, ##args); \ - } else \ - __trace_printk(ip, fmt, ##args); \ -} while (0) - #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/trunk/kernel/trace/trace_event_types.h b/trunk/kernel/trace/trace_event_types.h index 019915063fe6..5cca4c978bde 100644 --- a/trunk/kernel/trace/trace_event_types.h +++ b/trunk/kernel/trace/trace_event_types.h @@ -102,20 +102,11 @@ TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore, "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n") ); -TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore, - TRACE_STRUCT( - TRACE_FIELD(unsigned long, ip, ip) - TRACE_FIELD(unsigned int, depth, depth) - TRACE_FIELD(char *, fmt, fmt) - TRACE_FIELD_ZERO_CHAR(buf) - ), - TP_RAW_FMT("%08lx (%d) fmt:%p %s") -); - TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore, TRACE_STRUCT( TRACE_FIELD(unsigned long, ip, ip) TRACE_FIELD(unsigned int, depth, depth) + TRACE_FIELD(char *, fmt, fmt) TRACE_FIELD_ZERO_CHAR(buf) ), TP_RAW_FMT("%08lx (%d) fmt:%p %s") diff --git a/trunk/kernel/trace/trace_events.c b/trunk/kernel/trace/trace_events.c index 238ea95a4115..769dfd00fc85 100644 --- a/trunk/kernel/trace/trace_events.c +++ b/trunk/kernel/trace/trace_events.c @@ -24,6 +24,16 @@ static DEFINE_MUTEX(event_mutex); (unsigned long)event < (unsigned long)__stop_ftrace_events; \ event++) +void event_trace_printk(unsigned long ip, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + tracing_record_cmdline(current); + trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); + va_end(ap); +} + static void ftrace_clear_events(void) { struct ftrace_event_call *call = (void *)__start_ftrace_events; @@ -131,10 +141,6 @@ ftrace_event_write(struct file *file, const char __user *ubuf, if (!cnt || cnt < 0) return 0; - ret = tracing_update_buffers(); - if (ret < 0) - return ret; - ret = get_user(ch, ubuf++); if (ret) return ret; @@ -325,10 +331,6 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, if (ret < 0) return ret; - ret = tracing_update_buffers(); - if (ret < 0) - return ret; - switch (val) { case 0: case 1: diff --git a/trunk/kernel/trace/trace_events_stage_2.h b/trunk/kernel/trace/trace_events_stage_2.h index 5117c43f5c67..ca347afd6aa0 100644 --- a/trunk/kernel/trace/trace_events_stage_2.h +++ b/trunk/kernel/trace/trace_events_stage_2.h @@ -57,7 +57,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ \ field = (typeof(field))entry; \ \ - ret = trace_seq_printf(s, #call ": " print); \ + ret = trace_seq_printf(s, print); \ if (!ret) \ return TRACE_TYPE_PARTIAL_LINE; \ \ diff --git a/trunk/kernel/trace/trace_functions_graph.c b/trunk/kernel/trace/trace_functions_graph.c index 4c388607ed67..8566c14b3e9a 100644 --- a/trunk/kernel/trace/trace_functions_graph.c +++ b/trunk/kernel/trace/trace_functions_graph.c @@ -684,7 +684,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, } static enum print_line_t -print_graph_comment(struct bprint_entry *trace, struct trace_seq *s, +print_graph_comment(struct print_entry *trace, struct trace_seq *s, struct trace_entry *ent, struct trace_iterator *iter) { int i; @@ -781,8 +781,8 @@ print_graph_function(struct trace_iterator *iter) trace_assign_type(field, entry); return print_graph_return(&field->ret, s, entry, iter); } - case TRACE_BPRINT: { - struct bprint_entry *field; + case TRACE_PRINT: { + struct print_entry *field; trace_assign_type(field, entry); return print_graph_comment(field, s, entry, iter); } diff --git a/trunk/kernel/trace/trace_mmiotrace.c b/trunk/kernel/trace/trace_mmiotrace.c index f095916e477f..23e346a734ca 100644 --- a/trunk/kernel/trace/trace_mmiotrace.c +++ b/trunk/kernel/trace/trace_mmiotrace.c @@ -254,7 +254,6 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct print_entry *print = (struct print_entry *)entry; - const char *msg = print->buf; struct trace_seq *s = &iter->seq; unsigned long long t = ns2usecs(iter->ts); unsigned long usec_rem = do_div(t, USEC_PER_SEC); @@ -262,7 +261,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) int ret; /* The trailing newline must be in the message. 
*/ - ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); + ret = trace_seq_printf(s, "MARK %u.%06lu ", secs, usec_rem); + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + ret = trace_seq_bprintf(s, print->fmt, print->buf); if (!ret) return TRACE_TYPE_PARTIAL_LINE; diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c index ea9d3b410c7a..491832af9ba1 100644 --- a/trunk/kernel/trace/trace_output.c +++ b/trunk/kernel/trace/trace_output.c @@ -832,13 +832,13 @@ static struct trace_event trace_user_stack_event = { .binary = trace_special_bin, }; -/* TRACE_BPRINT */ +/* TRACE_PRINT */ static enum print_line_t -trace_bprint_print(struct trace_iterator *iter, int flags) +trace_print_print(struct trace_iterator *iter, int flags) { struct trace_entry *entry = iter->ent; struct trace_seq *s = &iter->seq; - struct bprint_entry *field; + struct print_entry *field; trace_assign_type(field, entry); @@ -858,10 +858,9 @@ trace_bprint_print(struct trace_iterator *iter, int flags) } -static enum print_line_t -trace_bprint_raw(struct trace_iterator *iter, int flags) +static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) { - struct bprint_entry *field; + struct print_entry *field; struct trace_seq *s = &iter->seq; trace_assign_type(field, iter->ent); @@ -879,55 +878,12 @@ trace_bprint_raw(struct trace_iterator *iter, int flags) } -static struct trace_event trace_bprint_event = { - .type = TRACE_BPRINT, - .trace = trace_bprint_print, - .raw = trace_bprint_raw, -}; - -/* TRACE_PRINT */ -static enum print_line_t trace_print_print(struct trace_iterator *iter, - int flags) -{ - struct print_entry *field; - struct trace_seq *s = &iter->seq; - - trace_assign_type(field, iter->ent); - - if (!seq_print_ip_sym(s, field->ip, flags)) - goto partial; - - if (!trace_seq_printf(s, ": %s", field->buf)) - goto partial; - - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; -} - -static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) -{ - struct print_entry *field; - - trace_assign_type(field, iter->ent); - - if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) - goto partial; - - return TRACE_TYPE_HANDLED; - - partial: - return TRACE_TYPE_PARTIAL_LINE; -} - static struct trace_event trace_print_event = { - .type = TRACE_PRINT, + .type = TRACE_PRINT, .trace = trace_print_print, .raw = trace_print_raw, }; - static struct trace_event *events[] __initdata = { &trace_fn_event, &trace_ctx_event, @@ -935,7 +891,6 @@ static struct trace_event *events[] __initdata = { &trace_special_event, &trace_stack_event, &trace_user_stack_event, - &trace_bprint_event, &trace_print_event, NULL }; diff --git a/trunk/kernel/trace/trace_printk.c b/trunk/kernel/trace/trace_printk.c index 486785214e3e..a50aea22e929 100644 --- a/trunk/kernel/trace/trace_printk.c +++ b/trunk/kernel/trace/trace_printk.c @@ -4,19 +4,18 @@ * Copyright (C) 2008 Lai Jiangshan * */ -#include -#include -#include #include #include #include -#include -#include -#include #include #include +#include #include +#include +#include #include +#include +#include #include "trace.h" @@ -100,7 +99,7 @@ struct notifier_block module_trace_bprintk_format_nb = { .notifier_call = module_trace_bprintk_format_notify, }; -int __trace_bprintk(unsigned long ip, const char *fmt, ...) +int __trace_printk(unsigned long ip, const char *fmt, ...) { int ret; va_list ap; @@ -112,13 +111,13 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...) 
return 0; va_start(ap, fmt); - ret = trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap); + ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); va_end(ap); return ret; } -EXPORT_SYMBOL_GPL(__trace_bprintk); +EXPORT_SYMBOL_GPL(__trace_printk); -int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap) +int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) { if (unlikely(!fmt)) return 0; @@ -126,141 +125,10 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap) if (!(trace_flags & TRACE_ITER_PRINTK)) return 0; - return trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap); -} -EXPORT_SYMBOL_GPL(__ftrace_vbprintk); - -int __trace_printk(unsigned long ip, const char *fmt, ...) -{ - int ret; - va_list ap; - - if (!(trace_flags & TRACE_ITER_PRINTK)) - return 0; - - va_start(ap, fmt); - ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); - va_end(ap); - return ret; -} -EXPORT_SYMBOL_GPL(__trace_printk); - -int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) -{ - if (!(trace_flags & TRACE_ITER_PRINTK)) - return 0; - return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); } EXPORT_SYMBOL_GPL(__ftrace_vprintk); -static void * -t_next(struct seq_file *m, void *v, loff_t *pos) -{ - const char **fmt = m->private; - const char **next = fmt; - - (*pos)++; - - if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) - return NULL; - - next = fmt; - m->private = ++next; - - return fmt; -} - -static void *t_start(struct seq_file *m, loff_t *pos) -{ - return t_next(m, NULL, pos); -} - -static int t_show(struct seq_file *m, void *v) -{ - const char **fmt = v; - const char *str = *fmt; - int i; - - seq_printf(m, "0x%lx : \"", (unsigned long)fmt); - - /* - * Tabs and new lines need to be converted. 
- */ - for (i = 0; str[i]; i++) { - switch (str[i]) { - case '\n': - seq_puts(m, "\\n"); - break; - case '\t': - seq_puts(m, "\\t"); - break; - case '\\': - seq_puts(m, "\\"); - break; - case '"': - seq_puts(m, "\\\""); - break; - default: - seq_putc(m, str[i]); - } - } - seq_puts(m, "\"\n"); - - return 0; -} - -static void t_stop(struct seq_file *m, void *p) -{ -} - -static const struct seq_operations show_format_seq_ops = { - .start = t_start, - .next = t_next, - .show = t_show, - .stop = t_stop, -}; - -static int -ftrace_formats_open(struct inode *inode, struct file *file) -{ - int ret; - - ret = seq_open(file, &show_format_seq_ops); - if (!ret) { - struct seq_file *m = file->private_data; - - m->private = __start___trace_bprintk_fmt; - } - return ret; -} - -static const struct file_operations ftrace_formats_fops = { - .open = ftrace_formats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static __init int init_trace_printk_function_export(void) -{ - struct dentry *d_tracer; - struct dentry *entry; - - d_tracer = tracing_init_dentry(); - if (!d_tracer) - return 0; - - entry = debugfs_create_file("printk_formats", 0444, d_tracer, - NULL, &ftrace_formats_fops); - if (!entry) - pr_warning("Could not create debugfs " - "'printk_formats' entry\n"); - - return 0; -} - -fs_initcall(init_trace_printk_function_export); static __init int init_trace_printk(void) { diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c index c750f65f9661..d0871bc0aca5 100644 --- a/trunk/kernel/trace/trace_stack.c +++ b/trunk/kernel/trace/trace_stack.c @@ -245,31 +245,16 @@ static int trace_lookup_stack(struct seq_file *m, long i) #endif } -static void print_disabled(struct seq_file *m) -{ - seq_puts(m, "#\n" - "# Stack tracer disabled\n" - "#\n" - "# To enable the stack tracer, either add 'stacktrace' to the\n" - "# kernel command line\n" - "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n" - "#\n"); -} - static int t_show(struct seq_file *m, void *v) { long i; int size; if (v == SEQ_START_TOKEN) { - seq_printf(m, " Depth Size Location" + seq_printf(m, " Depth Size Location" " (%d entries)\n" - " ----- ---- --------\n", + " ----- ---- --------\n", max_stack_trace.nr_entries); - - if (!stack_tracer_enabled && !max_stack_size) - print_disabled(m); - return 0; } diff --git a/trunk/kernel/trace/trace_workqueue.c b/trunk/kernel/trace/trace_workqueue.c index 9ab035b58cf1..fb5ccac8bbc0 100644 --- a/trunk/kernel/trace/trace_workqueue.c +++ b/trunk/kernel/trace/trace_workqueue.c @@ -193,20 +193,12 @@ static int workqueue_stat_show(struct seq_file *s, void *p) struct cpu_workqueue_stats *cws = p; unsigned long flags; int cpu = cws->cpu; - struct pid *pid; - struct task_struct *tsk; - - pid = find_get_pid(cws->pid); - if (pid) { - tsk = get_pid_task(pid, PIDTYPE_PID); - if (tsk) { - seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, - atomic_read(&cws->inserted), cws->executed, - tsk->comm); - put_task_struct(tsk); - } - put_pid(pid); - } + struct task_struct *tsk = find_task_by_vpid(cws->pid); + + seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, + atomic_read(&cws->inserted), + cws->executed, + tsk ? 
tsk->comm : "<...>"); spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); if (&cws->list == workqueue_cpu_stat(cpu)->list.next) diff --git a/trunk/lib/bitmap.c b/trunk/lib/bitmap.c index 35a1f7ff4149..1338469ac849 100644 --- a/trunk/lib/bitmap.c +++ b/trunk/lib/bitmap.c @@ -948,15 +948,15 @@ static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op) */ int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) { - int pos, end; /* scans bitmap by regions of size order */ + int pos; /* scans bitmap by regions of size order */ - for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { - if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) - continue; - __reg_op(bitmap, pos, order, REG_OP_ALLOC); - return pos; - } - return -ENOMEM; + for (pos = 0; pos < bits; pos += (1 << order)) + if (__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + break; + if (pos == bits) + return -ENOMEM; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return pos; } EXPORT_SYMBOL(bitmap_find_free_region); diff --git a/trunk/scripts/kallsyms.c b/trunk/scripts/kallsyms.c index 6654cbed965b..ad2434b26970 100644 --- a/trunk/scripts/kallsyms.c +++ b/trunk/scripts/kallsyms.c @@ -500,51 +500,6 @@ static void optimize_token_table(void) optimize_result(); } -/* guess for "linker script provide" symbol */ -static int may_be_linker_script_provide_symbol(const struct sym_entry *se) -{ - const char *symbol = (char *)se->sym + 1; - int len = se->len - 1; - - if (len < 8) - return 0; - - if (symbol[0] != '_' || symbol[1] != '_') - return 0; - - /* __start_XXXXX */ - if (!memcmp(symbol + 2, "start_", 6)) - return 1; - - /* __stop_XXXXX */ - if (!memcmp(symbol + 2, "stop_", 5)) - return 1; - - /* __end_XXXXX */ - if (!memcmp(symbol + 2, "end_", 4)) - return 1; - - /* __XXXXX_start */ - if (!memcmp(symbol + len - 6, "_start", 6)) - return 1; - - /* __XXXXX_end */ - if (!memcmp(symbol + len - 4, "_end", 4)) - return 1; - - return 0; -} - -static int prefix_underscores_count(const char *str) -{ - const char *tail = str; - - while (*tail != '_') - tail++; - - return tail - str; -} - static int compare_symbols(const void *a, const void *b) { const struct sym_entry *sa; @@ -566,18 +521,6 @@ static int compare_symbols(const void *a, const void *b) if (wa != wb) return wa - wb; - /* sort by "linker script provide" type */ - wa = may_be_linker_script_provide_symbol(sa); - wb = may_be_linker_script_provide_symbol(sb); - if (wa != wb) - return wa - wb; - - /* sort by the number of prefix underscores */ - wa = prefix_underscores_count((const char *)sa->sym + 1); - wb = prefix_underscores_count((const char *)sb->sym + 1); - if (wa != wb) - return wa - wb; - /* sort by initial order, so that other symbols are left undisturbed */ return sa->start_pos - sb->start_pos; }