From 5490749def341c8fc3bf52b0cdafd55e6bd68b37 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Mon, 7 Mar 2011 21:13:40 +0100
Subject: [PATCH] --- yaml ---

r: 234531
b: refs/heads/master
c: b9a46bba88001504235459c8410f17e6a7e38008
h: refs/heads/master
i:
  234529: cfb50a49018af488fa210012ecb64ab378b0beb1
  234527: af17e16ed25d0a84433bbf2d28767e249f29c374
v: v3
---
 [refs]                               |  2 +-
 trunk/Documentation/trace/ftrace.txt |  5 ----
 trunk/arch/x86/kernel/ftrace.c       | 15 ++++++------
 trunk/arch/x86/kvm/trace.h           |  8 +++----
 trunk/include/linux/ftrace_event.h   |  1 +
 trunk/include/linux/ring_buffer.h    |  2 --
 trunk/include/trace/events/mce.h     |  8 +++----
 trunk/include/trace/events/module.h  |  5 ++--
 trunk/include/trace/events/skb.h     |  4 ++--
 trunk/kernel/trace/ring_buffer.c     | 24 +++----------------
 trunk/kernel/trace/trace.c           | 34 +++++++++-----------------
 trunk/kernel/trace/trace.h           |  3 +--
 trunk/kernel/trace/trace_entries.h   |  6 ++---
 trunk/kernel/trace/trace_events.c    |  2 +-
 trunk/kernel/trace/trace_output.c    | 36 +++++++++++++---------------
 trunk/scripts/recordmcount.c         |  3 +--
 trunk/scripts/recordmcount.pl        |  1 -
 trunk/tools/perf/util/top.c          | 30 +++++++++++++----------
 18 files changed, 76 insertions(+), 113 deletions(-)

diff --git a/[refs] b/[refs]
index 82129a7113ff..5078ceecfba1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4a0b1665db09cf2da9ad7d0f12da386373c10bfa
+refs/heads/master: b9a46bba88001504235459c8410f17e6a7e38008
diff --git a/trunk/Documentation/trace/ftrace.txt b/trunk/Documentation/trace/ftrace.txt
index 1ebc24cf9a55..67f1cc473257 100644
--- a/trunk/Documentation/trace/ftrace.txt
+++ b/trunk/Documentation/trace/ftrace.txt
@@ -454,11 +454,6 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
 
 	    latencies, as described in "Latency trace format".
 
-  overwrite - This controls what happens when the trace buffer is
-              full. If "1" (default), the oldest events are
-              discarded and overwritten. If "0", then the newest
-              events are discarded.
-
   ftrace_enabled
   --------------
 
diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c
index a93742a57468..382eb2936d4d 100644
--- a/trunk/arch/x86/kernel/ftrace.c
+++ b/trunk/arch/x86/kernel/ftrace.c
@@ -437,19 +437,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	}
 
-	trace.func = self_addr;
-	trace.depth = current->curr_ret_stack + 1;
-
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY) {
 		*parent = old;
 		return;
 	}
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
-				     frame_pointer) == -EBUSY) {
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
 		*parent = old;
-		return;
 	}
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/trunk/arch/x86/kvm/trace.h b/trunk/arch/x86/kvm/trace.h
index db932760ea82..1357d7cf4ec8 100644
--- a/trunk/arch/x86/kvm/trace.h
+++ b/trunk/arch/x86/kvm/trace.h
@@ -62,21 +62,21 @@ TRACE_EVENT(kvm_hv_hypercall,
 	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
 
 	TP_STRUCT__entry(
+		__field( __u16, code )
+		__field( bool, fast )
 		__field( __u16, rep_cnt )
 		__field( __u16, rep_idx )
 		__field( __u64, ingpa )
 		__field( __u64, outgpa )
-		__field( __u16, code )
-		__field( bool, fast )
 	),
 
 	TP_fast_assign(
+		__entry->code = code;
+		__entry->fast = fast;
 		__entry->rep_cnt = rep_cnt;
 		__entry->rep_idx = rep_idx;
 		__entry->ingpa = ingpa;
 		__entry->outgpa = outgpa;
-		__entry->code = code;
-		__entry->fast = fast;
 	),
 
 	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h
index 22b32af1b5ec..1a99e7939c2b 100644
--- a/trunk/include/linux/ftrace_event.h
+++ b/trunk/include/linux/ftrace_event.h
@@ -37,6 +37,7 @@ struct trace_entry {
 	unsigned char flags;
 	unsigned char preempt_count;
 	int pid;
+	int lock_depth;
 };
 
 #define FTRACE_MAX_EVENT \
diff --git a/trunk/include/linux/ring_buffer.h b/trunk/include/linux/ring_buffer.h
index ab38ac80b0f9..8d3a2486544d 100644
--- a/trunk/include/linux/ring_buffer.h
+++ b/trunk/include/linux/ring_buffer.h
@@ -100,8 +100,6 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
-
 struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
 						   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
diff --git a/trunk/include/trace/events/mce.h b/trunk/include/trace/events/mce.h
index 4cbbcef6baa8..7eee77895cb3 100644
--- a/trunk/include/trace/events/mce.h
+++ b/trunk/include/trace/events/mce.h
@@ -17,36 +17,36 @@ TRACE_EVENT(mce_record,
 
 	TP_STRUCT__entry(
 		__field( u64, mcgcap )
 		__field( u64, mcgstatus )
+		__field( u8, bank )
 		__field( u64, status )
 		__field( u64, addr )
 		__field( u64, misc )
 		__field( u64, ip )
+		__field( u8, cs )
 		__field( u64, tsc )
 		__field( u64, walltime )
 		__field( u32, cpu )
 		__field( u32, cpuid )
 		__field( u32, apicid )
 		__field( u32, socketid )
-		__field( u8, cs )
-		__field( u8, bank )
 		__field( u8, cpuvendor )
 	),
 
 	TP_fast_assign(
 		__entry->mcgcap = m->mcgcap;
 		__entry->mcgstatus = m->mcgstatus;
+		__entry->bank = m->bank;
 		__entry->status = m->status;
 		__entry->addr = m->addr;
 		__entry->misc = m->misc;
 		__entry->ip = m->ip;
+		__entry->cs = m->cs;
 		__entry->tsc = m->tsc;
 		__entry->walltime = m->time;
 		__entry->cpu = m->extcpu;
 		__entry->cpuid = m->cpuid;
 		__entry->apicid = m->apicid;
 		__entry->socketid = m->socketid;
-		__entry->cs = m->cs;
-		__entry->bank = m->bank;
 		__entry->cpuvendor = m->cpuvendor;
 	),
diff --git a/trunk/include/trace/events/module.h b/trunk/include/trace/events/module.h
index 21a546d27c0c..c6bae36547e5 100644
--- a/trunk/include/trace/events/module.h
+++ b/trunk/include/trace/events/module.h
@@ -108,14 +108,14 @@ TRACE_EVENT(module_request,
 	TP_ARGS(name, wait, ip),
 
 	TP_STRUCT__entry(
-		__field( unsigned long, ip )
 		__field( bool, wait )
+		__field( unsigned long, ip )
 		__string( name, name )
 	),
 
 	TP_fast_assign(
-		__entry->ip = ip;
 		__entry->wait = wait;
+		__entry->ip = ip;
 		__assign_str(name, name);
 	),
 
@@ -129,3 +129,4 @@ TRACE_EVENT(module_request,
 
 /* This part must be outside protection */
 #include
+
diff --git a/trunk/include/trace/events/skb.h b/trunk/include/trace/events/skb.h
index 0c68ae22da22..f10293c41b1e 100644
--- a/trunk/include/trace/events/skb.h
+++ b/trunk/include/trace/events/skb.h
@@ -19,14 +19,14 @@ TRACE_EVENT(kfree_skb,
 
 	TP_STRUCT__entry(
 		__field( void *, skbaddr )
-		__field( void *, location )
 		__field( unsigned short, protocol )
+		__field( void *, location )
 	),
 
 	TP_fast_assign(
 		__entry->skbaddr = skb;
-		__entry->location = location;
 		__entry->protocol = ntohs(skb->protocol);
+		__entry->location = location;
 	),
 
 	TP_printk("skbaddr=%p protocol=%u location=%p",
diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c
index db7b439d23ee..bd1c35a4fbcc 100644
--- a/trunk/kernel/trace/ring_buffer.c
+++ b/trunk/kernel/trace/ring_buffer.c
@@ -5,6 +5,7 @@
  */
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1428,17 +1429,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
-{
-	mutex_lock(&buffer->mutex);
-	if (val)
-		buffer->flags |= RB_FL_OVERWRITE;
-	else
-		buffer->flags &= ~RB_FL_OVERWRITE;
-	mutex_unlock(&buffer->mutex);
-}
-EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
-
 static inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
@@ -2172,19 +2162,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (likely(ts >= cpu_buffer->write_stamp)) {
 		delta = diff;
 		if (unlikely(test_time_stamp(delta))) {
-			int local_clock_stable = 1;
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-			local_clock_stable = sched_clock_stable;
-#endif
 			WARN_ONCE(delta > (1ULL << 59),
-				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
+				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
 				  (unsigned long long)delta,
 				  (unsigned long long)ts,
-				  (unsigned long long)cpu_buffer->write_stamp,
-				  local_clock_stable ? "" :
-				  "If you just came from a suspend/resume,\n"
-				  "please switch to the trace global clock:\n"
-				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
+				  (unsigned long long)cpu_buffer->write_stamp);
 			add_timestamp = 1;
 		}
 	}
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index 9541c27c1cf2..8dc8da6733f9 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -41,6 +41,8 @@
 #include "trace.h"
 #include "trace_output.h"
 
+#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
+
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -338,7 +340,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -423,7 +425,6 @@ static const char *trace_options[] = {
 	"sleep-time",
 	"graph-time",
 	"record-cmd",
-	"overwrite",
 	NULL
 };
 
@@ -779,11 +780,6 @@ __acquires(kernel_lock)
 		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
-
-		/* If we expanded the buffers, make sure the max is expanded too */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, trace_buf_size);
-
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
@@ -796,10 +792,6 @@ __acquires(kernel_lock)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1);
-
 		printk(KERN_CONT "PASSED\n");
 	}
 #endif
@@ -1110,6 +1102,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
+	entry->lock_depth = (tsk) ? tsk->lock_depth : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1756,9 +1749,10 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "# | / _----=> need-resched \n");
 	seq_puts(m, "# || / _---=> hardirq/softirq \n");
 	seq_puts(m, "# ||| / _--=> preempt-depth \n");
-	seq_puts(m, "# |||| / delay \n");
-	seq_puts(m, "# cmd pid ||||| time | caller \n");
-	seq_puts(m, "# \\ / ||||| \\ | / \n");
+	seq_puts(m, "# |||| /_--=> lock-depth \n");
+	seq_puts(m, "# |||||/ delay \n");
+	seq_puts(m, "# cmd pid |||||| time | caller \n");
+	seq_puts(m, "# \\ / |||||| \\ | / \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
@@ -2535,9 +2529,6 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
-
-	if (mask == TRACE_ITER_OVERWRITE)
-		ring_buffer_change_overwrite(global_trace.buffer, enabled);
 }
 
 static ssize_t
@@ -4564,11 +4555,9 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 __init static int tracer_alloc_buffers(void)
 {
 	int ring_buf_size;
-	enum ring_buffer_flags rb_flags;
 	int i;
 	int ret = -ENOMEM;
 
-
 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 		goto out;
 
@@ -4581,13 +4570,12 @@ __init static int tracer_alloc_buffers(void)
 	else
 		ring_buf_size = 1;
 
-	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
+	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
+						TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
@@ -4597,7 +4585,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h
index 5e9dfc6286dd..856e73cc1d3f 100644
--- a/trunk/kernel/trace/trace.h
+++ b/trunk/kernel/trace/trace.h
@@ -272,8 +272,8 @@ struct tracer {
 	/* If you handled the flag setting, return 0 */
 	int (*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer *next;
-	struct tracer_flags *flags;
 	int print_max;
+	struct tracer_flags *flags;
 	int use_max_tr;
 };
 
@@ -606,7 +606,6 @@ enum trace_iterator_flags {
 	TRACE_ITER_SLEEP_TIME = 0x40000,
 	TRACE_ITER_GRAPH_TIME = 0x80000,
 	TRACE_ITER_RECORD_CMD = 0x100000,
-	TRACE_ITER_OVERWRITE = 0x200000,
 };
 
 /*
diff --git a/trunk/kernel/trace/trace_entries.h b/trunk/kernel/trace/trace_entries.h
index 1516cb3ec549..6cf223764be8 100644
--- a/trunk/kernel/trace/trace_entries.h
+++ b/trunk/kernel/trace/trace_entries.h
@@ -109,12 +109,12 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
  */
 #define FTRACE_CTX_FIELDS \
 	__field( unsigned int, prev_pid ) \
-	__field( unsigned int, next_pid ) \
-	__field( unsigned int, next_cpu ) \
 	__field( unsigned char, prev_prio ) \
 	__field( unsigned char, prev_state ) \
+	__field( unsigned int, next_pid ) \
 	__field( unsigned char, next_prio ) \
-	__field( unsigned char, next_state )
+	__field( unsigned char, next_state ) \
+	__field( unsigned int, next_cpu )
 
 FTRACE_ENTRY(context_switch, ctx_switch_entry,
 
diff --git a/trunk/kernel/trace/trace_events.c b/trunk/kernel/trace/trace_events.c
index e88f74fe1d4c..5f499e0438a4 100644
--- a/trunk/kernel/trace/trace_events.c
+++ b/trunk/kernel/trace/trace_events.c
@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
+	__common_field(int, lock_depth);
 
 	return ret;
 }
@@ -325,7 +326,6 @@ int trace_set_clr_event(const char *system, const char *event, int set)
 {
 	return __ftrace_set_clr_event(NULL, system, event, set);
 }
-EXPORT_SYMBOL_GPL(trace_set_clr_event);
 
 /* 128 should be much more than enough */
 #define EVENT_BUF_SIZE 127
diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c
index 456be9063c2d..02272baa2206 100644
--- a/trunk/kernel/trace/trace_output.c
+++ b/trunk/kernel/trace/trace_output.c
@@ -529,34 +529,24 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
- * count.
+ * count and lock depth.
 */
 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 {
-	char hardsoft_irq;
-	char need_resched;
-	char irqs_off;
-	int hardirq;
-	int softirq;
+	int hardirq, softirq;
 	int ret;
 
 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 
-	irqs_off =
-		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
-		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
-		'.';
-	need_resched =
-		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
-	hardsoft_irq =
-		(hardirq && softirq) ? 'H' :
-		hardirq ? 'h' :
-		softirq ? 's' :
-		'.';
-
 	if (!trace_seq_printf(s, "%c%c%c",
-			      irqs_off, need_resched, hardsoft_irq))
+			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+				  'X' : '.',
+			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
+				'N' : '.',
+			      (hardirq && softirq) ? 'H' :
+				hardirq ? 'h' : softirq ? 's' : '.'))
 		return 0;
 
 	if (entry->preempt_count)
@@ -564,7 +554,13 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 	else
 		ret = trace_seq_putc(s, '.');
 
-	return ret;
+	if (!ret)
+		return 0;
+
+	if (entry->lock_depth < 0)
+		return trace_seq_putc(s, '.');
+
+	return trace_seq_printf(s, "%d", entry->lock_depth);
 }
 
 static int
diff --git a/trunk/scripts/recordmcount.c b/trunk/scripts/recordmcount.c
index f9f6f52db772..038b3d1e2981 100644
--- a/trunk/scripts/recordmcount.c
+++ b/trunk/scripts/recordmcount.c
@@ -206,8 +206,7 @@ static uint32_t (*w2)(uint16_t);
 static int
 is_mcounted_section_name(char const *const txtname)
 {
-	return 0 == strcmp(".text", txtname) ||
-		0 == strcmp(".ref.text", txtname) ||
+	return 0 == strcmp(".text", txtname) ||
 		0 == strcmp(".sched.text", txtname) ||
 		0 == strcmp(".spinlock.text", txtname) ||
 		0 == strcmp(".irqentry.text", txtname) ||
diff --git a/trunk/scripts/recordmcount.pl b/trunk/scripts/recordmcount.pl
index 4be0deea71ca..1d7963f4ee79 100755
--- a/trunk/scripts/recordmcount.pl
+++ b/trunk/scripts/recordmcount.pl
@@ -130,7 +130,6 @@
 # Acceptable sections to record.
 my %text_sections = (
      ".text" => 1,
-     ".ref.text" => 1,
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
diff --git a/trunk/tools/perf/util/top.c b/trunk/tools/perf/util/top.c
index 70a9c13f4ad5..4f869da4e9c7 100644
--- a/trunk/tools/perf/util/top.c
+++ b/trunk/tools/perf/util/top.c
@@ -61,6 +61,12 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
 	rb_insert_color(&se->rb_node, tree);
 }
 
+#define SNPRINTF(buf, size, fmt, args...) \
+({ \
+	size_t r = snprintf(buf, size, fmt, ## args); \
+	r > size ?  size : r; \
+})
+
 size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 {
 	struct perf_evsel *counter;
@@ -70,7 +76,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 	size_t ret = 0;
 
 	if (!perf_guest) {
-		ret = snprintf(bf, size,
+		ret = SNPRINTF(bf, size,
 			      " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
 			      " exact: %4.1f%% [", samples_per_sec,
 			      100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
@@ -81,7 +87,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 		float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
 		float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
 
-		ret = snprintf(bf, size,
+		ret = SNPRINTF(bf, size,
 			      " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
 			      " guest kernel:%4.1f%% guest us:%4.1f%%"
 			      " exact: %4.1f%% [", samples_per_sec,
@@ -101,38 +107,38 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 	if (top->evlist->nr_entries == 1 || !top->display_weighted) {
 		struct perf_evsel *first;
 		first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
-		ret += snprintf(bf + ret, size - ret, "%" PRIu64 "%s ",
+		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
				(uint64_t)first->attr.sample_period,
				top->freq ? "Hz" : "");
 	}
 
 	if (!top->display_weighted) {
-		ret += snprintf(bf + ret, size - ret, "%s",
+		ret += SNPRINTF(bf + ret, size - ret, "%s",
				event_name(top->sym_evsel));
 	} else list_for_each_entry(counter, &top->evlist->entries, node) {
-		ret += snprintf(bf + ret, size - ret, "%s%s",
+		ret += SNPRINTF(bf + ret, size - ret, "%s%s",
				counter->idx ? "/" : "", event_name(counter));
 	}
 
-	ret += snprintf(bf + ret, size - ret, "], ");
+	ret += SNPRINTF(bf + ret, size - ret, "], ");
 
 	if (top->target_pid != -1)
-		ret += snprintf(bf + ret, size - ret, " (target_pid: %d",
+		ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
				top->target_pid);
 	else if (top->target_tid != -1)
-		ret += snprintf(bf + ret, size - ret, " (target_tid: %d",
+		ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
				top->target_tid);
 	else
-		ret += snprintf(bf + ret, size - ret, " (all");
+		ret += SNPRINTF(bf + ret, size - ret, " (all");
 
 	if (top->cpu_list)
-		ret += snprintf(bf + ret, size - ret, ", CPU%s: %s)",
+		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
				top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
 	else {
 		if (top->target_tid != -1)
-			ret += snprintf(bf + ret, size - ret, ")");
+			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
-			ret += snprintf(bf + ret, size - ret, ", %d CPU%s)",
+			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
					top->evlist->cpus->nr,
					top->evlist->cpus->nr > 1 ? "s" : "");
 	}