diff --git a/[refs] b/[refs]
index 95f14cd975b4..e2f5518e352e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 95d0ad049cd6937634c0a75f9518f5166daabfce
+refs/heads/master: 17d42c1c497aa54952b9e58c1502a46f0df40315
diff --git a/trunk/Documentation/lockdep-design.txt b/trunk/Documentation/lockdep-design.txt
index abf768c681e2..e20d913d5914 100644
--- a/trunk/Documentation/lockdep-design.txt
+++ b/trunk/Documentation/lockdep-design.txt
@@ -30,9 +30,9 @@ State
 The validator tracks lock-class usage history into 4n + 1 separate state bits:
 
 - 'ever held in STATE context'
-- 'ever held as readlock in STATE context'
-- 'ever held with STATE enabled'
-- 'ever held as readlock with STATE enabled'
+- 'ever head as readlock in STATE context'
+- 'ever head with STATE enabled'
+- 'ever head as readlock with STATE enabled'
 
 Where STATE can be either one of (kernel/lockdep_states.h)
  - hardirq
diff --git a/trunk/arch/x86/kernel/apic/x2apic_cluster.c b/trunk/arch/x86/kernel/apic/x2apic_cluster.c
index a5371ec36776..2ed4e2bb3b32 100644
--- a/trunk/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/trunk/arch/x86/kernel/apic/x2apic_cluster.c
@@ -17,13 +17,11 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return x2apic_enabled();
 }
 
-/*
- * need to use more than cpu 0, because we need more vectors when
- * MSI-X are used.
- */
+/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
+
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpu_online_mask;
+	return cpumask_of(0);
 }
 
 /*
diff --git a/trunk/arch/x86/kernel/apic/x2apic_phys.c b/trunk/arch/x86/kernel/apic/x2apic_phys.c
index a8989aadc99a..0b631c6a2e00 100644
--- a/trunk/arch/x86/kernel/apic/x2apic_phys.c
+++ b/trunk/arch/x86/kernel/apic/x2apic_phys.c
@@ -27,13 +27,11 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-/*
- * need to use more than cpu 0, because we need more vectors when
- * MSI-X are used.
- */
+/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
+
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpu_online_mask;
+	return cpumask_of(0);
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/trunk/arch/x86/kernel/efi.c b/trunk/arch/x86/kernel/efi.c
index fe26ba3e3451..19ccf6d0dccf 100644
--- a/trunk/arch/x86/kernel/efi.c
+++ b/trunk/arch/x86/kernel/efi.c
@@ -354,7 +354,7 @@ void __init efi_init(void)
 	 */
 	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
 	if (c16) {
-		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
+		for (i = 0; i < sizeof(vendor) && *c16; ++i)
 			vendor[i] = *c16++;
 		vendor[i] = '\0';
 	} else
diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c
index 9eb897603705..834c9da8bf9d 100644
--- a/trunk/arch/x86/kernel/reboot.c
+++ b/trunk/arch/x86/kernel/reboot.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
 #endif /* CONFIG_X86_32 */
 
 /*
- * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
+ * Apple MacBook5,2 (2009 MacBook) needs reboot=p
 */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
@@ -426,14 +426,6 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
 		},
 	},
-	{	/* Handle problems with rebooting on Apple MacBookPro5,1 */
-		.callback = set_pci_reboot,
-		.ident = "Apple MacBookPro5,1",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
-		},
-	},
 	{ }
 };
 
diff --git a/trunk/arch/x86/kernel/vmi_32.c b/trunk/arch/x86/kernel/vmi_32.c
index 95a7289e4b0c..b263423fbe2a 100644
--- a/trunk/arch/x86/kernel/vmi_32.c
+++ b/trunk/arch/x86/kernel/vmi_32.c
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 	ap.ds = __USER_DS;
 	ap.es = __USER_DS;
 	ap.fs = __KERNEL_PERCPU;
-	ap.gs = __KERNEL_STACK_CANARY;
+	ap.gs = 0;
 
 	ap.eflags = 0;
 
diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h
index a81170de7f6b..d7cd193c2277 100644
--- a/trunk/include/linux/ftrace_event.h
+++ b/trunk/include/linux/ftrace_event.h
@@ -89,9 +89,7 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned long flags,
-				  int pc);
+
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
diff --git a/trunk/include/linux/perf_counter.h b/trunk/include/linux/perf_counter.h
index a67dd5c5b6d3..e604e6ef72dd 100644
--- a/trunk/include/linux/perf_counter.h
+++ b/trunk/include/linux/perf_counter.h
@@ -121,9 +121,8 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
-	PERF_SAMPLE_TP_RECORD			= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
 };
 
 /*
@@ -414,11 +413,6 @@ struct perf_callchain_entry {
 	__u64				ip[PERF_MAX_STACK_DEPTH];
 };
 
-struct perf_tracepoint_record {
-	int				size;
-	char				*record;
-};
-
 struct task_struct;
 
 /**
@@ -687,7 +681,6 @@ struct perf_sample_data {
 	struct pt_regs			*regs;
 	u64				addr;
 	u64				period;
-	void				*private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
diff --git a/trunk/include/trace/ftrace.h b/trunk/include/trace/ftrace.h
index 7fb16d90e7b1..1867553c61e5 100644
--- a/trunk/include/trace/ftrace.h
+++ b/trunk/include/trace/ftrace.h
@@ -144,9 +144,6 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -348,56 +345,6 @@ static inline int ftrace_get_offsets_##call(			\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
-
-/*
- * Generate the functions needed for tracepoint perf_counter support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
- * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
- * }
- *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
- * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-									\
-static void ftrace_profile_##call(proto);				\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif
-
 /*
  * Stage 4 of the trace events.
  *
@@ -500,6 +447,28 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args)				\
+static void ftrace_profile_##call(proto)				\
+{									\
+	extern void perf_tpcounter_event(int);				\
+	perf_tpcounter_event(event_##call.id);				\
+}									\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -507,6 +476,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
+#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -532,6 +502,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -615,99 +586,6 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to profile events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_profile_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	int __entry_size;
- *	int __data_size;
- *	int pc;
- *
- *	local_save_flags(irq_flags);
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *	__entry_size = __data_size + sizeof(*entry);
- *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
- *
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
- *
- *		<tstruct> <- do some jobs with dynamic arrays
- *
- *		<assign>  <- affect our values
- *
- *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *			     __entry_size);  <- submit them to perf counter
- *	} while (0);
- *
- * }
- */
-
-#ifdef CONFIG_EVENT_PROFILE
-
-#undef __perf_addr
-#define __perf_addr(a) __addr = (a)
-
-#undef __perf_count
-#define __perf_count(c) __count = (c)
-
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-static void ftrace_profile_##call(proto)				\
-{									\
-	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_event_call *event_call = &event_##call;		\
-	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
-	struct ftrace_raw_##call *entry;				\
-	u64 __addr = 0, __count = 1;					\
-	unsigned long irq_flags;					\
-	int __entry_size;						\
-	int __data_size;						\
-	int pc;								\
-									\
-	local_save_flags(irq_flags);					\
-	pc = preempt_count();						\
-									\
-	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
-	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
-									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
-									\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
-									\
-		tstruct							\
-									\
-		{ assign; }						\
-									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
-			     __entry_size);				\
-	} while (0);							\
-									\
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
-
+#undef _TRACE_PROFILE
 #undef _TRACE_PROFILE_INIT
 
diff --git a/trunk/kernel/lockdep_proc.c b/trunk/kernel/lockdep_proc.c
index e94caa666dba..d7135aa2d2c4 100644
--- a/trunk/kernel/lockdep_proc.c
+++ b/trunk/kernel/lockdep_proc.c
@@ -758,8 +758,7 @@ static int __init lockdep_proc_init(void)
 		    &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
-		    &proc_lock_stat_operations);
+	proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
 #endif
 
 	return 0;
diff --git a/trunk/kernel/perf_counter.c b/trunk/kernel/perf_counter.c
index 868102172aa4..673c1aaf7332 100644
--- a/trunk/kernel/perf_counter.c
+++ b/trunk/kernel/perf_counter.c
@@ -2646,7 +2646,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u64 counter;
 	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
-	struct perf_tracepoint_record *tp;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2715,11 +2714,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
-	if (sample_type & PERF_SAMPLE_TP_RECORD) {
-		tp = data->private;
-		header.size += tp->size;
-	}
-
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2783,9 +2777,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
-	if (sample_type & PERF_SAMPLE_TP_RECORD)
-		perf_output_copy(&handle, tp->record, tp->size);
-
 	perf_output_end(&handle);
 }
 
@@ -3712,24 +3703,17 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
-			  int entry_size)
+void perf_tpcounter_event(int event_id)
 {
-	struct perf_tracepoint_record tp = {
-		.size = entry_size,
-		.record = record,
-	};
-
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = addr,
-		.private = &tp,
+		.addr = 0,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
diff --git a/trunk/kernel/posix-cpu-timers.c b/trunk/kernel/posix-cpu-timers.c
index bece7c0b67b2..e33a21cb9407 100644
--- a/trunk/kernel/posix-cpu-timers.c
+++ b/trunk/kernel/posix-cpu-timers.c
@@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct task_cputime cputime;
+	struct signal_struct *const sig = tsk->signal;
 
-	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
+		       cputime_add(tsk->utime, sig->utime),
+		       cputime_add(tsk->stime, sig->stime),
+		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
diff --git a/trunk/kernel/rtmutex.c b/trunk/kernel/rtmutex.c
index 29bd4baf9e75..fcd107a78c5a 100644
--- a/trunk/kernel/rtmutex.c
+++ b/trunk/kernel/rtmutex.c
@@ -1039,14 +1039,16 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
 		/* We got the lock for task. */
 		debug_rt_mutex_lock(lock);
+
 		rt_mutex_set_owner(lock, task, 0);
-		spin_unlock(&lock->wait_lock);
+
 		rt_mutex_deadlock_account_lock(lock, task);
 		return 1;
 	}
 
 	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
+
 	if (ret && !waiter->task) {
 		/*
 		 * Reset the return value. We might have
diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c
index a330513d96ce..bf27bb7a63e2 100644
--- a/trunk/kernel/trace/ring_buffer.c
+++ b/trunk/kernel/trace/ring_buffer.c
@@ -735,7 +735,6 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	put_online_cpus();
 
-	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -1786,7 +1785,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-	if (rb_try_to_discard(cpu_buffer, event))
+	if (!rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
@@ -2384,6 +2383,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
+		rb_advance_reader(cpu_buffer);
 		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
 	 * buffer too. A one time deal is all you get from reading
 	 * the ring buffer from an NMI.
 	 */
-	if (likely(!in_nmi()))
+	if (likely(!in_nmi() && !oops_in_progress))
 		return 1;
 
 	tracing_off_permanent();
@@ -2519,8 +2519,6 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (event && event->type_len == RINGBUF_TYPE_PADDING)
-		rb_advance_reader(cpu_buffer);
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
@@ -2592,9 +2590,12 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (event)
-		rb_advance_reader(cpu_buffer);
+	if (!event)
+		goto out_unlock;
+
+	rb_advance_reader(cpu_buffer);
 
+ out_unlock:
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index c22b40f8f576..8930e39b9d8c 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -848,7 +848,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
-EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 						    int type,
diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h
index 8b9f4f6e9559..3548ae5cc780 100644
--- a/trunk/kernel/trace/trace.h
+++ b/trunk/kernel/trace/trace.h
@@ -438,6 +438,10 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
+
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c
index f32dc9d1ea7b..936c621bbf46 100644
--- a/trunk/kernel/trace/trace_events_filter.c
+++ b/trunk/kernel/trace/trace_events_filter.c
@@ -624,6 +624,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		return -ENOSPC;
 	}
 
+	filter->preds[filter->n_preds] = pred;
+	filter->n_preds++;
+
 	list_for_each_entry(call, &ftrace_events, list) {
 
 		if (!call->define_fields)
@@ -640,9 +643,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		}
 		replace_filter_string(call->filter, filter_string);
 	}
-
-	filter->preds[filter->n_preds] = pred;
-	filter->n_preds++;
 out:
 	return err;
 }
@@ -1029,17 +1029,12 @@ static int replace_preds(struct event_subsystem *system,
 
 		if (elt->op == OP_AND || elt->op == OP_OR) {
 			pred = create_logical_pred(elt->op);
-			if (!pred)
-				return -ENOMEM;
 			if (call) {
 				err = filter_add_pred(ps, call, pred);
 				filter_free_pred(pred);
-			} else {
+			} else
 				err = filter_add_subsystem_pred(ps, system,
 							pred, filter_string);
-				if (err)
-					filter_free_pred(pred);
-			}
 			if (err)
 				return err;
 
@@ -1053,17 +1048,12 @@ static int replace_preds(struct event_subsystem *system,
 		}
 
 		pred = create_pred(elt->op, operand1, operand2);
-		if (!pred)
-			return -ENOMEM;
 		if (call) {
 			err = filter_add_pred(ps, call, pred);
 			filter_free_pred(pred);
-		} else {
+		} else
 			err = filter_add_subsystem_pred(ps, system, pred,
 							filter_string);
-			if (err)
-				filter_free_pred(pred);
-		}
 
 		if (err)
 			return err;
diff --git a/trunk/scripts/recordmcount.pl b/trunk/scripts/recordmcount.pl
index 911ba7ffab84..d29baa2e063a 100755
--- a/trunk/scripts/recordmcount.pl
+++ b/trunk/scripts/recordmcount.pl
@@ -393,7 +393,7 @@ sub update_funcs
 	    $read_function = 0;
 	}
 	# print out any recorded offsets
-	update_funcs() if (defined($ref_func));
+	update_funcs() if ($text_found);
 
 	# reset all markers and arrays
 	$text_found = 0;
@@ -414,10 +414,7 @@ sub update_funcs
 		$offset = hex $1;
 	    } else {
 		# if we already have a function, and this is weak, skip it
-		if (!defined($ref_func) && !defined($weak{$text}) &&
-		    # PPC64 can have symbols that start with .L and
-		    # gcc considers these special. Don't use them!
-		    $text !~ /^\.L/) {
+		if (!defined($ref_func) && !defined($weak{$text})) {
 		    $ref_func = $text;
 		    $offset = hex $1;
 		}
@@ -444,7 +441,7 @@ sub update_funcs
 }
 
 # dump out anymore offsets that may have been found
-update_funcs() if (defined($ref_func));
+update_funcs() if ($text_found);
 
 # If we did not find any mcount callers, we are done (do nothing).
 if (!$opened) {
diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c
index 90c98082af10..6da09928130f 100644
--- a/trunk/tools/perf/builtin-record.c
+++ b/trunk/tools/perf/builtin-record.c
@@ -412,7 +412,6 @@ static void create_counter(int counter, int cpu, pid_t pid)
 	if (call_graph)
 		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
 
-	attr->mmap		= track;
 	attr->comm		= track;
 	attr->inherit		= (cpu < 0) && inherit;