diff --git a/[refs] b/[refs] index e1c58d259919..3ff91fe2cc86 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: af2d03d4aaa847ef41a229dfee098a47908437c6 +refs/heads/master: 2494b030ba9334c7dd7df9b9f7abe4eacc950ec5 diff --git a/trunk/Makefile b/trunk/Makefile index e7d01adaf692..41ea6fbec55a 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1268,7 +1268,6 @@ help: @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make W=1 [targets] Enable extra gcc checks' - @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo '' @echo 'Execute "make" or "make all" to build all targets marked with [*] ' @echo 'For further info see the ./README file' diff --git a/trunk/arch/mips/include/asm/jump_label.h b/trunk/arch/mips/include/asm/jump_label.h index 1881b316ca45..7622ccf75076 100644 --- a/trunk/arch/mips/include/asm/jump_label.h +++ b/trunk/arch/mips/include/asm/jump_label.h @@ -20,18 +20,16 @@ #define WORD_INSN ".word" #endif -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:\tnop\n\t" - "nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - WORD_INSN " 1b, %l[l_yes], %0\n\t" - ".popsection\n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +#define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:\tnop\n\t" \ + "nop\n\t" \ + ".pushsection __jump_table, \"a\"\n\t" \ + WORD_INSN " 1b, %l[" #label "], %0\n\t" \ + ".popsection\n\t" \ + : : "i" (key) : : label); \ + } while (0) + #endif /* __KERNEL__ */ diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 4a7f14079e03..2508a6f31588 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -88,7 +88,6 @@ config S390 select HAVE_KERNEL_XZ select HAVE_GET_USER_PAGES_FAST select HAVE_ARCH_MUTEX_CPU_RELAX - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK diff --git a/trunk/arch/s390/include/asm/ftrace.h b/trunk/arch/s390/include/asm/ftrace.h index b7931faaef6d..3c29be4836ed 100644 --- a/trunk/arch/s390/include/asm/ftrace.h +++ b/trunk/arch/s390/include/asm/ftrace.h @@ -11,13 +11,15 @@ struct dyn_arch_ftrace { }; #ifdef CONFIG_64BIT #define MCOUNT_INSN_SIZE 12 +#define MCOUNT_OFFSET 8 #else #define MCOUNT_INSN_SIZE 20 +#define MCOUNT_OFFSET 4 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { - return addr; + return addr - MCOUNT_OFFSET; } #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/s390/include/asm/jump_label.h b/trunk/arch/s390/include/asm/jump_label.h deleted file mode 100644 index 95a6cf2b5b67..000000000000 --- a/trunk/arch/s390/include/asm/jump_label.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef _ASM_S390_JUMP_LABEL_H -#define _ASM_S390_JUMP_LABEL_H - -#include - -#define JUMP_LABEL_NOP_SIZE 6 - -#ifdef CONFIG_64BIT -#define ASM_PTR ".quad" -#define ASM_ALIGN ".balign 8" -#else -#define ASM_PTR ".long" -#define ASM_ALIGN ".balign 4" -#endif - -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("0: brcl 0,0\n" - ".pushsection __jump_table, \"aw\"\n" - ASM_ALIGN "\n" - ASM_PTR " 0b, %l[label], %0\n" - ".popsection\n" - : : "X" (key) : : label); - return false; -label: - return true; -} - -typedef unsigned long jump_label_t; - -struct jump_entry { - jump_label_t code; - jump_label_t target; - jump_label_t key; -}; - -#endif diff --git 
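The arch_static_branch() implementations removed above and the JUMP_LABEL() macros replacing them both hinge on GCC's asm goto (gcc >= 4.5): a nop-sized instruction is emitted inline and its address, together with an out-of-line label, is recorded in a special section so a runtime patcher can later turn the nop into a jump. A rough standalone sketch of that pattern, compilable with gcc -O2 on x86-64; the nop bytes and section name here are chosen for illustration, not taken from the kernel:

```c
#include <stdio.h>

static inline __attribute__((always_inline)) int my_static_branch(void)
{
	asm goto("1: .byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"	/* 5-byte nop */
		 ".pushsection my_jump_table, \"aw\"\n\t"
		 ".quad 1b, %l[l_yes]\n\t"			/* site, target */
		 ".popsection\n\t"
		 : : : : l_yes);
	return 0;		/* straight-line fast path */
l_yes:
	return 1;		/* reached only once the nop is patched to a jump */
}

int main(void)
{
	/* Nothing patches the nop here, so the fast path is always taken. */
	printf("slow path taken: %d\n", my_static_branch());
	return 0;
}
```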
a/trunk/arch/s390/kernel/Makefile b/trunk/arch/s390/kernel/Makefile index 5ff15dacb571..64230bc392fa 100644 --- a/trunk/arch/s390/kernel/Makefile +++ b/trunk/arch/s390/kernel/Makefile @@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o + vdso.o vtime.o sysinfo.o nmi.o sclp.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/trunk/arch/s390/kernel/jump_label.c b/trunk/arch/s390/kernel/jump_label.c deleted file mode 100644 index 44cc06bedf77..000000000000 --- a/trunk/arch/s390/kernel/jump_label.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Jump label s390 support - * - * Copyright IBM Corp. 2011 - * Author(s): Jan Glauber - */ -#include -#include -#include -#include -#include - -#ifdef HAVE_JUMP_LABEL - -struct insn { - u16 opcode; - s32 offset; -} __packed; - -struct insn_args { - unsigned long *target; - struct insn *insn; - ssize_t size; -}; - -static int __arch_jump_label_transform(void *data) -{ - struct insn_args *args = data; - int rc; - - rc = probe_kernel_write(args->target, args->insn, args->size); - WARN_ON_ONCE(rc < 0); - return 0; -} - -void arch_jump_label_transform(struct jump_entry *entry, - enum jump_label_type type) -{ - struct insn_args args; - struct insn insn; - - if (type == JUMP_LABEL_ENABLE) { - /* brcl 15,offset */ - insn.opcode = 0xc0f4; - insn.offset = (entry->target - entry->code) >> 1; - } else { - /* brcl 0,0 */ - insn.opcode = 0xc004; - insn.offset = 0; - } - - args.target = (void *) entry->code; - args.insn = &insn; - args.size = JUMP_LABEL_NOP_SIZE; - - stop_machine(__arch_jump_label_transform, &args, NULL); -} - -#endif diff --git a/trunk/arch/sparc/include/asm/jump_label.h b/trunk/arch/sparc/include/asm/jump_label.h index fc73a82366f8..427d4684e0d2 100644 --- a/trunk/arch/sparc/include/asm/jump_label.h +++ b/trunk/arch/sparc/include/asm/jump_label.h @@ -7,20 +7,17 @@ #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:\n\t" - "nop\n\t" - "nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".align 4\n\t" - ".word 1b, %l[l_yes], %c0\n\t" - ".popsection \n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +#define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".pushsection __jump_table, \"a\"\n\t"\ + ".align 4\n\t" \ + ".word 1b, %l[" #label "], %c0\n\t" \ + ".popsection \n\t" \ + : : "i" (key) : : label);\ + } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 8cdd1e247975..13009d1af99a 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -4,6 +4,7 @@ #include #include #include +#include #include /* @@ -190,7 +191,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void text_poke_smp_batch(struct text_poke_param *params, int n); -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #define IDEAL_NOP_SIZE_5 5 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern void arch_init_ideal_nop5(void); 
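The deleted s390 jump_label.c patches a 6-byte brcl at each site. A small standalone sketch of the instruction it builds, using made-up addresses; the offset is expressed in halfwords, exactly as in the removed code:

```c
#include <stdint.h>
#include <stdio.h>

struct s390_insn {
	uint16_t opcode;
	int32_t offset;
} __attribute__((packed));

static struct s390_insn make_insn(uint64_t code, uint64_t target, int enable)
{
	struct s390_insn insn;

	if (enable) {
		insn.opcode = 0xc0f4;			/* brcl 15,<offset> */
		insn.offset = (int32_t)((target - code) >> 1);
	} else {
		insn.opcode = 0xc004;			/* brcl 0,0 == nop */
		insn.offset = 0;
	}
	return insn;
}

int main(void)
{
	struct s390_insn insn = make_insn(0x1000, 0x1060, 1);

	printf("opcode %#x, offset %d halfwords\n", insn.opcode, insn.offset);
	return 0;
}
```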
diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index 268c783ab1c0..db24c2278be0 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -38,10 +38,11 @@ extern void mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* - * addr is the address of the mcount call instruction. - * recordmcount does the necessary offset calculation. + * call mcount is "e8 <4 byte offset>" + * The addr points to the 4 byte offset and the caller of this + * function wants the pointer to e8. Simply subtract one. */ - return addr; + return addr - 1; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/trunk/arch/x86/include/asm/jump_label.h b/trunk/arch/x86/include/asm/jump_label.h index a32b18ce6ead..574dbc22893a 100644 --- a/trunk/arch/x86/include/asm/jump_label.h +++ b/trunk/arch/x86/include/asm/jump_label.h @@ -5,25 +5,20 @@ #include #include -#include #define JUMP_LABEL_NOP_SIZE 5 -#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" - -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:" - JUMP_LABEL_INITIAL_NOP - ".pushsection __jump_table, \"aw\" \n\t" - _ASM_ALIGN "\n\t" - _ASM_PTR "1b, %l[l_yes], %c0 \n\t" - ".popsection \n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" + +# define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:" \ + JUMP_LABEL_INITIAL_NOP \ + ".pushsection __jump_table, \"aw\" \n\t"\ + _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ + ".popsection \n\t" \ + : : "i" (key) : : label); \ + } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/arch/x86/include/asm/setup.h b/trunk/arch/x86/include/asm/setup.h index 647d8a06ce4f..db8aa19a08a2 100644 --- a/trunk/arch/x86/include/asm/setup.h +++ b/trunk/arch/x86/include/asm/setup.h @@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align); * executable.) 
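The restored ftrace_call_adjust() comment refers to the x86 near-call encoding: e8 followed by a 32-bit displacement taken relative to the end of the 5-byte instruction, so backing up one byte from the recorded offset address lands on the opcode. A quick arithmetic sketch with illustrative addresses only:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t rel32_addr = 0x401005;		/* address of the 4-byte offset */
	uint64_t call_addr  = rel32_addr - 1;	/* back up over the e8 opcode   */
	int32_t  rel32      = 0x1000;		/* example displacement         */
	uint64_t target     = call_addr + 5 + rel32;

	printf("call at %#" PRIx64 " -> mcount at %#" PRIx64 "\n",
	       call_addr, target);
	return 0;
}
```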
*/ #define RESERVE_BRK(name,sz) \ - static void __section(.discard.text) __used notrace \ + static void __section(.discard.text) __used \ __brk_reservation_fn_##name##__(void) { \ asm volatile ( \ ".pushsection .brk_reservation,\"aw\",@nobits;" \ diff --git a/trunk/arch/x86/include/asm/stacktrace.h b/trunk/arch/x86/include/asm/stacktrace.h index 70bbe39043a9..d7e89c83645d 100644 --- a/trunk/arch/x86/include/asm/stacktrace.h +++ b/trunk/arch/x86/include/asm/stacktrace.h @@ -37,6 +37,9 @@ print_context_stack_bp(struct thread_info *tinfo, /* Generic stack tracer with callbacks */ struct stacktrace_ops { + void (*warning)(void *data, char *msg); + /* msg must contain %s for the symbol */ + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 651454b0c811..4a234677e213 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); } -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #ifdef CONFIG_X86_64 unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index e2ced0074a45..173f3a3fa1a6 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - if (eax > 0) - c->x86_capability[9] = ebx; + c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 3a0338b4b179..e638689279d3 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -31,7 +31,6 @@ #include #include #include -#include #if 0 #undef wrmsrl @@ -364,18 +363,12 @@ x86_perf_event_update(struct perf_event *event) return new_raw_count; } +/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ static inline int x86_pmu_addr_offset(int index) { - int offset; - - /* offset = X86_FEATURE_PERFCTR_CORE ? 
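struct stacktrace_ops regains its warning and warning_symbol callbacks here. As a user-space analogue (not the kernel API), the table-of-callbacks shape the stack walker consumes looks roughly like this:

```c
#include <stdio.h>

struct walk_ops {
	void (*warning)(void *data, char *msg);
	/* msg must contain %s for the symbol */
	void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
	int  (*stack)(void *data, char *name);
	void (*address)(void *data, unsigned long addr, int reliable);
};

static void my_warning(void *data, char *msg)
{
	printf("%s%s\n", (char *)data, msg);
}

static void my_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	(void)msg;
	printf("%ssymbol at %#lx\n", (char *)data, symbol);
}

static int my_stack(void *data, char *name)
{
	printf("%s <%s>\n", (char *)data, name);
	return 0;				/* non-negative: keep dumping */
}

static void my_address(void *data, unsigned long addr, int reliable)
{
	printf("%s[<%016lx>]%s\n", (char *)data, addr, reliable ? "" : " ?");
}

static const struct walk_ops ops = {
	.warning	= my_warning,
	.warning_symbol	= my_warning_symbol,
	.stack		= my_stack,
	.address	= my_address,
};

int main(void)
{
	void *tag = "PRE ";

	ops.stack(tag, "IRQ");
	ops.address(tag, 0xffffffff81000000UL, 1);
	ops.warning(tag, "corrupted frame");
	return 0;
}
```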
index << 1 : index */ - alternative_io(ASM_NOP2, - "shll $1, %%eax", - X86_FEATURE_PERFCTR_CORE, - "=a" (offset), - "a" (index)); - - return offset; + if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) + return index << 1; + return index; } static inline unsigned int x86_pmu_config_addr(int index) @@ -1773,6 +1766,17 @@ static struct pmu pmu = { * callchain support */ +static void +backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + static int backtrace_stack(void *data, char *name) { return 0; @@ -1786,6 +1790,8 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack_bp, diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219e..cf4e369cea67 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c @@ -96,14 +96,12 @@ static __initconst const u64 amd_hw_cache_event_ids */ static const u64 amd_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, }; static u64 amd_pmu_event_map(int hw_event) diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 41178c826c48..447a28de6f09 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -36,7 +36,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static struct event_constraint intel_core_event_constraints[] __read_mostly = +static struct event_constraint intel_core_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_core2_event_constraints[] __read_mostly = +static struct event_constraint intel_core2_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = +static struct event_constraint intel_nehalem_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END 
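The x86_pmu_addr_offset() hunk above boils down to doubling the index when the core-PMU feature interleaves event-select and counter MSRs. A minimal sketch; the MSR base below is only a placeholder:

```c
#include <stdio.h>

static int pmu_addr_offset(int index, int has_perfctr_core)
{
	return has_perfctr_core ? index << 1 : index;
}

int main(void)
{
	unsigned int base = 0xc0010000;		/* stand-in event-select base */
	int idx;

	for (idx = 0; idx < 4; idx++)
		printf("counter %d -> MSR %#x (core PMU: %#x)\n", idx,
		       base + pmu_addr_offset(idx, 0),
		       base + pmu_addr_offset(idx, 1));
	return 0;
}
```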
}; -static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = +static struct extra_reg intel_nehalem_extra_regs[] = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = +static struct event_constraint intel_nehalem_percore_constraints[] = { INTEL_EVENT_CONSTRAINT(0xb7, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_westmere_event_constraints[] __read_mostly = +static struct event_constraint intel_westmere_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly EVENT_CONSTRAINT_END }; -static struct event_constraint intel_snb_event_constraints[] __read_mostly = +static struct event_constraint intel_snb_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_westmere_extra_regs[] __read_mostly = +static struct extra_reg intel_westmere_extra_regs[] = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = +static struct event_constraint intel_westmere_percore_constraints[] = { INTEL_EVENT_CONSTRAINT(0xb7, 0), INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_gen_event_constraints[] __read_mostly = +static struct event_constraint intel_gen_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -1440,11 +1440,6 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; - /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - if (ebx & 0x40) { /* * Erratum AAJ80 detected, we work it around by using @@ -1485,12 +1480,6 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; - - /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - pr_cont("Westmere events, "); break; @@ -1502,12 +1491,6 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; - - /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; - pr_cont("SandyBridge events, "); break; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c index 
ead584fb6a7d..e93fcd55fae1 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c @@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = { .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, .escr_emask = - P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), + P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_X87_ASSIST] = { @@ -912,7 +912,8 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - perf_sample_data_init(&data, 0); + data.addr = 0; + data.raw = NULL; cpuc = &__get_cpu_var(cpu_hw_events); @@ -1196,7 +1197,7 @@ static __init int p4_pmu_init(void) { unsigned int low, high; - /* If we get stripped -- indexing fails */ + /* If we get stripped -- indexig fails */ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); rdmsr(MSR_IA32_MISC_ENABLE, low, high); diff --git a/trunk/arch/x86/kernel/dumpstack.c b/trunk/arch/x86/kernel/dumpstack.c index f478ff6877ef..e2a3f0606da4 100644 --- a/trunk/arch/x86/kernel/dumpstack.c +++ b/trunk/arch/x86/kernel/dumpstack.c @@ -135,6 +135,20 @@ print_context_stack_bp(struct thread_info *tinfo, } EXPORT_SYMBOL_GPL(print_context_stack_bp); + +static void +print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + printk(data); + print_symbol(msg, symbol); + printk("\n"); +} + +static void print_trace_warning(void *data, char *msg) +{ + printk("%s%s\n", (char *)data, msg); +} + static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); @@ -152,6 +166,8 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, .address = print_trace_address, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/kernel/module.c b/trunk/arch/x86/kernel/module.c index 52f256f2cc81..ab23f1ad4bf1 100644 --- a/trunk/arch/x86/kernel/module.c +++ b/trunk/arch/x86/kernel/module.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/x86/kernel/stacktrace.c b/trunk/arch/x86/kernel/stacktrace.c index 55d9bc03f696..6515733a289d 100644 --- a/trunk/arch/x86/kernel/stacktrace.c +++ b/trunk/arch/x86/kernel/stacktrace.c @@ -9,6 +9,15 @@ #include #include +static void save_stack_warning(void *data, char *msg) +{ +} + +static void +save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) +{ +} + static int save_stack_stack(void *data, char *name) { return 0; @@ -44,12 +53,16 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address, .walk_stack = print_context_stack, }; static const struct stacktrace_ops save_stack_ops_nosched = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address_nosched, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/oprofile/backtrace.c b/trunk/arch/x86/oprofile/backtrace.c index a5b64ab4cd6e..2d49d4e19a36 100644 --- a/trunk/arch/x86/oprofile/backtrace.c +++ b/trunk/arch/x86/oprofile/backtrace.c @@ -16,6 +16,17 @@ #include #include +static void 
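The P4 hunk open-codes what perf_sample_data_init() did. A tiny sketch of why the helper form is preferable; the field names mirror the struct shown later in perf_event.h, everything else is illustrative:

```c
#include <stddef.h>
#include <stdio.h>

struct sample_data {
	unsigned long long addr;
	void *raw;
};

static inline void sample_data_init(struct sample_data *data,
				    unsigned long long addr)
{
	/* One place to clear ->raw: call sites cannot forget it. */
	data->addr = addr;
	data->raw = NULL;
}

int main(void)
{
	struct sample_data data;

	sample_data_init(&data, 0);
	printf("addr=%llu raw=%p\n", data.addr, data.raw);
	return 0;
}
```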
backtrace_warning_symbol(void *data, char *msg, + unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + static int backtrace_stack(void *data, char *name) { /* Yes, we want all stacks */ @@ -31,6 +42,8 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack, diff --git a/trunk/include/asm-generic/vmlinux.lds.h b/trunk/include/asm-generic/vmlinux.lds.h index 75a8692d144f..bd297a20ab98 100644 --- a/trunk/include/asm-generic/vmlinux.lds.h +++ b/trunk/include/asm-generic/vmlinux.lds.h @@ -170,10 +170,6 @@ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ - . = ALIGN(8); \ - VMLINUX_SYMBOL(__start___jump_table) = .; \ - *(__jump_table) \ - VMLINUX_SYMBOL(__stop___jump_table) = .; \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___verbose) = .; \ *(__verbose) \ @@ -232,6 +228,8 @@ \ BUG_TABLE \ \ + JUMP_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -591,6 +589,14 @@ #define BUG_TABLE #endif +#define JUMP_TABLE \ + . = ALIGN(8); \ + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + } + #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . = ALIGN(4); \ diff --git a/trunk/include/linux/dynamic_debug.h b/trunk/include/linux/dynamic_debug.h index e747ecd48e1c..0c9653f11c18 100644 --- a/trunk/include/linux/dynamic_debug.h +++ b/trunk/include/linux/dynamic_debug.h @@ -1,6 +1,8 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H +#include + /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They * use independent hash functions, to reduce the chance of false positives. 
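The JUMP_TABLE linker-script macro added above brackets the __jump_table section with __start___jump_table/__stop___jump_table. The same mechanism can be sketched in user space, relying on GNU ld's automatic __start_/__stop_ symbols for sections whose names are valid C identifiers; names and values here are invented:

```c
#include <stdio.h>

struct jump_entry {
	unsigned long code;
	unsigned long target;
	unsigned long key;
};

#define JUMP_ENTRY(id, c, t, k)						\
	static const struct jump_entry entry_##id			\
	__attribute__((section("demo_jump_table"), used)) = { c, t, k }

JUMP_ENTRY(a, 0x10, 0x20, 1);
JUMP_ENTRY(b, 0x30, 0x40, 2);

extern const struct jump_entry __start_demo_jump_table[];
extern const struct jump_entry __stop_demo_jump_table[];

int main(void)
{
	const struct jump_entry *iter;

	for (iter = __start_demo_jump_table;
	     iter < __stop_demo_jump_table; iter++)
		printf("code=%#lx target=%#lx key=%lu\n",
		       iter->code, iter->target, iter->key);
	return 0;
}
```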
diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 32047449b309..ca29e03c1fac 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -147,9 +147,11 @@ extern int ftrace_text_reserved(void *start, void *end); enum { FTRACE_FL_FREE = (1 << 0), - FTRACE_FL_FILTER = (1 << 1), - FTRACE_FL_ENABLED = (1 << 2), - FTRACE_FL_NOTRACE = (1 << 3), + FTRACE_FL_FAILED = (1 << 1), + FTRACE_FL_FILTER = (1 << 2), + FTRACE_FL_ENABLED = (1 << 3), + FTRACE_FL_NOTRACE = (1 << 4), + FTRACE_FL_CONVERTED = (1 << 5), }; struct dyn_ftrace { diff --git a/trunk/include/linux/init.h b/trunk/include/linux/init.h index 9146f39cdddf..577671c55153 100644 --- a/trunk/include/linux/init.h +++ b/trunk/include/linux/init.h @@ -79,29 +79,29 @@ #define __exitused __used #endif -#define __exit __section(.exit.text) __exitused __cold notrace +#define __exit __section(.exit.text) __exitused __cold /* Used for HOTPLUG */ -#define __devinit __section(.devinit.text) __cold notrace +#define __devinit __section(.devinit.text) __cold #define __devinitdata __section(.devinit.data) #define __devinitconst __section(.devinit.rodata) -#define __devexit __section(.devexit.text) __exitused __cold notrace +#define __devexit __section(.devexit.text) __exitused __cold #define __devexitdata __section(.devexit.data) #define __devexitconst __section(.devexit.rodata) /* Used for HOTPLUG_CPU */ -#define __cpuinit __section(.cpuinit.text) __cold notrace +#define __cpuinit __section(.cpuinit.text) __cold #define __cpuinitdata __section(.cpuinit.data) #define __cpuinitconst __section(.cpuinit.rodata) -#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace +#define __cpuexit __section(.cpuexit.text) __exitused __cold #define __cpuexitdata __section(.cpuexit.data) #define __cpuexitconst __section(.cpuexit.rodata) /* Used for MEMORY_HOTPLUG */ -#define __meminit __section(.meminit.text) __cold notrace +#define __meminit __section(.meminit.text) __cold #define __meminitdata __section(.meminit.data) #define __meminitconst __section(.meminit.rodata) -#define __memexit __section(.memexit.text) __exitused __cold notrace +#define __memexit __section(.memexit.text) __exitused __cold #define __memexitdata __section(.memexit.data) #define __memexitconst __section(.memexit.rodata) diff --git a/trunk/include/linux/jump_label.h b/trunk/include/linux/jump_label.h index 83e745f3ead7..7880f18e4b86 100644 --- a/trunk/include/linux/jump_label.h +++ b/trunk/include/linux/jump_label.h @@ -1,43 +1,20 @@ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H -#include -#include - #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) - -struct jump_label_key { - atomic_t enabled; - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct jump_label_mod *next; -#endif -}; - # include # define HAVE_JUMP_LABEL #endif enum jump_label_type { - JUMP_LABEL_DISABLE = 0, JUMP_LABEL_ENABLE, + JUMP_LABEL_DISABLE }; struct module; #ifdef HAVE_JUMP_LABEL -#ifdef CONFIG_MODULES -#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} -#else -#define JUMP_LABEL_INIT {{ 0 }, NULL} -#endif - -static __always_inline bool static_branch(struct jump_label_key *key) -{ - return arch_static_branch(key); -} - extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; @@ -46,37 +23,37 @@ extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_text_poke_early(jump_label_t addr); -extern int 
jump_label_text_reserved(void *start, void *end); -extern void jump_label_inc(struct jump_label_key *key); -extern void jump_label_dec(struct jump_label_key *key); -extern bool jump_label_enabled(struct jump_label_key *key); +extern void jump_label_update(unsigned long key, enum jump_label_type type); extern void jump_label_apply_nops(struct module *mod); +extern int jump_label_text_reserved(void *start, void *end); -#else +#define jump_label_enable(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); -#include +#define jump_label_disable(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); -#define JUMP_LABEL_INIT {ATOMIC_INIT(0)} +#else -struct jump_label_key { - atomic_t enabled; -}; +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(*key)) \ + goto label; \ +} while (0) -static __always_inline bool static_branch(struct jump_label_key *key) -{ - if (unlikely(atomic_read(&key->enabled))) - return true; - return false; -} +#define jump_label_enable(cond_var) \ +do { \ + *(cond_var) = 1; \ +} while (0) -static inline void jump_label_inc(struct jump_label_key *key) -{ - atomic_inc(&key->enabled); -} +#define jump_label_disable(cond_var) \ +do { \ + *(cond_var) = 0; \ +} while (0) -static inline void jump_label_dec(struct jump_label_key *key) +static inline int jump_label_apply_nops(struct module *mod) { - atomic_dec(&key->enabled); + return 0; } static inline int jump_label_text_reserved(void *start, void *end) @@ -87,16 +64,16 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline bool jump_label_enabled(struct jump_label_key *key) -{ - return !!atomic_read(&key->enabled); -} - -static inline int jump_label_apply_nops(struct module *mod) -{ - return 0; -} - #endif +#define COND_STMT(key, stmt) \ +do { \ + __label__ jl_enabled; \ + JUMP_LABEL(key, jl_enabled); \ + if (0) { \ +jl_enabled: \ + stmt; \ + } \ +} while (0) + #endif diff --git a/trunk/include/linux/jump_label_ref.h b/trunk/include/linux/jump_label_ref.h new file mode 100644 index 000000000000..e5d012ad92c6 --- /dev/null +++ b/trunk/include/linux/jump_label_ref.h @@ -0,0 +1,44 @@ +#ifndef _LINUX_JUMP_LABEL_REF_H +#define _LINUX_JUMP_LABEL_REF_H + +#include +#include + +#ifdef HAVE_JUMP_LABEL + +static inline void jump_label_inc(atomic_t *key) +{ + if (atomic_add_return(1, key) == 1) + jump_label_enable(key); +} + +static inline void jump_label_dec(atomic_t *key) +{ + if (atomic_dec_and_test(key)) + jump_label_disable(key); +} + +#else /* !HAVE_JUMP_LABEL */ + +static inline void jump_label_inc(atomic_t *key) +{ + atomic_inc(key); +} + +static inline void jump_label_dec(atomic_t *key) +{ + atomic_dec(key); +} + +#undef JUMP_LABEL +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(__builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(key), atomic_t *), \ + atomic_read((atomic_t *)(key)), *(key)))) \ + goto label; \ +} while (0) + +#endif /* HAVE_JUMP_LABEL */ + +#endif /* _LINUX_JUMP_LABEL_REF_H */ diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 3412684ce5d5..ee9f1e782800 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -2,8 +2,8 @@ * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner - * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 
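The restored !HAVE_JUMP_LABEL fallback reduces the key to a plain int and COND_STMT() to goto plumbing. A self-contained sketch of how they combine; unlikely() is dropped to stay standalone, and __label__ is a GCC local-label extension:

```c
#include <stdio.h>

#define JUMP_LABEL(key, label)			\
do {						\
	if (*(key))				\
		goto label;			\
} while (0)

#define COND_STMT(key, stmt)			\
do {						\
	__label__ jl_enabled;			\
	JUMP_LABEL(key, jl_enabled);		\
	if (0) {				\
jl_enabled:					\
		stmt;				\
	}					\
} while (0)

static int trace_key;

static void do_trace(void)
{
	puts("traced");
}

int main(void)
{
	COND_STMT(&trace_key, do_trace());	/* key off: nothing printed */
	trace_key = 1;				/* jump_label_enable() analogue */
	COND_STMT(&trace_key, do_trace());	/* key on: do_trace() runs */
	return 0;
}
```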
2008-2009, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. * @@ -52,8 +52,6 @@ enum perf_hw_id { PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_MAX, /* non-ABI */ }; @@ -470,9 +468,9 @@ enum perf_callchain_context { PERF_CONTEXT_MAX = (__u64)-4095, }; -#define PERF_FLAG_FD_NO_GROUP (1U << 0) -#define PERF_FLAG_FD_OUTPUT (1U << 1) -#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) +#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ #ifdef __KERNEL__ /* @@ -486,9 +484,9 @@ enum perf_callchain_context { #endif struct perf_guest_info_callbacks { - int (*is_in_guest)(void); - int (*is_user_mode)(void); - unsigned long (*get_guest_ip)(void); + int (*is_in_guest) (void); + int (*is_user_mode) (void); + unsigned long (*get_guest_ip) (void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -507,7 +505,7 @@ struct perf_guest_info_callbacks { #include #include #include -#include +#include #include #include @@ -654,19 +652,19 @@ struct pmu { * Start the transaction, after this ->add() doesn't need to * do schedulability tests. */ - void (*start_txn) (struct pmu *pmu); /* optional */ + void (*start_txn) (struct pmu *pmu); /* optional */ /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. */ - int (*commit_txn) (struct pmu *pmu); /* optional */ + int (*commit_txn) (struct pmu *pmu); /* optional */ /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. */ - void (*cancel_txn) (struct pmu *pmu); /* optional */ + void (*cancel_txn) (struct pmu *pmu); /* optional */ }; /** @@ -714,15 +712,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, struct pt_regs *regs); enum perf_group_flag { - PERF_GROUP_SOFTWARE = 0x1, + PERF_GROUP_SOFTWARE = 0x1, }; -#define SWEVENT_HLIST_BITS 8 -#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) +#define SWEVENT_HLIST_BITS 8 +#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { - struct hlist_head heads[SWEVENT_HLIST_SIZE]; - struct rcu_head rcu_head; + struct hlist_head heads[SWEVENT_HLIST_SIZE]; + struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x01 @@ -735,13 +733,13 @@ struct swevent_hlist { * This is a per-cpu dynamically allocated data structure. */ struct perf_cgroup_info { - u64 time; - u64 timestamp; + u64 time; + u64 timestamp; }; struct perf_cgroup { - struct cgroup_subsys_state css; - struct perf_cgroup_info *info; /* timing info, one per cpu */ + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; /* timing info, one per cpu */ }; #endif @@ -925,7 +923,7 @@ struct perf_event_context { /* * Number of contexts where an event can trigger: - * task, softirq, hardirq, nmi. + * task, softirq, hardirq, nmi. 
*/ #define PERF_NR_CONTEXTS 4 @@ -1003,7 +1001,8 @@ struct perf_sample_data { struct perf_raw_record *raw; }; -static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) +static inline +void perf_sample_data_init(struct perf_sample_data *data, u64 addr) { data->addr = addr; data->raw = NULL; @@ -1035,12 +1034,13 @@ static inline int is_software_event(struct perf_event *event) return event->pmu->task_ctx_nr == perf_sw_context; } -extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs -static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +static inline void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* @@ -1063,24 +1063,26 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; - if (static_branch(&perf_swevent_enabled[event_id])) { - if (!regs) { - perf_fetch_caller_regs(&hot_regs); - regs = &hot_regs; - } - __perf_sw_event(event_id, nr, nmi, regs, addr); + JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); + return; + +have_event: + if (!regs) { + perf_fetch_caller_regs(&hot_regs); + regs = &hot_regs; } + __perf_sw_event(event_id, nr, nmi, regs, addr); } -extern struct jump_label_key perf_sched_events; +extern atomic_t perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *task) { - if (static_branch(&perf_sched_events)) - __perf_event_task_sched_in(task); + COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); } -static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) +static inline +void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); @@ -1098,10 +1100,14 @@ extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); -extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); -extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs); + -static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline void +perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) { if (entry->nr < PERF_MAX_STACK_DEPTH) entry->ip[entry->nr++] = ip; @@ -1137,9 +1143,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record, extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags -# define perf_misc_flags(regs) \ - (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) -# define perf_instruction_pointer(regs) instruction_pointer(regs) +#define perf_misc_flags(regs) (user_mode(regs) ? 
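perf_swevent_enabled and perf_sched_events go back to being atomic_t counters driven by jump_label_inc()/jump_label_dec() from jump_label_ref.h: the first user of an event type flips the key on, the last one flips it off. A sketch of that edge-triggered refcount with plain ints standing in for atomic_t, so it only illustrates the enable/disable edges and is not SMP-safe:

```c
#include <stdio.h>

static int key;			/* stand-in for the patched branch state */
static int refcount;		/* stand-in for atomic_t */

static void my_jump_label_inc(void)
{
	if (++refcount == 1)
		key = 1;	/* jump_label_enable() analogue */
}

static void my_jump_label_dec(void)
{
	if (--refcount == 0)
		key = 0;	/* jump_label_disable() analogue */
}

int main(void)
{
	my_jump_label_inc();	/* first software event created */
	my_jump_label_inc();	/* second user, key stays on */
	my_jump_label_dec();
	printf("key=%d refcount=%d\n", key, refcount);	/* key=1 refcount=1 */
	my_jump_label_dec();
	printf("key=%d refcount=%d\n", key, refcount);	/* key=0 refcount=0 */
	return 0;
}
```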
PERF_RECORD_MISC_USER : \ + PERF_RECORD_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) #endif extern int perf_output_begin(struct perf_output_handle *handle, @@ -1174,9 +1180,9 @@ static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline int perf_unregister_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } @@ -1189,22 +1195,23 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline void perf_event_task_tick(void) { } #endif -#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) /* * This has to have a higher priority than migration_notifier in sched.c. */ -#define perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb __cpuinitdata = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)smp_processor_id()); \ - register_cpu_notifier(&fn##_nb); \ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h index d530a4460a0b..97c84a58efb8 100644 --- a/trunk/include/linux/tracepoint.h +++ b/trunk/include/linux/tracepoint.h @@ -29,7 +29,7 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ - struct jump_label_key key; + int state; /* State. */ void (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; @@ -146,7 +146,9 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - if (static_branch(&__tracepoint_##name.key)) \ + JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ + return; \ +do_trace: \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ @@ -174,14 +176,14 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. 
*/ -#define DEFINE_TRACE_FN(name, reg, unreg) \ - static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"))) = \ - { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ +#define DEFINE_TRACE_FN(name, reg, unreg) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"))) = \ + { __tpstrtab_##name, 0, reg, unreg, NULL }; \ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name; #define DEFINE_TRACE(name) \ diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile index e9cf19155b46..85cbfb31e73e 100644 --- a/trunk/kernel/Makefile +++ b/trunk/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg +CFLAGS_REMOVE_perf_event.o = -pg CFLAGS_REMOVE_irq_work.o = -pg endif @@ -102,9 +103,8 @@ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_IRQ_WORK) += irq_work.o - -obj-$(CONFIG_PERF_EVENTS) += events/ - +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_PADATA) += padata.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/trunk/kernel/events/Makefile b/trunk/kernel/events/Makefile deleted file mode 100644 index 1ce23d3d8394..000000000000 --- a/trunk/kernel/events/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_core.o = -pg -endif - -obj-y := core.o -obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/trunk/kernel/events/hw_breakpoint.c b/trunk/kernel/hw_breakpoint.c similarity index 100% rename from trunk/kernel/events/hw_breakpoint.c rename to trunk/kernel/hw_breakpoint.c diff --git a/trunk/kernel/jump_label.c b/trunk/kernel/jump_label.c index 74d1c099fbd1..3b79bd938330 100644 --- a/trunk/kernel/jump_label.c +++ b/trunk/kernel/jump_label.c @@ -2,23 +2,43 @@ * jump label support * * Copyright (C) 2009 Jason Baron - * Copyright (C) 2011 Peter Zijlstra * */ +#include #include #include #include #include +#include #include #include #include -#include #ifdef HAVE_JUMP_LABEL +#define JUMP_LABEL_HASH_BITS 6 +#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) +static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; + /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); +struct jump_label_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + /* hang modules off here */ + struct hlist_head modules; + unsigned long key; +}; + +struct jump_label_module_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + struct module *mod; +}; + void jump_label_lock(void) { mutex_lock(&jump_label_mutex); @@ -29,11 +49,6 @@ void jump_label_unlock(void) mutex_unlock(&jump_label_mutex); } -bool jump_label_enabled(struct jump_label_key *key) -{ - return !!atomic_read(&key->enabled); -} - static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; @@ 
-49,7 +64,7 @@ static int jump_label_cmp(const void *a, const void *b) } static void -jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) +sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; @@ -58,25 +73,118 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } -static void jump_label_update(struct jump_label_key *key, int enable); +static struct jump_label_entry *get_jump_label_entry(jump_label_t key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct jump_label_entry *e; + u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); + + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (key == e->key) + return e; + } + return NULL; +} -void jump_label_inc(struct jump_label_key *key) +static struct jump_label_entry * +add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) { - if (atomic_inc_not_zero(&key->enabled)) - return; + struct hlist_head *head; + struct jump_label_entry *e; + u32 hash; + + e = get_jump_label_entry(key); + if (e) + return ERR_PTR(-EEXIST); + + e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + hash = jhash((void *)&key, sizeof(jump_label_t), 0); + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + e->key = key; + e->table = table; + e->nr_entries = nr_entries; + INIT_HLIST_HEAD(&(e->modules)); + hlist_add_head(&e->hlist, head); + return e; +} - jump_label_lock(); - if (atomic_add_return(1, &key->enabled) == 1) - jump_label_update(key, JUMP_LABEL_ENABLE); - jump_label_unlock(); +static int +build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + int count; + + sort_jump_label_entries(start, stop); + iter = start; + while (iter < stop) { + entry = get_jump_label_entry(iter->key); + if (!entry) { + iter_begin = iter; + count = 0; + while ((iter < stop) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + entry = add_jump_label_entry(iter_begin->key, + count, iter_begin); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } else { + WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); + return -1; + } + } + return 0; } -void jump_label_dec(struct jump_label_key *key) +/*** + * jump_label_update - update jump label text + * @key - key value associated with a a jump label + * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE + * + * Will enable/disable the jump for jump label @key, depending on the + * value of @type. 
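get_jump_label_entry()/add_jump_label_entry() above keep a 64-bucket hash chained per key value. A user-space sketch of the same lookup/insert shape, with jhash() swapped for a trivial multiplicative hash (assuming 64-bit unsigned long) and error codes simplified:

```c
#include <stdio.h>
#include <stdlib.h>

#define JL_HASH_BITS	6
#define JL_TABLE_SIZE	(1 << JL_HASH_BITS)

struct label_entry {
	unsigned long key;
	int nr_entries;
	struct label_entry *next;	/* bucket chain */
};

static struct label_entry *table[JL_TABLE_SIZE];

static unsigned int hash_key(unsigned long key)
{
	/* top JL_HASH_BITS bits of a Fibonacci-style mix */
	return (unsigned int)((key * 0x9e3779b97f4a7c15UL) >> (64 - JL_HASH_BITS));
}

static struct label_entry *get_entry(unsigned long key)
{
	struct label_entry *e;

	for (e = table[hash_key(key)]; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static struct label_entry *add_entry(unsigned long key, int nr_entries)
{
	unsigned int bucket = hash_key(key);
	struct label_entry *e;

	if (get_entry(key))
		return NULL;		/* -EEXIST in the original */
	e = malloc(sizeof(*e));
	if (!e)
		return NULL;		/* -ENOMEM in the original */
	e->key = key;
	e->nr_entries = nr_entries;
	e->next = table[bucket];
	table[bucket] = e;
	return e;
}

int main(void)
{
	add_entry(0xffffffff81234560UL, 3);
	printf("lookup hit: %d\n", get_entry(0xffffffff81234560UL) != NULL);
	return 0;
}
```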
+ * + */ + +void jump_label_update(unsigned long key, enum jump_label_type type) { - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) - return; + struct jump_entry *iter; + struct jump_label_entry *entry; + struct hlist_node *module_node; + struct jump_label_module_entry *e_module; + int count; - jump_label_update(key, JUMP_LABEL_DISABLE); + jump_label_lock(); + entry = get_jump_label_entry((jump_label_t)key); + if (entry) { + count = entry->nr_entries; + iter = entry->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + /* eanble/disable jump labels in modules */ + hlist_for_each_entry(e_module, module_node, &(entry->modules), + hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (iter->key && + kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + } + } jump_label_unlock(); } @@ -89,33 +197,77 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end) return 0; } -static int __jump_label_text_reserved(struct jump_entry *iter_start, - struct jump_entry *iter_stop, void *start, void *end) +#ifdef CONFIG_MODULES + +static int module_conflict(void *start, void *end) { + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; struct jump_entry *iter; + int i, count; + int conflict = 0; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } + iter++; + } + } + } + } +out: + return conflict; +} + +#endif + +/*** + * jump_label_text_reserved - check if addr range is reserved + * @start: start text addr + * @end: end text addr + * + * checks if the text addr located between @start and @end + * overlaps with any of the jump label patch addresses. Code + * that wants to modify kernel text should first verify that + * it does not overlap with any of the jump label addresses. + * Caller must hold jump_label_mutex. + * + * returns 1 if there is an overlap, 0 otherwise + */ +int jump_label_text_reserved(void *start, void *end) +{ + struct jump_entry *iter; + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __start___jump_table; + int conflict = 0; iter = iter_start; while (iter < iter_stop) { - if (addr_conflict(iter, start, end)) - return 1; + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } iter++; } - return 0; -} - -static void __jump_label_update(struct jump_label_key *key, - struct jump_entry *entry, int enable) -{ - for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { - /* - * entry->code set to 0 invalidates module init text sections - * kernel_text_address() verifies we are not in core kernel - * init code, see jump_label_invalidate_module_init(). 
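jump_label_text_reserved() reduces to an interval-overlap test between a text range and each patch site. A simplified sketch of that check, with bounds handling trimmed relative to the kernel's addr_conflict():

```c
#include <stdio.h>

#define NOP_SIZE 5

static int addr_conflict(unsigned long code, unsigned long start,
			 unsigned long end)
{
	/* Does [code, code + NOP_SIZE) intersect [start, end]? */
	return code <= end && code + NOP_SIZE > start;
}

int main(void)
{
	/* patch site occupies 0x1000..0x1004 */
	printf("%d\n", addr_conflict(0x1000, 0x0ff0, 0x1002));	/* 1: overlaps */
	printf("%d\n", addr_conflict(0x1000, 0x1005, 0x1100));	/* 0: disjoint */
	return 0;
}
```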
- */ - if (entry->code && kernel_text_address(entry->code)) - arch_jump_label_transform(entry, enable); - } + /* now check modules */ +#ifdef CONFIG_MODULES + conflict = module_conflict(start, end); +#endif +out: + return conflict; } /* @@ -125,173 +277,142 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr) { } -static __init int jump_label_init(void) +static __init int init_jump_label(void) { + int ret; struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; - struct jump_label_key *key = NULL; struct jump_entry *iter; jump_label_lock(); - jump_label_sort_entries(iter_start, iter_stop); - - for (iter = iter_start; iter < iter_stop; iter++) { + ret = build_jump_label_hashtable(__start___jump_table, + __stop___jump_table); + iter = iter_start; + while (iter < iter_stop) { arch_jump_label_text_poke_early(iter->code); - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - atomic_set(&key->enabled, 0); - key->entries = iter; -#ifdef CONFIG_MODULES - key->next = NULL; -#endif + iter++; } jump_label_unlock(); - - return 0; + return ret; } -early_initcall(jump_label_init); +early_initcall(init_jump_label); #ifdef CONFIG_MODULES -struct jump_label_mod { - struct jump_label_mod *next; - struct jump_entry *entries; - struct module *mod; -}; - -static int __jump_label_mod_text_reserved(void *start, void *end) -{ - struct module *mod; - - mod = __module_text_address((unsigned long)start); - if (!mod) - return 0; - - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); - - return __jump_label_text_reserved(mod->jump_entries, - mod->jump_entries + mod->num_jump_entries, - start, end); -} - -static void __jump_label_mod_update(struct jump_label_key *key, int enable) -{ - struct jump_label_mod *mod = key->next; - - while (mod) { - __jump_label_update(key, mod->entries, enable); - mod = mod->next; - } -} - -/*** - * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() - * @mod: module to patch - * - * Allow for run-time selection of the optimal nops. Before the module - * loads patch these with arch_get_jump_label_nop(), which is specified by - * the arch specific jump label code. 
- */ -void jump_label_apply_nops(struct module *mod) +static struct jump_label_module_entry * +add_jump_label_module_entry(struct jump_label_entry *entry, + struct jump_entry *iter_begin, + int count, struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - - /* if the module doesn't have jump label entries, just return */ - if (iter_start == iter_stop) - return; - - for (iter = iter_start; iter < iter_stop; iter++) - arch_jump_label_text_poke_early(iter->code); + struct jump_label_module_entry *e; + + e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + e->mod = mod; + e->nr_entries = count; + e->table = iter_begin; + hlist_add_head(&e->hlist, &entry->modules); + return e; } -static int jump_label_add_module(struct module *mod) +static int add_jump_label_module(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm; + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + struct jump_label_module_entry *module_entry; + int count; /* if the module doesn't have jump label entries, just return */ - if (iter_start == iter_stop) + if (!mod->num_jump_entries) return 0; - jump_label_sort_entries(iter_start, iter_stop); - - for (iter = iter_start; iter < iter_stop; iter++) { - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - - if (__module_address(iter->key) == mod) { - atomic_set(&key->enabled, 0); - key->entries = iter; - key->next = NULL; - continue; + sort_jump_label_entries(mod->jump_entries, + mod->jump_entries + mod->num_jump_entries); + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + entry = get_jump_label_entry(iter->key); + iter_begin = iter; + count = 0; + while ((iter < mod->jump_entries + mod->num_jump_entries) && + (iter->key == iter_begin->key)) { + iter++; + count++; } - - jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); - if (!jlm) - return -ENOMEM; - - jlm->mod = mod; - jlm->entries = iter; - jlm->next = key->next; - key->next = jlm; - - if (jump_label_enabled(key)) - __jump_label_update(key, iter, JUMP_LABEL_ENABLE); + if (!entry) { + entry = add_jump_label_entry(iter_begin->key, 0, NULL); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } + module_entry = add_jump_label_module_entry(entry, iter_begin, + count, mod); + if (IS_ERR(module_entry)) + return PTR_ERR(module_entry); } - return 0; } -static void jump_label_del_module(struct module *mod) +static void remove_jump_label_module(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm, **prev; + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + int i; - for (iter = iter_start; iter < iter_stop; iter++) { - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - - if (__module_address(iter->key) == mod) - continue; - - prev = &key->next; - jlm = key->next; - - while (jlm && 
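add_jump_label_module() relies on sorting the entries by key and then walking runs of equal keys, the same walk build_jump_label_hashtable() uses for the core table. The sort-and-group idiom in miniature, with made-up values:

```c
#include <stdio.h>
#include <stdlib.h>

struct jump_entry {
	unsigned long code;
	unsigned long target;
	unsigned long key;
};

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a, *jeb = b;

	if (jea->key < jeb->key)
		return -1;
	if (jea->key > jeb->key)
		return 1;
	return 0;
}

int main(void)
{
	struct jump_entry tab[] = {
		{ 0x10, 0x20, 2 }, { 0x30, 0x40, 1 }, { 0x50, 0x60, 2 },
	};
	size_t n = sizeof(tab) / sizeof(tab[0]), i = 0;

	qsort(tab, n, sizeof(tab[0]), jump_label_cmp);

	while (i < n) {
		size_t begin = i, count = 0;

		/* Count the contiguous run sharing this key. */
		while (i < n && tab[i].key == tab[begin].key) {
			i++;
			count++;
		}
		printf("key %lu: %zu contiguous entries\n",
		       tab[begin].key, count);
	}
	return 0;
}
```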
jlm->mod != mod) { - prev = &jlm->next; - jlm = jlm->next; - } + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; - if (jlm) { - *prev = jlm->next; - kfree(jlm); + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod == mod) { + hlist_del(&e_module->hlist); + kfree(e_module); + } + } + if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { + hlist_del(&e->hlist); + kfree(e); + } } } } -static void jump_label_invalidate_module_init(struct module *mod) +static void remove_jump_label_module_init(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; struct jump_entry *iter; + int i, count; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; - for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(iter->code, mod)) - iter->code = 0; + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod != mod) + continue; + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (within_module_init(iter->code, mod)) + iter->key = 0; + iter++; + } + } + } } } @@ -305,77 +426,59 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, switch (val) { case MODULE_STATE_COMING: jump_label_lock(); - ret = jump_label_add_module(mod); + ret = add_jump_label_module(mod); if (ret) - jump_label_del_module(mod); + remove_jump_label_module(mod); jump_label_unlock(); break; case MODULE_STATE_GOING: jump_label_lock(); - jump_label_del_module(mod); + remove_jump_label_module(mod); jump_label_unlock(); break; case MODULE_STATE_LIVE: jump_label_lock(); - jump_label_invalidate_module_init(mod); + remove_jump_label_module_init(mod); jump_label_unlock(); break; } - - return notifier_from_errno(ret); -} - -struct notifier_block jump_label_module_nb = { - .notifier_call = jump_label_module_notify, - .priority = 1, /* higher than tracepoints */ -}; - -static __init int jump_label_init_module(void) -{ - return register_module_notifier(&jump_label_module_nb); + return ret; } -early_initcall(jump_label_init_module); - -#endif /* CONFIG_MODULES */ /*** - * jump_label_text_reserved - check if addr range is reserved - * @start: start text addr - * @end: end text addr - * - * checks if the text addr located between @start and @end - * overlaps with any of the jump label patch addresses. Code - * that wants to modify kernel text should first verify that - * it does not overlap with any of the jump label addresses. - * Caller must hold jump_label_mutex. + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() + * @mod: module to patch * - * returns 1 if there is an overlap, 0 otherwise + * Allow for run-time selection of the optimal nops. Before the module + * loads patch these with arch_get_jump_label_nop(), which is specified by + * the arch specific jump label code. 
*/ -int jump_label_text_reserved(void *start, void *end) +void jump_label_apply_nops(struct module *mod) { - int ret = __jump_label_text_reserved(__start___jump_table, - __stop___jump_table, start, end); + struct jump_entry *iter; - if (ret) - return ret; + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; -#ifdef CONFIG_MODULES - ret = __jump_label_mod_text_reserved(start, end); -#endif - return ret; + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } } -static void jump_label_update(struct jump_label_key *key, int enable) -{ - struct jump_entry *entry = key->entries; - - /* if there are no users, entry can be NULL */ - if (entry) - __jump_label_update(key, entry, enable); +struct notifier_block jump_label_module_nb = { + .notifier_call = jump_label_module_notify, + .priority = 0, +}; -#ifdef CONFIG_MODULES - __jump_label_mod_update(key, enable); -#endif +static __init int init_jump_label_module(void) +{ + return register_module_notifier(&jump_label_module_nb); } +early_initcall(init_jump_label_module); + +#endif /* CONFIG_MODULES */ #endif diff --git a/trunk/kernel/events/core.c b/trunk/kernel/perf_event.c similarity index 99% rename from trunk/kernel/events/core.c rename to trunk/kernel/perf_event.c index 0fc34a370ba4..8e81a9860a0d 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/perf_event.c @@ -2,8 +2,8 @@ * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. 
* * For licensing details see kernel-base/COPYING @@ -39,10 +39,10 @@ #include struct remote_function_call { - struct task_struct *p; - int (*func)(void *info); - void *info; - int ret; + struct task_struct *p; + int (*func)(void *info); + void *info; + int ret; }; static void remote_function(void *data) @@ -76,10 +76,10 @@ static int task_function_call(struct task_struct *p, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = p, - .func = func, - .info = info, - .ret = -ESRCH, /* No such (running) process */ + .p = p, + .func = func, + .info = info, + .ret = -ESRCH, /* No such (running) process */ }; if (task_curr(p)) @@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info) static int cpu_function_call(int cpu, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = NULL, - .func = func, - .info = info, - .ret = -ENXIO, /* No such CPU */ + .p = NULL, + .func = func, + .info = info, + .ret = -ENXIO, /* No such CPU */ }; smp_call_function_single(cpu, remote_function, &data, 1); @@ -125,7 +125,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -struct jump_label_key perf_sched_events __read_mostly; +atomic_t perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -5429,7 +5429,7 @@ static int swevent_hlist_get(struct perf_event *event) return err; } -struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { @@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, } struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, - .create = perf_cgroup_create, - .destroy = perf_cgroup_destroy, - .exit = perf_cgroup_exit, - .attach = perf_cgroup_attach, + .name = "perf_event", + .subsys_id = perf_subsys_id, + .create = perf_cgroup_create, + .destroy = perf_cgroup_destroy, + .exit = perf_cgroup_exit, + .attach = perf_cgroup_attach, }; #endif /* CONFIG_CGROUP_PERF */ diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index d3406346ced6..ee24fa1935ac 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -39,20 +39,16 @@ #include "trace_stat.h" #define FTRACE_WARN_ON(cond) \ - ({ \ - int ___r = cond; \ - if (WARN_ON(___r)) \ + do { \ + if (WARN_ON(cond)) \ ftrace_kill(); \ - ___r; \ - }) + } while (0) #define FTRACE_WARN_ON_ONCE(cond) \ - ({ \ - int ___r = cond; \ - if (WARN_ON_ONCE(___r)) \ + do { \ + if (WARN_ON_ONCE(cond)) \ ftrace_kill(); \ - ___r; \ - }) + } while (0) /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 @@ -151,34 +147,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) } #endif -static void update_ftrace_function(void) -{ - ftrace_func_t func; - - /* - * If there's only one function registered, then call that - * function directly. Otherwise, we need to iterate over the - * registered callers. 
- */ - if (ftrace_list == &ftrace_list_end || - ftrace_list->next == &ftrace_list_end) - func = ftrace_list->func; - else - func = ftrace_list_func; - - /* If we filter on pids, update to use the pid function */ - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - ftrace_trace_function = func; -#else - __ftrace_trace_function = func; - ftrace_trace_function = ftrace_test_stop_func; -#endif -} - static int __register_ftrace_function(struct ftrace_ops *ops) { ops->next = ftrace_list; @@ -190,8 +158,30 @@ static int __register_ftrace_function(struct ftrace_ops *ops) */ rcu_assign_pointer(ftrace_list, ops); - if (ftrace_enabled) - update_ftrace_function(); + if (ftrace_enabled) { + ftrace_func_t func; + + if (ops->next == &ftrace_list_end) + func = ops->func; + else + func = ftrace_list_func; + + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } + + /* + * For one func, simply call it directly. + * For more than one func, call the chain. + */ +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; + ftrace_trace_function = ftrace_test_stop_func; +#endif + } return 0; } @@ -219,19 +209,52 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) *p = (*p)->next; - if (ftrace_enabled) - update_ftrace_function(); + if (ftrace_enabled) { + /* If we only have one func left, then call that directly */ + if (ftrace_list->next == &ftrace_list_end) { + ftrace_func_t func = ftrace_list->func; + + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; +#endif + } + } return 0; } static void ftrace_update_pid_func(void) { - /* Only do something if we are tracing something */ + ftrace_func_t func; + if (ftrace_trace_function == ftrace_stub) return; - update_ftrace_function(); +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + func = ftrace_trace_function; +#else + func = __ftrace_trace_function; +#endif + + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } else { + if (func == ftrace_pid_func) + func = ftrace_pid_function; + } + +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; +#endif } #ifdef CONFIG_FUNCTION_PROFILER @@ -1056,16 +1079,19 @@ static void ftrace_replace_code(int enable) struct ftrace_page *pg; int failed; - if (unlikely(ftrace_disabled)) - return; - do_for_each_ftrace_rec(pg, rec) { - /* Skip over free records */ - if (rec->flags & FTRACE_FL_FREE) + /* + * Skip over free records, records that have + * failed and not converted. 
+ */ + if (rec->flags & FTRACE_FL_FREE || + rec->flags & FTRACE_FL_FAILED || + !(rec->flags & FTRACE_FL_CONVERTED)) continue; failed = __ftrace_replace_code(rec, enable); if (failed) { + rec->flags |= FTRACE_FL_FAILED; ftrace_bug(failed, rec->ip); /* Stop processing */ return; @@ -1081,12 +1107,10 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) ip = rec->ip; - if (unlikely(ftrace_disabled)) - return 0; - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); if (ret) { ftrace_bug(ret, ip); + rec->flags |= FTRACE_FL_FAILED; return 0; } return 1; @@ -1249,10 +1273,10 @@ static int ftrace_update_code(struct module *mod) */ if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); - /* Game over */ - break; + continue; } + p->flags |= FTRACE_FL_CONVERTED; ftrace_update_cnt++; /* @@ -1327,8 +1351,9 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), - FTRACE_ITER_PRINTALL = (1 << 2), - FTRACE_ITER_HASH = (1 << 3), + FTRACE_ITER_FAILURES = (1 << 2), + FTRACE_ITER_PRINTALL = (1 << 3), + FTRACE_ITER_HASH = (1 << 4), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ @@ -1438,9 +1463,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) struct ftrace_iterator *iter = m->private; struct dyn_ftrace *rec = NULL; - if (unlikely(ftrace_disabled)) - return NULL; - if (iter->flags & FTRACE_ITER_HASH) return t_hash_next(m, pos); @@ -1461,6 +1483,12 @@ t_next(struct seq_file *m, void *v, loff_t *pos) rec = &iter->pg->records[iter->idx++]; if ((rec->flags & FTRACE_FL_FREE) || + (!(iter->flags & FTRACE_ITER_FAILURES) && + (rec->flags & FTRACE_FL_FAILED)) || + + ((iter->flags & FTRACE_ITER_FAILURES) && + !(rec->flags & FTRACE_FL_FAILED)) || + ((iter->flags & FTRACE_ITER_FILTER) && !(rec->flags & FTRACE_FL_FILTER)) || @@ -1493,10 +1521,6 @@ static void *t_start(struct seq_file *m, loff_t *pos) loff_t l; mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - return NULL; - /* * If an lseek was done, then reset and start from beginning. 
*/ @@ -1605,6 +1629,24 @@ ftrace_avail_open(struct inode *inode, struct file *file) return ret; } +static int +ftrace_failures_open(struct inode *inode, struct file *file) +{ + int ret; + struct seq_file *m; + struct ftrace_iterator *iter; + + ret = ftrace_avail_open(inode, file); + if (!ret) { + m = file->private_data; + iter = m->private; + iter->flags = FTRACE_ITER_FAILURES; + } + + return ret; +} + + static void ftrace_filter_reset(int enable) { struct ftrace_page *pg; @@ -1615,6 +1657,8 @@ static void ftrace_filter_reset(int enable) if (enable) ftrace_filtered = 0; do_for_each_ftrace_rec(pg, rec) { + if (rec->flags & FTRACE_FL_FAILED) + continue; rec->flags &= ~type; } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); @@ -1716,63 +1760,42 @@ static int ftrace_match(char *str, char *regex, int len, int type) return matched; } -static void -update_record(struct dyn_ftrace *rec, unsigned long flag, int not) -{ - if (not) - rec->flags &= ~flag; - else - rec->flags |= flag; -} - static int -ftrace_match_record(struct dyn_ftrace *rec, char *mod, - char *regex, int len, int type) +ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) { char str[KSYM_SYMBOL_LEN]; - char *modname; - - kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); - - if (mod) { - /* module lookup requires matching the module */ - if (!modname || strcmp(modname, mod)) - return 0; - - /* blank search means to match all funcs in the mod */ - if (!len) - return 1; - } + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); return ftrace_match(str, regex, len, type); } -static int match_records(char *buff, int len, char *mod, int enable, int not) +static int ftrace_match_records(char *buff, int len, int enable) { - unsigned search_len = 0; + unsigned int search_len; struct ftrace_page *pg; struct dyn_ftrace *rec; - int type = MATCH_FULL; - char *search = buff; unsigned long flag; + char *search; + int type; + int not; int found = 0; - if (len) { - type = filter_parse_regex(buff, len, &search, ¬); - search_len = strlen(search); - } - flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; + type = filter_parse_regex(buff, len, &search, ¬); - mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - goto out_unlock; + search_len = strlen(search); + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { - if (ftrace_match_record(rec, mod, search, search_len, type)) { - update_record(rec, flag, not); + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (ftrace_match_record(rec, search, search_len, type)) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; found = 1; } /* @@ -1781,23 +1804,43 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) */ if (enable && (rec->flags & FTRACE_FL_FILTER)) ftrace_filtered = 1; - } while_for_each_ftrace_rec(); - out_unlock: mutex_unlock(&ftrace_lock); return found; } static int -ftrace_match_records(char *buff, int len, int enable) +ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, + char *regex, int len, int type) { - return match_records(buff, len, NULL, enable, 0); + char str[KSYM_SYMBOL_LEN]; + char *modname; + + kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); + + if (!modname || strcmp(modname, mod)) + return 0; + + /* blank search means to match all funcs in the mod */ + if (len) + return ftrace_match(str, regex, len, type); + else + return 1; } static int ftrace_match_module_records(char *buff, char *mod, int enable) { + unsigned search_len = 0; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + int type = MATCH_FULL; + char *search = buff; + unsigned long flag; int not = 0; + int found = 0; + + flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; /* blank or '*' mean the same */ if (strcmp(buff, "*") == 0) @@ -1809,7 +1852,32 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) not = 1; } - return match_records(buff, strlen(buff), mod, enable, not); + if (strlen(buff)) { + type = filter_parse_regex(buff, strlen(buff), &search, ¬); + search_len = strlen(search); + } + + mutex_lock(&ftrace_lock); + do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (ftrace_match_module_record(rec, mod, + search, search_len, type)) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; + found = 1; + } + if (enable && (rec->flags & FTRACE_FL_FILTER)) + ftrace_filtered = 1; + + } while_for_each_ftrace_rec(); + mutex_unlock(&ftrace_lock); + + return found; } /* @@ -1961,13 +2029,12 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, return -EINVAL; mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - goto out_unlock; - do_for_each_ftrace_rec(pg, rec) { - if (!ftrace_match_record(rec, NULL, search, len, type)) + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (!ftrace_match_record(rec, search, len, type)) continue; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -2172,10 +2239,6 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, mutex_lock(&ftrace_regex_lock); - ret = -ENODEV; - if (unlikely(ftrace_disabled)) - goto out_unlock; - if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; iter = m->private; @@ -2350,16 +2413,14 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) ftrace_match_records(parser->buffer, parser->idx, enable); } + mutex_lock(&ftrace_lock); + if (ftrace_start_up && ftrace_enabled) + ftrace_run_update_code(FTRACE_ENABLE_CALLS); + mutex_unlock(&ftrace_lock); + trace_parser_put(parser); kfree(iter); - if (file->f_mode & FMODE_WRITE) { - 
mutex_lock(&ftrace_lock); - if (ftrace_start_up && ftrace_enabled) - ftrace_run_update_code(FTRACE_ENABLE_CALLS); - mutex_unlock(&ftrace_lock); - } - mutex_unlock(&ftrace_regex_lock); return 0; } @@ -2383,6 +2444,13 @@ static const struct file_operations ftrace_avail_fops = { .release = seq_release_private, }; +static const struct file_operations ftrace_failures_fops = { + .open = ftrace_failures_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, @@ -2505,6 +2573,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) bool exists; int i; + if (ftrace_disabled) + return -ENODEV; + /* decode regex */ type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) @@ -2513,18 +2584,12 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) search_len = strlen(search); mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) { - mutex_unlock(&ftrace_lock); - return -ENODEV; - } - do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FREE) + if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) continue; - if (ftrace_match_record(rec, NULL, search, search_len, type)) { + if (ftrace_match_record(rec, search, search_len, type)) { /* if it is in the array */ exists = false; for (i = 0; i < *idx; i++) { @@ -2614,6 +2679,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) trace_create_file("available_filter_functions", 0444, d_tracer, NULL, &ftrace_avail_fops); + trace_create_file("failures", 0444, + d_tracer, NULL, &ftrace_failures_fops); + trace_create_file("set_ftrace_filter", 0644, d_tracer, NULL, &ftrace_filter_fops); @@ -2635,6 +2703,7 @@ static int ftrace_process_locs(struct module *mod, { unsigned long *p; unsigned long addr; + unsigned long flags; mutex_lock(&ftrace_lock); p = start; @@ -2651,7 +2720,10 @@ static int ftrace_process_locs(struct module *mod, ftrace_record_ip(addr); } + /* disable interrupts to prevent kstop machine */ + local_irq_save(flags); ftrace_update_code(mod); + local_irq_restore(flags); mutex_unlock(&ftrace_lock); return 0; @@ -2663,11 +2735,10 @@ void ftrace_release_mod(struct module *mod) struct dyn_ftrace *rec; struct ftrace_page *pg; - mutex_lock(&ftrace_lock); - if (ftrace_disabled) - goto out_unlock; + return; + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (within_module_core(rec->ip, mod)) { /* @@ -2678,7 +2749,6 @@ void ftrace_release_mod(struct module *mod) ftrace_free_rec(rec); } } while_for_each_ftrace_rec(); - out_unlock: mutex_unlock(&ftrace_lock); } @@ -3073,17 +3143,16 @@ void ftrace_kill(void) */ int register_ftrace_function(struct ftrace_ops *ops) { - int ret = -1; - - mutex_lock(&ftrace_lock); + int ret; if (unlikely(ftrace_disabled)) - goto out_unlock; + return -1; + + mutex_lock(&ftrace_lock); ret = __register_ftrace_function(ops); ftrace_startup(0); - out_unlock: mutex_unlock(&ftrace_lock); return ret; } @@ -3111,14 +3180,14 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = -ENODEV; - - mutex_lock(&ftrace_lock); + int ret; if (unlikely(ftrace_disabled)) - goto out; + return -ENODEV; + + mutex_lock(&ftrace_lock); - ret = proc_dointvec(table, write, buffer, lenp, ppos); + ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) goto out; diff --git 
a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index ee9c921d7f21..1cb49be7c7fb 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -2014,10 +2014,9 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; - if (iter->lost_events && - !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", - iter->cpu, iter->lost_events)) - return TRACE_TYPE_PARTIAL_LINE; + if (iter->lost_events) + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", + iter->cpu, iter->lost_events); if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); @@ -3231,14 +3230,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, if (iter->seq.len >= cnt) break; - - /* - * Setting the full flag means we reached the trace_seq buffer - * size and we should leave by partial output condition above. - * One of the trace_seq_* functions is not used properly. - */ - WARN_ONCE(iter->seq.full, "full flag set for trace type %d", - iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c index cf535ccedc86..456be9063c2d 100644 --- a/trunk/kernel/trace/trace_output.c +++ b/trunk/kernel/trace/trace_output.c @@ -830,9 +830,6 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) { - if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; } diff --git a/trunk/kernel/trace/trace_printk.c b/trunk/kernel/trace/trace_printk.c index dff763b7baf1..2547d8813cf0 100644 --- a/trunk/kernel/trace/trace_printk.c +++ b/trunk/kernel/trace/trace_printk.c @@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); struct trace_bprintk_fmt { struct list_head list; - const char *fmt; + char fmt[0]; }; static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) @@ -49,7 +49,6 @@ static void hold_module_trace_bprintk_format(const char **start, const char **end) { const char **iter; - char *fmt; mutex_lock(&btrace_mutex); for (iter = start; iter < end; iter++) { @@ -59,18 +58,14 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) continue; } - tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); - if (tb_fmt) - fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); - if (tb_fmt && fmt) { + tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) + + strlen(*iter) + 1, GFP_KERNEL); + if (tb_fmt) { list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); - strcpy(fmt, *iter); - tb_fmt->fmt = fmt; + strcpy(tb_fmt->fmt, *iter); *iter = tb_fmt->fmt; - } else { - kfree(tb_fmt); + } else *iter = NULL; - } } mutex_unlock(&btrace_mutex); } @@ -89,76 +84,6 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, return 0; } -/* - * The debugfs/tracing/printk_formats file maps the addresses with - * the ASCII formats that are used in the bprintk events in the - * buffer. For userspace tools to be able to decode the events from - * the buffer, they need to be able to map the address with the format. - * - * The addresses of the bprintk formats are in their own section - * __trace_printk_fmt. But for modules we copy them into a link list. - * The code to print the formats and their addresses passes around the - * address of the fmt string. 
If the fmt address passed into the seq - * functions is within the kernel core __trace_printk_fmt section, then - * it simply uses the next pointer in the list. - * - * When the fmt pointer is outside the kernel core __trace_printk_fmt - * section, then we need to read the link list pointers. The trick is - * we pass the address of the string to the seq function just like - * we do for the kernel core formats. To get back the structure that - * holds the format, we simply use containerof() and then go to the - * next format in the list. - */ -static const char ** -find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) -{ - struct trace_bprintk_fmt *mod_fmt; - - if (list_empty(&trace_bprintk_fmt_list)) - return NULL; - - /* - * v will point to the address of the fmt record from t_next - * v will be NULL from t_start. - * If this is the first pointer or called from start - * then we need to walk the list. - */ - if (!v || start_index == *pos) { - struct trace_bprintk_fmt *p; - - /* search the module list */ - list_for_each_entry(p, &trace_bprintk_fmt_list, list) { - if (start_index == *pos) - return &p->fmt; - start_index++; - } - /* pos > index */ - return NULL; - } - - /* - * v points to the address of the fmt field in the mod list - * structure that holds the module print format. - */ - mod_fmt = container_of(v, typeof(*mod_fmt), fmt); - if (mod_fmt->list.next == &trace_bprintk_fmt_list) - return NULL; - - mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); - - return &mod_fmt->fmt; -} - -static void format_mod_start(void) -{ - mutex_lock(&btrace_mutex); -} - -static void format_mod_stop(void) -{ - mutex_unlock(&btrace_mutex); -} - #else /* !CONFIG_MODULES */ __init static int module_trace_bprintk_format_notify(struct notifier_block *self, @@ -166,13 +91,6 @@ module_trace_bprintk_format_notify(struct notifier_block *self, { return 0; } -static inline const char ** -find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) -{ - return NULL; -} -static inline void format_mod_start(void) { } -static inline void format_mod_stop(void) { } #endif /* CONFIG_MODULES */ @@ -235,33 +153,20 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) } EXPORT_SYMBOL_GPL(__ftrace_vprintk); -static const char **find_next(void *v, loff_t *pos) -{ - const char **fmt = v; - int start_index; - - if (!fmt) - fmt = __start___trace_bprintk_fmt + *pos; - - start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; - - if (*pos < start_index) - return fmt; - - return find_next_mod_format(start_index, v, fmt, pos); -} - static void * t_start(struct seq_file *m, loff_t *pos) { - format_mod_start(); - return find_next(NULL, pos); + const char **fmt = __start___trace_bprintk_fmt + *pos; + + if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) + return NULL; + return fmt; } static void *t_next(struct seq_file *m, void * v, loff_t *pos) { (*pos)++; - return find_next(v, pos); + return t_start(m, pos); } static int t_show(struct seq_file *m, void *v) @@ -300,7 +205,6 @@ static int t_show(struct seq_file *m, void *v) static void t_stop(struct seq_file *m, void *p) { - format_mod_stop(); } static const struct seq_operations show_format_seq_ops = { diff --git a/trunk/kernel/tracepoint.c b/trunk/kernel/tracepoint.c index b219f1449c54..68187af4889e 100644 --- a/trunk/kernel/tracepoint.c +++ b/trunk/kernel/tracepoint.c @@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, { 
WARN_ON(strcmp((*entry)->name, elem->name) != 0); - if (elem->regfunc && !jump_label_enabled(&elem->key) && active) + if (elem->regfunc && !elem->state && active) elem->regfunc(); - else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) + else if (elem->unregfunc && elem->state && !active) elem->unregfunc(); /* @@ -264,10 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, * is used. */ rcu_assign_pointer(elem->funcs, (*entry)->funcs); - if (active && !jump_label_enabled(&elem->key)) - jump_label_inc(&elem->key); - else if (!active && jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (!elem->state && active) { + jump_label_enable(&elem->state); + elem->state = active; + } else if (elem->state && !active) { + jump_label_disable(&elem->state); + elem->state = active; + } } /* @@ -278,11 +281,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, */ static void disable_tracepoint(struct tracepoint *elem) { - if (elem->unregfunc && jump_label_enabled(&elem->key)) + if (elem->unregfunc && elem->state) elem->unregfunc(); - if (jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (elem->state) { + jump_label_disable(&elem->state); + elem->state = 0; + } rcu_assign_pointer(elem->funcs, NULL); } diff --git a/trunk/scripts/Makefile.build b/trunk/scripts/Makefile.build index fdca952f6a40..d5f925abe4d2 100644 --- a/trunk/scripts/Makefile.build +++ b/trunk/scripts/Makefile.build @@ -244,16 +244,13 @@ endif ifdef CONFIG_FTRACE_MCOUNT_RECORD ifdef BUILD_C_RECORDMCOUNT -ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") - RECORDMCOUNT_FLAGS = -w -endif # Due to recursion, we must skip empty.o. # The empty.o file is created in the make process in order to determine # the target endianness and word size. It is made before all other C # files, including recordmcount. sub_cmd_record_mcount = \ if [ $(@) != "scripts/mod/empty.o" ]; then \ - $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ + $(objtree)/scripts/recordmcount "$(@)"; \ fi; else sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ diff --git a/trunk/scripts/recordmcount.c b/trunk/scripts/recordmcount.c index ee52cb8e17ad..f9f6f52db772 100644 --- a/trunk/scripts/recordmcount.c +++ b/trunk/scripts/recordmcount.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -40,7 +39,6 @@ static char gpfx; /* prefix for global symbol name (sometimes '_') */ static struct stat sb; /* Remember .st_size, etc. 
*/ static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ static const char *altmcount; /* alternate mcount symbol name */ -static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ /* setjmp() return values */ enum { @@ -80,7 +78,7 @@ static off_t ulseek(int const fd, off_t const offset, int const whence) { off_t const w = lseek(fd, offset, whence); - if (w == (off_t)-1) { + if ((off_t)-1 == w) { perror("lseek"); fail_file(); } @@ -113,41 +111,13 @@ static void * umalloc(size_t size) { void *const addr = malloc(size); - if (addr == 0) { + if (0 == addr) { fprintf(stderr, "malloc failed: %zu bytes\n", size); fail_file(); } return addr; } -static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; -static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; -static unsigned char *ideal_nop; - -static char rel_type_nop; - -static int (*make_nop)(void *map, size_t const offset); - -static int make_nop_x86(void *map, size_t const offset) -{ - uint32_t *ptr; - unsigned char *op; - - /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */ - ptr = map + offset; - if (*ptr != 0) - return -1; - - op = map + offset - 1; - if (*op != 0xe8) - return -1; - - /* convert to nop */ - ulseek(fd_map, offset - 1, SEEK_SET); - uwrite(fd_map, ideal_nop, 5); - return 0; -} - /* * Get the whole file as a programming convenience in order to avoid * malloc+lseek+read+free of many pieces. If successful, then mmap @@ -166,7 +136,7 @@ static void *mmap_file(char const *fname) void *addr; fd_map = open(fname, O_RDWR); - if (fd_map < 0 || fstat(fd_map, &sb) < 0) { + if (0 > fd_map || 0 > fstat(fd_map, &sb)) { perror(fname); fail_file(); } @@ -177,7 +147,7 @@ static void *mmap_file(char const *fname) addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd_map, 0); mmap_failed = 0; - if (addr == MAP_FAILED) { + if (MAP_FAILED == addr) { mmap_failed = 1; addr = umalloc(sb.st_size); uread(fd_map, addr, sb.st_size); @@ -236,13 +206,12 @@ static uint32_t (*w2)(uint16_t); static int is_mcounted_section_name(char const *const txtname) { - return strcmp(".text", txtname) == 0 || - strcmp(".ref.text", txtname) == 0 || - strcmp(".sched.text", txtname) == 0 || - strcmp(".spinlock.text", txtname) == 0 || - strcmp(".irqentry.text", txtname) == 0 || - strcmp(".kprobes.text", txtname) == 0 || - strcmp(".text.unlikely", txtname) == 0; + return 0 == strcmp(".text", txtname) || + 0 == strcmp(".ref.text", txtname) || + 0 == strcmp(".sched.text", txtname) || + 0 == strcmp(".spinlock.text", txtname) || + 0 == strcmp(".irqentry.text", txtname) || + 0 == strcmp(".text.unlikely", txtname); } /* 32 bit and 64 bit are very similar */ @@ -295,48 +264,43 @@ do_file(char const *const fname) w8 = w8nat; switch (ehdr->e_ident[EI_DATA]) { static unsigned int const endian = 1; - default: + default: { fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", ehdr->e_ident[EI_DATA], fname); fail_file(); - break; - case ELFDATA2LSB: - if (*(unsigned char const *)&endian != 1) { + } break; + case ELFDATA2LSB: { + if (1 != *(unsigned char const *)&endian) { /* main() is big endian, file.o is little endian. */ w = w4rev; w2 = w2rev; w8 = w8rev; } - break; - case ELFDATA2MSB: - if (*(unsigned char const *)&endian != 0) { + } break; + case ELFDATA2MSB: { + if (0 != *(unsigned char const *)&endian) { /* main() is little endian, file.o is big endian. 
*/ w = w4rev; w2 = w2rev; w8 = w8rev; } - break; + } break; } /* end switch */ - if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 - || w2(ehdr->e_type) != ET_REL - || ehdr->e_ident[EI_VERSION] != EV_CURRENT) { + if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG) + || ET_REL != w2(ehdr->e_type) + || EV_CURRENT != ehdr->e_ident[EI_VERSION]) { fprintf(stderr, "unrecognized ET_REL file %s\n", fname); fail_file(); } gpfx = 0; switch (w2(ehdr->e_machine)) { - default: + default: { fprintf(stderr, "unrecognized e_machine %d %s\n", w2(ehdr->e_machine), fname); fail_file(); - break; - case EM_386: - reltype = R_386_32; - make_nop = make_nop_x86; - ideal_nop = ideal_nop5_x86_32; - mcount_adjust_32 = -1; - break; + } break; + case EM_386: reltype = R_386_32; break; case EM_ARM: reltype = R_ARM_ABS32; altmcount = "__gnu_mcount_nc"; break; @@ -347,91 +311,67 @@ do_file(char const *const fname) case EM_S390: /* reltype: e_class */ gpfx = '_'; break; case EM_SH: reltype = R_SH_DIR32; break; case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; - case EM_X86_64: - make_nop = make_nop_x86; - ideal_nop = ideal_nop5_x86_64; - reltype = R_X86_64_64; - mcount_adjust_64 = -1; - break; + case EM_X86_64: reltype = R_X86_64_64; break; } /* end switch */ switch (ehdr->e_ident[EI_CLASS]) { - default: + default: { fprintf(stderr, "unrecognized ELF class %d %s\n", ehdr->e_ident[EI_CLASS], fname); fail_file(); - break; - case ELFCLASS32: - if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) - || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { + } break; + case ELFCLASS32: { + if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize) + || sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ehdr->e_machine) == EM_S390) { + if (EM_S390 == w2(ehdr->e_machine)) reltype = R_390_32; - mcount_adjust_32 = -4; - } - if (w2(ehdr->e_machine) == EM_MIPS) { + if (EM_MIPS == w2(ehdr->e_machine)) { reltype = R_MIPS_32; is_fake_mcount32 = MIPS32_is_fake_mcount; } do32(ehdr, fname, reltype); - break; + } break; case ELFCLASS64: { Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; - if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr) - || w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) { + if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize) + || sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ghdr->e_machine) == EM_S390) { + if (EM_S390 == w2(ghdr->e_machine)) reltype = R_390_64; - mcount_adjust_64 = -8; - } - if (w2(ghdr->e_machine) == EM_MIPS) { + if (EM_MIPS == w2(ghdr->e_machine)) { reltype = R_MIPS_64; Elf64_r_sym = MIPS64_r_sym; Elf64_r_info = MIPS64_r_info; is_fake_mcount64 = MIPS64_is_fake_mcount; } do64(ghdr, fname, reltype); - break; - } + } break; } /* end switch */ cleanup(); } int -main(int argc, char *argv[]) +main(int argc, char const *argv[]) { const char ftrace[] = "/ftrace.o"; int ftrace_size = sizeof(ftrace) - 1; int n_error = 0; /* gcc-4.3.0 false positive complaint */ - int c; - int i; - - while ((c = getopt(argc, argv, "w")) >= 0) { - switch (c) { - case 'w': - warn_on_notrace_sect = 1; - break; - default: - fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); - return 0; - } - } - if ((argc - optind) < 1) { - fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); + if (argc <= 1) { + fprintf(stderr, "usage: recordmcount file.o...\n"); return 0; } /* Process each file in turn, allowing deep failure. 
*/ - for (i = optind; i < argc; i++) { - char *file = argv[i]; + for (--argc, ++argv; 0 < argc; --argc, ++argv) { int const sjval = setjmp(jmpenv); int len; @@ -440,29 +380,29 @@ main(int argc, char *argv[]) * function but does not call it. Since ftrace.o should * not be traced anyway, we just skip it. */ - len = strlen(file); + len = strlen(argv[0]); if (len >= ftrace_size && - strcmp(file + (len - ftrace_size), ftrace) == 0) + strcmp(argv[0] + (len - ftrace_size), ftrace) == 0) continue; switch (sjval) { - default: - fprintf(stderr, "internal error: %s\n", file); + default: { + fprintf(stderr, "internal error: %s\n", argv[0]); exit(1); - break; - case SJ_SETJMP: /* normal sequence */ + } break; + case SJ_SETJMP: { /* normal sequence */ /* Avoid problems if early cleanup() */ fd_map = -1; ehdr_curr = NULL; mmap_failed = 1; - do_file(file); - break; - case SJ_FAIL: /* error in do_file or below */ + do_file(argv[0]); + } break; + case SJ_FAIL: { /* error in do_file or below */ ++n_error; - break; - case SJ_SUCCEED: /* premature success */ + } break; + case SJ_SUCCEED: { /* premature success */ /* do nothing */ - break; + } break; } /* end switch */ } return !!n_error; diff --git a/trunk/scripts/recordmcount.h b/trunk/scripts/recordmcount.h index 4be60364a405..baf187bee983 100644 --- a/trunk/scripts/recordmcount.h +++ b/trunk/scripts/recordmcount.h @@ -22,15 +22,11 @@ #undef is_fake_mcount #undef fn_is_fake_mcount #undef MIPS_is_fake_mcount -#undef mcount_adjust #undef sift_rel_mcount -#undef nop_mcount #undef find_secsym_ndx #undef __has_rel_mcount #undef has_rel_mcount #undef tot_relsize -#undef get_mcountsym -#undef get_sym_str_and_relp #undef do_func #undef Elf_Addr #undef Elf_Ehdr @@ -53,18 +49,14 @@ #ifdef RECORD_MCOUNT_64 # define append_func append64 # define sift_rel_mcount sift64_rel_mcount -# define nop_mcount nop_mcount_64 # define find_secsym_ndx find64_secsym_ndx # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize -# define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 -# define get_mcountsym get_mcountsym_64 # define is_fake_mcount is_fake_mcount64 # define fn_is_fake_mcount fn_is_fake_mcount64 # define MIPS_is_fake_mcount MIPS64_is_fake_mcount -# define mcount_adjust mcount_adjust_64 # define Elf_Addr Elf64_Addr # define Elf_Ehdr Elf64_Ehdr # define Elf_Shdr Elf64_Shdr @@ -85,18 +77,14 @@ #else # define append_func append32 # define sift_rel_mcount sift32_rel_mcount -# define nop_mcount nop_mcount_32 # define find_secsym_ndx find32_secsym_ndx # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize -# define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 -# define get_mcountsym get_mcountsym_32 # define is_fake_mcount is_fake_mcount32 # define fn_is_fake_mcount fn_is_fake_mcount32 # define MIPS_is_fake_mcount MIPS32_is_fake_mcount -# define mcount_adjust mcount_adjust_32 # define Elf_Addr Elf32_Addr # define Elf_Ehdr Elf32_Ehdr # define Elf_Shdr Elf32_Shdr @@ -135,8 +123,6 @@ static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type) } static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; -static int mcount_adjust = 0; - /* * MIPS mcount long call has 2 _mcount symbols, only the position of the 1st * _mcount symbol is needed for dynamic function tracer, with it, to disable @@ -248,49 +234,6 @@ static void append_func(Elf_Ehdr *const ehdr, uwrite(fd_map, 
ehdr, sizeof(*ehdr)); } -static unsigned get_mcountsym(Elf_Sym const *const sym0, - Elf_Rel const *relp, - char const *const str0) -{ - unsigned mcountsym = 0; - - Elf_Sym const *const symp = - &sym0[Elf_r_sym(relp)]; - char const *symname = &str0[w(symp->st_name)]; - char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; - - if (symname[0] == '.') - ++symname; /* ppc64 hack */ - if (strcmp(mcount, symname) == 0 || - (altmcount && strcmp(altmcount, symname) == 0)) - mcountsym = Elf_r_sym(relp); - - return mcountsym; -} - -static void get_sym_str_and_relp(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr, - Elf_Sym const **sym0, - char const **str0, - Elf_Rel const **relp) -{ - Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) - + (void *)ehdr); - unsigned const symsec_sh_link = w(relhdr->sh_link); - Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; - Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; - Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) - + (void *)ehdr); - - *sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) - + (void *)ehdr); - - *str0 = (char const *)(_w(strsec->sh_offset) - + (void *)ehdr); - - *relp = rel0; -} - /* * Look at the relocations in order to find the calls to mcount. * Accumulate the section offsets that are found, and their relocation info, @@ -307,27 +250,47 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, { uint_t *const mloc0 = mlocp; Elf_Rel *mrelp = *mrelpp; - Elf_Sym const *sym0; - char const *str0; - Elf_Rel const *relp; + Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + + (void *)ehdr); + unsigned const symsec_sh_link = w(relhdr->sh_link); + Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; + Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) + + (void *)ehdr); + + Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; + char const *const str0 = (char const *)(_w(strsec->sh_offset) + + (void *)ehdr); + + Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) + + (void *)ehdr); unsigned rel_entsize = _w(relhdr->sh_entsize); unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; + Elf_Rel const *relp = rel0; + unsigned mcountsym = 0; unsigned t; - get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); - for (t = nrel; t; --t) { - if (!mcountsym) - mcountsym = get_mcountsym(sym0, relp, str0); + if (!mcountsym) { + Elf_Sym const *const symp = + &sym0[Elf_r_sym(relp)]; + char const *symname = &str0[w(symp->st_name)]; + char const *mcount = '_' == gpfx ? "_mcount" : "mcount"; + + if ('.' == symname[0]) + ++symname; /* ppc64 hack */ + if (0 == strcmp(mcount, symname) || + (altmcount && 0 == strcmp(altmcount, symname))) + mcountsym = Elf_r_sym(relp); + } if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - uint_t const addend = - _w(_w(relp->r_offset) - recval + mcount_adjust); + uint_t const addend = _w(_w(relp->r_offset) - recval); + mrelp->r_offset = _w(offbase + ((void *)mlocp - (void *)mloc0)); Elf_r_info(mrelp, recsym, reltype); - if (rel_entsize == sizeof(Elf_Rela)) { + if (sizeof(Elf_Rela) == rel_entsize) { ((Elf_Rela *)mrelp)->r_addend = addend; *mlocp++ = 0; } else @@ -341,63 +304,6 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, return mlocp; } -/* - * Read the relocation table again, but this time its called on sections - * that are not going to be traced. The mcount calls here will be converted - * into nops. 
- */ -static void nop_mcount(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr, - const char *const txtname) -{ - Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) - + (void *)ehdr); - Elf_Sym const *sym0; - char const *str0; - Elf_Rel const *relp; - Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; - unsigned rel_entsize = _w(relhdr->sh_entsize); - unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; - unsigned mcountsym = 0; - unsigned t; - int once = 0; - - get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); - - for (t = nrel; t; --t) { - int ret = -1; - - if (!mcountsym) - mcountsym = get_mcountsym(sym0, relp, str0); - - if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - if (make_nop) - ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); - if (warn_on_notrace_sect && !once) { - printf("Section %s has mcount callers being ignored\n", - txtname); - once = 1; - /* just warn? */ - if (!make_nop) - return; - } - } - - /* - * If we successfully removed the mcount, mark the relocation - * as a nop (don't do anything with it). - */ - if (!ret) { - Elf_Rel rel; - rel = *(Elf_Rel *)relp; - Elf_r_info(&rel, Elf_r_sym(relp), rel_type_nop); - ulseek(fd_map, (void *)relp - (void *)ehdr, SEEK_SET); - uwrite(fd_map, &rel, sizeof(rel)); - } - relp = (Elf_Rel const *)(rel_entsize + (void *)relp); - } -} - /* * Find a symbol in the given section, to be used as the base for relocating @@ -448,13 +354,13 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; char const *const txtname = &shstrtab[w(txthdr->sh_name)]; - if (strcmp("__mcount_loc", txtname) == 0) { + if (0 == strcmp("__mcount_loc", txtname)) { fprintf(stderr, "warning: __mcount_loc already exists: %s\n", fname); succeed_file(); } - if (w(txthdr->sh_type) != SHT_PROGBITS || - !(w(txthdr->sh_flags) & SHF_EXECINSTR)) + if (SHT_PROGBITS != w(txthdr->sh_type) || + !is_mcounted_section_name(txtname)) return NULL; return txtname; } @@ -464,7 +370,7 @@ static char const *has_rel_mcount(Elf_Shdr const *const relhdr, char const *const shstrtab, char const *const fname) { - if (w(relhdr->sh_type) != SHT_REL && w(relhdr->sh_type) != SHT_RELA) + if (SHT_REL != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type)) return NULL; return __has_rel_mcount(relhdr, shdr0, shstrtab, fname); } @@ -477,11 +383,9 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, { unsigned totrelsz = 0; Elf_Shdr const *shdrp = shdr0; - char const *txtname; for (; nhdr; --nhdr, ++shdrp) { - txtname = has_rel_mcount(shdrp, shdr0, shstrtab, fname); - if (txtname && is_mcounted_section_name(txtname)) + if (has_rel_mcount(shdrp, shdr0, shstrtab, fname)) totrelsz += _w(shdrp->sh_size); } return totrelsz; @@ -517,7 +421,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); - if (txtname && is_mcounted_section_name(txtname)) { + if (txtname) { uint_t recval = 0; unsigned const recsym = find_secsym_ndx( w(relhdr->sh_info), txtname, &recval, @@ -528,12 +432,6 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) mlocp = sift_rel_mcount(mlocp, (void *)mlocp - (void *)mloc0, &mrelp, relhdr, ehdr, recsym, recval, reltype); - } else if (txtname && (warn_on_notrace_sect || make_nop)) { - /* - * This section is ignored by ftrace, but still - * has mcount calls. 
Convert them to nops now. - */ - nop_mcount(relhdr, ehdr, txtname); } } if (mloc0 != mlocp) { diff --git a/trunk/scripts/recordmcount.pl b/trunk/scripts/recordmcount.pl index 858966ab019c..4be0deea71ca 100755 --- a/trunk/scripts/recordmcount.pl +++ b/trunk/scripts/recordmcount.pl @@ -134,7 +134,6 @@ ".sched.text" => 1, ".spinlock.text" => 1, ".irqentry.text" => 1, - ".kprobes.text" => 1, ".text.unlikely" => 1, ); @@ -223,7 +222,6 @@ sub check_objcopy $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; $type = ".quad"; $alignment = 8; - $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_x86_64"; @@ -233,7 +231,6 @@ sub check_objcopy } elsif ($arch eq "i386") { $alignment = 4; - $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_i386"; @@ -243,14 +240,12 @@ sub check_objcopy } elsif ($arch eq "s390" && $bits == 32) { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; - $mcount_adjust = -4; $alignment = 4; $ld .= " -m elf_s390"; $cc .= " -m31"; } elsif ($arch eq "s390" && $bits == 64) { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; - $mcount_adjust = -8; $alignment = 8; $type = ".quad"; $ld .= " -m elf64_s390"; diff --git a/trunk/tools/perf/Documentation/perf-script.txt b/trunk/tools/perf/Documentation/perf-script.txt index 86c87e214b11..66f040b30729 100644 --- a/trunk/tools/perf/Documentation/perf-script.txt +++ b/trunk/tools/perf/Documentation/perf-script.txt @@ -113,61 +113,13 @@ OPTIONS Do various checks like samples ordering and lost events. -f:: ---fields:: +--fields Comma separated list of fields to print. Options are: comm, tid, pid, time, cpu, event, trace, sym. Field - list can be prepended with the type, trace, sw or hw, + list must be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace - perf script -f - - is equivalent to: - - perf script -f trace: -f sw: -f hw: - - i.e., the specified fields apply to all event types if the type string - is not given. - - The arguments are processed in the order received. A later usage can - reset a prior request. e.g.: - - -f trace: -f comm,tid,time,sym - - The first -f suppresses trace events (field list is ""), but then the - second invocation sets the fields to comm,tid,time,sym. In this case a - warning is given to the user: - - "Overriding previous field request for all events." - - Alternativey, consider the order: - - -f comm,tid,time,sym -f trace: - - The first -f sets the fields for all events and the second -f - suppresses trace events. The user is given a warning message about - the override, and the result of the above is that only S/W and H/W - events are displayed with the given fields. - - For the 'wildcard' option if a user selected field is invalid for an - event type, a message is displayed to the user that the option is - ignored for that type. For example: - - $ perf script -f comm,tid,trace - 'trace' not valid for hardware events. Ignoring. - 'trace' not valid for software events. Ignoring. - - Alternatively, if the type is given an invalid field is specified it - is an error. For example: - - perf script -v -f sw:comm,tid,trace - 'trace' not valid for software events. - - At this point usage is displayed, and perf-script exits. - - Finally, a user may not set fields to none for all event types. - i.e., -f "" is not allowed. 
- -k:: --vmlinux=:: vmlinux pathname diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 1455413ec7a7..0c542563ea6c 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -5,8 +5,6 @@ endif # The default target of this Makefile is... all: -include config/utilities.mak - ifneq ($(OUTPUT),) # check that the output directory actually exists OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) @@ -15,12 +13,6 @@ endif # Define V to have a more verbose compile. # -# Define PYTHON to point to the python binary if the default -# `python' is not correct; for example: PYTHON=python2 -# -# Define PYTHON_CONFIG to point to the python-config binary if -# the default `$(PYTHON)-config' is not correct. -# # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 # # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. @@ -142,7 +134,7 @@ INSTALL = install # explicitly what architecture to check for. Fix this up for yours.. SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ --include config/feature-tests.mak +-include feature-tests.mak ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all @@ -177,10 +169,12 @@ grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) $(OUTPUT)python/perf.so: $(PYRF_OBJS) - $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ - --quiet build_ext \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' + $(QUIET_GEN)( \ + export CFLAGS="$(BASIC_CFLAGS)"; \ + python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' \ + ) + # # No Perl scripts right now: # @@ -485,74 +479,24 @@ else endif endif -disable-python = $(eval $(disable-python_code)) -define disable-python_code - BASIC_CFLAGS += -DNO_LIBPYTHON - $(if $(1),$(warning No $(1) was found)) - $(warning Python support won't be built) -endef - -override PYTHON := \ - $(call get-executable-or-default,PYTHON,python) - -ifndef PYTHON - $(call disable-python,python interpreter) - python-clean := +ifdef NO_LIBPYTHON + BASIC_CFLAGS += -DNO_LIBPYTHON else - - PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - - python-clean := $(PYTHON_WORD) util/setup.py clean \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' - - ifdef NO_LIBPYTHON - $(call disable-python) - else - - override PYTHON_CONFIG := \ - $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) - - ifndef PYTHON_CONFIG - $(call disable-python,python-config tool) - else - - PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) - - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) - PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) - - ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) - $(call disable-python,Python.h (for Python 2.x)) - else - - ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y) - $(warning Python 3 is not yet supported; please set) - $(warning PYTHON and/or PYTHON_CONFIG appropriately.) 
- $(warning If you also have Python 2 installed, then) - $(warning try something like:) - $(warning $(and ,)) - $(warning $(and ,) make PYTHON=python2) - $(warning $(and ,)) - $(warning Otherwise, disable Python support entirely:) - $(warning $(and ,)) - $(warning $(and ,) make NO_LIBPYTHON=1) - $(warning $(and ,)) - $(error $(and ,)) - else - ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) - EXTLIBS += $(PYTHON_EMBED_LIBADD) - LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o - LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o - LANG_BINDINGS += $(OUTPUT)python/perf.so - endif - - endif - endif - endif + PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null) + PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` + FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) + msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings) + BASIC_CFLAGS += -DNO_LIBPYTHON + else + ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) + EXTLIBS += $(PYTHON_EMBED_LIBADD) + LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o + LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o + LANG_BINDINGS += $(OUTPUT)python/perf.so + endif endif ifdef NO_DEMANGLE @@ -893,7 +837,8 @@ clean: $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - $(python-clean) + @python util/setup.py clean --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index 974f6d3f4e53..ac574ea23917 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -49,169 +49,57 @@ struct output_option { }; /* default set to maintain compatibility with current format */ -static struct { - bool user_set; - bool wildcard_set; - u64 fields; - u64 invalid_fields; -} output[PERF_TYPE_MAX] = { - - [PERF_TYPE_HARDWARE] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_SOFTWARE] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_TRACEPOINT] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_RAW] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, +static u64 output_fields[PERF_TYPE_MAX] = { + [PERF_TYPE_HARDWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + [PERF_TYPE_SOFTWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + 
[PERF_TYPE_TRACEPOINT] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, }; -static bool output_set_by_user(void) -{ - int j; - for (j = 0; j < PERF_TYPE_MAX; ++j) { - if (output[j].user_set) - return true; - } - return false; -} - -static const char *output_field2str(enum perf_output_field field) -{ - int i, imax = ARRAY_SIZE(all_output_options); - const char *str = ""; - - for (i = 0; i < imax; ++i) { - if (all_output_options[i].field == field) { - str = all_output_options[i].str; - break; - } - } - return str; -} +static bool output_set_by_user; -#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) +#define PRINT_FIELD(x) (output_fields[attr->type] & PERF_OUTPUT_##x) -static int perf_event_attr__check_stype(struct perf_event_attr *attr, - u64 sample_type, const char *sample_msg, - enum perf_output_field field) +static int perf_session__check_attr(struct perf_session *session, + struct perf_event_attr *attr) { - int type = attr->type; - const char *evname; - - if (attr->sample_type & sample_type) - return 0; - - if (output[type].user_set) { - evname = __event_name(attr->type, attr->config); - pr_err("Samples for '%s' event do not have %s attribute set. " - "Cannot print '%s' field.\n", - evname, sample_msg, output_field2str(field)); - return -1; - } - - /* user did not ask for it explicitly so remove from the default list */ - output[type].fields &= ~field; - evname = __event_name(attr->type, attr->config); - pr_debug("Samples for '%s' event do not have %s attribute set. " - "Skipping '%s' field.\n", - evname, sample_msg, output_field2str(field)); - - return 0; -} - -static int perf_evsel__check_attr(struct perf_evsel *evsel, - struct perf_session *session) -{ - struct perf_event_attr *attr = &evsel->attr; - if (PRINT_FIELD(TRACE) && !perf_session__has_traces(session, "record -R")) return -EINVAL; if (PRINT_FIELD(SYM)) { - if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", - PERF_OUTPUT_SYM)) + if (!(session->sample_type & PERF_SAMPLE_IP)) { + pr_err("Samples do not contain IP data.\n"); return -EINVAL; - + } if (!no_callchain && - !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) + !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) symbol_conf.use_callchain = false; } if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", - PERF_OUTPUT_TID|PERF_OUTPUT_PID)) + !(session->sample_type & PERF_SAMPLE_TID)) { + pr_err("Samples do not contain TID/PID data.\n"); return -EINVAL; + } if (PRINT_FIELD(TIME) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME", - PERF_OUTPUT_TIME)) + !(session->sample_type & PERF_SAMPLE_TIME)) { + pr_err("Samples do not contain timestamps.\n"); return -EINVAL; + } if (PRINT_FIELD(CPU) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU", - PERF_OUTPUT_CPU)) + !(session->sample_type & PERF_SAMPLE_CPU)) { + pr_err("Samples do not contain cpu.\n"); return -EINVAL; - - return 0; -} - -/* - * verify all user requested events exist and the samples - * have the expected data - */ -static int perf_session__check_output_opt(struct perf_session *session) -{ - int j; - struct perf_evsel *evsel; - - for (j = 0; j < PERF_TYPE_MAX; ++j) { - evsel = perf_session__find_first_evtype(session, j); - - /* - * even if fields is set to 0 (ie., show nothing) event must - * exist if user explicitly includes it on the command line - */ - if (!evsel && output[j].user_set && !output[j].wildcard_set) { - pr_err("%s events do not 
exist. " - "Remove corresponding -f option to proceed.\n", - event_type(j)); - return -1; - } - - if (evsel && output[j].fields && - perf_evsel__check_attr(evsel, session)) - return -1; } return 0; @@ -280,7 +168,10 @@ static void process_event(union perf_event *event __unused, { struct perf_event_attr *attr = &evsel->attr; - if (output[attr->type].fields == 0) + if (output_fields[attr->type] == 0) + return; + + if (perf_session__check_attr(session, attr) < 0) return; print_sample_start(sample, thread, attr); @@ -560,7 +451,6 @@ static int parse_output_fields(const struct option *opt __used, { char *tok; int i, imax = sizeof(all_output_options) / sizeof(struct output_option); - int j; int rc = 0; char *str = strdup(arg); int type = -1; @@ -568,99 +458,52 @@ static int parse_output_fields(const struct option *opt __used, if (!str) return -ENOMEM; - /* first word can state for which event type the user is specifying - * the fields. If no type exists, the specified fields apply to all - * event types found in the file minus the invalid fields for a type. - */ - tok = strchr(str, ':'); - if (tok) { - *tok = '\0'; - tok++; - if (!strcmp(str, "hw")) - type = PERF_TYPE_HARDWARE; - else if (!strcmp(str, "sw")) - type = PERF_TYPE_SOFTWARE; - else if (!strcmp(str, "trace")) - type = PERF_TYPE_TRACEPOINT; - else if (!strcmp(str, "raw")) - type = PERF_TYPE_RAW; - else { - fprintf(stderr, "Invalid event type in field string.\n"); - return -EINVAL; - } - - if (output[type].user_set) - pr_warning("Overriding previous field request for %s events.\n", - event_type(type)); - - output[type].fields = 0; - output[type].user_set = true; - output[type].wildcard_set = false; - - } else { - tok = str; - if (strlen(str) == 0) { - fprintf(stderr, - "Cannot set fields to 'none' for all event types.\n"); - rc = -EINVAL; - goto out; - } - - if (output_set_by_user()) - pr_warning("Overriding previous field request for all events.\n"); + tok = strtok(str, ":"); + if (!tok) { + fprintf(stderr, + "Invalid field string - not prepended with type."); + return -EINVAL; + } - for (j = 0; j < PERF_TYPE_MAX; ++j) { - output[j].fields = 0; - output[j].user_set = true; - output[j].wildcard_set = true; - } + /* first word should state which event type user + * is specifying the fields + */ + if (!strcmp(tok, "hw")) + type = PERF_TYPE_HARDWARE; + else if (!strcmp(tok, "sw")) + type = PERF_TYPE_SOFTWARE; + else if (!strcmp(tok, "trace")) + type = PERF_TYPE_TRACEPOINT; + else { + fprintf(stderr, "Invalid event type in field string."); + return -EINVAL; } - tok = strtok(tok, ","); - while (tok) { + output_fields[type] = 0; + while (1) { + tok = strtok(NULL, ","); + if (!tok) + break; for (i = 0; i < imax; ++i) { - if (strcmp(tok, all_output_options[i].str) == 0) + if (strcmp(tok, all_output_options[i].str) == 0) { + output_fields[type] |= all_output_options[i].field; break; + } } if (i == imax) { - fprintf(stderr, "Invalid field requested.\n"); + fprintf(stderr, "Invalid field requested."); rc = -EINVAL; - goto out; - } - - if (type == -1) { - /* add user option to all events types for - * which it is valid - */ - for (j = 0; j < PERF_TYPE_MAX; ++j) { - if (output[j].invalid_fields & all_output_options[i].field) { - pr_warning("\'%s\' not valid for %s events. 
Ignoring.\n", - all_output_options[i].str, event_type(j)); - } else - output[j].fields |= all_output_options[i].field; - } - } else { - if (output[type].invalid_fields & all_output_options[i].field) { - fprintf(stderr, "\'%s\' not valid for %s events.\n", - all_output_options[i].str, event_type(type)); - - rc = -EINVAL; - goto out; - } - output[type].fields |= all_output_options[i].field; + break; } - - tok = strtok(NULL, ","); } - if (type >= 0) { - if (output[type].fields == 0) { - pr_debug("No fields requested for %s type. " - "Events will not be displayed.\n", event_type(type)); - } + if (output_fields[type] == 0) { + pr_debug("No fields requested for %s type. " + "Events will not be displayed\n", event_type(type)); } -out: + output_set_by_user = true; + free(str); return rc; } @@ -986,7 +829,7 @@ static const struct option options[] = { OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_CALLBACK('f', "fields", NULL, "str", - "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym", + "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace. Fields: comm,tid,pid,time,cpu,event,trace,sym", parse_output_fields), OPT_END() @@ -1177,7 +1020,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) struct stat perf_stat; int input; - if (output_set_by_user()) { + if (output_set_by_user) { fprintf(stderr, "custom fields not supported for generated scripts"); return -1; @@ -1217,11 +1060,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) pr_debug("perf script started with script %s\n\n", script_name); } - - err = perf_session__check_output_opt(session); - if (err < 0) - goto out; - err = __cmd_script(session); perf_session__delete(session); diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index 602c3c96fa1e..03f0e45f1479 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -46,7 +46,6 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/debug.h" -#include "util/color.h" #include "util/header.h" #include "util/cpumap.h" #include "util/thread.h" @@ -66,56 +65,14 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; -/* - * Detailed stats: - */ -static struct perf_event_attr detailed_attrs[] = { - - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { .type = 
PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_LL << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_LL << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, -}; - struct perf_evlist *evsel_list; static bool system_wide = false; @@ -129,8 +86,6 @@ static pid_t target_pid = -1; static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; -static bool detailed_run = false; -static bool sync_run = false; static bool big_num = true; static int big_num_opt = -1; static const char *cpu_list; @@ -201,11 +156,7 @@ static double stddev_stats(struct stats *stats) struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; -struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; -struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; -struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; -struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) @@ -241,29 +192,6 @@ static inline int nsec_counter(struct perf_evsel *evsel) return 0; } -/* - * Update various tracking values we maintain to print - * more semantic information such as miss/hit ratios, - * instruction rates, etc: - */ -static void update_shadow_stats(struct perf_evsel *counter, u64 *count) -{ - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) - update_stats(&runtime_nsecs_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - update_stats(&runtime_cycles_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) - update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) - update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_stats(&runtime_branches_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) - update_stats(&runtime_cacherefs_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) - update_stats(&runtime_l1_dcache_stats[0], count[0]); -} - /* * Read out the results of a single counter: * aggregate counts across CPUs in system-wide mode @@ -289,7 +217,12 @@ static int read_counter_aggr(struct perf_evsel *counter) /* * Save the full runtime - to allow normalization during printout: */ - update_shadow_stats(counter, count); + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[0], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[0], 
count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[0], count[0]); return 0; } @@ -309,7 +242,12 @@ static int read_counter(struct perf_evsel *counter) count = counter->counts->cpu[cpu].values; - update_shadow_stats(counter, count); + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[cpu], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[cpu], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[cpu], count[0]); } return 0; @@ -377,18 +315,13 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { - if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { - if (verbose) - ui__warning("%s event is not supported by the kernel.\n", - event_name(counter)); - continue; - } - - if (errno == EPERM || errno == EACCES) { + if (errno == -EPERM || errno == -EACCES) { error("You may not have permission to collect %sstats.\n" "\t Consider tweaking" " /proc/sys/kernel/perf_event_paranoid or running as root.", system_wide ? "system-wide " : ""); + } else if (errno == ENOENT) { + error("%s event is not supported. ", event_name(counter)); } else { error("open_counter returned with %d (%s). " "/bin/dmesg may provide additional information.\n", @@ -439,16 +372,6 @@ static int run_perf_stat(int argc __used, const char **argv) return WEXITSTATUS(status); } -static void print_noise_pct(double total, double avg) -{ - double pct = 0.0; - - if (avg) - pct = 100.0*total/avg; - - fprintf(stderr, " ( +-%6.2f%% )", pct); -} - static void print_noise(struct perf_evsel *evsel, double avg) { struct perf_stat *ps; @@ -457,7 +380,8 @@ static void print_noise(struct perf_evsel *evsel, double avg) return; ps = evsel->priv; - print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); + fprintf(stderr, " ( +- %7.3f%% )", + 100 * stddev_stats(&ps->res_stats[0]) / avg); } static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) @@ -480,99 +404,8 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) - fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); -} - -static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 50.0) - color = PERF_COLOR_RED; - else if (ratio > 30.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 10.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " frontend cycles idle "); -} - -static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 75.0) - color = PERF_COLOR_RED; - else if (ratio > 50.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 20.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " backend cycles idle "); -} - 
-static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_branches_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all branches "); -} - -static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_l1_dcache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all L1-dcache hits "); + fprintf(stderr, " # %10.3f CPUs ", + avg / avg_stats(&walltime_nsecs_stats)); } static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) @@ -609,55 +442,23 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # %5.2f insns per cycle ", ratio); - - total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); - total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); - - if (total && avg) { - ratio = total / avg; - fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); - } - + fprintf(stderr, " # %10.3f IPC ", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { - print_branch_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_l1_dcache_stats[cpu].n != 0) { - print_l1_dcache_misses(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && - runtime_cacherefs_stats[cpu].n != 0) { - total = avg_stats(&runtime_cacherefs_stats[cpu]); + total = avg_stats(&runtime_branches_stats[cpu]); if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); + fprintf(stderr, " # %10.3f %% ", ratio); - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { - print_stalled_cycles_frontend(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { - print_stalled_cycles_backend(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { - total = avg_stats(&runtime_nsecs_stats[cpu]); - - if (total) - ratio = 1.0 * avg / total; - - fprintf(stderr, " # %8.3f GHz ", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) ratio = 1000.0 * avg / total; - fprintf(stderr, " # %8.3f M/sec ", ratio); - } else { - fprintf(stderr, " "); + fprintf(stderr, " # %10.3f M/sec", ratio); } } @@ -704,7 +505,8 @@ static void print_counter_aggr(struct perf_evsel *counter) avg_enabled = avg_stats(&ps->res_stats[1]); avg_running = avg_stats(&ps->res_stats[2]); - fprintf(stderr, " (%.2f%%)", 100 * avg_running / avg_enabled); + fprintf(stderr, " (scaled from %.2f%%)", + 100 * avg_running / avg_enabled); } 
fprintf(stderr, "\n"); } @@ -746,8 +548,10 @@ static void print_counter(struct perf_evsel *counter) if (!csv_output) { print_noise(counter, 1.0); - if (run != ena) - fprintf(stderr, " (%.2f%%)", 100.0 * run / ena); + if (run != ena) { + fprintf(stderr, " (scaled from %.2f%%)", + 100.0 * run / ena); + } } fputc('\n', stderr); } @@ -791,8 +595,9 @@ static void print_stat(int argc, const char **argv) fprintf(stderr, " %18.9f seconds time elapsed", avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { - print_noise_pct(stddev_stats(&walltime_nsecs_stats), - avg_stats(&walltime_nsecs_stats)); + fprintf(stderr, " ( +- %7.3f%% )", + 100*stddev_stats(&walltime_nsecs_stats) / + avg_stats(&walltime_nsecs_stats)); } fprintf(stderr, "\n\n"); } @@ -854,10 +659,6 @@ static const struct option options[] = { "repeat command and print average + stddev (max: 100)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), - OPT_BOOLEAN('d', "detailed", &detailed_run, - "detailed run - start a lot of events"), - OPT_BOOLEAN('S', "sync", &sync_run, - "call sync() before starting a run"), OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), @@ -919,18 +720,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) } /* Set attrs and nr_counters if no event is selected and !null_run */ - if (detailed_run) { - size_t c; - - for (c = 0; c < ARRAY_SIZE(detailed_attrs); ++c) { - pos = perf_evsel__new(&detailed_attrs[c], c); - if (pos == NULL) - goto out; - perf_evlist__add(evsel_list, pos); - } - } - /* Set attrs and nr_counters if no event is selected and !null_run */ - if (!detailed_run && !null_run && !evsel_list->nr_entries) { + if (!null_run && !evsel_list->nr_entries) { size_t c; for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { @@ -983,10 +773,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) for (run_idx = 0; run_idx < run_count; run_idx++) { if (run_count != 1 && verbose) fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); - - if (sync_run) - sync(); - status = run_perf_stat(argc, argv); } diff --git a/trunk/tools/perf/config/utilities.mak b/trunk/tools/perf/config/utilities.mak deleted file mode 100644 index 8046182a19eb..000000000000 --- a/trunk/tools/perf/config/utilities.mak +++ /dev/null @@ -1,188 +0,0 @@ -# This allows us to work with the newline character: -define newline - - -endef -newline := $(newline) - -# nl-escape -# -# Usage: escape = $(call nl-escape[,escape]) -# -# This is used as the common way to specify -# what should replace a newline when escaping -# newlines; the default is a bizarre string. -# -nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) - -# escape-nl -# -# Usage: escaped-text = $(call escape-nl,text[,escape]) -# -# GNU make's $(shell ...) function converts to a -# single space each newline character in the output -# produced during the expansion; this may not be -# desirable. -# -# The only solution is to change each newline into -# something that won't be converted, so that the -# information can be recovered later with -# $(call unescape-nl...) -# -escape-nl = $(subst $(newline),$(call nl-escape,$(2)),$(1)) - -# unescape-nl -# -# Usage: text = $(call unescape-nl,escaped-text[,escape]) -# -# See escape-nl. 
-# -unescape-nl = $(subst $(call nl-escape,$(2)),$(newline),$(1)) - -# shell-escape-nl -# -# Usage: $(shell some-command | $(call shell-escape-nl[,escape])) -# -# Use this to escape newlines from within a shell call; -# the default escape is a bizarre string. -# -# NOTE: The escape is used directly as a string constant -# in an `awk' program that is delimited by shell -# single-quotes, so be wary of the characters -# that are chosen. -# -define shell-escape-nl -awk 'NR==1 {t=$$0} NR>1 {t=t "$(nl-escape)" $$0} END {printf t}' -endef - -# shell-unescape-nl -# -# Usage: $(shell some-command | $(call shell-unescape-nl[,escape])) -# -# Use this to unescape newlines from within a shell call; -# the default escape is a bizarre string. -# -# NOTE: The escape is used directly as an extended regular -# expression constant in an `awk' program that is -# delimited by shell single-quotes, so be wary -# of the characters that are chosen. -# -# (The bash shell has a bug where `{gsub(...),...}' is -# misinterpreted as a brace expansion; this can be -# overcome by putting a space between `{' and `gsub'). -# -define shell-unescape-nl -awk 'NR==1 {t=$$0} NR>1 {t=t "\n" $$0} END { gsub(/$(nl-escape)/,"\n",t); printf t }' -endef - -# escape-for-shell-sq -# -# Usage: embeddable-text = $(call escape-for-shell-sq,text) -# -# This function produces text that is suitable for -# embedding in a shell string that is delimited by -# single-quotes. -# -escape-for-shell-sq = $(subst ','\'',$(1)) - -# shell-sq -# -# Usage: single-quoted-and-escaped-text = $(call shell-sq,text) -# -shell-sq = '$(escape-for-shell-sq)' - -# shell-wordify -# -# Usage: wordified-text = $(call shell-wordify,text) -# -# For instance: -# -# |define text -# |hello -# |world -# |endef -# | -# |target: -# | echo $(call shell-wordify,$(text)) -# -# At least GNU make gets confused by expanding a newline -# within the context of a command line of a makefile rule -# (this is in constrast to a `$(shell ...)' function call, -# which can handle it just fine). -# -# This function avoids the problem by producing a string -# that works as a shell word, regardless of whether or -# not it contains a newline. -# -# If the text to be wordified contains a newline, then -# an intrictate shell command substitution is constructed -# to render the text as a single line; when the shell -# processes the resulting escaped text, it transforms -# it into the original unescaped text. -# -# If the text does not contain a newline, then this function -# produces the same results as the `$(shell-sq)' function. -# -shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq)) -define _sw-esc-nl -"$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))" -endef - -# is-absolute -# -# Usage: bool-value = $(call is-absolute,path) -# -is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y) - -# lookup -# -# Usage: absolute-executable-path-or-empty = $(call lookup,path) -# -# (It's necessary to use `sh -c' because GNU make messes up by -# trying too hard and getting things wrong). -# -lookup = $(call unescape-nl,$(shell sh -c $(_l-sh))) -_l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,)) - -# is-executable -# -# Usage: bool-value = $(call is-executable,path) -# -# (It's necessary to use `sh -c' because GNU make messes up by -# trying too hard and getting things wrong). 
-# -is-executable = $(call _is-executable-helper,$(shell-sq)) -_is-executable-helper = $(shell sh -c $(_is-executable-sh)) -_is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y) - -# get-executable -# -# Usage: absolute-executable-path-or-empty = $(call get-executable,path) -# -# The goal is to get an absolute path for an executable; -# the `command -v' is defined by POSIX, but it's not -# necessarily very portable, so it's only used if -# relative path resolution is requested, as determined -# by the presence of a leading `/'. -# -get-executable = $(if $(1),$(if $(is-absolute),$(_ge-abspath),$(lookup))) -_ge-abspath = $(if $(is-executable),$(1)) - -# get-supplied-or-default-executable -# -# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) -# -define get-executable-or-default -$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) -endef -_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) -_gea_warn = $(warning The path '$(1)' is not executable.) -_gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) - -# try-cc -# Usage: option = $(call try-cc, source-to-build, cc-options) -try-cc = $(shell sh -c \ - 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ - echo "$(1)" | \ - $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ - rm -f "$$TMP"') diff --git a/trunk/tools/perf/config/feature-tests.mak b/trunk/tools/perf/feature-tests.mak similarity index 86% rename from trunk/tools/perf/config/feature-tests.mak rename to trunk/tools/perf/feature-tests.mak index 6170fd2531b5..b041ca67a2cb 100644 --- a/trunk/tools/perf/config/feature-tests.mak +++ b/trunk/tools/perf/feature-tests.mak @@ -79,15 +79,9 @@ endef endif ifndef NO_LIBPYTHON -define SOURCE_PYTHON_VERSION -#include -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif -int main(void){} -endef define SOURCE_PYTHON_EMBED #include + int main(void) { Py_Initialize(); @@ -126,3 +120,11 @@ int main(void) return 0; } endef + +# try-cc +# Usage: option = $(call try-cc, source-to-build, cc-options) +try-cc = $(shell sh -c \ + 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ + echo "$(1)" | \ + $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ + rm -f "$$TMP"') diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 41982c373faf..952b4ae3d954 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -31,36 +31,34 @@ char debugfs_path[MAXPATHLEN]; #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x static struct event_symbol event_symbols[] = { - { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, - { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, - { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, - { CHW(INSTRUCTIONS), "instructions", "" }, - { CHW(CACHE_REFERENCES), "cache-references", "" }, - { CHW(CACHE_MISSES), "cache-misses", "" }, - { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, - { CHW(BRANCH_MISSES), "branch-misses", "" }, - { CHW(BUS_CYCLES), "bus-cycles", "" }, - - { CSW(CPU_CLOCK), "cpu-clock", "" }, - { CSW(TASK_CLOCK), "task-clock", "" }, - { CSW(PAGE_FAULTS), "page-faults", "faults" }, - { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, - { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, - { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, - { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, - { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, - { CSW(EMULATION_FAULTS), 
"emulation-faults", "" }, + { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, + { CHW(INSTRUCTIONS), "instructions", "" }, + { CHW(CACHE_REFERENCES), "cache-references", "" }, + { CHW(CACHE_MISSES), "cache-misses", "" }, + { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, + { CHW(BRANCH_MISSES), "branch-misses", "" }, + { CHW(BUS_CYCLES), "bus-cycles", "" }, + + { CSW(CPU_CLOCK), "cpu-clock", "" }, + { CSW(TASK_CLOCK), "task-clock", "" }, + { CSW(PAGE_FAULTS), "page-faults", "faults" }, + { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, + { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, + { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, + { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, + { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, + { CSW(EMULATION_FAULTS), "emulation-faults", "" }, }; #define __PERF_EVENT_FIELD(config, name) \ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) -#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) +#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) -#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) +#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) -static const char *hw_event_names[PERF_COUNT_HW_MAX] = { +static const char *hw_event_names[] = { "cycles", "instructions", "cache-references", @@ -68,13 +66,11 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = { "branches", "branch-misses", "bus-cycles", - "stalled-cycles-frontend", - "stalled-cycles-backend", }; -static const char *sw_event_names[PERF_COUNT_SW_MAX] = { - "cpu-clock", - "task-clock", +static const char *sw_event_names[] = { + "cpu-clock-msecs", + "task-clock-msecs", "page-faults", "context-switches", "CPU-migrations", @@ -311,7 +307,7 @@ const char *__event_name(int type, u64 config) switch (type) { case PERF_TYPE_HARDWARE: - if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) + if (config < PERF_COUNT_HW_MAX) return hw_event_names[config]; return "unknown-hardware"; @@ -337,7 +333,7 @@ const char *__event_name(int type, u64 config) } case PERF_TYPE_SOFTWARE: - if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) + if (config < PERF_COUNT_SW_MAX) return sw_event_names[config]; return "unknown-software"; @@ -652,15 +648,13 @@ static int check_events(const char *str, unsigned int i) int n; n = strlen(event_symbols[i].symbol); - if (!strncasecmp(str, event_symbols[i].symbol, n)) + if (!strncmp(str, event_symbols[i].symbol, n)) return n; n = strlen(event_symbols[i].alias); - if (n) { - if (!strncasecmp(str, event_symbols[i].alias, n)) + if (n) + if (!strncmp(str, event_symbols[i].alias, n)) return n; - } - return 0; } @@ -724,22 +718,15 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr) return EVT_FAILED; } -static int +static enum event_result parse_event_modifier(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; int exclude = 0; int eu = 0, ek = 0, eh = 0, precise = 0; - if (!*str) - return 0; - - if (*str == ',') - return 0; - if (*str++ != ':') - return -1; - + return 0; while (*str) { if (*str == 'u') { if (!exclude) @@ -760,16 +747,14 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) ++str; } - if (str < *strp + 2) - return -1; - - *strp = str; - - attr->exclude_user = eu; - attr->exclude_kernel = ek; - attr->exclude_hv = eh; - attr->precise_ip = precise; - + if (str >= *strp + 2) { 
+ *strp = str; + attr->exclude_user = eu; + attr->exclude_kernel = ek; + attr->exclude_hv = eh; + attr->precise_ip = precise; + return 1; + } return 0; } @@ -812,12 +797,7 @@ parse_event_symbols(const struct option *opt, const char **str, return EVT_FAILED; modifier: - if (parse_event_modifier(str, attr) < 0) { - fprintf(stderr, "invalid event modifier: '%s'\n", *str); - fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); - - return EVT_FAILED; - } + parse_event_modifier(str, attr); return ret; } @@ -932,7 +912,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); - printf(" %-50s [%s]\n", evt_path, + printf(" %-42s [%s]\n", evt_path, event_type_descriptors[PERF_TYPE_TRACEPOINT]); } closedir(evt_dir); @@ -997,7 +977,7 @@ void print_events_type(u8 type) else snprintf(name, sizeof(name), "%s", syms->symbol); - printf(" %-50s [%s]\n", name, + printf(" %-42s [%s]\n", name, event_type_descriptors[type]); } } @@ -1015,10 +995,11 @@ int print_hwcache_events(const char *event_glob) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { char *name = event_cache_name(type, op, i); - if (event_glob != NULL && !strglobmatch(name, event_glob)) + if (event_glob != NULL && + !strglobmatch(name, event_glob)) continue; - printf(" %-50s [%s]\n", name, + printf(" %-42s [%s]\n", name, event_type_descriptors[PERF_TYPE_HW_CACHE]); ++printed; } @@ -1028,16 +1009,14 @@ int print_hwcache_events(const char *event_glob) return printed; } -#define MAX_NAME_LEN 100 - /* * Print the help text for the event symbols: */ void print_events(const char *event_glob) { - unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; struct event_symbol *syms = event_symbols; - char name[MAX_NAME_LEN]; + unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; + char name[40]; printf("\n"); printf("List of pre-defined events (to be used in -e):\n"); @@ -1057,10 +1036,10 @@ void print_events(const char *event_glob) continue; if (strlen(syms->alias)) - snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); + sprintf(name, "%s OR %s", syms->symbol, syms->alias); else - strncpy(name, syms->symbol, MAX_NAME_LEN); - printf(" %-50s [%s]\n", name, + strcpy(name, syms->symbol); + printf(" %-42s [%s]\n", name, event_type_descriptors[type]); prev_type = type; @@ -1077,12 +1056,12 @@ void print_events(const char *event_glob) return; printf("\n"); - printf(" %-50s [%s]\n", + printf(" %-42s [%s]\n", "rNNN (see 'perf list --help' on how to encode it)", event_type_descriptors[PERF_TYPE_RAW]); printf("\n"); - printf(" %-50s [%s]\n", + printf(" %-42s [%s]\n", "mem:[:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); printf("\n"); diff --git a/trunk/tools/perf/util/probe-finder.c b/trunk/tools/perf/util/probe-finder.c index 3b9d0b800d5c..b7c85ce466a1 100644 --- a/trunk/tools/perf/util/probe-finder.c +++ b/trunk/tools/perf/util/probe-finder.c @@ -1471,38 +1471,6 @@ static int find_probe_point_by_func(struct probe_finder *pf) return _param.retval; } -struct pubname_callback_param { - char *function; - char *file; - Dwarf_Die *cu_die; - Dwarf_Die *sp_die; - int found; -}; - -static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) -{ - struct pubname_callback_param *param = data; - - if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { - if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) - return DWARF_CB_OK; - - if (die_compare_name(param->sp_die, 
param->function)) { - if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) - return DWARF_CB_OK; - - if (param->file && - strtailcmp(param->file, dwarf_decl_file(param->sp_die))) - return DWARF_CB_OK; - - param->found = 1; - return DWARF_CB_ABORT; - } - } - - return DWARF_CB_OK; -} - /* Find probe points from debuginfo */ static int find_probes(int fd, struct probe_finder *pf) { @@ -1530,28 +1498,6 @@ static int find_probes(int fd, struct probe_finder *pf) off = 0; line_list__init(&pf->lcache); - - /* Fastpath: lookup by function name from .debug_pubnames section */ - if (pp->function) { - struct pubname_callback_param pubname_param = { - .function = pp->function, - .file = pp->file, - .cu_die = &pf->cu_die, - .sp_die = &pf->sp_die, - .found = 0, - }; - struct dwarf_callback_param probe_param = { - .data = pf, - }; - - dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); - if (pubname_param.found) { - ret = probe_point_search_cb(&pf->sp_die, &probe_param); - if (ret) - goto found; - } - } - /* Loop on CUs (Compilation Unit) */ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { /* Get the DIE(Debugging Information Entry) of this CU */ @@ -1579,8 +1525,6 @@ static int find_probes(int fd, struct probe_finder *pf) } off = noff; } - -found: line_list__free(&pf->lcache); if (dwfl) dwfl_end(dwfl); @@ -2002,22 +1946,6 @@ int find_line_range(int fd, struct line_range *lr) return -EBADF; } - /* Fastpath: lookup by function name from .debug_pubnames section */ - if (lr->function) { - struct pubname_callback_param pubname_param = { - .function = lr->function, .file = lr->file, - .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; - struct dwarf_callback_param line_range_param = { - .data = (void *)&lf, .retval = 0}; - - dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); - if (pubname_param.found) { - line_range_search_cb(&lf.sp_die, &line_range_param); - if (lf.found) - goto found; - } - } - /* Loop on CUs (Compilation Unit) */ while (!lf.found && ret >= 0) { if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) @@ -2046,7 +1974,6 @@ int find_line_range(int fd, struct line_range *lr) off = noff; } -found: /* Store comp_dir */ if (lf.found) { comp_dir = cu_get_comp_dir(&lf.cu_die); diff --git a/trunk/tools/perf/util/probe-finder.h b/trunk/tools/perf/util/probe-finder.h index 605730a366db..beaefc3c1223 100644 --- a/trunk/tools/perf/util/probe-finder.h +++ b/trunk/tools/perf/util/probe-finder.h @@ -49,7 +49,6 @@ struct probe_finder { Dwarf_Addr addr; /* Address */ const char *fname; /* Real file name */ Dwarf_Die cu_die; /* Current CU */ - Dwarf_Die sp_die; struct list_head lcache; /* Line cache for lazy match */ /* For variable searching */ @@ -84,7 +83,6 @@ struct line_finder { int lno_s; /* Start line number */ int lno_e; /* End line number */ Dwarf_Die cu_die; /* Current CU */ - Dwarf_Die sp_die; int found; }; diff --git a/trunk/tools/perf/util/python.c b/trunk/tools/perf/util/python.c index 8b0eff8b8283..f5e38451fdc5 100644 --- a/trunk/tools/perf/util/python.c +++ b/trunk/tools/perf/util/python.c @@ -810,9 +810,6 @@ static struct { { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, - { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, { 
"COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index fff66741f18d..caa224522fea 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -1156,18 +1156,6 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) return ret; } -struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, - unsigned int type) -{ - struct perf_evsel *pos; - - list_for_each_entry(pos, &session->evlist->entries, node) { - if (pos->attr.type == type) - return pos; - } - return NULL; -} - void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session) diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h index 8daaa2d15396..1ac481fc1100 100644 --- a/trunk/tools/perf/util/session.h +++ b/trunk/tools/perf/util/session.h @@ -162,9 +162,6 @@ static inline int perf_session__parse_sample(struct perf_session *session, session->sample_id_all, sample); } -struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, - unsigned int type); - void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session); diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index 516876dfbe52..f06c10f092ba 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -31,13 +31,13 @@ #define NT_GNU_BUILD_ID 3 #endif -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); +static bool dso__build_id_equal(const struct dso *self, u8 *build_id); static int elf_read_build_id(Elf *elf, void *bf, size_t size); static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -static int dso__load_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); -static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); static int vmlinux_path__nr_entries; static char **vmlinux_path; @@ -49,27 +49,27 @@ struct symbol_conf symbol_conf = { .symfs = "", }; -int dso__name_len(const struct dso *dso) +int dso__name_len(const struct dso *self) { if (verbose) - return dso->long_name_len; + return self->long_name_len; - return dso->short_name_len; + return self->short_name_len; } -bool dso__loaded(const struct dso *dso, enum map_type type) +bool dso__loaded(const struct dso *self, enum map_type type) { - return dso->loaded & (1 << type); + return self->loaded & (1 << type); } -bool dso__sorted_by_name(const struct dso *dso, enum map_type type) +bool dso__sorted_by_name(const struct dso *self, enum map_type type) { - return dso->sorted_by_name & (1 << type); + return self->sorted_by_name & (1 << type); } -static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) +static void dso__set_sorted_by_name(struct dso *self, enum map_type type) { - dso->sorted_by_name |= (1 << type); + self->sorted_by_name |= (1 << type); } bool symbol_type__is_a(char symbol_type, enum map_type map_type) @@ -84,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type) } } -static void symbols__fixup_end(struct rb_root *symbols) +static void symbols__fixup_end(struct rb_root *self) { - struct rb_node *nd, *prevnd = rb_first(symbols); + 
struct rb_node *nd, *prevnd = rb_first(self); struct symbol *curr, *prev; if (prevnd == NULL) @@ -107,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *symbols) curr->end = roundup(curr->start, 4096); } -static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) +static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); + struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); if (prevnd == NULL) return; @@ -130,128 +130,128 @@ static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) curr->end = ~0ULL; } -static void map_groups__fixup_end(struct map_groups *mg) +static void map_groups__fixup_end(struct map_groups *self) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(mg, i); + __map_groups__fixup_end(self, i); } static struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) { size_t namelen = strlen(name) + 1; - struct symbol *sym = calloc(1, (symbol_conf.priv_size + - sizeof(*sym) + namelen)); - if (sym == NULL) + struct symbol *self = calloc(1, (symbol_conf.priv_size + + sizeof(*self) + namelen)); + if (self == NULL) return NULL; if (symbol_conf.priv_size) - sym = ((void *)sym) + symbol_conf.priv_size; + self = ((void *)self) + symbol_conf.priv_size; + + self->start = start; + self->end = len ? start + len - 1 : start; + self->binding = binding; + self->namelen = namelen - 1; - sym->start = start; - sym->end = len ? start + len - 1 : start; - sym->binding = binding; - sym->namelen = namelen - 1; + pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end); - pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", - __func__, name, start, sym->end); - memcpy(sym->name, name, namelen); + memcpy(self->name, name, namelen); - return sym; + return self; } -void symbol__delete(struct symbol *sym) +void symbol__delete(struct symbol *self) { - free(((void *)sym) - symbol_conf.priv_size); + free(((void *)self) - symbol_conf.priv_size); } -static size_t symbol__fprintf(struct symbol *sym, FILE *fp) +static size_t symbol__fprintf(struct symbol *self, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", - sym->start, sym->end, - sym->binding == STB_GLOBAL ? 'g' : - sym->binding == STB_LOCAL ? 'l' : 'w', - sym->name); + self->start, self->end, + self->binding == STB_GLOBAL ? 'g' : + self->binding == STB_LOCAL ? 
'l' : 'w', + self->name); } -void dso__set_long_name(struct dso *dso, char *name) +void dso__set_long_name(struct dso *self, char *name) { if (name == NULL) return; - dso->long_name = name; - dso->long_name_len = strlen(name); + self->long_name = name; + self->long_name_len = strlen(name); } -static void dso__set_short_name(struct dso *dso, const char *name) +static void dso__set_short_name(struct dso *self, const char *name) { if (name == NULL) return; - dso->short_name = name; - dso->short_name_len = strlen(name); + self->short_name = name; + self->short_name_len = strlen(name); } -static void dso__set_basename(struct dso *dso) +static void dso__set_basename(struct dso *self) { - dso__set_short_name(dso, basename(dso->long_name)); + dso__set_short_name(self, basename(self->long_name)); } struct dso *dso__new(const char *name) { - struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); + struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); - if (dso != NULL) { + if (self != NULL) { int i; - strcpy(dso->name, name); - dso__set_long_name(dso, dso->name); - dso__set_short_name(dso, dso->name); + strcpy(self->name, name); + dso__set_long_name(self, self->name); + dso__set_short_name(self, self->name); for (i = 0; i < MAP__NR_TYPES; ++i) - dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; - dso->symtab_type = SYMTAB__NOT_FOUND; - dso->loaded = 0; - dso->sorted_by_name = 0; - dso->has_build_id = 0; - dso->kernel = DSO_TYPE_USER; - INIT_LIST_HEAD(&dso->node); + self->symbols[i] = self->symbol_names[i] = RB_ROOT; + self->symtab_type = SYMTAB__NOT_FOUND; + self->loaded = 0; + self->sorted_by_name = 0; + self->has_build_id = 0; + self->kernel = DSO_TYPE_USER; + INIT_LIST_HEAD(&self->node); } - return dso; + return self; } -static void symbols__delete(struct rb_root *symbols) +static void symbols__delete(struct rb_root *self) { struct symbol *pos; - struct rb_node *next = rb_first(symbols); + struct rb_node *next = rb_first(self); while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, symbols); + rb_erase(&pos->rb_node, self); symbol__delete(pos); } } -void dso__delete(struct dso *dso) +void dso__delete(struct dso *self) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - symbols__delete(&dso->symbols[i]); - if (dso->sname_alloc) - free((char *)dso->short_name); - if (dso->lname_alloc) - free(dso->long_name); - free(dso); + symbols__delete(&self->symbols[i]); + if (self->sname_alloc) + free((char *)self->short_name); + if (self->lname_alloc) + free(self->long_name); + free(self); } -void dso__set_build_id(struct dso *dso, void *build_id) +void dso__set_build_id(struct dso *self, void *build_id) { - memcpy(dso->build_id, build_id, sizeof(dso->build_id)); - dso->has_build_id = 1; + memcpy(self->build_id, build_id, sizeof(self->build_id)); + self->has_build_id = 1; } -static void symbols__insert(struct rb_root *symbols, struct symbol *sym) +static void symbols__insert(struct rb_root *self, struct symbol *sym) { - struct rb_node **p = &symbols->rb_node; + struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; const u64 ip = sym->start; struct symbol *s; @@ -265,17 +265,17 @@ static void symbols__insert(struct rb_root *symbols, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, symbols); + rb_insert_color(&sym->rb_node, self); } -static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) +static struct symbol *symbols__find(struct rb_root 
*self, u64 ip) { struct rb_node *n; - if (symbols == NULL) + if (self == NULL) return NULL; - n = symbols->rb_node; + n = self->rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -296,9 +296,9 @@ struct symbol_name_rb_node { struct symbol sym; }; -static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) +static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) { - struct rb_node **p = &symbols->rb_node; + struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct symbol_name_rb_node *symn, *s; @@ -313,29 +313,27 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&symn->rb_node, parent, p); - rb_insert_color(&symn->rb_node, symbols); + rb_insert_color(&symn->rb_node, self); } -static void symbols__sort_by_name(struct rb_root *symbols, - struct rb_root *source) +static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) { struct rb_node *nd; for (nd = rb_first(source); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - symbols__insert_by_name(symbols, pos); + symbols__insert_by_name(self, pos); } } -static struct symbol *symbols__find_by_name(struct rb_root *symbols, - const char *name) +static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) { struct rb_node *n; - if (symbols == NULL) + if (self == NULL) return NULL; - n = symbols->rb_node; + n = self->rb_node; while (n) { struct symbol_name_rb_node *s; @@ -355,29 +353,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, return NULL; } -struct symbol *dso__find_symbol(struct dso *dso, +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr) { - return symbols__find(&dso->symbols[type], addr); + return symbols__find(&self->symbols[type], addr); } -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, +struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, const char *name) { - return symbols__find_by_name(&dso->symbol_names[type], name); + return symbols__find_by_name(&self->symbol_names[type], name); } -void dso__sort_by_name(struct dso *dso, enum map_type type) +void dso__sort_by_name(struct dso *self, enum map_type type) { - dso__set_sorted_by_name(dso, type); - return symbols__sort_by_name(&dso->symbol_names[type], - &dso->symbols[type]); + dso__set_sorted_by_name(self, type); + return symbols__sort_by_name(&self->symbol_names[type], + &self->symbols[type]); } -int build_id__sprintf(const u8 *build_id, int len, char *bf) +int build_id__sprintf(const u8 *self, int len, char *bf) { char *bid = bf; - const u8 *raw = build_id; + const u8 *raw = self; int i; for (i = 0; i < len; ++i) { @@ -386,25 +384,24 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf) bid += 2; } - return raw - build_id; + return raw - self; } -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) +size_t dso__fprintf_buildid(struct dso *self, FILE *fp) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); return fprintf(fp, "%s", sbuild_id); } -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp) +size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp) { size_t ret = 0; struct rb_node *nd; struct symbol_name_rb_node *pos; - for (nd = 
rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); fprintf(fp, "%s\n", pos->sym.name); } @@ -412,18 +409,18 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, return ret; } -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "dso: %s (", dso->short_name); + size_t ret = fprintf(fp, "dso: %s (", self->short_name); - if (dso->short_name != dso->long_name) - ret += fprintf(fp, "%s, ", dso->long_name); + if (self->short_name != self->long_name) + ret += fprintf(fp, "%s, ", self->long_name); ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - dso->loaded ? "" : "NOT "); - ret += dso__fprintf_buildid(dso, fp); + self->loaded ? "" : "NOT "); + ret += dso__fprintf_buildid(self, fp); ret += fprintf(fp, ")\n"); - for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } @@ -546,10 +543,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. */ -static int dso__load_all_kallsyms(struct dso *dso, const char *filename, +static int dso__load_all_kallsyms(struct dso *self, const char *filename, struct map *map) { - struct process_kallsyms_args args = { .map = map, .dso = dso, }; + struct process_kallsyms_args args = { .map = map, .dso = self, }; return kallsyms__parse(filename, &args, map__process_kallsym_symbol); } @@ -558,7 +555,7 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename, * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. 
*/ -static int dso__split_kallsyms(struct dso *dso, struct map *map, +static int dso__split_kallsyms(struct dso *self, struct map *map, symbol_filter_t filter) { struct map_groups *kmaps = map__kmap(map)->kmaps; @@ -566,7 +563,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, struct map *curr_map = map; struct symbol *pos; int count = 0, moved = 0; - struct rb_root *root = &dso->symbols[map->type]; + struct rb_root *root = &self->symbols[map->type]; struct rb_node *next = rb_first(root); int kernel_range = 0; @@ -585,7 +582,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, if (strcmp(curr_map->dso->short_name, module)) { if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && + self->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(machine)) { /* * We assume all symbols of a module are @@ -621,14 +618,14 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, pos->end = curr_map->map_ip(curr_map, pos->end); } else if (curr_map != map) { char dso_name[PATH_MAX]; - struct dso *ndso; + struct dso *dso; if (count == 0) { curr_map = map; goto filter_symbol; } - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + if (self->kernel == DSO_TYPE_GUEST_KERNEL) snprintf(dso_name, sizeof(dso_name), "[guest.kernel].%d", kernel_range++); @@ -637,15 +634,15 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, "[kernel].%d", kernel_range++); - ndso = dso__new(dso_name); - if (ndso == NULL) + dso = dso__new(dso_name); + if (dso == NULL) return -1; - ndso->kernel = dso->kernel; + dso->kernel = self->kernel; - curr_map = map__new2(pos->start, ndso, map->type); + curr_map = map__new2(pos->start, dso, map->type); if (curr_map == NULL) { - dso__delete(ndso); + dso__delete(dso); return -1; } @@ -668,7 +665,7 @@ discard_symbol: rb_erase(&pos->rb_node, root); } if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && + self->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(kmaps->machine)) { dso__set_loaded(curr_map->dso, curr_map->type); } @@ -676,21 +673,21 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count + moved; } -int dso__load_kallsyms(struct dso *dso, const char *filename, +int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, symbol_filter_t filter) { - if (dso__load_all_kallsyms(dso, filename, map) < 0) + if (dso__load_all_kallsyms(self, filename, map) < 0) return -1; - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->symtab_type = SYMTAB__GUEST_KALLSYMS; + if (self->kernel == DSO_TYPE_GUEST_KERNEL) + self->symtab_type = SYMTAB__GUEST_KALLSYMS; else - dso->symtab_type = SYMTAB__KALLSYMS; + self->symtab_type = SYMTAB__KALLSYMS; - return dso__split_kallsyms(dso, map, filter); + return dso__split_kallsyms(self, map, filter); } -static int dso__load_perf_map(struct dso *dso, struct map *map, +static int dso__load_perf_map(struct dso *self, struct map *map, symbol_filter_t filter) { char *line = NULL; @@ -698,7 +695,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, FILE *file; int nr_syms = 0; - file = fopen(dso->long_name, "r"); + file = fopen(self->long_name, "r"); if (file == NULL) goto out_failure; @@ -736,7 +733,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, if (filter && filter(map, sym)) symbol__delete(sym); else { - symbols__insert(&dso->symbols[map->type], sym); + symbols__insert(&self->symbols[map->type], sym); nr_syms++; } } @@ -755,7 +752,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, /** * 
elf_symtab__for_each_symbol - iterate thru all the symbols * - * @syms: struct elf_symtab instance to iterate + * @self: struct elf_symtab instance to iterate * @idx: uint32_t idx * @sym: GElf_Sym iterator */ @@ -855,7 +852,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). */ -static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, +static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, symbol_filter_t filter) { uint32_t nr_rel_entries, idx; @@ -874,7 +871,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, char name[PATH_MAX]; snprintf(name, sizeof(name), "%s%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); fd = open(name, O_RDONLY); if (fd < 0) goto out; @@ -950,7 +947,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -972,7 +969,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -988,30 +985,29 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, return nr; out: pr_debug("%s: problems reading %s PLT info.\n", - __func__, dso->long_name); + __func__, self->long_name); return 0; } -static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sym__is_function(sym); + return elf_sym__is_function(self); case MAP__VARIABLE: - return elf_sym__is_object(sym); + return elf_sym__is_object(self); default: return false; } } -static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, - enum map_type type) +static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sec__is_text(shdr, secstrs); + return elf_sec__is_text(self, secstrs); case MAP__VARIABLE: - return elf_sec__is_data(shdr, secstrs); + return elf_sec__is_data(self, secstrs); default: return false; } @@ -1036,13 +1032,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } -static int dso__load_sym(struct dso *dso, struct map *map, const char *name, +static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kmodule, int want_symtab) { - struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; + struct kmap *kmap = self->kernel ? 
map__kmap(map) : NULL; struct map *curr_map = map; - struct dso *curr_dso = dso; + struct dso *curr_dso = self; Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -1068,14 +1064,14 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, } /* Always reject images with a mismatched build-id: */ - if (dso->has_build_id) { + if (self->has_build_id) { u8 build_id[BUILD_ID_SIZE]; if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) != BUILD_ID_SIZE) goto out_elf_end; - if (!dso__build_id_equal(dso, build_id)) + if (!dso__build_id_equal(self, build_id)) goto out_elf_end; } @@ -1116,14 +1112,13 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, nr_syms = shdr.sh_size / shdr.sh_entsize; memset(&sym, 0, sizeof(sym)); - if (dso->kernel == DSO_TYPE_USER) { - dso->adjust_symbols = (ehdr.e_type == ET_EXEC || + if (self->kernel == DSO_TYPE_USER) { + self->adjust_symbols = (ehdr.e_type == ET_EXEC || elf_section_by_name(elf, &ehdr, &shdr, ".gnu.prelink_undo", NULL) != NULL); - } else { - dso->adjust_symbols = 0; - } + } else self->adjust_symbols = 0; + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -1173,22 +1168,22 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, (sym.st_value & 1)) --sym.st_value; - if (dso->kernel != DSO_TYPE_USER || kmodule) { + if (self->kernel != DSO_TYPE_USER || kmodule) { char dso_name[PATH_MAX]; if (strcmp(section_name, (curr_dso->short_name + - dso->short_name_len)) == 0) + self->short_name_len)) == 0) goto new_symbol; if (strcmp(section_name, ".text") == 0) { curr_map = map; - curr_dso = dso; + curr_dso = self; goto new_symbol; } snprintf(dso_name, sizeof(dso_name), - "%s%s", dso->short_name, section_name); + "%s%s", self->short_name, section_name); curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); if (curr_map == NULL) { @@ -1200,9 +1195,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, curr_dso = dso__new(dso_name); if (curr_dso == NULL) goto out_elf_end; - curr_dso->kernel = dso->kernel; - curr_dso->long_name = dso->long_name; - curr_dso->long_name_len = dso->long_name_len; + curr_dso->kernel = self->kernel; + curr_dso->long_name = self->long_name; + curr_dso->long_name_len = self->long_name_len; curr_map = map__new2(start, curr_dso, map->type); if (curr_map == NULL) { @@ -1211,9 +1206,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, } curr_map->map_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip; - curr_dso->symtab_type = dso->symtab_type; + curr_dso->symtab_type = self->symtab_type; map_groups__insert(kmap->kmaps, curr_map); - dsos__add(&dso->node, curr_dso); + dsos__add(&self->node, curr_dso); dso__set_loaded(curr_dso, map->type); } else curr_dso = curr_map->dso; @@ -1255,7 +1250,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, * For misannotated, zeroed, ASM function sizes. 
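 * symbols__fixup_end() walks the tree in address order and gives each such
 * zero-sized symbol an end address derived from the start of the symbol
 * that follows it.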
*/ if (nr > 0) { - symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_end(&self->symbols[map->type]); if (kmap) { /* * We need to fixup this here too because we create new @@ -1271,9 +1266,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, return err; } -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) +static bool dso__build_id_equal(const struct dso *self, u8 *build_id) { - return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; + return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; } bool __dsos__read_build_ids(struct list_head *head, bool with_hits) @@ -1434,7 +1429,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) return err; } -char dso__symtab_origin(const struct dso *dso) +char dso__symtab_origin(const struct dso *self) { static const char origin[] = { [SYMTAB__KALLSYMS] = 'k', @@ -1449,12 +1444,12 @@ char dso__symtab_origin(const struct dso *dso) [SYMTAB__GUEST_KMODULE] = 'G', }; - if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) + if (self == NULL || self->symtab_type == SYMTAB__NOT_FOUND) return '!'; - return origin[dso->symtab_type]; + return origin[self->symtab_type]; } -int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name; @@ -1464,12 +1459,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) const char *root_dir; int want_symtab; - dso__set_loaded(dso, map->type); + dso__set_loaded(self, map->type); - if (dso->kernel == DSO_TYPE_KERNEL) - return dso__load_kernel_sym(dso, map, filter); - else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - return dso__load_guest_kernel_sym(dso, map, filter); + if (self->kernel == DSO_TYPE_KERNEL) + return dso__load_kernel_sym(self, map, filter); + else if (self->kernel == DSO_TYPE_GUEST_KERNEL) + return dso__load_guest_kernel_sym(self, map, filter); if (map->groups && map->groups->machine) machine = map->groups->machine; @@ -1480,11 +1475,11 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (!name) return -1; - dso->adjust_symbols = 0; + self->adjust_symbols = 0; - if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(dso, map, filter); - dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : + if (strncmp(self->name, "/tmp/perf-", 10) == 0) { + ret = dso__load_perf_map(self, map, filter); + self->symtab_type = ret > 0 ? 
SYMTAB__JAVA_JIT : SYMTAB__NOT_FOUND; return ret; } @@ -1495,33 +1490,33 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) */ want_symtab = 1; restart: - for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; - dso->symtab_type != SYMTAB__NOT_FOUND; - dso->symtab_type++) { - switch (dso->symtab_type) { + for (self->symtab_type = SYMTAB__BUILD_ID_CACHE; + self->symtab_type != SYMTAB__NOT_FOUND; + self->symtab_type++) { + switch (self->symtab_type) { case SYMTAB__BUILD_ID_CACHE: /* skip the locally configured cache if a symfs is given */ if (symbol_conf.symfs[0] || - (dso__build_id_filename(dso, name, size) == NULL)) { + (dso__build_id_filename(self, name, size) == NULL)) { continue; } break; case SYMTAB__FEDORA_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__UBUNTU_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__BUILDID_DEBUGINFO: { char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - if (!dso->has_build_id) + if (!self->has_build_id) continue; - build_id__sprintf(dso->build_id, - sizeof(dso->build_id), + build_id__sprintf(self->build_id, + sizeof(self->build_id), build_id_hex); snprintf(name, size, "%s/usr/lib/debug/.build-id/%.2s/%s.debug", @@ -1530,7 +1525,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) break; case SYMTAB__SYSTEM_PATH_DSO: snprintf(name, size, "%s%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__GUEST_KMODULE: if (map->groups && machine) @@ -1538,12 +1533,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) else root_dir = ""; snprintf(name, size, "%s%s%s", symbol_conf.symfs, - root_dir, dso->long_name); + root_dir, self->long_name); break; case SYMTAB__SYSTEM_PATH_KMODULE: snprintf(name, size, "%s%s", symbol_conf.symfs, - dso->long_name); + self->long_name); break; default:; } @@ -1553,7 +1548,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (fd < 0) continue; - ret = dso__load_sym(dso, map, name, fd, filter, 0, + ret = dso__load_sym(self, map, name, fd, filter, 0, want_symtab); close(fd); @@ -1565,8 +1560,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) continue; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(dso, map, - filter); + int nr_plt = dso__synthesize_plt_symbols(self, map, filter); if (nr_plt > 0) ret += nr_plt; break; @@ -1583,17 +1577,17 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) } free(name); - if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) + if (ret < 0 && strstr(self->name, " (deleted)") != NULL) return 0; return ret; } -struct map *map_groups__find_by_name(struct map_groups *mg, +struct map *map_groups__find_by_name(struct map_groups *self, enum map_type type, const char *name) { struct rb_node *nd; - for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->short_name, name) == 0) @@ -1603,28 +1597,28 @@ struct map *map_groups__find_by_name(struct map_groups *mg, return NULL; } -static int dso__kernel_module_get_build_id(struct dso *dso, - const char *root_dir) +static int dso__kernel_module_get_build_id(struct dso *self, + const char *root_dir) { char filename[PATH_MAX]; 
/* * kernel module short names are of the form "[module]" and * we need just "module" here. */ - const char *name = dso->short_name + 1; + const char *name = self->short_name + 1; snprintf(filename, sizeof(filename), "%s/sys/module/%.*s/notes/.note.gnu.build-id", root_dir, (int)strlen(name) - 1, name); - if (sysfs__read_build_id(filename, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(filename, self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; return 0; } -static int map_groups__set_modules_path_dir(struct map_groups *mg, +static int map_groups__set_modules_path_dir(struct map_groups *self, const char *dir_name) { struct dirent *dent; @@ -1652,7 +1646,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); - ret = map_groups__set_modules_path_dir(mg, path); + ret = map_groups__set_modules_path_dir(self, path); if (ret < 0) goto out; } else { @@ -1667,8 +1661,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, (int)(dot - dent->d_name), dent->d_name); strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(mg, MAP__FUNCTION, - dso_name); + map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); if (map == NULL) continue; @@ -1718,20 +1711,20 @@ static char *get_kernel_version(const char *root_dir) return strdup(name); } -static int machine__set_modules_path(struct machine *machine) +static int machine__set_modules_path(struct machine *self) { char *version; char modules_path[PATH_MAX]; - version = get_kernel_version(machine->root_dir); + version = get_kernel_version(self->root_dir); if (!version) return -1; snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", - machine->root_dir, version); + self->root_dir, version); free(version); - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); + return map_groups__set_modules_path_dir(&self->kmaps, modules_path); } /* @@ -1741,23 +1734,23 @@ static int machine__set_modules_path(struct machine *machine) */ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) { - struct map *map = calloc(1, (sizeof(*map) + - (dso->kernel ? sizeof(struct kmap) : 0))); - if (map != NULL) { + struct map *self = calloc(1, (sizeof(*self) + + (dso->kernel ? 
sizeof(struct kmap) : 0))); + if (self != NULL) { /* * ->end will be filled after we load all the symbols */ - map__init(map, type, start, 0, 0, dso); + map__init(self, type, start, 0, 0, dso); } - return map; + return self; } -struct map *machine__new_module(struct machine *machine, u64 start, +struct map *machine__new_module(struct machine *self, u64 start, const char *filename) { struct map *map; - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); + struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); if (dso == NULL) return NULL; @@ -1766,15 +1759,15 @@ struct map *machine__new_module(struct machine *machine, u64 start, if (map == NULL) return NULL; - if (machine__is_host(machine)) + if (machine__is_host(self)) dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; else dso->symtab_type = SYMTAB__GUEST_KMODULE; - map_groups__insert(&machine->kmaps, map); + map_groups__insert(&self->kmaps, map); return map; } -static int machine__create_modules(struct machine *machine) +static int machine__create_modules(struct machine *self) { char *line = NULL; size_t n; @@ -1783,10 +1776,10 @@ static int machine__create_modules(struct machine *machine) const char *modules; char path[PATH_MAX]; - if (machine__is_default_guest(machine)) + if (machine__is_default_guest(self)) modules = symbol_conf.default_guest_modules; else { - sprintf(path, "%s/proc/modules", machine->root_dir); + sprintf(path, "%s/proc/modules", self->root_dir); modules = path; } @@ -1822,16 +1815,16 @@ static int machine__create_modules(struct machine *machine) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - map = machine__new_module(machine, start, name); + map = machine__new_module(self, start, name); if (map == NULL) goto out_delete_line; - dso__kernel_module_get_build_id(map->dso, machine->root_dir); + dso__kernel_module_get_build_id(map->dso, self->root_dir); } free(line); fclose(file); - return machine__set_modules_path(machine); + return machine__set_modules_path(self); out_delete_line: free(line); @@ -1839,7 +1832,7 @@ static int machine__create_modules(struct machine *machine) return -1; } -int dso__load_vmlinux(struct dso *dso, struct map *map, +int dso__load_vmlinux(struct dso *self, struct map *map, const char *vmlinux, symbol_filter_t filter) { int err = -1, fd; @@ -1851,9 +1844,9 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, if (fd < 0) return -1; - dso__set_long_name(dso, (char *)vmlinux); - dso__set_loaded(dso, map->type); - err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); + dso__set_long_name(self, (char *)vmlinux); + dso__set_loaded(self, map->type); + err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); close(fd); if (err > 0) @@ -1862,7 +1855,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, return err; } -int dso__load_vmlinux_path(struct dso *dso, struct map *map, +int dso__load_vmlinux_path(struct dso *self, struct map *map, symbol_filter_t filter) { int i, err = 0; @@ -1871,20 +1864,20 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, pr_debug("Looking at the vmlinux_path (%d entries long)\n", vmlinux_path__nr_entries + 1); - filename = dso__build_id_filename(dso, NULL, 0); + filename = dso__build_id_filename(self, NULL, 0); if (filename != NULL) { - err = dso__load_vmlinux(dso, map, filename, filter); + err = dso__load_vmlinux(self, map, filename, filter); if (err > 0) { - dso__set_long_name(dso, filename); + dso__set_long_name(self, filename); goto out; } free(filename); } for (i = 0; i < 
vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); + err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); if (err > 0) { - dso__set_long_name(dso, strdup(vmlinux_path[i])); + dso__set_long_name(self, strdup(vmlinux_path[i])); break; } } @@ -1892,7 +1885,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, return err; } -static int dso__load_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter) { int err; @@ -1919,10 +1912,10 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } if (symbol_conf.vmlinux_name != NULL) { - err = dso__load_vmlinux(dso, map, + err = dso__load_vmlinux(self, map, symbol_conf.vmlinux_name, filter); if (err > 0) { - dso__set_long_name(dso, + dso__set_long_name(self, strdup(symbol_conf.vmlinux_name)); goto out_fixup; } @@ -1930,7 +1923,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } if (vmlinux_path != NULL) { - err = dso__load_vmlinux_path(dso, map, filter); + err = dso__load_vmlinux_path(self, map, filter); if (err > 0) goto out_fixup; } @@ -1944,13 +1937,13 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, * we have a build-id, so check if it is the same as the running kernel, * using it if it is. */ - if (dso->has_build_id) { + if (self->has_build_id) { u8 kallsyms_build_id[BUILD_ID_SIZE]; char sbuild_id[BUILD_ID_SIZE * 2 + 1]; if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, sizeof(kallsyms_build_id)) == 0) { - if (dso__build_id_equal(dso, kallsyms_build_id)) { + if (dso__build_id_equal(self, kallsyms_build_id)) { kallsyms_filename = "/proc/kallsyms"; goto do_kallsyms; } @@ -1959,7 +1952,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, * Now look if we have it on the build-id cache in * $HOME/.debug/[kernel.kallsyms]. 
*/ - build_id__sprintf(dso->build_id, sizeof(dso->build_id), + build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); if (asprintf(&kallsyms_allocated_filename, @@ -1986,7 +1979,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } do_kallsyms: - err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + err = dso__load_kallsyms(self, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); free(kallsyms_allocated_filename); @@ -1994,7 +1987,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, if (err > 0) { out_fixup: if (kallsyms_filename != NULL) - dso__set_long_name(dso, strdup("[kernel.kallsyms]")); + dso__set_long_name(self, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); } @@ -2002,8 +1995,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, return err; } -static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, - symbol_filter_t filter) +static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, + symbol_filter_t filter) { int err; const char *kallsyms_filename = NULL; @@ -2023,7 +2016,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, * Or use file guest_kallsyms inputted by user on commandline */ if (symbol_conf.default_guest_vmlinux_name != NULL) { - err = dso__load_vmlinux(dso, map, + err = dso__load_vmlinux(self, map, symbol_conf.default_guest_vmlinux_name, filter); goto out_try_fixup; } @@ -2036,7 +2029,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, kallsyms_filename = path; } - err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + err = dso__load_kallsyms(self, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); @@ -2044,7 +2037,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, if (err > 0) { if (kallsyms_filename != NULL) { machine__mmap_name(machine, path, sizeof(path)); - dso__set_long_name(dso, strdup(path)); + dso__set_long_name(self, strdup(path)); } map__fixup_start(map); map__fixup_end(map); @@ -2097,12 +2090,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) return ret; } -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) +size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += __dsos__fprintf(&pos->kernel_dsos, fp); ret += __dsos__fprintf(&pos->user_dsos, fp); @@ -2126,20 +2119,18 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, return ret; } -size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, - bool with_hits) +size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) { - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + - __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); + return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + + __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); } -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits) +size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(self); nd; nd 
= rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); } @@ -2148,59 +2139,59 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *machines, struct dso *dso__new_kernel(const char *name) { - struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); + struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); - if (dso != NULL) { - dso__set_short_name(dso, "[kernel]"); - dso->kernel = DSO_TYPE_KERNEL; + if (self != NULL) { + dso__set_short_name(self, "[kernel]"); + self->kernel = DSO_TYPE_KERNEL; } - return dso; + return self; } static struct dso *dso__new_guest_kernel(struct machine *machine, const char *name) { char bf[PATH_MAX]; - struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, - sizeof(bf))); - if (dso != NULL) { - dso__set_short_name(dso, "[guest.kernel]"); - dso->kernel = DSO_TYPE_GUEST_KERNEL; + struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); + + if (self != NULL) { + dso__set_short_name(self, "[guest.kernel]"); + self->kernel = DSO_TYPE_GUEST_KERNEL; } - return dso; + return self; } -void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) +void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) { char path[PATH_MAX]; if (machine__is_default_guest(machine)) return; sprintf(path, "%s/sys/kernel/notes", machine->root_dir); - if (sysfs__read_build_id(path, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(path, self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; } -static struct dso *machine__create_kernel(struct machine *machine) +static struct dso *machine__create_kernel(struct machine *self) { const char *vmlinux_name = NULL; struct dso *kernel; - if (machine__is_host(machine)) { + if (machine__is_host(self)) { vmlinux_name = symbol_conf.vmlinux_name; kernel = dso__new_kernel(vmlinux_name); } else { - if (machine__is_default_guest(machine)) + if (machine__is_default_guest(self)) vmlinux_name = symbol_conf.default_guest_vmlinux_name; - kernel = dso__new_guest_kernel(machine, vmlinux_name); + kernel = dso__new_guest_kernel(self, vmlinux_name); } if (kernel != NULL) { - dso__read_running_kernel_build_id(kernel, machine); - dsos__add(&machine->kernel_dsos, kernel); + dso__read_running_kernel_build_id(kernel, self); + dsos__add(&self->kernel_dsos, kernel); } return kernel; } @@ -2245,43 +2236,41 @@ static u64 machine__get_kernel_start_addr(struct machine *machine) return args.start; } -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) { enum map_type type; - u64 start = machine__get_kernel_start_addr(machine); + u64 start = machine__get_kernel_start_addr(self); for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - machine->vmlinux_maps[type] = map__new2(start, kernel, type); - if (machine->vmlinux_maps[type] == NULL) + self->vmlinux_maps[type] = map__new2(start, kernel, type); + if (self->vmlinux_maps[type] == NULL) return -1; - machine->vmlinux_maps[type]->map_ip = - machine->vmlinux_maps[type]->unmap_ip = - identity__map_ip; - kmap = map__kmap(machine->vmlinux_maps[type]); - kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, - machine->vmlinux_maps[type]); + self->vmlinux_maps[type]->map_ip = + self->vmlinux_maps[type]->unmap_ip = identity__map_ip; + + kmap = 
map__kmap(self->vmlinux_maps[type]); + kmap->kmaps = &self->kmaps; + map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); } return 0; } -void machine__destroy_kernel_maps(struct machine *machine) +void machine__destroy_kernel_maps(struct machine *self) { enum map_type type; for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - if (machine->vmlinux_maps[type] == NULL) + if (self->vmlinux_maps[type] == NULL) continue; - kmap = map__kmap(machine->vmlinux_maps[type]); - map_groups__remove(&machine->kmaps, - machine->vmlinux_maps[type]); + kmap = map__kmap(self->vmlinux_maps[type]); + map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); if (kmap->ref_reloc_sym) { /* * ref_reloc_sym is shared among all maps, so free just @@ -2295,25 +2284,25 @@ void machine__destroy_kernel_maps(struct machine *machine) kmap->ref_reloc_sym = NULL; } - map__delete(machine->vmlinux_maps[type]); - machine->vmlinux_maps[type] = NULL; + map__delete(self->vmlinux_maps[type]); + self->vmlinux_maps[type] = NULL; } } -int machine__create_kernel_maps(struct machine *machine) +int machine__create_kernel_maps(struct machine *self) { - struct dso *kernel = machine__create_kernel(machine); + struct dso *kernel = machine__create_kernel(self); if (kernel == NULL || - __machine__create_kernel_maps(machine, kernel) < 0) + __machine__create_kernel_maps(self, kernel) < 0) return -1; - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) + if (symbol_conf.use_modules && machine__create_modules(self) < 0) pr_debug("Problems creating module maps, continuing anyway...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ - map_groups__fixup_end(&machine->kmaps); + map_groups__fixup_end(&self->kmaps); return 0; } @@ -2377,11 +2366,11 @@ static int vmlinux_path__init(void) return -1; } -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) +size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) { int i; size_t printed = 0; - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; + struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; if (kdso->has_build_id) { char filename[PATH_MAX]; @@ -2478,9 +2467,9 @@ void symbol__exit(void) symbol_conf.initialized = false; } -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) +int machines__create_kernel_maps(struct rb_root *self, pid_t pid) { - struct machine *machine = machines__findnew(machines, pid); + struct machine *machine = machines__findnew(self, pid); if (machine == NULL) return -1; @@ -2531,7 +2520,7 @@ char *strxfrchar(char *s, char from, char to) return s; } -int machines__create_guest_kernel_maps(struct rb_root *machines) +int machines__create_guest_kernel_maps(struct rb_root *self) { int ret = 0; struct dirent **namelist = NULL; @@ -2542,7 +2531,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_modules || symbol_conf.default_guest_kallsyms) { - machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); + machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); } if (symbol_conf.guestmount) { @@ -2563,7 +2552,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) pr_debug("Can't access file %s\n", path); goto failure; } - machines__create_kernel_maps(machines, pid); + machines__create_kernel_maps(self, pid); } failure: free(namelist); @@ -2572,23 +2561,23 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) return ret; } -void 
machines__destroy_guest_kernel_maps(struct rb_root *machines) +void machines__destroy_guest_kernel_maps(struct rb_root *self) { - struct rb_node *next = rb_first(machines); + struct rb_node *next = rb_first(self); while (next) { struct machine *pos = rb_entry(next, struct machine, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, machines); + rb_erase(&pos->rb_node, self); machine__delete(pos); } } -int machine__load_kallsyms(struct machine *machine, const char *filename, +int machine__load_kallsyms(struct machine *self, const char *filename, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = self->vmlinux_maps[type]; int ret = dso__load_kallsyms(map->dso, filename, map, filter); if (ret > 0) { @@ -2598,16 +2587,16 @@ int machine__load_kallsyms(struct machine *machine, const char *filename, * kernel, with modules between them, fixup the end of all * sections. */ - __map_groups__fixup_end(&machine->kmaps, type); + __map_groups__fixup_end(&self->kmaps, type); } return ret; } -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, +int machine__load_vmlinux_path(struct machine *self, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = self->vmlinux_maps[type]; int ret = dso__load_vmlinux_path(map->dso, map, filter); if (ret > 0) { diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index 242de0101a86..713b0b40cc4a 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -62,7 +62,7 @@ struct symbol { char name[0]; }; -void symbol__delete(struct symbol *sym); +void symbol__delete(struct symbol *self); struct strlist; @@ -96,9 +96,9 @@ struct symbol_conf { extern struct symbol_conf symbol_conf; -static inline void *symbol__priv(struct symbol *sym) +static inline void *symbol__priv(struct symbol *self) { - return ((void *)sym) - symbol_conf.priv_size; + return ((void *)self) - symbol_conf.priv_size; } struct ref_reloc_sym { @@ -155,45 +155,43 @@ struct dso { struct dso *dso__new(const char *name); struct dso *dso__new_kernel(const char *name); -void dso__delete(struct dso *dso); +void dso__delete(struct dso *self); -int dso__name_len(const struct dso *dso); +int dso__name_len(const struct dso *self); -bool dso__loaded(const struct dso *dso, enum map_type type); -bool dso__sorted_by_name(const struct dso *dso, enum map_type type); +bool dso__loaded(const struct dso *self, enum map_type type); +bool dso__sorted_by_name(const struct dso *self, enum map_type type); -static inline void dso__set_loaded(struct dso *dso, enum map_type type) +static inline void dso__set_loaded(struct dso *self, enum map_type type) { - dso->loaded |= (1 << type); + self->loaded |= (1 << type); } -void dso__sort_by_name(struct dso *dso, enum map_type type); +void dso__sort_by_name(struct dso *self, enum map_type type); struct dso *__dsos__findnew(struct list_head *head, const char *name); -int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); -int dso__load_vmlinux(struct dso *dso, struct map *map, +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); +int dso__load_vmlinux(struct dso *self, struct map *map, const char *vmlinux, symbol_filter_t filter); -int dso__load_vmlinux_path(struct dso *dso, struct map *map, +int dso__load_vmlinux_path(struct dso *self, struct map *map, symbol_filter_t filter); -int dso__load_kallsyms(struct dso *dso, const char *filename, struct 
map *map, +int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, symbol_filter_t filter); -int machine__load_kallsyms(struct machine *machine, const char *filename, +int machine__load_kallsyms(struct machine *self, const char *filename, enum map_type type, symbol_filter_t filter); -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, +int machine__load_vmlinux_path(struct machine *self, enum map_type type, symbol_filter_t filter); size_t __dsos__fprintf(struct list_head *head, FILE *fp); -size_t machine__fprintf_dsos_buildid(struct machine *machine, - FILE *fp, bool with_hits); -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits); -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp); -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); +size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); +size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); + +size_t dso__fprintf_buildid(struct dso *self, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp); +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); enum symtab_type { SYMTAB__KALLSYMS = 0, @@ -209,36 +207,34 @@ enum symtab_type { SYMTAB__NOT_FOUND, }; -char dso__symtab_origin(const struct dso *dso); -void dso__set_long_name(struct dso *dso, char *name); -void dso__set_build_id(struct dso *dso, void *build_id); -void dso__read_running_kernel_build_id(struct dso *dso, - struct machine *machine); -struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, - u64 addr); -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, +char dso__symtab_origin(const struct dso *self); +void dso__set_long_name(struct dso *self, char *name); +void dso__set_build_id(struct dso *self, void *build_id); +void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); +struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, const char *name); int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool __dsos__read_build_ids(struct list_head *head, bool with_hits); -int build_id__sprintf(const u8 *build_id, int len, char *bf); +int build_id__sprintf(const u8 *self, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, char type, u64 start, u64 end)); -void machine__destroy_kernel_maps(struct machine *machine); -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); -int machine__create_kernel_maps(struct machine *machine); +void machine__destroy_kernel_maps(struct machine *self); +int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); +int machine__create_kernel_maps(struct machine *self); -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); -int machines__create_guest_kernel_maps(struct rb_root *machines); -void machines__destroy_guest_kernel_maps(struct rb_root *machines); +int machines__create_kernel_maps(struct rb_root *self, pid_t pid); +int 
machines__create_guest_kernel_maps(struct rb_root *self); +void machines__destroy_guest_kernel_maps(struct rb_root *self); int symbol__init(void); void symbol__exit(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); +size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); #endif /* __PERF_SYMBOL */
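
For readers following the dso__load() hunks above: the loop walks symtab_type from SYMTAB__BUILD_ID_CACHE towards SYMTAB__NOT_FOUND and builds one candidate file name per step. The sketch below is illustration only, not perf code: it prints the candidate paths for an ordinary DSO in the order the loop tries them, skipping the build-id cache, kmodule and guest cases; the argument split for the .build-id case is an assumption, since that snprintf() call is truncated in the hunk.

/*
 * Illustration only: candidate debuginfo paths in the order dso__load()
 * tries them for a plain DSO.  Helper and main() are made up.
 */
#include <stdio.h>

static void print_symtab_candidates(const char *symfs, const char *long_name,
				    const char *build_id_hex)
{
	char name[4096];

	/* SYMTAB__FEDORA_DEBUGINFO */
	snprintf(name, sizeof(name), "%s/usr/lib/debug%s.debug",
		 symfs, long_name);
	puts(name);

	/* SYMTAB__UBUNTU_DEBUGINFO */
	snprintf(name, sizeof(name), "%s/usr/lib/debug%s", symfs, long_name);
	puts(name);

	/*
	 * SYMTAB__BUILDID_DEBUGINFO: the first two hex characters form a
	 * subdirectory (argument split assumed; the real call is truncated
	 * in the hunk).
	 */
	if (build_id_hex) {
		snprintf(name, sizeof(name),
			 "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
			 symfs, build_id_hex, build_id_hex + 2);
		puts(name);
	}

	/* SYMTAB__SYSTEM_PATH_DSO: finally, the binary itself */
	snprintf(name, sizeof(name), "%s%s", symfs, long_name);
	puts(name);
}

int main(void)
{
	print_symtab_candidates("", "/lib/libc-2.12.so",
				"0123456789abcdef0123456789abcdef01234567");
	return 0;
}

The surrounding want_symtab/restart labels in the hunk appear to make a second, more permissive pass over this same list when no image with a full symbol table is found; that detail is omitted from the sketch.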
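
Likewise, dso__kernel_module_get_build_id() turns a module DSO short name of the form "[module]" into the sysfs notes path it reads the build-id from. A small standalone sketch of just that string transformation (the helper name and main() driver are made up for illustration):

#include <stdio.h>
#include <string.h>

/*
 * "[ext4]" -> "<root_dir>/sys/module/ext4/notes/.note.gnu.build-id"
 * Mirrors the snprintf() in dso__kernel_module_get_build_id(): skip the
 * leading '[' and drop the trailing ']' via the %.*s precision.
 */
static void module_notes_path(const char *root_dir, const char *short_name,
			      char *buf, size_t len)
{
	const char *name = short_name + 1;	/* skip '[' */

	snprintf(buf, len, "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);
}

int main(void)
{
	char path[4096];

	module_notes_path("", "[ext4]", path, sizeof(path));
	puts(path);	/* /sys/module/ext4/notes/.note.gnu.build-id */
	return 0;
}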