diff --git a/[refs] b/[refs]
index 9cb21eaf23ce..a174a391eae6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 26fb20d008d841268545c25bb183f21ed16db891
+refs/heads/master: 80509e27e40d7554e576405ed9f5b7966c567112
diff --git a/trunk/Documentation/DocBook/tracepoint.tmpl b/trunk/Documentation/DocBook/tracepoint.tmpl
index 8bca1d5cec09..b0756d0fd579 100644
--- a/trunk/Documentation/DocBook/tracepoint.tmpl
+++ b/trunk/Documentation/DocBook/tracepoint.tmpl
@@ -86,9 +86,4 @@
!Iinclude/trace/events/irq.h
-
- SIGNAL
-!Iinclude/trace/events/signal.h
-
-
diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig
index eef3bbb97075..7f418bbc261a 100644
--- a/trunk/arch/Kconfig
+++ b/trunk/arch/Kconfig
@@ -126,11 +126,4 @@ config HAVE_DMA_API_DEBUG
config HAVE_DEFAULT_NO_SPIN_MUTEXES
bool
-config HAVE_HW_BREAKPOINT
- bool
- depends on HAVE_PERF_EVENTS
- select ANON_INODES
- select PERF_EVENTS
-
-
source "kernel/gcov/Kconfig"
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 178084b4377c..72ace9515a07 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -49,7 +49,6 @@ config X86
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
- select HAVE_HW_BREAKPOINT
select HAVE_ARCH_KMEMCHECK
config OUTPUT_FORMAT
diff --git a/trunk/arch/x86/include/asm/Kbuild b/trunk/arch/x86/include/asm/Kbuild
index 9f828f87ca35..4a8e80cdcfa5 100644
--- a/trunk/arch/x86/include/asm/Kbuild
+++ b/trunk/arch/x86/include/asm/Kbuild
@@ -10,7 +10,6 @@ header-y += ptrace-abi.h
header-y += sigcontext32.h
header-y += ucontext.h
header-y += processor-flags.h
-header-y += hw_breakpoint.h
unifdef-y += e820.h
unifdef-y += ist.h
diff --git a/trunk/arch/x86/include/asm/a.out-core.h b/trunk/arch/x86/include/asm/a.out-core.h
index 7a15588e45d4..bb70e397aa84 100644
--- a/trunk/arch/x86/include/asm/a.out-core.h
+++ b/trunk/arch/x86/include/asm/a.out-core.h
@@ -17,7 +17,6 @@
#include
#include
-#include
/*
* fill in the user structure for an a.out core dump
@@ -33,7 +32,14 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
>> PAGE_SHIFT;
dump->u_dsize -= dump->u_tsize;
dump->u_ssize = 0;
- aout_dump_debugregs(dump);
+ dump->u_debugreg[0] = current->thread.debugreg0;
+ dump->u_debugreg[1] = current->thread.debugreg1;
+ dump->u_debugreg[2] = current->thread.debugreg2;
+ dump->u_debugreg[3] = current->thread.debugreg3;
+ dump->u_debugreg[4] = 0;
+ dump->u_debugreg[5] = 0;
+ dump->u_debugreg[6] = current->thread.debugreg6;
+ dump->u_debugreg[7] = current->thread.debugreg7;
if (dump->start_stack < TASK_SIZE)
dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
diff --git a/trunk/arch/x86/include/asm/debugreg.h b/trunk/arch/x86/include/asm/debugreg.h
index 8240f76b531e..3ea6f37be9e2 100644
--- a/trunk/arch/x86/include/asm/debugreg.h
+++ b/trunk/arch/x86/include/asm/debugreg.h
@@ -18,7 +18,6 @@
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
#define DR_TRAP3 (0x8) /* db3 */
-#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
@@ -50,8 +49,6 @@
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
-#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
@@ -70,34 +67,4 @@
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
-/*
- * HW breakpoint additions
- */
-#ifdef __KERNEL__
-
-DECLARE_PER_CPU(unsigned long, cpu_dr7);
-
-static inline void hw_breakpoint_disable(void)
-{
- /* Zero the control register for HW Breakpoint */
- set_debugreg(0UL, 7);
-
- /* Zero-out the individual HW breakpoint address registers */
- set_debugreg(0UL, 0);
- set_debugreg(0UL, 1);
- set_debugreg(0UL, 2);
- set_debugreg(0UL, 3);
-}
-
-static inline int hw_breakpoint_active(void)
-{
- return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
-}
-
-extern void aout_dump_debugregs(struct user *dump);
-
-extern void hw_breakpoint_restore(void);
-
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/trunk/arch/x86/include/asm/hardirq.h b/trunk/arch/x86/include/asm/hardirq.h
index 108eb6fd1ae7..82e3e8f01043 100644
--- a/trunk/arch/x86/include/asm/hardirq.h
+++ b/trunk/arch/x86/include/asm/hardirq.h
@@ -20,11 +20,11 @@ typedef struct {
unsigned int irq_call_count;
unsigned int irq_tlb_count;
#endif
-#ifdef CONFIG_X86_THERMAL_VECTOR
+#ifdef CONFIG_X86_MCE
unsigned int irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_MCE_THRESHOLD
+# ifdef CONFIG_X86_MCE_THRESHOLD
unsigned int irq_threshold_count;
+# endif
#endif
} ____cacheline_aligned irq_cpustat_t;
diff --git a/trunk/arch/x86/include/asm/hw_breakpoint.h b/trunk/arch/x86/include/asm/hw_breakpoint.h
deleted file mode 100644
index 0675a7c4c20e..000000000000
--- a/trunk/arch/x86/include/asm/hw_breakpoint.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _I386_HW_BREAKPOINT_H
-#define _I386_HW_BREAKPOINT_H
-
-#ifdef __KERNEL__
-#define __ARCH_HW_BREAKPOINT_H
-
-/*
- * The name should probably be something dealt in
- * a higher level. While dealing with the user
- * (display/resolving)
- */
-struct arch_hw_breakpoint {
- char *name; /* Contains name of the symbol to set bkpt */
- unsigned long address;
- u8 len;
- u8 type;
-};
-
-#include
-#include
-#include
-
-/* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_1 0x40
-#define X86_BREAKPOINT_LEN_2 0x44
-#define X86_BREAKPOINT_LEN_4 0x4c
-#define X86_BREAKPOINT_LEN_EXECUTE 0x40
-
-#ifdef CONFIG_X86_64
-#define X86_BREAKPOINT_LEN_8 0x48
-#endif
-
-/* Available HW breakpoint type encodings */
-
-/* trigger on instruction execute */
-#define X86_BREAKPOINT_EXECUTE 0x80
-/* trigger on memory write */
-#define X86_BREAKPOINT_WRITE 0x81
-/* trigger on memory read or write */
-#define X86_BREAKPOINT_RW 0x83
-
-/* Total number of available HW breakpoint registers */
-#define HBP_NUM 4
-
-struct perf_event;
-struct pmu;
-
-extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk);
-extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
- unsigned long val, void *data);
-
-
-int arch_install_hw_breakpoint(struct perf_event *bp);
-void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void hw_breakpoint_pmu_read(struct perf_event *bp);
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
-
-extern void
-arch_fill_perf_breakpoint(struct perf_event *bp);
-
-unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type);
-int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type);
-
-extern int arch_bp_generic_fields(int x86_len, int x86_type,
- int *gen_len, int *gen_type);
-
-extern struct pmu perf_ops_bp;
-
-#endif /* __KERNEL__ */
-#endif /* _I386_HW_BREAKPOINT_H */
-
diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h
index 858baa061cfc..f1363b72364f 100644
--- a/trunk/arch/x86/include/asm/mce.h
+++ b/trunk/arch/x86/include/asm/mce.h
@@ -108,8 +108,6 @@ struct mce_log {
#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
-extern struct atomic_notifier_head x86_mce_decoder_chain;
-
#ifdef __KERNEL__
#include
@@ -120,11 +118,9 @@ extern int mce_disabled;
extern int mce_p5_enabled;
#ifdef CONFIG_X86_MCE
-int mcheck_init(void);
-void mcheck_cpu_init(struct cpuinfo_x86 *c);
+void mcheck_init(struct cpuinfo_x86 *c);
#else
-static inline int mcheck_init(void) { return 0; }
-static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
+static inline void mcheck_init(struct cpuinfo_x86 *c) {}
#endif
#ifdef CONFIG_X86_ANCIENT_MCE
@@ -218,11 +214,5 @@ void intel_init_thermal(struct cpuinfo_x86 *c);
void mce_log_therm_throt_event(__u64 status);
-#ifdef CONFIG_X86_THERMAL_VECTOR
-extern void mcheck_intel_therm_init(void);
-#else
-static inline void mcheck_intel_therm_init(void) { }
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
diff --git a/trunk/arch/x86/include/asm/processor.h b/trunk/arch/x86/include/asm/processor.h
index 6f8ec1c37e0a..c9786480f0fe 100644
--- a/trunk/arch/x86/include/asm/processor.h
+++ b/trunk/arch/x86/include/asm/processor.h
@@ -30,7 +30,6 @@ struct mm_struct;
#include
#include
-#define HBP_NUM 4
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -423,8 +422,6 @@ extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
-struct perf_event;
-
struct thread_struct {
/* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
@@ -446,10 +443,13 @@ struct thread_struct {
unsigned long fs;
#endif
unsigned long gs;
- /* Save middle states of ptrace breakpoints */
- struct perf_event *ptrace_bps[HBP_NUM];
- /* Debug status used for traps, single steps, etc... */
- unsigned long debugreg6;
+ /* Hardware debugging registers: */
+ unsigned long debugreg0;
+ unsigned long debugreg1;
+ unsigned long debugreg2;
+ unsigned long debugreg3;
+ unsigned long debugreg6;
+ unsigned long debugreg7;
/* Fault info: */
unsigned long cr2;
unsigned long trap_no;
diff --git a/trunk/arch/x86/include/asm/ptrace.h b/trunk/arch/x86/include/asm/ptrace.h
index 3d11fd0f44c5..a3d49dd7d26e 100644
--- a/trunk/arch/x86/include/asm/ptrace.h
+++ b/trunk/arch/x86/include/asm/ptrace.h
@@ -227,8 +227,8 @@ extern const char *regs_query_register_name(unsigned int offset);
* @regs: pt_regs from which register value is gotten.
* @offset: offset number of the register.
*
- * regs_get_register returns the value of a register. The @offset is the
- * offset of the register in struct pt_regs address which specified by @regs.
+ * regs_get_register returns the value of a register whose offset from @regs
+ * is @offset. The @offset is the offset of the register in struct pt_regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
@@ -244,7 +244,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
- * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
@@ -260,7 +260,7 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
- * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile
index 4f2e66e29ecc..d8e5d0cdd678 100644
--- a/trunk/arch/x86/kernel/Makefile
+++ b/trunk/arch/x86/kernel/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
-obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
+obj-y += alternative.o i8253.o pci-nommu.o
obj-y += tsc.o io_delay.o rtc.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
diff --git a/trunk/arch/x86/kernel/cpu/Makefile b/trunk/arch/x86/kernel/cpu/Makefile
index 1d2cb383410e..68537e957a9b 100644
--- a/trunk/arch/x86/kernel/cpu/Makefile
+++ b/trunk/arch/x86/kernel/cpu/Makefile
@@ -5,7 +5,6 @@
# Don't trace early stages of a secondary CPU boot
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_common.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
endif
# Make sure load_percpu_segment has no stackprotector
diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c
index 9053be5d95cd..cc25c2b4a567 100644
--- a/trunk/arch/x86/kernel/cpu/common.c
+++ b/trunk/arch/x86/kernel/cpu/common.c
@@ -837,8 +837,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
}
+#ifdef CONFIG_X86_MCE
/* Init Machine Check Exception if available. */
- mcheck_cpu_init(c);
+ mcheck_init(c);
+#endif
select_idle_routine(c);
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c
index 0bcaa3875863..721a77ca8115 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,9 +46,6 @@
#include "mce-internal.h"
-#define CREATE_TRACE_POINTS
-#include
-
int mce_disabled __read_mostly;
#define MISC_MCELOG_MINOR 227
@@ -88,26 +85,18 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;
-/*
- * CPU/chipset specific EDAC code can register a notifier call here to print
- * MCE errors in a human-readable form.
- */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
-EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
-
-static int default_decode_mce(struct notifier_block *nb, unsigned long val,
- void *data)
+static void default_decode_mce(struct mce *m)
{
pr_emerg("No human readable MCE decoding support on this CPU type.\n");
pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
-
- return NOTIFY_STOP;
}
-static struct notifier_block mce_dec_nb = {
- .notifier_call = default_decode_mce,
- .priority = -1,
-};
+/*
+ * CPU/chipset specific EDAC code can register a callback here to print
+ * MCE errors in a human-readable form:
+ */
+void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
+EXPORT_SYMBOL(x86_mce_decode_callback);
/* MCA banks polled by the period polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
@@ -152,9 +141,6 @@ void mce_log(struct mce *mce)
{
unsigned next, entry;
- /* Emit the trace record: */
- trace_mce_record(mce);
-
mce->finished = 0;
wmb();
for (;;) {
@@ -218,9 +204,9 @@ static void print_mce(struct mce *m)
/*
* Print out human-readable details about the MCE error,
- * (if the CPU has an implementation for that)
+ * (if the CPU has an implementation for that):
*/
- atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+ x86_mce_decode_callback(m);
}
static void print_mce_head(void)
@@ -1136,7 +1122,7 @@ static int check_interval = 5 * 60; /* 5 minutes */
static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
-static void mce_start_timer(unsigned long data)
+static void mcheck_timer(unsigned long data)
{
struct timer_list *t = &per_cpu(mce_timer, data);
int *n;
@@ -1201,7 +1187,7 @@ int mce_notify_irq(void)
}
EXPORT_SYMBOL_GPL(mce_notify_irq);
-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
+static int mce_banks_init(void)
{
int i;
@@ -1220,7 +1206,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
/*
* Initialize Machine Checks for a CPU.
*/
-static int __cpuinit __mcheck_cpu_cap_init(void)
+static int __cpuinit mce_cap_init(void)
{
unsigned b;
u64 cap;
@@ -1242,7 +1228,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
WARN_ON(banks != 0 && b != banks);
banks = b;
if (!mce_banks) {
- int err = __mcheck_cpu_mce_banks_init();
+ int err = mce_banks_init();
if (err)
return err;
@@ -1258,7 +1244,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
return 0;
}
-static void __mcheck_cpu_init_generic(void)
+static void mce_init(void)
{
mce_banks_t all_banks;
u64 cap;
@@ -1287,7 +1273,7 @@ static void __mcheck_cpu_init_generic(void)
}
/* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
@@ -1355,7 +1341,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
return 0;
}
-static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
{
if (c->x86 != 5)
return;
@@ -1369,7 +1355,7 @@ static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
}
}
-static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
{
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
@@ -1383,7 +1369,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
}
}
-static void __mcheck_cpu_init_timer(void)
+static void mce_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
int *n = &__get_cpu_var(mce_next_interval);
@@ -1394,7 +1380,7 @@ static void __mcheck_cpu_init_timer(void)
*n = check_interval * HZ;
if (!*n)
return;
- setup_timer(t, mce_start_timer, smp_processor_id());
+ setup_timer(t, mcheck_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
add_timer_on(t, smp_processor_id());
}
@@ -1414,28 +1400,27 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
* Called for each booted CPU to set up machine checks.
* Must be called with preempt off:
*/
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
if (mce_disabled)
return;
- __mcheck_cpu_ancient_init(c);
+ mce_ancient_init(c);
if (!mce_available(c))
return;
- if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
+ if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
mce_disabled = 1;
return;
}
machine_check_vector = do_machine_check;
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(c);
- __mcheck_cpu_init_timer();
+ mce_init();
+ mce_cpu_features(c);
+ mce_init_timer();
INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-
}
/*
@@ -1655,15 +1640,6 @@ static int __init mcheck_enable(char *str)
}
__setup("mce", mcheck_enable);
-int __init mcheck_init(void)
-{
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
-
- mcheck_intel_therm_init();
-
- return 0;
-}
-
/*
* Sysfs support
*/
@@ -1672,7 +1648,7 @@ int __init mcheck_init(void)
* Disable machine checks on suspend and shutdown. We can't really handle
* them later.
*/
-static int mce_disable_error_reporting(void)
+static int mce_disable(void)
{
int i;
@@ -1687,12 +1663,12 @@ static int mce_disable_error_reporting(void)
static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
- return mce_disable_error_reporting();
+ return mce_disable();
}
static int mce_shutdown(struct sys_device *dev)
{
- return mce_disable_error_reporting();
+ return mce_disable();
}
/*
@@ -1702,8 +1678,8 @@ static int mce_shutdown(struct sys_device *dev)
*/
static int mce_resume(struct sys_device *dev)
{
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(&current_cpu_data);
+ mce_init();
+ mce_cpu_features(&current_cpu_data);
return 0;
}
@@ -1713,8 +1689,8 @@ static void mce_cpu_restart(void *data)
del_timer_sync(&__get_cpu_var(mce_timer));
if (!mce_available(&current_cpu_data))
return;
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_timer();
+ mce_init();
+ mce_init_timer();
}
/* Reinit MCEs after user configuration changes */
@@ -1740,7 +1716,7 @@ static void mce_enable_ce(void *all)
cmci_reenable();
cmci_recheck();
if (all)
- __mcheck_cpu_init_timer();
+ mce_init_timer();
}
static struct sysdev_class mce_sysclass = {
@@ -1953,14 +1929,13 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
+static void mce_disable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;
if (!mce_available(&current_cpu_data))
return;
-
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
for (i = 0; i < banks; i++) {
@@ -1971,7 +1946,7 @@ static void __cpuinit mce_disable_cpu(void *h)
}
}
-static void __cpuinit mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;
@@ -2050,7 +2025,7 @@ static __init void mce_init_banks(void)
}
}
-static __init int mcheck_init_device(void)
+static __init int mce_init_device(void)
{
int err;
int i = 0;
@@ -2078,7 +2053,7 @@ static __init int mcheck_init_device(void)
return err;
}
-device_initcall(mcheck_init_device);
+device_initcall(mce_init_device);
/*
* Old style boot options parsing. Only for compatibility.
@@ -2126,7 +2101,7 @@ static int fake_panic_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
fake_panic_set, "%llu\n");
-static int __init mcheck_debugfs_init(void)
+static int __init mce_debugfs_init(void)
{
struct dentry *dmce, *ffake_panic;
@@ -2140,5 +2115,5 @@ static int __init mcheck_debugfs_init(void)
return 0;
}
-late_initcall(mcheck_debugfs_init);
+late_initcall(mce_debugfs_init);
#endif
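The mce.c hunks above drop the x86_mce_decoder_chain notifier and return to a single exported function pointer, x86_mce_decode_callback, preset to default_decode_mce(). As an illustrative sketch only (the decoder name and the extern declaration below are assumptions, not part of this patch), an EDAC-style module would hook it by overwriting the pointer; note there is no chaining, so the previous callback is simply replaced:

    /* Sketch: hooking the restored single-callback MCE decode path.
     * edac_mce_decode() is hypothetical; the extern mirrors the pointer
     * type defined in mce.c above. */
    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <asm/mce.h>

    extern void (*x86_mce_decode_callback)(struct mce *m);

    static void edac_mce_decode(struct mce *m)
    {
            pr_emerg("MCE bank %d: status 0x%llx addr 0x%llx\n",
                     m->bank, (unsigned long long)m->status,
                     (unsigned long long)m->addr);
    }

    static int __init edac_mce_decode_init(void)
    {
            x86_mce_decode_callback = edac_mce_decode;
            return 0;
    }
    device_initcall(edac_mce_decode_init);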
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc221..b3a1dba75330 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -49,8 +49,6 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state);
static atomic_t therm_throt_en = ATOMIC_INIT(0);
-static u32 lvtthmr_init __read_mostly;
-
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name) \
static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
@@ -256,18 +254,6 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
ack_APIC_irq();
}
-void __init mcheck_intel_therm_init(void)
-{
- /*
- * This function is only called on boot CPU. Save the init thermal
- * LVT value on BSP and use that value to restore APs' thermal LVT
- * entry BIOS programmed later
- */
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
- cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
- lvtthmr_init = apic_read(APIC_LVTTHMR);
-}
-
void intel_init_thermal(struct cpuinfo_x86 *c)
{
unsigned int cpu = smp_processor_id();
@@ -284,20 +270,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
* since it might be delivered via SMI already:
*/
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-
- /*
- * The initial value of thermal LVT entries on all APs always reads
- * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
- * sequence to them and LVT registers are reset to 0s except for
- * the mask bits which are set to 1s when APs receive INIT IPI.
- * Always restore the value that BIOS has programmed on AP based on
- * BSP's info we saved since BIOS is always setting the same value
- * for all threads/cores
- */
- apic_write(APIC_LVTTHMR, lvtthmr_init);
-
- h = lvtthmr_init;
-
+ h = apic_read(APIC_LVTTHMR);
if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG
"CPU%d: Thermal monitoring handled by SMI\n", cpu);
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index c1bbed1021d9..bd8743024204 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -2229,10 +2229,10 @@ validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct hw_perf_event fake_event = event->hw;
- if (event->pmu && event->pmu != &pmu)
+ if (event->pmu != &pmu)
return 0;
- return x86_schedule_event(cpuc, &fake_event) >= 0;
+ return x86_schedule_event(cpuc, &fake_event);
}
static int validate_group(struct perf_event *event)
diff --git a/trunk/arch/x86/kernel/hw_breakpoint.c b/trunk/arch/x86/kernel/hw_breakpoint.c
deleted file mode 100644
index d42f65ac4927..000000000000
--- a/trunk/arch/x86/kernel/hw_breakpoint.c
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) 2007 Alan Stern
- * Copyright (C) 2009 IBM Corporation
- * Copyright (C) 2009 Frederic Weisbecker
- *
- * Authors: Alan Stern
- * K.Prasad
- * Frederic Weisbecker
- */
-
-/*
- * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
- * using the CPU's debug registers.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-/* Per cpu debug control register value */
-DEFINE_PER_CPU(unsigned long, cpu_dr7);
-EXPORT_PER_CPU_SYMBOL(cpu_dr7);
-
-/* Per cpu debug address registers values */
-static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);
-
-/*
- * Stores the breakpoints currently in use on each breakpoint address
- * register for each cpus
- */
-static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
-
-
-static inline unsigned long
-__encode_dr7(int drnum, unsigned int len, unsigned int type)
-{
- unsigned long bp_info;
-
- bp_info = (len | type) & 0xf;
- bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
- bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));
-
- return bp_info;
-}
-
-/*
- * Encode the length, type, Exact, and Enable bits for a particular breakpoint
- * as stored in debug register 7.
- */
-unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
-{
- return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
-}
-
-/*
- * Decode the length and type bits for a particular breakpoint as
- * stored in debug register 7. Return the "enabled" status.
- */
-int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
-{
- int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);
-
- *len = (bp_info & 0xc) | 0x40;
- *type = (bp_info & 0x3) | 0x80;
-
- return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
-}
-
-/*
- * Install a perf counter breakpoint.
- *
- * We seek a free debug address register and use it for this
- * breakpoint. Eventually we enable it in the debug control register.
- *
- * Atomic: we hold the counter->ctx->lock and we only handle variables
- * and registers local to this cpu.
- */
-int arch_install_hw_breakpoint(struct perf_event *bp)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- unsigned long *dr7;
- int i;
-
- for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
-
- if (!*slot) {
- *slot = bp;
- break;
- }
- }
-
- if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
- return -EBUSY;
-
- set_debugreg(info->address, i);
- __get_cpu_var(cpu_debugreg[i]) = info->address;
-
- dr7 = &__get_cpu_var(cpu_dr7);
- *dr7 |= encode_dr7(i, info->len, info->type);
-
- set_debugreg(*dr7, 7);
-
- return 0;
-}
-
-/*
- * Uninstall the breakpoint contained in the given counter.
- *
- * First we search the debug address register it uses and then we disable
- * it.
- *
- * Atomic: we hold the counter->ctx->lock and we only handle variables
- * and registers local to this cpu.
- */
-void arch_uninstall_hw_breakpoint(struct perf_event *bp)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- unsigned long *dr7;
- int i;
-
- for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
-
- if (*slot == bp) {
- *slot = NULL;
- break;
- }
- }
-
- if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
- return;
-
- dr7 = &__get_cpu_var(cpu_dr7);
- *dr7 &= ~__encode_dr7(i, info->len, info->type);
-
- set_debugreg(*dr7, 7);
-}
-
-static int get_hbp_len(u8 hbp_len)
-{
- unsigned int len_in_bytes = 0;
-
- switch (hbp_len) {
- case X86_BREAKPOINT_LEN_1:
- len_in_bytes = 1;
- break;
- case X86_BREAKPOINT_LEN_2:
- len_in_bytes = 2;
- break;
- case X86_BREAKPOINT_LEN_4:
- len_in_bytes = 4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- len_in_bytes = 8;
- break;
-#endif
- }
- return len_in_bytes;
-}
-
-/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va <= TASK_SIZE - len);
-}
-
-/*
- * Check for virtual address in kernel space.
- */
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
-}
-
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- /*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)
- kallsyms_lookup_name(info->name);
- if (info->address)
- return 0;
-
- return -EINVAL;
-}
-
-int arch_bp_generic_fields(int x86_len, int x86_type,
- int *gen_len, int *gen_type)
-{
- /* Len */
- switch (x86_len) {
- case X86_BREAKPOINT_LEN_1:
- *gen_len = HW_BREAKPOINT_LEN_1;
- break;
- case X86_BREAKPOINT_LEN_2:
- *gen_len = HW_BREAKPOINT_LEN_2;
- break;
- case X86_BREAKPOINT_LEN_4:
- *gen_len = HW_BREAKPOINT_LEN_4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- *gen_len = HW_BREAKPOINT_LEN_8;
- break;
-#endif
- default:
- return -EINVAL;
- }
-
- /* Type */
- switch (x86_type) {
- case X86_BREAKPOINT_EXECUTE:
- *gen_type = HW_BREAKPOINT_X;
- break;
- case X86_BREAKPOINT_WRITE:
- *gen_type = HW_BREAKPOINT_W;
- break;
- case X86_BREAKPOINT_RW:
- *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int arch_build_bp_info(struct perf_event *bp)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- info->address = bp->attr.bp_addr;
-
- /* Len */
- switch (bp->attr.bp_len) {
- case HW_BREAKPOINT_LEN_1:
- info->len = X86_BREAKPOINT_LEN_1;
- break;
- case HW_BREAKPOINT_LEN_2:
- info->len = X86_BREAKPOINT_LEN_2;
- break;
- case HW_BREAKPOINT_LEN_4:
- info->len = X86_BREAKPOINT_LEN_4;
- break;
-#ifdef CONFIG_X86_64
- case HW_BREAKPOINT_LEN_8:
- info->len = X86_BREAKPOINT_LEN_8;
- break;
-#endif
- default:
- return -EINVAL;
- }
-
- /* Type */
- switch (bp->attr.bp_type) {
- case HW_BREAKPOINT_W:
- info->type = X86_BREAKPOINT_WRITE;
- break;
- case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = X86_BREAKPOINT_RW;
- break;
- case HW_BREAKPOINT_X:
- info->type = X86_BREAKPOINT_EXECUTE;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-/*
- * Validate the arch-specific HW Breakpoint register settings
- */
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- unsigned int align;
- int ret;
-
-
- ret = arch_build_bp_info(bp);
- if (ret)
- return ret;
-
- ret = -EINVAL;
-
- if (info->type == X86_BREAKPOINT_EXECUTE)
- /*
- * Ptrace-refactoring code
- * For now, we'll allow instruction breakpoint only for user-space
- * addresses
- */
- if ((!arch_check_va_in_userspace(info->address, info->len)) &&
- info->len != X86_BREAKPOINT_EXECUTE)
- return ret;
-
- switch (info->len) {
- case X86_BREAKPOINT_LEN_1:
- align = 0;
- break;
- case X86_BREAKPOINT_LEN_2:
- align = 1;
- break;
- case X86_BREAKPOINT_LEN_4:
- align = 3;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- align = 7;
- break;
-#endif
- default:
- return ret;
- }
-
- if (bp->callback)
- ret = arch_store_info(bp);
-
- if (ret < 0)
- return ret;
- /*
- * Check that the low-order bits of the address are appropriate
- * for the alignment implied by len.
- */
- if (info->address & align)
- return -EINVAL;
-
- /* Check that the virtual address is in the proper range */
- if (tsk) {
- if (!arch_check_va_in_userspace(info->address, info->len))
- return -EFAULT;
- } else {
- if (!arch_check_va_in_kernelspace(info->address, info->len))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/*
- * Dump the debug register contents to the user.
- * We can't dump our per cpu values because it
- * may contain cpu wide breakpoint, something that
- * doesn't belong to the current task.
- *
- * TODO: include non-ptrace user breakpoints (perf)
- */
-void aout_dump_debugregs(struct user *dump)
-{
- int i;
- int dr7 = 0;
- struct perf_event *bp;
- struct arch_hw_breakpoint *info;
- struct thread_struct *thread = &current->thread;
-
- for (i = 0; i < HBP_NUM; i++) {
- bp = thread->ptrace_bps[i];
-
- if (bp && !bp->attr.disabled) {
- dump->u_debugreg[i] = bp->attr.bp_addr;
- info = counter_arch_bp(bp);
- dr7 |= encode_dr7(i, info->len, info->type);
- } else {
- dump->u_debugreg[i] = 0;
- }
- }
-
- dump->u_debugreg[4] = 0;
- dump->u_debugreg[5] = 0;
- dump->u_debugreg[6] = current->thread.debugreg6;
-
- dump->u_debugreg[7] = dr7;
-}
-EXPORT_SYMBOL_GPL(aout_dump_debugregs);
-
-/*
- * Release the user breakpoints used by ptrace
- */
-void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
-{
- int i;
- struct thread_struct *t = &tsk->thread;
-
- for (i = 0; i < HBP_NUM; i++) {
- unregister_hw_breakpoint(t->ptrace_bps[i]);
- t->ptrace_bps[i] = NULL;
- }
-}
-
-void hw_breakpoint_restore(void)
-{
- set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
- set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
- set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
- set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
- set_debugreg(current->thread.debugreg6, 6);
- set_debugreg(__get_cpu_var(cpu_dr7), 7);
-}
-EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
-
-/*
- * Handle debug exception notifications.
- *
- * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
- *
- * NOTIFY_DONE returned if one of the following conditions is true.
- * i) When the causative address is from user-space and the exception
- * is a valid one, i.e. not triggered as a result of lazy debug register
- * switching
- * ii) When there are more bits than trap set in DR6 register (such
- * as BD, BS or BT) indicating that more than one debug condition is
- * met and requires some more action in do_debug().
- *
- * NOTIFY_STOP returned for all other cases
- *
- */
-static int __kprobes hw_breakpoint_handler(struct die_args *args)
-{
- int i, cpu, rc = NOTIFY_STOP;
- struct perf_event *bp;
- unsigned long dr7, dr6;
- unsigned long *dr6_p;
-
- /* The DR6 value is pointed by args->err */
- dr6_p = (unsigned long *)ERR_PTR(args->err);
- dr6 = *dr6_p;
-
- /* Do an early return if no trap bits are set in DR6 */
- if ((dr6 & DR_TRAP_BITS) == 0)
- return NOTIFY_DONE;
-
- get_debugreg(dr7, 7);
- /* Disable breakpoints during exception handling */
- set_debugreg(0UL, 7);
- /*
- * Assert that local interrupts are disabled
- * Reset the DRn bits in the virtualized register value.
- * The ptrace trigger routine will add in whatever is needed.
- */
- current->thread.debugreg6 &= ~DR_TRAP_BITS;
- cpu = get_cpu();
-
- /* Handle all the breakpoints that were triggered */
- for (i = 0; i < HBP_NUM; ++i) {
- if (likely(!(dr6 & (DR_TRAP0 << i))))
- continue;
-
- /*
- * The counter may be concurrently released but that can only
- * occur from a call_rcu() path. We can then safely fetch
- * the breakpoint, use its callback, touch its counter
- * while we are in an rcu_read_lock() path.
- */
- rcu_read_lock();
-
- bp = per_cpu(bp_per_reg[i], cpu);
- if (bp)
- rc = NOTIFY_DONE;
- /*
- * Reset the 'i'th TRAP bit in dr6 to denote completion of
- * exception handling
- */
- (*dr6_p) &= ~(DR_TRAP0 << i);
- /*
- * bp can be NULL due to lazy debug register switching
- * or due to concurrent perf counter removing.
- */
- if (!bp) {
- rcu_read_unlock();
- break;
- }
-
- (bp->callback)(bp, args->regs);
-
- rcu_read_unlock();
- }
- if (dr6 & (~DR_TRAP_BITS))
- rc = NOTIFY_DONE;
-
- set_debugreg(dr7, 7);
- put_cpu();
-
- return rc;
-}
-
-/*
- * Handle debug exception notifications.
- */
-int __kprobes hw_breakpoint_exceptions_notify(
- struct notifier_block *unused, unsigned long val, void *data)
-{
- if (val != DIE_DEBUG)
- return NOTIFY_DONE;
-
- return hw_breakpoint_handler(data);
-}
-
-void hw_breakpoint_pmu_read(struct perf_event *bp)
-{
- /* TODO */
-}
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
- /* TODO */
-}
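The encode_dr7()/decode_dr7() helpers deleted above pack each breakpoint's length/type nibble into DR7 at bit DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE and its global-enable bit at drnum * DR_ENABLE_SIZE. A worked sketch of that layout as plain user-space C, with the constant values (16, 4, 2, 0x2, 0x200) taken from asm/debugreg.h as it stood before this change:

    #include <stdio.h>

    /* Sketch: DR7 contribution for a 4-byte write watchpoint in slot 1. */
    int main(void)
    {
            unsigned long len = 0x4c, type = 0x81;   /* X86_BREAKPOINT_LEN_4, _WRITE */
            int drnum = 1;
            unsigned long info = (len | type) & 0xf;   /* low nibble: 0xd          */
            info <<= 16 + drnum * 4;                   /* control field -> 0xd00000 */
            info |= 0x2UL << (drnum * 2);              /* G1 enable bit -> 0x8      */
            printf("dr7 |= %#lx\n", info | 0x200);     /* + global slowdown: 0xd00208 */
            return 0;
    }

decode_dr7() reverses this: shifting right by 16 + 4*bpnum recovers the length/type nibble, and (dr7 >> (2*bpnum)) & 0x3 recovers the enable bits.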
diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c
index 19212cb01558..04bbd5278568 100644
--- a/trunk/arch/x86/kernel/irq.c
+++ b/trunk/arch/x86/kernel/irq.c
@@ -92,17 +92,17 @@ static int show_other_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
-#ifdef CONFIG_X86_THERMAL_VECTOR
+#ifdef CONFIG_X86_MCE
seq_printf(p, "%*s: ", prec, "TRM");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
-#endif
-#ifdef CONFIG_X86_MCE_THRESHOLD
+# ifdef CONFIG_X86_MCE_THRESHOLD
seq_printf(p, "%*s: ", prec, "THR");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
seq_printf(p, " Threshold APIC interrupts\n");
+# endif
#endif
#ifdef CONFIG_X86_MCE
seq_printf(p, "%*s: ", prec, "MCE");
@@ -194,11 +194,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += irq_stats(cpu)->irq_call_count;
sum += irq_stats(cpu)->irq_tlb_count;
#endif
-#ifdef CONFIG_X86_THERMAL_VECTOR
+#ifdef CONFIG_X86_MCE
sum += irq_stats(cpu)->irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_MCE_THRESHOLD
+# ifdef CONFIG_X86_MCE_THRESHOLD
sum += irq_stats(cpu)->irq_threshold_count;
+# endif
#endif
#ifdef CONFIG_X86_MCE
sum += per_cpu(mce_exception_count, cpu);
diff --git a/trunk/arch/x86/kernel/kgdb.c b/trunk/arch/x86/kernel/kgdb.c
index 34e86b67550c..8d82a77a3f3b 100644
--- a/trunk/arch/x86/kernel/kgdb.c
+++ b/trunk/arch/x86/kernel/kgdb.c
@@ -43,7 +43,6 @@
#include
#include
-#include
#include
#include
@@ -435,11 +434,6 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
"resuming...\n");
kgdb_arch_handle_exception(args->trapnr, args->signr,
args->err, "c", "", regs);
- /*
- * Reset the BS bit in dr6 (pointed by args->err) to
- * denote completion of processing
- */
- (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
return NOTIFY_STOP;
}
diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c
index 3fe86d706a14..c5f1f117e0c0 100644
--- a/trunk/arch/x86/kernel/kprobes.c
+++ b/trunk/arch/x86/kernel/kprobes.c
@@ -56,7 +56,6 @@
#include
#include
#include
-#include
void jprobe_return_end(void);
@@ -946,14 +945,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_DEBUG:
- if (post_kprobe_handler(args->regs)) {
- /*
- * Reset the BS bit in dr6 (pointed by args->err) to
- * denote completion of processing
- */
- (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
+ if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
- }
break;
case DIE_GPF:
/*
diff --git a/trunk/arch/x86/kernel/machine_kexec_32.c b/trunk/arch/x86/kernel/machine_kexec_32.c
index c843f8406da2..c1c429d00130 100644
--- a/trunk/arch/x86/kernel/machine_kexec_32.c
+++ b/trunk/arch/x86/kernel/machine_kexec_32.c
@@ -25,7 +25,6 @@
#include
#include
#include
-#include
static void set_idt(void *newidt, __u16 limit)
{
@@ -203,7 +202,6 @@ void machine_kexec(struct kimage *image)
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
- hw_breakpoint_disable();
if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
diff --git a/trunk/arch/x86/kernel/machine_kexec_64.c b/trunk/arch/x86/kernel/machine_kexec_64.c
index 4a8bb82248ae..84c3bf209e98 100644
--- a/trunk/arch/x86/kernel/machine_kexec_64.c
+++ b/trunk/arch/x86/kernel/machine_kexec_64.c
@@ -18,7 +18,6 @@
#include
#include
#include
-#include
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
unsigned long addr)
@@ -283,7 +282,6 @@ void machine_kexec(struct kimage *image)
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
- hw_breakpoint_disable();
if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c
index 744508e7cfdd..5284cd2b5776 100644
--- a/trunk/arch/x86/kernel/process.c
+++ b/trunk/arch/x86/kernel/process.c
@@ -10,7 +10,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -18,7 +17,6 @@
#include
#include
#include
-#include
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
@@ -105,7 +103,14 @@ void flush_thread(void)
}
#endif
- flush_ptrace_hw_breakpoint(tsk);
+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+ tsk->thread.debugreg0 = 0;
+ tsk->thread.debugreg1 = 0;
+ tsk->thread.debugreg2 = 0;
+ tsk->thread.debugreg3 = 0;
+ tsk->thread.debugreg6 = 0;
+ tsk->thread.debugreg7 = 0;
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
/*
* Forget coprocessor state..
@@ -187,6 +192,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
else if (next->debugctlmsr != prev->debugctlmsr)
update_debugctlmsr(next->debugctlmsr);
+ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+ set_debugreg(next->debugreg0, 0);
+ set_debugreg(next->debugreg1, 1);
+ set_debugreg(next->debugreg2, 2);
+ set_debugreg(next->debugreg3, 3);
+ /* no 4 and 5 */
+ set_debugreg(next->debugreg6, 6);
+ set_debugreg(next->debugreg7, 7);
+ }
+
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
/* prev and next are different */
diff --git a/trunk/arch/x86/kernel/process_32.c b/trunk/arch/x86/kernel/process_32.c
index d5bd3132ee70..4cf79567cdab 100644
--- a/trunk/arch/x86/kernel/process_32.c
+++ b/trunk/arch/x86/kernel/process_32.c
@@ -58,7 +58,6 @@
#include
#include
#include
-#include
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -260,12 +259,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
task_user_gs(p) = get_user_gs(regs);
- p->thread.io_bitmap_ptr = NULL;
tsk = current;
- err = -ENOMEM;
-
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c
index 70cf15873f3d..eb62cbcaa490 100644
--- a/trunk/arch/x86/kernel/process_64.c
+++ b/trunk/arch/x86/kernel/process_64.c
@@ -52,7 +52,6 @@
#include
#include
#include
-#include
asmlinkage extern void ret_from_fork(void);
@@ -298,16 +297,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
p->thread.fs = me->thread.fs;
p->thread.gs = me->thread.gs;
- p->thread.io_bitmap_ptr = NULL;
savesegment(gs, p->thread.gsindex);
savesegment(fs, p->thread.fsindex);
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
- err = -ENOMEM;
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
@@ -346,7 +341,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
-
return err;
}
@@ -501,7 +495,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
if (preload_fpu)
__math_state_restore();
-
return prev_p;
}
diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c
index 04d182a7cfdb..c4f76d275ee4 100644
--- a/trunk/arch/x86/kernel/ptrace.c
+++ b/trunk/arch/x86/kernel/ptrace.c
@@ -22,8 +22,6 @@
#include
#include
#include
-#include
-#include
#include
#include
@@ -36,7 +34,6 @@
#include
#include
#include
-#include
#include "tls.h"
@@ -252,6 +249,11 @@ static int set_segment_reg(struct task_struct *task,
return 0;
}
+static unsigned long debugreg_addr_limit(struct task_struct *task)
+{
+ return TASK_SIZE - 3;
+}
+
#else /* CONFIG_X86_64 */
#define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
@@ -376,6 +378,15 @@ static int set_segment_reg(struct task_struct *task,
return 0;
}
+static unsigned long debugreg_addr_limit(struct task_struct *task)
+{
+#ifdef CONFIG_IA32_EMULATION
+ if (test_tsk_thread_flag(task, TIF_IA32))
+ return IA32_PAGE_OFFSET - 3;
+#endif
+ return TASK_SIZE_MAX - 7;
+}
+
#endif /* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
@@ -555,238 +566,98 @@ static int genregs_set(struct task_struct *target,
return ret;
}
-static void ptrace_triggered(struct perf_event *bp, void *data)
-{
- int i;
- struct thread_struct *thread = &(current->thread);
-
- /*
- * Store in the virtual DR6 register the fact that the breakpoint
- * was hit so the thread's debugger will see it.
- */
- for (i = 0; i < HBP_NUM; i++) {
- if (thread->ptrace_bps[i] == bp)
- break;
- }
-
- thread->debugreg6 |= (DR_TRAP0 << i);
-}
-
/*
- * Walk through every ptrace breakpoints for this thread and
- * build the dr7 value on top of their attributes.
- *
+ * This function is trivial and will be inlined by the compiler.
+ * Having it separates the implementation details of debug
+ * registers from the interface details of ptrace.
*/
-static unsigned long ptrace_get_dr7(struct perf_event *bp[])
+static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
- int i;
- int dr7 = 0;
- struct arch_hw_breakpoint *info;
-
- for (i = 0; i < HBP_NUM; i++) {
- if (bp[i] && !bp[i]->attr.disabled) {
- info = counter_arch_bp(bp[i]);
- dr7 |= encode_dr7(i, info->len, info->type);
- }
+ switch (n) {
+ case 0: return child->thread.debugreg0;
+ case 1: return child->thread.debugreg1;
+ case 2: return child->thread.debugreg2;
+ case 3: return child->thread.debugreg3;
+ case 6: return child->thread.debugreg6;
+ case 7: return child->thread.debugreg7;
}
-
- return dr7;
-}
-
-static struct perf_event *
-ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
- struct task_struct *tsk, int disabled)
-{
- int err;
- int gen_len, gen_type;
- DEFINE_BREAKPOINT_ATTR(attr);
-
- /*
- * We shoud have at least an inactive breakpoint at this
- * slot. It means the user is writing dr7 without having
- * written the address register first
- */
- if (!bp)
- return ERR_PTR(-EINVAL);
-
- err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
- if (err)
- return ERR_PTR(err);
-
- attr = bp->attr;
- attr.bp_len = gen_len;
- attr.bp_type = gen_type;
- attr.disabled = disabled;
-
- return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
+ return 0;
}
-/*
- * Handle ptrace writes to debug register 7.
- */
-static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
+static int ptrace_set_debugreg(struct task_struct *child,
+ int n, unsigned long data)
{
- struct thread_struct *thread = &(tsk->thread);
- unsigned long old_dr7;
- int i, orig_ret = 0, rc = 0;
- int enabled, second_pass = 0;
- unsigned len, type;
- struct perf_event *bp;
-
- data &= ~DR_CONTROL_RESERVED;
- old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
-restore:
- /*
- * Loop through all the hardware breakpoints, making the
- * appropriate changes to each.
- */
- for (i = 0; i < HBP_NUM; i++) {
- enabled = decode_dr7(data, i, &len, &type);
- bp = thread->ptrace_bps[i];
-
- if (!enabled) {
- if (bp) {
- /*
- * Don't unregister the breakpoints right-away,
- * unless all register_user_hw_breakpoint()
- * requests have succeeded. This prevents
- * any window of opportunity for debug
- * register grabbing by other users.
- */
- if (!second_pass)
- continue;
-
- thread->ptrace_bps[i] = NULL;
- bp = ptrace_modify_breakpoint(bp, len, type,
- tsk, 1);
- if (IS_ERR(bp)) {
- rc = PTR_ERR(bp);
- thread->ptrace_bps[i] = NULL;
- break;
- }
- thread->ptrace_bps[i] = bp;
- }
- continue;
- }
-
- bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
+ int i;
- /* Incorrect bp, or we have a bug in bp API */
- if (IS_ERR(bp)) {
- rc = PTR_ERR(bp);
- thread->ptrace_bps[i] = NULL;
- break;
- }
- thread->ptrace_bps[i] = bp;
- }
- /*
- * Make a second pass to free the remaining unused breakpoints
- * or to restore the original breakpoints if an error occurred.
- */
- if (!second_pass) {
- second_pass = 1;
- if (rc < 0) {
- orig_ret = rc;
- data = old_dr7;
- }
- goto restore;
- }
- return ((orig_ret < 0) ? orig_ret : rc);
-}
+ if (unlikely(n == 4 || n == 5))
+ return -EIO;
-/*
- * Handle PTRACE_PEEKUSR calls for the debug register area.
- */
-static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
-{
- struct thread_struct *thread = &(tsk->thread);
- unsigned long val = 0;
+ if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
+ return -EIO;
- if (n < HBP_NUM) {
- struct perf_event *bp;
- bp = thread->ptrace_bps[n];
- if (!bp)
- return 0;
- val = bp->hw.info.address;
- } else if (n == 6) {
- val = thread->debugreg6;
- } else if (n == 7) {
- val = ptrace_get_dr7(thread->ptrace_bps);
- }
- return val;
-}
+ switch (n) {
+ case 0: child->thread.debugreg0 = data; break;
+ case 1: child->thread.debugreg1 = data; break;
+ case 2: child->thread.debugreg2 = data; break;
+ case 3: child->thread.debugreg3 = data; break;
-static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
- unsigned long addr)
-{
- struct perf_event *bp;
- struct thread_struct *t = &tsk->thread;
- DEFINE_BREAKPOINT_ATTR(attr);
+ case 6:
+ if ((data & ~0xffffffffUL) != 0)
+ return -EIO;
+ child->thread.debugreg6 = data;
+ break;
- if (!t->ptrace_bps[nr]) {
+ case 7:
/*
- * Put stub len and type to register (reserve) an inactive but
- * correct bp
+ * Sanity-check data. Take one half-byte at once with
+ * check = (val >> (16 + 4*i)) & 0xf. It contains the
+ * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
+ * 2 and 3 are LENi. Given a list of invalid values,
+ * we do mask |= 1 << invalid_value, so that
+ * (mask >> check) & 1 is a correct test for invalid
+ * values.
+ *
+ * R/Wi contains the type of the breakpoint /
+ * watchpoint, LENi contains the length of the watched
+ * data in the watchpoint case.
+ *
+ * The invalid values are:
+ * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit]
+ * - R/Wi == 0x10 (break on I/O reads or writes), so
+ * mask |= 0x4444.
+ * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
+ * 0x1110.
+ *
+ * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
+ *
+ * See the Intel Manual "System Programming Guide",
+ * 15.2.4
+ *
+ * Note that LENi == 0x10 is defined on x86_64 in long
+ * mode (i.e. even for 32-bit userspace software, but
+ * 64-bit kernel), so the x86_64 mask value is 0x5554.
+ * See the AMD manual no. 24593 (AMD64 System Programming)
*/
- attr.bp_addr = addr;
- attr.bp_len = HW_BREAKPOINT_LEN_1;
- attr.bp_type = HW_BREAKPOINT_W;
- attr.disabled = 1;
-
- bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
- } else {
- bp = t->ptrace_bps[nr];
- t->ptrace_bps[nr] = NULL;
-
- attr = bp->attr;
- attr.bp_addr = addr;
- bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
+#ifdef CONFIG_X86_32
+#define DR7_MASK 0x5f54
+#else
+#define DR7_MASK 0x5554
+#endif
+ data &= ~DR_CONTROL_RESERVED;
+ for (i = 0; i < 4; i++)
+ if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+ return -EIO;
+ child->thread.debugreg7 = data;
+ if (data)
+ set_tsk_thread_flag(child, TIF_DEBUG);
+ else
+ clear_tsk_thread_flag(child, TIF_DEBUG);
+ break;
}
- /*
- * CHECKME: the previous code returned -EIO if the addr wasn't a
- * valid task virtual addr. The new one will return -EINVAL in this
- * case.
- * -EINVAL may be what we want for in-kernel breakpoints users, but
- * -EIO looks better for ptrace, since we refuse a register writing
- * for the user. And anyway this is the previous behaviour.
- */
- if (IS_ERR(bp))
- return PTR_ERR(bp);
-
- t->ptrace_bps[nr] = bp;
return 0;
}
-/*
- * Handle PTRACE_POKEUSR calls for the debug register area.
- */
-int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
-{
- struct thread_struct *thread = &(tsk->thread);
- int rc = 0;
-
- /* There are no DR4 or DR5 registers */
- if (n == 4 || n == 5)
- return -EIO;
-
- if (n == 6) {
- thread->debugreg6 = val;
- goto ret_path;
- }
- if (n < HBP_NUM) {
- rc = ptrace_set_breakpoint_addr(tsk, n, val);
- if (rc)
- return rc;
- }
- /* All that's left is DR7 */
- if (n == 7)
- rc = ptrace_write_dr7(tsk, val);
-
-ret_path:
- return rc;
-}
-
/*
* These access the current or another (stopped) task's io permission
* bitmap for debugging or core dump.
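The ptrace_set_debugreg() code restored above validates a DR7 write by walking the four R/W-LEN nibbles at bits 16..31 and rejecting the value when the corresponding bit of DR7_MASK is set. A minimal stand-alone sketch of that test (mask and shift arithmetic copied from the hunk; 0x5f54 is the 32-bit mask, and the DR_CONTROL_RESERVED masking is omitted here):

    #include <stdio.h>

    /* Returns 0 if the DR7 value passes the nibble check, -1 otherwise. */
    static int check_dr7(unsigned long data)
    {
            int i;

            for (i = 0; i < 4; i++)
                    if ((0x5f54 >> ((data >> (16 + 4 * i)) & 0xf)) & 1)
                            return -1;
            return 0;
    }

    int main(void)
    {
            /* DR0 as a 4-byte write watchpoint: R/W0=01, LEN0=11 -> nibble 0xd, accepted. */
            printf("%d\n", check_dr7(0x000d0001UL));
            /* R/W0=10 requests an I/O breakpoint -> nibble 0x2, rejected. */
            printf("%d\n", check_dr7(0x00020001UL));
            return 0;
    }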
diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c
index c0ca8f921c91..2a34f9c5be21 100644
--- a/trunk/arch/x86/kernel/setup.c
+++ b/trunk/arch/x86/kernel/setup.c
@@ -109,7 +109,6 @@
#ifdef CONFIG_X86_64
#include
#endif
-#include
/*
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -1032,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
x86_init.oem.banner();
-
- mcheck_init();
}
#ifdef CONFIG_X86_32
diff --git a/trunk/arch/x86/kernel/signal.c b/trunk/arch/x86/kernel/signal.c
index fbf3b07c8567..6a44a76055ad 100644
--- a/trunk/arch/x86/kernel/signal.c
+++ b/trunk/arch/x86/kernel/signal.c
@@ -799,6 +799,15 @@ static void do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
+ /*
+ * Re-enable any watchpoints before delivering the
+ * signal to user space. The processor register will
+ * have been cleared if the watchpoint triggered
+ * inside the kernel.
+ */
+ if (current->thread.debugreg7)
+ set_debugreg(current->thread.debugreg7, 7);
+
/* Whee! Actually deliver the signal. */
if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
/*
diff --git a/trunk/arch/x86/kernel/traps.c b/trunk/arch/x86/kernel/traps.c
index 33399176512a..7e37dcee0cc3 100644
--- a/trunk/arch/x86/kernel/traps.c
+++ b/trunk/arch/x86/kernel/traps.c
@@ -529,56 +529,77 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk = current;
- unsigned long dr6;
+ unsigned long condition;
int si_code;
- get_debugreg(dr6, 6);
+ get_debugreg(condition, 6);
/* Catch kmemcheck conditions first of all! */
- if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
+ if (condition & DR_STEP && kmemcheck_trap(regs))
return;
- /* DR6 may or may not be cleared by the CPU */
- set_debugreg(0, 6);
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
tsk->thread.debugctlmsr = 0;
- /* Store the virtualized DR6 value */
- tsk->thread.debugreg6 = dr6;
-
- if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
- SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP)
return;
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
- if (regs->flags & X86_VM_MASK) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs,
- error_code, 1);
- return;
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg7)
+ goto clear_dr7;
}
+#ifdef CONFIG_X86_32
+ if (regs->flags & X86_VM_MASK)
+ goto debug_vm86;
+#endif
+
+ /* Save debug status register where ptrace can see it */
+ tsk->thread.debugreg6 = condition;
+
/*
- * Single-stepping through system calls: ignore any exceptions in
- * kernel space, but re-enable TF when returning to user mode.
- *
- * We already checked v86 mode above, so we can check for kernel mode
- * by just checking the CPL of CS.
+ * Single-stepping through TF: make sure we ignore any events in
+ * kernel space (but re-enable TF when returning to user mode).
*/
- if ((dr6 & DR_STEP) && !user_mode(regs)) {
- tsk->thread.debugreg6 &= ~DR_STEP;
- set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
- regs->flags &= ~X86_EFLAGS_TF;
+ if (condition & DR_STEP) {
+ if (!user_mode(regs))
+ goto clear_TF_reenable;
}
- si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
- send_sigtrap(tsk, regs, error_code, si_code);
+
+ si_code = get_si_code(condition);
+ /* Ok, finally something we can handle */
+ send_sigtrap(tsk, regs, error_code, si_code);
+
+ /*
+ * Disable additional traps. They'll be re-enabled when
+ * the signal is delivered.
+ */
+clear_dr7:
+ set_debugreg(0, 7);
preempt_conditional_cli(regs);
+ return;
+#ifdef CONFIG_X86_32
+debug_vm86:
+ /* reenable preemption: handle_vm86_trap() might sleep */
+ dec_preempt_count();
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+ conditional_cli(regs);
+ return;
+#endif
+
+clear_TF_reenable:
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->flags &= ~X86_EFLAGS_TF;
+ preempt_conditional_cli(regs);
return;
}
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index 4fc80174191c..ae07d261527c 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -42,7 +42,6 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-#include
#include
#include
#include
@@ -3644,15 +3643,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu, kvm_run);
- /*
- * If the guest has used debug registers, at least dr7
- * will be disabled while returning to the host.
- * If we don't have active breakpoints in the host, we don't
- * care about the messed up debug address registers. But if
- * we have some of them active, restore the old state.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_restore();
+ if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
+ set_debugreg(current->thread.debugreg0, 0);
+ set_debugreg(current->thread.debugreg1, 1);
+ set_debugreg(current->thread.debugreg2, 2);
+ set_debugreg(current->thread.debugreg3, 3);
+ set_debugreg(current->thread.debugreg6, 6);
+ set_debugreg(current->thread.debugreg7, 7);
+ }
set_bit(KVM_REQ_KICK, &vcpu->requests);
local_irq_enable();
diff --git a/trunk/arch/x86/mm/kmmio.c b/trunk/arch/x86/mm/kmmio.c
index 11a4ad4d6253..16ccbd77917f 100644
--- a/trunk/arch/x86/mm/kmmio.c
+++ b/trunk/arch/x86/mm/kmmio.c
@@ -540,14 +540,8 @@ kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
struct die_args *arg = args;
if (val == DIE_DEBUG && (arg->err & DR_STEP))
- if (post_kmmio_handler(arg->err, arg->regs) == 1) {
- /*
- * Reset the BS bit in dr6 (pointed by args->err) to
- * denote completion of processing
- */
- (*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP;
+ if (post_kmmio_handler(arg->err, arg->regs) == 1)
return NOTIFY_STOP;
- }
return NOTIFY_DONE;
}
diff --git a/trunk/arch/x86/power/cpu.c b/trunk/arch/x86/power/cpu.c
index 0a979f3e5b8a..8aa85f17667e 100644
--- a/trunk/arch/x86/power/cpu.c
+++ b/trunk/arch/x86/power/cpu.c
@@ -18,7 +18,6 @@
#include
#include
#include
-#include
#ifdef CONFIG_X86_32
static struct saved_context saved_context;
@@ -143,6 +142,31 @@ static void fix_processor_context(void)
#endif
load_TR_desc(); /* This does ltr */
load_LDT(&current->active_mm->context); /* This does lldt */
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if (current->thread.debugreg7) {
+#ifdef CONFIG_X86_32
+ set_debugreg(current->thread.debugreg0, 0);
+ set_debugreg(current->thread.debugreg1, 1);
+ set_debugreg(current->thread.debugreg2, 2);
+ set_debugreg(current->thread.debugreg3, 3);
+ /* no 4 and 5 */
+ set_debugreg(current->thread.debugreg6, 6);
+ set_debugreg(current->thread.debugreg7, 7);
+#else
+ /* CONFIG_X86_64 */
+ loaddebug(&current->thread, 0);
+ loaddebug(&current->thread, 1);
+ loaddebug(&current->thread, 2);
+ loaddebug(&current->thread, 3);
+ /* no 4 and 5 */
+ loaddebug(&current->thread, 6);
+ loaddebug(&current->thread, 7);
+#endif
+ }
+
}
/**
diff --git a/trunk/arch/x86/tools/Makefile b/trunk/arch/x86/tools/Makefile
index 4688f90ce5a2..c80b0792cd83 100644
--- a/trunk/arch/x86/tools/Makefile
+++ b/trunk/arch/x86/tools/Makefile
@@ -1,13 +1,19 @@
PHONY += posttest
ifeq ($(KBUILD_VERBOSE),1)
- postest_verbose = -v
+ posttest_verbose = -v
else
- postest_verbose =
+ posttest_verbose =
+endif
+
+ifeq ($(CONFIG_64BIT),y)
+ posttest_64bit = -y
+else
+ posttest_64bit = -n
endif
quiet_cmd_posttest = TEST $@
- cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len -$(CONFIG_64BIT) $(posttest_verbose)
+ cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
posttest: $(obj)/test_get_len vmlinux
$(call cmd,posttest)
diff --git a/trunk/arch/x86/tools/test_get_len.c b/trunk/arch/x86/tools/test_get_len.c
index d8214dc03fa7..af75e07217ba 100644
--- a/trunk/arch/x86/tools/test_get_len.c
+++ b/trunk/arch/x86/tools/test_get_len.c
@@ -114,7 +114,6 @@ int main(int argc, char **argv)
unsigned char insn_buf[16];
struct insn insn;
int insns = 0, c;
- int warnings = 0;
parse_args(argc, argv);
@@ -152,22 +151,18 @@ int main(int argc, char **argv)
insn_init(&insn, insn_buf, x86_64);
insn_get_length(&insn);
if (insn.length != nb) {
- warnings++;
- fprintf(stderr, "Warning: %s found difference at %s\n",
+ fprintf(stderr, "Error: %s found a difference at %s\n",
prog, sym);
- fprintf(stderr, "Warning: %s", line);
- fprintf(stderr, "Warning: objdump says %d bytes, but "
+ fprintf(stderr, "Error: %s", line);
+ fprintf(stderr, "Error: objdump says %d bytes, but "
"insn_get_length() says %d\n", nb,
insn.length);
if (verbose)
dump_insn(stderr, &insn);
+ exit(2);
}
}
- if (warnings)
- fprintf(stderr, "Warning: decoded and checked %d"
- " instructions with %d warnings\n", insns, warnings);
- else
- fprintf(stderr, "Succeed: decoded and checked %d"
- " instructions\n", insns);
+ fprintf(stderr, "Succeed: decoded and checked %d instructions\n",
+ insns);
return 0;
}
diff --git a/trunk/drivers/edac/edac_mce_amd.c b/trunk/drivers/edac/edac_mce_amd.c
index 689cc6a6214d..713ed7d37247 100644
--- a/trunk/drivers/edac/edac_mce_amd.c
+++ b/trunk/drivers/edac/edac_mce_amd.c
@@ -3,6 +3,7 @@
static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
+static void (*orig_mce_callback)(struct mce *m);
void amd_report_gart_errors(bool v)
{
@@ -362,10 +363,8 @@ static inline void amd_decode_err_code(unsigned int ec)
pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
}
-static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
- void *data)
+static void amd_decode_mce(struct mce *m)
{
- struct mce *m = (struct mce *)data;
struct err_regs regs;
int node, ecc;
@@ -421,22 +420,20 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
}
amd_decode_err_code(m->status & 0xffff);
-
- return NOTIFY_STOP;
}
-static struct notifier_block amd_mce_dec_nb = {
- .notifier_call = amd_decode_mce,
-};
-
static int __init mce_amd_init(void)
{
/*
* We can decode MCEs for Opteron and later CPUs:
*/
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0xf))
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ (boot_cpu_data.x86 >= 0xf)) {
+ /* save the default decode mce callback */
+ orig_mce_callback = x86_mce_decode_callback;
+
+ x86_mce_decode_callback = amd_decode_mce;
+ }
return 0;
}
@@ -445,7 +442,7 @@ early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ x86_mce_decode_callback = orig_mce_callback;
}
MODULE_DESCRIPTION("AMD MCE decoder");
diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h
index 47bbdf9c38d0..43360c1d8f70 100644
--- a/trunk/include/linux/ftrace_event.h
+++ b/trunk/include/linux/ftrace_event.h
@@ -137,8 +137,13 @@ struct ftrace_event_call {
#define FTRACE_MAX_PROFILE_SIZE 2048
-extern char *perf_trace_buf;
-extern char *perf_trace_buf_nmi;
+struct perf_trace_buf {
+ char buf[FTRACE_MAX_PROFILE_SIZE];
+ int recursion;
+};
+
+extern struct perf_trace_buf *perf_trace_buf;
+extern struct perf_trace_buf *perf_trace_buf_nmi;
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
diff --git a/trunk/include/linux/hw_breakpoint.h b/trunk/include/linux/hw_breakpoint.h
deleted file mode 100644
index a03daed08c59..000000000000
--- a/trunk/include/linux/hw_breakpoint.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef _LINUX_HW_BREAKPOINT_H
-#define _LINUX_HW_BREAKPOINT_H
-
-enum {
- HW_BREAKPOINT_LEN_1 = 1,
- HW_BREAKPOINT_LEN_2 = 2,
- HW_BREAKPOINT_LEN_4 = 4,
- HW_BREAKPOINT_LEN_8 = 8,
-};
-
-enum {
- HW_BREAKPOINT_R = 1,
- HW_BREAKPOINT_W = 2,
- HW_BREAKPOINT_X = 4,
-};
-
-#ifdef __KERNEL__
-
-#include
-
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-
-/* As it's for in-kernel or ptrace use, we want it to be pinned */
-#define DEFINE_BREAKPOINT_ATTR(name) \
-struct perf_event_attr name = { \
- .type = PERF_TYPE_BREAKPOINT, \
- .size = sizeof(name), \
- .pinned = 1, \
-};
-
-static inline void hw_breakpoint_init(struct perf_event_attr *attr)
-{
- attr->type = PERF_TYPE_BREAKPOINT;
- attr->size = sizeof(*attr);
- attr->pinned = 1;
-}
-
-static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
-{
- return bp->attr.bp_addr;
-}
-
-static inline int hw_breakpoint_type(struct perf_event *bp)
-{
- return bp->attr.bp_type;
-}
-
-static inline int hw_breakpoint_len(struct perf_event *bp)
-{
- return bp->attr.bp_len;
-}
-
-extern struct perf_event *
-register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk);
-
-/* FIXME: only change from the attr, and don't unregister */
-extern struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk);
-
-/*
- * Kernel breakpoints are not associated with any particular thread.
- */
-extern struct perf_event *
-register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
- int cpu);
-
-extern struct perf_event **
-register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered);
-
-extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
-extern void unregister_hw_breakpoint(struct perf_event *bp);
-extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
-
-extern int reserve_bp_slot(struct perf_event *bp);
-extern void release_bp_slot(struct perf_event *bp);
-
-extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
-
-static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
-{
- return &bp->hw.info;
-}
-
-#else /* !CONFIG_HAVE_HW_BREAKPOINT */
-
-static inline struct perf_event *
-register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
-register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
- int cpu) { return NULL; }
-static inline struct perf_event **
-register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered) { return NULL; }
-static inline int
-register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
-static inline void
-unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { }
-static inline int
-reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
-static inline void release_bp_slot(struct perf_event *bp) { }
-
-static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { }
-
-static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
-{
- return NULL;
-}
-
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_HW_BREAKPOINT_H */
diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h
index 43adbd7f0010..7f87563c8485 100644
--- a/trunk/include/linux/perf_event.h
+++ b/trunk/include/linux/perf_event.h
@@ -18,10 +18,6 @@
#include
#include
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include
-#endif
-
/*
* User-space ABI bits:
*/
@@ -35,7 +31,6 @@ enum perf_type_id {
PERF_TYPE_TRACEPOINT = 2,
PERF_TYPE_HW_CACHE = 3,
PERF_TYPE_RAW = 4,
- PERF_TYPE_BREAKPOINT = 5,
PERF_TYPE_MAX, /* non-ABI */
};
@@ -214,15 +209,6 @@ struct perf_event_attr {
__u32 wakeup_events; /* wakeup every n events */
__u32 wakeup_watermark; /* bytes before wakeup */
};
-
- union {
- struct { /* Hardware breakpoint info */
- __u64 bp_addr;
- __u32 bp_type;
- __u32 bp_len;
- };
- };
-
__u32 __reserved_2;
__u64 __reserved_3;
@@ -492,11 +478,6 @@ struct hw_perf_event {
s64 remaining;
struct hrtimer hrtimer;
};
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- union { /* breakpoint */
- struct arch_hw_breakpoint info;
- };
-#endif
};
atomic64_t prev_count;
u64 sample_period;
@@ -565,10 +546,6 @@ struct perf_pending_entry {
void (*func)(struct perf_pending_entry *);
};
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
-struct perf_sample_data;
-
/**
* struct perf_event - performance event kernel representation:
*/
@@ -611,7 +588,7 @@ struct perf_event {
u64 tstamp_running;
u64 tstamp_stopped;
- struct perf_event_attr attr;
+ struct perf_event_attr attr;
struct hw_perf_event hw;
struct perf_event_context *ctx;
@@ -660,18 +637,10 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
- void (*overflow_handler)(struct perf_event *event,
- int nmi, struct perf_sample_data *data,
- struct pt_regs *regs);
-
#ifdef CONFIG_EVENT_PROFILE
struct event_filter *filter;
#endif
- perf_callback_t callback;
-
- perf_callback_t event_callback;
-
#endif /* CONFIG_PERF_EVENTS */
};
@@ -776,14 +745,6 @@ extern int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx, int cpu);
extern void perf_event_update_userpage(struct perf_event *event);
-extern int perf_event_release_kernel(struct perf_event *event);
-extern struct perf_event *
-perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- pid_t pid,
- perf_callback_t callback);
-extern u64 perf_event_read_value(struct perf_event *event,
- u64 *enabled, u64 *running);
struct perf_sample_data {
u64 type;
@@ -860,7 +821,6 @@ extern int sysctl_perf_event_sample_rate;
extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
void *record, int entry_size);
-extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
@@ -874,8 +834,6 @@ extern int perf_output_begin(struct perf_output_handle *handle,
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
-extern int perf_swevent_get_recursion_context(void);
-extern void perf_swevent_put_recursion_context(int rctx);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -897,15 +855,11 @@ static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr) { }
-static inline void
-perf_bp_event(struct perf_event *event, void *data) { }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
-static inline int perf_swevent_get_recursion_context(void) { return -1; }
-static inline void perf_swevent_put_recursion_context(int rctx) { }
#endif
diff --git a/trunk/include/linux/syscalls.h b/trunk/include/linux/syscalls.h
index e79e2f3ccc51..b50974a93af0 100644
--- a/trunk/include/linux/syscalls.h
+++ b/trunk/include/linux/syscalls.h
@@ -99,16 +99,37 @@ struct perf_event_attr;
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
#ifdef CONFIG_EVENT_PROFILE
+#define TRACE_SYS_ENTER_PROFILE(sname) \
+static int prof_sysenter_enable_##sname(struct ftrace_event_call *unused) \
+{ \
+ return reg_prof_syscall_enter("sys"#sname); \
+} \
+ \
+static void prof_sysenter_disable_##sname(struct ftrace_event_call *unused) \
+{ \
+ unreg_prof_syscall_enter("sys"#sname); \
+}
+
+#define TRACE_SYS_EXIT_PROFILE(sname) \
+static int prof_sysexit_enable_##sname(struct ftrace_event_call *unused) \
+{ \
+ return reg_prof_syscall_exit("sys"#sname); \
+} \
+ \
+static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \
+{ \
+ unreg_prof_syscall_exit("sys"#sname); \
+}
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
.profile_count = ATOMIC_INIT(-1), \
- .profile_enable = prof_sysenter_enable, \
- .profile_disable = prof_sysenter_disable,
+ .profile_enable = prof_sysenter_enable_##sname, \
+ .profile_disable = prof_sysenter_disable_##sname,
#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
.profile_count = ATOMIC_INIT(-1), \
- .profile_enable = prof_sysexit_enable, \
- .profile_disable = prof_sysexit_disable,
+ .profile_enable = prof_sysexit_enable_##sname, \
+ .profile_disable = prof_sysexit_disable_##sname,
#else
#define TRACE_SYS_ENTER_PROFILE(sname)
#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
@@ -132,46 +153,74 @@ struct perf_event_attr;
#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
- static const struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call event_enter_##sname; \
- static struct trace_event enter_syscall_print_##sname = { \
+ struct trace_event enter_syscall_print_##sname = { \
.trace = print_syscall_enter, \
}; \
+ static int init_enter_##sname(struct ftrace_event_call *call) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&enter_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_enter_##sname.id = id; \
+ set_syscall_enter_id(num, id); \
+ INIT_LIST_HEAD(&event_enter_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_ENTER_PROFILE(sname); \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_enter_##sname = { \
.name = "sys_enter"#sname, \
.system = "syscalls", \
- .event = &enter_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .event = &event_syscall_enter, \
+ .raw_init = init_enter_##sname, \
.show_format = syscall_enter_format, \
.define_fields = syscall_enter_define_fields, \
.regfunc = reg_event_syscall_enter, \
.unregfunc = unreg_event_syscall_enter, \
- .data = (void *)&__syscall_meta_##sname,\
+ .data = "sys"#sname, \
TRACE_SYS_ENTER_PROFILE_INIT(sname) \
}
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
- static const struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call event_exit_##sname; \
- static struct trace_event exit_syscall_print_##sname = { \
+ struct trace_event exit_syscall_print_##sname = { \
.trace = print_syscall_exit, \
}; \
+ static int init_exit_##sname(struct ftrace_event_call *call) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&exit_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_exit_##sname.id = id; \
+ set_syscall_exit_id(num, id); \
+ INIT_LIST_HEAD(&event_exit_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_EXIT_PROFILE(sname); \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_exit_##sname = { \
.name = "sys_exit"#sname, \
.system = "syscalls", \
- .event = &exit_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .event = &event_syscall_exit, \
+ .raw_init = init_exit_##sname, \
.show_format = syscall_exit_format, \
.define_fields = syscall_exit_define_fields, \
.regfunc = reg_event_syscall_exit, \
.unregfunc = unreg_event_syscall_exit, \
- .data = (void *)&__syscall_meta_##sname,\
+ .data = "sys"#sname, \
TRACE_SYS_EXIT_PROFILE_INIT(sname) \
}
diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h
index f59604ed0ec6..2aac8a83e89b 100644
--- a/trunk/include/linux/tracepoint.h
+++ b/trunk/include/linux/tracepoint.h
@@ -280,12 +280,6 @@ static inline void tracepoint_synchronize_unregister(void)
* TRACE_EVENT_FN to perform any (un)registration work.
*/
-#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
-#define DEFINE_EVENT(template, name, proto, args) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN(name, proto, args, struct, \
diff --git a/trunk/include/trace/define_trace.h b/trunk/include/trace/define_trace.h
index 5acfb1eb4df9..2a4b3bf74033 100644
--- a/trunk/include/trace/define_trace.h
+++ b/trunk/include/trace/define_trace.h
@@ -31,14 +31,6 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
- DEFINE_TRACE(name)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_TRACE(name)
-
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
DEFINE_TRACE(name)
@@ -71,9 +63,6 @@
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
-#undef DECLARE_EVENT_CLASS
-#undef DEFINE_EVENT
-#undef DEFINE_EVENT_PRINT
#undef TRACE_HEADER_MULTI_READ
/* Only undef what we defined in this file */
diff --git a/trunk/include/trace/events/block.h b/trunk/include/trace/events/block.h
index 5fb72733331e..00405b5f624a 100644
--- a/trunk/include/trace/events/block.h
+++ b/trunk/include/trace/events/block.h
@@ -8,7 +8,7 @@
#include
#include
-DECLARE_EVENT_CLASS(block_rq_with_error,
+TRACE_EVENT(block_rq_abort,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -40,28 +40,41 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__entry->nr_sector, __entry->errors)
);
-DEFINE_EVENT(block_rq_with_error, block_rq_abort,
+TRACE_EVENT(block_rq_insert,
TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq)
-);
-
-DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
-
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq),
- TP_ARGS(q, rq)
-);
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( unsigned int, bytes )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
- TP_PROTO(struct request_queue *q, struct request *rq),
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
- TP_ARGS(q, rq)
+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __entry->bytes, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
);
-DECLARE_EVENT_CLASS(block_rq,
+TRACE_EVENT(block_rq_issue,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -73,7 +86,7 @@ DECLARE_EVENT_CLASS(block_rq,
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
__array( char, rwbs, 6 )
- __array( char, comm, TASK_COMM_LEN )
+ __array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
@@ -95,18 +108,68 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->nr_sector, __entry->comm)
);
-DEFINE_EVENT(block_rq, block_rq_insert,
+TRACE_EVENT(block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
);
-DEFINE_EVENT(block_rq, block_rq_issue,
+TRACE_EVENT(block_rq_complete,
TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
);
TRACE_EVENT(block_bio_bounce,
@@ -165,7 +228,7 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio,
+TRACE_EVENT(block_bio_backmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -193,28 +256,63 @@ DECLARE_EVENT_CLASS(block_bio,
__entry->nr_sector, __entry->comm)
);
-DEFINE_EVENT(block_bio, block_bio_backmerge,
+TRACE_EVENT(block_bio_frontmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio)
-);
+ TP_ARGS(q, bio),
-DEFINE_EVENT(block_bio, block_bio_frontmerge,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
- TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
- TP_ARGS(q, bio)
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
);
-DEFINE_EVENT(block_bio, block_bio_queue,
+TRACE_EVENT(block_bio_queue,
TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio)
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
);
-DECLARE_EVENT_CLASS(block_get_rq,
+TRACE_EVENT(block_getrq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -243,18 +341,33 @@ DECLARE_EVENT_CLASS(block_get_rq,
__entry->nr_sector, __entry->comm)
);
-DEFINE_EVENT(block_get_rq, block_getrq,
+TRACE_EVENT(block_sleeprq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
- TP_ARGS(q, bio, rw)
-);
+ TP_ARGS(q, bio, rw),
-DEFINE_EVENT(block_get_rq, block_sleeprq,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+ TP_fast_assign(
+ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
+ __entry->sector = bio ? bio->bi_sector : 0;
+ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+ blk_fill_rwbs(__entry->rwbs,
+ bio ? bio->bi_rw : 0, __entry->nr_sector);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
- TP_ARGS(q, bio, rw)
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
);
TRACE_EVENT(block_plug,
@@ -274,7 +387,7 @@ TRACE_EVENT(block_plug,
TP_printk("[%s]", __entry->comm)
);
-DECLARE_EVENT_CLASS(block_unplug,
+TRACE_EVENT(block_unplug_timer,
TP_PROTO(struct request_queue *q),
@@ -293,18 +406,23 @@ DECLARE_EVENT_CLASS(block_unplug,
TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
-DEFINE_EVENT(block_unplug, block_unplug_timer,
+TRACE_EVENT(block_unplug_io,
TP_PROTO(struct request_queue *q),
- TP_ARGS(q)
-);
+ TP_ARGS(q),
-DEFINE_EVENT(block_unplug, block_unplug_io,
+ TP_STRUCT__entry(
+ __field( int, nr_rq )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
- TP_PROTO(struct request_queue *q),
+ TP_fast_assign(
+ __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
- TP_ARGS(q)
+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
TRACE_EVENT(block_split,
diff --git a/trunk/include/trace/events/ext4.h b/trunk/include/trace/events/ext4.h
index 318f76535bd4..d09550bf3f95 100644
--- a/trunk/include/trace/events/ext4.h
+++ b/trunk/include/trace/events/ext4.h
@@ -90,7 +90,7 @@ TRACE_EVENT(ext4_allocate_inode,
(unsigned long) __entry->dir, __entry->mode)
);
-DECLARE_EVENT_CLASS(ext4__write_begin,
+TRACE_EVENT(ext4_write_begin,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int flags),
@@ -118,23 +118,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->pos, __entry->len, __entry->flags)
);
-DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
-
- TP_ARGS(inode, pos, len, flags)
-);
-
-DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
-
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int flags),
-
- TP_ARGS(inode, pos, len, flags)
-);
-
-DECLARE_EVENT_CLASS(ext4__write_end,
+TRACE_EVENT(ext4_ordered_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
@@ -161,36 +145,57 @@ DECLARE_EVENT_CLASS(ext4__write_end,
__entry->pos, __entry->len, __entry->copied)
);
-DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
-
+TRACE_EVENT(ext4_writeback_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
- TP_ARGS(inode, pos, len, copied)
-);
+ TP_ARGS(inode, pos, len, copied),
-DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, copied )
+ ),
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->copied = copied;
+ ),
- TP_ARGS(inode, pos, len, copied)
+ TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->copied)
);
-DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
-
+TRACE_EVENT(ext4_journalled_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
+ TP_ARGS(inode, pos, len, copied),
- TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, copied )
+ ),
- TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
- unsigned int copied),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->copied = copied;
+ ),
- TP_ARGS(inode, pos, len, copied)
+ TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->copied)
);
TRACE_EVENT(ext4_writepage,
@@ -332,6 +337,60 @@ TRACE_EVENT(ext4_da_writepages_result,
(unsigned long) __entry->writeback_index)
);
+TRACE_EVENT(ext4_da_write_begin,
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int flags),
+
+ TP_ARGS(inode, pos, len, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %s ino %lu pos %llu len %u flags %u",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->flags)
+);
+
+TRACE_EVENT(ext4_da_write_end,
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, copied )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->copied = copied;
+ ),
+
+ TP_printk("dev %s ino %lu pos %llu len %u copied %u",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->copied)
+);
+
TRACE_EVENT(ext4_discard_blocks,
TP_PROTO(struct super_block *sb, unsigned long long blk,
unsigned long long count),
diff --git a/trunk/include/trace/events/irq.h b/trunk/include/trace/events/irq.h
index 0e4cfb694fe7..dcfcd4407623 100644
--- a/trunk/include/trace/events/irq.h
+++ b/trunk/include/trace/events/irq.h
@@ -82,7 +82,18 @@ TRACE_EVENT(irq_handler_exit,
__entry->irq, __entry->ret ? "handled" : "unhandled")
);
-DECLARE_EVENT_CLASS(softirq,
+/**
+ * softirq_entry - called immediately before the softirq handler
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * The @h parameter, contains a pointer to the struct softirq_action
+ * which has a pointer to the action handler that is called. By subtracting
+ * the @vec pointer from the @h pointer, we can determine the softirq
+ * number. Also, when used in combination with the softirq_exit tracepoint
+ * we can determine the softirq latency.
+ */
+TRACE_EVENT(softirq_entry,
TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
@@ -100,24 +111,6 @@ DECLARE_EVENT_CLASS(softirq,
show_softirq_name(__entry->vec))
);
-/**
- * softirq_entry - called immediately before the softirq handler
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
- *
- * The @h parameter, contains a pointer to the struct softirq_action
- * which has a pointer to the action handler that is called. By subtracting
- * the @vec pointer from the @h pointer, we can determine the softirq
- * number. Also, when used in combination with the softirq_exit tracepoint
- * we can determine the softirq latency.
- */
-DEFINE_EVENT(softirq, softirq_entry,
-
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
-
- TP_ARGS(h, vec)
-);
-
/**
* softirq_exit - called immediately after the softirq handler returns
* @h: pointer to struct softirq_action
@@ -129,11 +122,22 @@ DEFINE_EVENT(softirq, softirq_entry,
* combination with the softirq_entry tracepoint we can determine the softirq
* latency.
*/
-DEFINE_EVENT(softirq, softirq_exit,
+TRACE_EVENT(softirq_exit,
TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
- TP_ARGS(h, vec)
+ TP_ARGS(h, vec),
+
+ TP_STRUCT__entry(
+ __field( int, vec )
+ ),
+
+ TP_fast_assign(
+ __entry->vec = (int)(h - vec);
+ ),
+
+ TP_printk("vec=%d [action=%s]", __entry->vec,
+ show_softirq_name(__entry->vec))
);
#endif /* _TRACE_IRQ_H */
diff --git a/trunk/include/trace/events/jbd2.h b/trunk/include/trace/events/jbd2.h
index 96b370a050de..3c60b75adb9e 100644
--- a/trunk/include/trace/events/jbd2.h
+++ b/trunk/include/trace/events/jbd2.h
@@ -30,7 +30,7 @@ TRACE_EVENT(jbd2_checkpoint,
jbd2_dev_to_name(__entry->dev), __entry->result)
);
-DECLARE_EVENT_CLASS(jbd2_commit,
+TRACE_EVENT(jbd2_start_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
@@ -53,32 +53,73 @@ DECLARE_EVENT_CLASS(jbd2_commit,
__entry->sync_commit)
);
-DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
+TRACE_EVENT(jbd2_commit_locking,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
- TP_ARGS(journal, commit_transaction)
-);
+ TP_ARGS(journal, commit_transaction),
-DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+ __field( int, transaction )
+ ),
- TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->sync_commit = commit_transaction->t_synchronous_commit;
+ __entry->transaction = commit_transaction->t_tid;
+ ),
- TP_ARGS(journal, commit_transaction)
+ TP_printk("dev %s transaction %d sync %d",
+ jbd2_dev_to_name(__entry->dev), __entry->transaction,
+ __entry->sync_commit)
);
-DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
+TRACE_EVENT(jbd2_commit_flushing,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
- TP_ARGS(journal, commit_transaction)
+ TP_ARGS(journal, commit_transaction),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+ __field( int, transaction )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->sync_commit = commit_transaction->t_synchronous_commit;
+ __entry->transaction = commit_transaction->t_tid;
+ ),
+
+ TP_printk("dev %s transaction %d sync %d",
+ jbd2_dev_to_name(__entry->dev), __entry->transaction,
+ __entry->sync_commit)
);
-DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
+TRACE_EVENT(jbd2_commit_logging,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
- TP_ARGS(journal, commit_transaction)
+ TP_ARGS(journal, commit_transaction),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+ __field( int, transaction )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->sync_commit = commit_transaction->t_synchronous_commit;
+ __entry->transaction = commit_transaction->t_tid;
+ ),
+
+ TP_printk("dev %s transaction %d sync %d",
+ jbd2_dev_to_name(__entry->dev), __entry->transaction,
+ __entry->sync_commit)
);
TRACE_EVENT(jbd2_end_commit,
diff --git a/trunk/include/trace/events/kmem.h b/trunk/include/trace/events/kmem.h
index 3adca0ca9dbe..eaf46bdd18a5 100644
--- a/trunk/include/trace/events/kmem.h
+++ b/trunk/include/trace/events/kmem.h
@@ -44,7 +44,7 @@
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
) : "GFP_NOWAIT"
-DECLARE_EVENT_CLASS(kmem_alloc,
+TRACE_EVENT(kmalloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
@@ -78,23 +78,41 @@ DECLARE_EVENT_CLASS(kmem_alloc,
show_gfp_flags(__entry->gfp_flags))
);
-DEFINE_EVENT(kmem_alloc, kmalloc,
+TRACE_EVENT(kmem_cache_alloc,
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
-);
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ ),
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ ),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags))
);
-DECLARE_EVENT_CLASS(kmem_alloc_node,
+TRACE_EVENT(kmalloc_node,
TP_PROTO(unsigned long call_site,
const void *ptr,
@@ -132,25 +150,45 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->node)
);
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+TRACE_EVENT(kmem_cache_alloc_node,
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
-);
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ __field( int, node )
+ ),
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ __entry->node = node;
+ ),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node)
);
-DECLARE_EVENT_CLASS(kmem_free,
+TRACE_EVENT(kfree,
TP_PROTO(unsigned long call_site, const void *ptr),
@@ -169,18 +207,23 @@ DECLARE_EVENT_CLASS(kmem_free,
TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
-DEFINE_EVENT(kmem_free, kfree,
+TRACE_EVENT(kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr)
-);
+ TP_ARGS(call_site, ptr),
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ ),
- TP_PROTO(unsigned long call_site, const void *ptr),
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ ),
- TP_ARGS(call_site, ptr)
+ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
TRACE_EVENT(mm_page_free_direct,
@@ -256,7 +299,7 @@ TRACE_EVENT(mm_page_alloc,
show_gfp_flags(__entry->gfp_flags))
);
-DECLARE_EVENT_CLASS(mm_page,
+TRACE_EVENT(mm_page_alloc_zone_locked,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
@@ -282,22 +325,29 @@ DECLARE_EVENT_CLASS(mm_page,
__entry->order == 0)
);
-DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
+TRACE_EVENT(mm_page_pcpu_drain,
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_PROTO(struct page *page, int order, int migratetype),
- TP_ARGS(page, order, migratetype)
-);
-
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+ TP_ARGS(page, order, migratetype),
- TP_PROTO(struct page *page, unsigned int order, int migratetype),
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, order )
+ __field( int, migratetype )
+ ),
- TP_ARGS(page, order, migratetype),
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
- __entry->page, page_to_pfn(__entry->page),
- __entry->order, __entry->migratetype)
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype)
);
TRACE_EVENT(mm_page_alloc_extfrag,
diff --git a/trunk/include/trace/events/mce.h b/trunk/include/trace/events/mce.h
deleted file mode 100644
index 7eee77895cb3..000000000000
--- a/trunk/include/trace/events/mce.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mce
-
-#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_MCE_H
-
-#include
-#include
-#include
-
-TRACE_EVENT(mce_record,
-
- TP_PROTO(struct mce *m),
-
- TP_ARGS(m),
-
- TP_STRUCT__entry(
- __field( u64, mcgcap )
- __field( u64, mcgstatus )
- __field( u8, bank )
- __field( u64, status )
- __field( u64, addr )
- __field( u64, misc )
- __field( u64, ip )
- __field( u8, cs )
- __field( u64, tsc )
- __field( u64, walltime )
- __field( u32, cpu )
- __field( u32, cpuid )
- __field( u32, apicid )
- __field( u32, socketid )
- __field( u8, cpuvendor )
- ),
-
- TP_fast_assign(
- __entry->mcgcap = m->mcgcap;
- __entry->mcgstatus = m->mcgstatus;
- __entry->bank = m->bank;
- __entry->status = m->status;
- __entry->addr = m->addr;
- __entry->misc = m->misc;
- __entry->ip = m->ip;
- __entry->cs = m->cs;
- __entry->tsc = m->tsc;
- __entry->walltime = m->time;
- __entry->cpu = m->extcpu;
- __entry->cpuid = m->cpuid;
- __entry->apicid = m->apicid;
- __entry->socketid = m->socketid;
- __entry->cpuvendor = m->cpuvendor;
- ),
-
- TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
- __entry->cpu,
- __entry->mcgcap, __entry->mcgstatus,
- __entry->bank, __entry->status,
- __entry->addr, __entry->misc,
- __entry->cs, __entry->ip,
- __entry->tsc,
- __entry->cpuvendor, __entry->cpuid,
- __entry->walltime,
- __entry->socketid,
- __entry->apicid)
-);
-
-#endif /* _TRACE_MCE_H */
-
-/* This part must be outside protection */
-#include
diff --git a/trunk/include/trace/events/module.h b/trunk/include/trace/events/module.h
index 4b0f48ba16a6..84160fb18478 100644
--- a/trunk/include/trace/events/module.h
+++ b/trunk/include/trace/events/module.h
@@ -51,7 +51,7 @@ TRACE_EVENT(module_free,
TP_printk("%s", __get_str(name))
);
-DECLARE_EVENT_CLASS(module_refcnt,
+TRACE_EVENT(module_get,
TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
@@ -73,18 +73,26 @@ DECLARE_EVENT_CLASS(module_refcnt,
__get_str(name), (void *)__entry->ip, __entry->refcnt)
);
-DEFINE_EVENT(module_refcnt, module_get,
+TRACE_EVENT(module_put,
TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
- TP_ARGS(mod, ip, refcnt)
-);
+ TP_ARGS(mod, ip, refcnt),
-DEFINE_EVENT(module_refcnt, module_put,
+ TP_STRUCT__entry(
+ __field( unsigned long, ip )
+ __field( int, refcnt )
+ __string( name, mod->name )
+ ),
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->refcnt = refcnt;
+ __assign_str(name, mod->name);
+ ),
- TP_ARGS(mod, ip, refcnt)
+ TP_printk("%s call_site=%pf refcnt=%d",
+ __get_str(name), (void *)__entry->ip, __entry->refcnt)
);
TRACE_EVENT(module_request,
diff --git a/trunk/include/trace/events/power.h b/trunk/include/trace/events/power.h
index c4efe9b8280d..9bb96e5a2848 100644
--- a/trunk/include/trace/events/power.h
+++ b/trunk/include/trace/events/power.h
@@ -16,7 +16,7 @@ enum {
};
#endif
-DECLARE_EVENT_CLASS(power,
+TRACE_EVENT(power_start,
TP_PROTO(unsigned int type, unsigned int state),
@@ -35,20 +35,6 @@ DECLARE_EVENT_CLASS(power,
TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
);
-DEFINE_EVENT(power, power_start,
-
- TP_PROTO(unsigned int type, unsigned int state),
-
- TP_ARGS(type, state)
-);
-
-DEFINE_EVENT(power, power_frequency,
-
- TP_PROTO(unsigned int type, unsigned int state),
-
- TP_ARGS(type, state)
-);
-
TRACE_EVENT(power_end,
TP_PROTO(int dummy),
@@ -67,6 +53,26 @@ TRACE_EVENT(power_end,
);
+
+TRACE_EVENT(power_frequency,
+
+ TP_PROTO(unsigned int type, unsigned int state),
+
+ TP_ARGS(type, state),
+
+ TP_STRUCT__entry(
+ __field( u64, type )
+ __field( u64, state )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->state = state;
+ ),
+
+ TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state)
+);
+
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h
index cfceb0b73e20..b50b9856c59f 100644
--- a/trunk/include/trace/events/sched.h
+++ b/trunk/include/trace/events/sched.h
@@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task,
* (NOTE: the 'rq' argument is not used by generic trace events,
* but used by the latency tracer plugin. )
*/
-DECLARE_EVENT_CLASS(sched_wakeup_template,
+TRACE_EVENT(sched_wakeup,
TP_PROTO(struct rq *rq, struct task_struct *p, int success),
@@ -110,19 +110,38 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->success, __entry->target_cpu)
);
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
-
/*
* Tracepoint for waking up a new task:
*
* (NOTE: the 'rq' argument is not used by generic trace events,
* but used by the latency tracer plugin. )
*/
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+TRACE_EVENT(sched_wakeup_new,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+
+ TP_ARGS(rq, p, success),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, success )
+ __field( int, target_cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->success = success;
+ __entry->target_cpu = task_cpu(p);
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->success, __entry->target_cpu)
+);
/*
* Tracepoint for task switches, performed by the scheduler:
@@ -197,7 +216,10 @@ TRACE_EVENT(sched_migrate_task,
__entry->orig_cpu, __entry->dest_cpu)
);
-DECLARE_EVENT_CLASS(sched_process_template,
+/*
+ * Tracepoint for freeing a task:
+ */
+TRACE_EVENT(sched_process_free,
TP_PROTO(struct task_struct *p),
@@ -219,20 +241,30 @@ DECLARE_EVENT_CLASS(sched_process_template,
__entry->comm, __entry->pid, __entry->prio)
);
-/*
- * Tracepoint for freeing a task:
- */
-DEFINE_EVENT(sched_process_template, sched_process_free,
- TP_PROTO(struct task_struct *p),
- TP_ARGS(p));
-
-
/*
* Tracepoint for a task exiting:
*/
-DEFINE_EVENT(sched_process_template, sched_process_exit,
- TP_PROTO(struct task_struct *p),
- TP_ARGS(p));
+TRACE_EVENT(sched_process_exit,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d",
+ __entry->comm, __entry->pid, __entry->prio)
+);
/*
* Tracepoint for a waiting task:
@@ -287,11 +319,41 @@ TRACE_EVENT(sched_process_fork,
__entry->child_comm, __entry->child_pid)
);
+/*
+ * Tracepoint for sending a signal:
+ */
+TRACE_EVENT(sched_signal_send,
+
+ TP_PROTO(int sig, struct task_struct *p),
+
+ TP_ARGS(sig, p),
+
+ TP_STRUCT__entry(
+ __field( int, sig )
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->sig = sig;
+ ),
+
+ TP_printk("sig=%d comm=%s pid=%d",
+ __entry->sig, __entry->comm, __entry->pid)
+);
+
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
-DECLARE_EVENT_CLASS(sched_stat_template,
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+TRACE_EVENT(sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
@@ -317,31 +379,6 @@ DECLARE_EVENT_CLASS(sched_stat_template,
(unsigned long long)__entry->delay)
);
-
-/*
- * Tracepoint for accounting wait time (time the task is runnable
- * but not actually running due to scheduler contention).
- */
-DEFINE_EVENT(sched_stat_template, sched_stat_wait,
- TP_PROTO(struct task_struct *tsk, u64 delay),
- TP_ARGS(tsk, delay));
-
-/*
- * Tracepoint for accounting sleep time (time the task is not runnable,
- * including iowait, see below).
- */
-DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
- TP_PROTO(struct task_struct *tsk, u64 delay),
- TP_ARGS(tsk, delay));
-
-/*
- * Tracepoint for accounting iowait time (time the task is not runnable
- * due to waiting on IO to complete).
- */
-DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
- TP_PROTO(struct task_struct *tsk, u64 delay),
- TP_ARGS(tsk, delay));
-
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
@@ -375,6 +412,66 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+TRACE_EVENT(sched_stat_sleep,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+TRACE_EVENT(sched_stat_iowait,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/trunk/include/trace/events/signal.h b/trunk/include/trace/events/signal.h
deleted file mode 100644
index a510b75ac304..000000000000
--- a/trunk/include/trace/events/signal.h
+++ /dev/null
@@ -1,173 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM signal
-
-#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SIGNAL_H
-
-#include
-#include
-#include
-
-#define TP_STORE_SIGINFO(__entry, info) \
- do { \
- if (info == SEND_SIG_NOINFO) { \
- __entry->errno = 0; \
- __entry->code = SI_USER; \
- } else if (info == SEND_SIG_PRIV) { \
- __entry->errno = 0; \
- __entry->code = SI_KERNEL; \
- } else { \
- __entry->errno = info->si_errno; \
- __entry->code = info->si_code; \
- } \
- } while (0)
-
-/**
- * signal_generate - called when a signal is generated
- * @sig: signal number
- * @info: pointer to struct siginfo
- * @task: pointer to struct task_struct
- *
- * Current process sends a 'sig' signal to 'task' process with
- * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
- * 'info' is not a pointer and you can't access its field. Instead,
- * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
- * means that si_code is SI_KERNEL.
- */
-TRACE_EVENT(signal_generate,
-
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
-
- TP_ARGS(sig, info, task),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, errno )
- __field( int, code )
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- TP_STORE_SIGINFO(__entry, info);
- memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
- __entry->pid = task->pid;
- ),
-
- TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
- __entry->sig, __entry->errno, __entry->code,
- __entry->comm, __entry->pid)
-);
-
-/**
- * signal_deliver - called when a signal is delivered
- * @sig: signal number
- * @info: pointer to struct siginfo
- * @ka: pointer to struct k_sigaction
- *
- * A 'sig' signal is delivered to current process with 'info' siginfo,
- * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
- * SIG_DFL.
- * Note that some signals reported by signal_generate tracepoint can be
- * lost, ignored or modified (by debugger) before hitting this tracepoint.
- * This means, this can show which signals are actually delivered, but
- * matching generated signals and delivered signals may not be correct.
- */
-TRACE_EVENT(signal_deliver,
-
- TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
-
- TP_ARGS(sig, info, ka),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, errno )
- __field( int, code )
- __field( unsigned long, sa_handler )
- __field( unsigned long, sa_flags )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- TP_STORE_SIGINFO(__entry, info);
- __entry->sa_handler = (unsigned long)ka->sa.sa_handler;
- __entry->sa_flags = ka->sa.sa_flags;
- ),
-
- TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
- __entry->sig, __entry->errno, __entry->code,
- __entry->sa_handler, __entry->sa_flags)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-TRACE_EVENT(signal_lose_info,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-#endif /* _TRACE_SIGNAL_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/trunk/include/trace/events/workqueue.h b/trunk/include/trace/events/workqueue.h
index d6c974474e70..e4612dbd7ba6 100644
--- a/trunk/include/trace/events/workqueue.h
+++ b/trunk/include/trace/events/workqueue.h
@@ -8,7 +8,7 @@
#include
#include
-DECLARE_EVENT_CLASS(workqueue,
+TRACE_EVENT(workqueue_insertion,
TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
@@ -30,18 +30,26 @@ DECLARE_EVENT_CLASS(workqueue,
__entry->thread_pid, __entry->func)
);
-DEFINE_EVENT(workqueue, workqueue_insertion,
+TRACE_EVENT(workqueue_execution,
TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
- TP_ARGS(wq_thread, work)
-);
+ TP_ARGS(wq_thread, work),
-DEFINE_EVENT(workqueue, workqueue_execution,
+ TP_STRUCT__entry(
+ __array(char, thread_comm, TASK_COMM_LEN)
+ __field(pid_t, thread_pid)
+ __field(work_func_t, func)
+ ),
- TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+ TP_fast_assign(
+ memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+ __entry->thread_pid = wq_thread->pid;
+ __entry->func = work->func;
+ ),
- TP_ARGS(wq_thread, work)
+ TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
+ __entry->thread_pid, __entry->func)
);
/* Trace the creation of one workqueue thread on a cpu */
diff --git a/trunk/include/trace/ftrace.h b/trunk/include/trace/ftrace.h
index 2c9c073e45ad..4945d1c99864 100644
--- a/trunk/include/trace/ftrace.h
+++ b/trunk/include/trace/ftrace.h
@@ -18,26 +18,6 @@
#include <linux/ftrace_event.h>
-/*
- * DECLARE_EVENT_CLASS can be used to add a generic function
- * handlers for events. That is, if all events have the same
- * parameters and just have distinct trace points.
- * Each tracepoint can be defined with DEFINE_EVENT and that
- * will map the DECLARE_EVENT_CLASS to the tracepoint.
- *
- * TRACE_EVENT is a one to one mapping between tracepoint and template.
- */
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
- DECLARE_EVENT_CLASS(name, \
- PARAMS(proto), \
- PARAMS(args), \
- PARAMS(tstruct), \
- PARAMS(assign), \
- PARAMS(print)); \
- DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
-
-
#undef __field
#define __field(type, item) type item;
@@ -56,21 +36,15 @@
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
- struct ftrace_raw_##name { \
- struct trace_entry ent; \
- tstruct \
- char __data[0]; \
- };
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ struct ftrace_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[0]; \
+ }; \
static struct ftrace_event_call event_##name
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#undef __cpparg
#define __cpparg(arg...) arg
@@ -115,19 +89,12 @@
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
struct ftrace_data_offsets_##call { \
tstruct; \
};
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -203,49 +170,16 @@
#undef TP_perf_assign
#define TP_perf_assign(args...)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int \
-ftrace_format_setup_##call(struct ftrace_event_call *unused, \
- struct trace_seq *s) \
-{ \
- struct ftrace_raw_##call field __attribute__((unused)); \
- int ret = 0; \
- \
- tstruct; \
- \
- return ret; \
-} \
- \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct ftrace_event_call *unused, \
- struct trace_seq *s) \
-{ \
- int ret = 0; \
- \
- ret = ftrace_format_setup_##call(unused, s); \
- if (!ret) \
- return ret; \
- \
- ret = trace_seq_printf(s, "\nprint fmt: " print); \
- \
- return ret; \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-static int \
-ftrace_format_##name(struct ftrace_event_call *unused, \
struct trace_seq *s) \
{ \
+ struct ftrace_raw_##call field __attribute__((unused)); \
int ret = 0; \
\
- ret = ftrace_format_setup_##template(unused, s); \
- if (!ret) \
- return ret; \
+ tstruct; \
\
trace_seq_printf(s, "\nprint fmt: " print); \
\
@@ -321,55 +255,13 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
ftrace_print_symbols_seq(p, value, symbols); \
})
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static enum print_line_t \
-ftrace_raw_output_id_##call(int event_id, const char *name, \
- struct trace_iterator *iter, int flags) \
-{ \
- struct trace_seq *s = &iter->seq; \
- struct ftrace_raw_##call *field; \
- struct trace_entry *entry; \
- struct trace_seq *p; \
- int ret; \
- \
- entry = iter->ent; \
- \
- if (entry->type != event_id) { \
- WARN_ON_ONCE(1); \
- return TRACE_TYPE_UNHANDLED; \
- } \
- \
- field = (typeof(field))entry; \
- \
- p = &get_cpu_var(ftrace_event_seq); \
- trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", name); \
- if (ret) \
- ret = trace_seq_printf(s, print); \
- put_cpu(); \
- if (!ret) \
- return TRACE_TYPE_PARTIAL_LINE; \
- \
- return TRACE_TYPE_HANDLED; \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
-static enum print_line_t \
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
-{ \
- return ftrace_raw_output_id_##template(event_##name.id, \
- #name, iter, flags); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
struct trace_seq *s = &iter->seq; \
- struct ftrace_raw_##template *field; \
+ struct ftrace_raw_##call *field; \
struct trace_entry *entry; \
struct trace_seq *p; \
int ret; \
@@ -385,16 +277,14 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
\
p = &get_cpu_var(ftrace_event_seq); \
trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", #call); \
- if (ret) \
- ret = trace_seq_printf(s, print); \
+ ret = trace_seq_printf(s, #call ": " print); \
put_cpu(); \
if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \
\
return TRACE_TYPE_HANDLED; \
}
-
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __field_ext
@@ -428,8 +318,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
@@ -445,13 +335,6 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
return ret; \
}
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -478,10 +361,10 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
__data_size += (len) * sizeof(type);
#undef __string
-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
+#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
@@ -493,13 +376,6 @@ static inline int ftrace_get_offsets_##call( \
return __data_size; \
}
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#ifdef CONFIG_EVENT_PROFILE
@@ -521,28 +397,21 @@ static inline int ftrace_get_offsets_##call( \
*
*/
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
\
-static void ftrace_profile_##name(proto); \
+static void ftrace_profile_##call(proto); \
\
-static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
{ \
- return register_trace_##name(ftrace_profile_##name); \
+ return register_trace_##call(ftrace_profile_##call); \
} \
\
-static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
{ \
- unregister_trace_##name(ftrace_profile_##name); \
+ unregister_trace_##call(ftrace_profile_##call); \
}
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif
@@ -681,13 +550,15 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
#define __assign_str(dst, src) \
strcpy(__get_str(dst), src);
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
\
-static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
- proto) \
+static struct ftrace_event_call event_##call; \
+ \
+static void ftrace_raw_event_##call(proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ftrace_event_call *event_call = &event_##call; \
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
struct ring_buffer *buffer; \
@@ -701,7 +572,7 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
event = trace_current_buffer_lock_reserve(&buffer, \
- event_call->id, \
+ event_##call.id, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
@@ -716,14 +587,6 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
if (!filter_current_check_discard(buffer, event_call, entry, event)) \
trace_nowake_buffer_unlock_commit(buffer, \
event, irq_flags, pc); \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static void ftrace_raw_event_##call(proto) \
-{ \
- ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
@@ -756,36 +619,7 @@ static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
event_##call.id = id; \
INIT_LIST_HEAD(&event_##call.fields); \
return 0; \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct ftrace_event_call __used \
-__attribute__((__aligned__(4))) \
-__attribute__((section("_ftrace_events"))) event_##call = { \
- .name = #call, \
- .system = __stringify(TRACE_SYSTEM), \
- .event = &ftrace_event_type_##call, \
- .raw_init = ftrace_raw_init_event_##call, \
- .regfunc = ftrace_raw_reg_event_##call, \
- .unregfunc = ftrace_raw_unreg_event_##call, \
- .show_format = ftrace_format_##template, \
- .define_fields = ftrace_define_fields_##template, \
- _TRACE_PROFILE_INIT(call) \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+} \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
@@ -797,7 +631,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
.show_format = ftrace_format_##call, \
- .define_fields = ftrace_define_fields_##template, \
+ .define_fields = ftrace_define_fields_##call, \
_TRACE_PROFILE_INIT(call) \
}
@@ -885,26 +719,22 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#undef __perf_count
#define __perf_count(c) __count = (c)
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static void \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
- proto) \
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static void ftrace_profile_##call(proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- extern int perf_swevent_get_recursion_context(void); \
- extern void perf_swevent_put_recursion_context(int rctx); \
+ struct ftrace_event_call *event_call = &event_##call; \
extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
+ struct perf_trace_buf *trace_buf; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
struct trace_entry *ent; \
int __entry_size; \
int __data_size; \
- char *trace_buf; \
char *raw_data; \
int __cpu; \
- int rctx; \
int pc; \
\
pc = preempt_count(); \
@@ -919,11 +749,6 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
return; \
\
local_irq_save(irq_flags); \
- \
- rctx = perf_swevent_get_recursion_context(); \
- if (rctx < 0) \
- goto end_recursion; \
- \
__cpu = smp_processor_id(); \
\
if (in_nmi()) \
@@ -934,7 +759,13 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
if (!trace_buf) \
goto end; \
\
- raw_data = per_cpu_ptr(trace_buf, __cpu); \
+ trace_buf = per_cpu_ptr(trace_buf, __cpu); \
+ if (trace_buf->recursion++) \
+ goto end_recursion; \
+ \
+ barrier(); \
+ \
+ raw_data = trace_buf->buf; \
\
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
entry = (struct ftrace_raw_##call *)raw_data; \
@@ -949,26 +780,13 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
perf_tp_event(event_call->id, __addr, __count, entry, \
__entry_size); \
\
-end: \
- perf_swevent_put_recursion_context(rctx); \
end_recursion: \
+ trace_buf->recursion--; \
+end: \
local_irq_restore(irq_flags); \
\
}
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static void ftrace_profile_##call(proto) \
-{ \
- struct ftrace_event_call *event_call = &event_##call; \
- \
- ftrace_profile_templ_##template(event_call, args); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */
diff --git a/trunk/include/trace/syscall.h b/trunk/include/trace/syscall.h
index 961fda3556bb..51ee17d3632a 100644
--- a/trunk/include/trace/syscall.h
+++ b/trunk/include/trace/syscall.h
@@ -12,19 +12,21 @@
* A syscall entry in the ftrace syscalls array.
*
* @name: name of the syscall
- * @syscall_nr: number of the syscall
* @nb_args: number of parameters it takes
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
+ * @enter_id: associated ftrace enter event id
+ * @exit_id: associated ftrace exit event id
* @enter_event: associated syscall_enter trace event
* @exit_event: associated syscall_exit trace event
*/
struct syscall_metadata {
const char *name;
- int syscall_nr;
int nb_args;
const char **types;
const char **args;
+ int enter_id;
+ int exit_id;
struct ftrace_event_call *enter_event;
struct ftrace_event_call *exit_event;
@@ -32,7 +34,11 @@ struct syscall_metadata {
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
+extern int syscall_name_to_nr(char *name);
+void set_syscall_enter_id(int num, int id);
+void set_syscall_exit_id(int num, int id);
+extern struct trace_event event_syscall_enter;
+extern struct trace_event event_syscall_exit;
extern int syscall_enter_format(struct ftrace_event_call *call,
struct trace_seq *s);
@@ -50,10 +56,10 @@ enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
#endif
#ifdef CONFIG_EVENT_PROFILE
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int reg_prof_syscall_enter(char *name);
+void unreg_prof_syscall_enter(char *name);
+int reg_prof_syscall_exit(char *name);
+void unreg_prof_syscall_exit(char *name);
#endif
diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile
index 6b7ce8173dfd..b8d4cd8ac0b9 100644
--- a/trunk/kernel/Makefile
+++ b/trunk/kernel/Makefile
@@ -21,7 +21,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
CFLAGS_REMOVE_rtmutex-debug.o = -pg
CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
endif
obj-$(CONFIG_FREEZER) += freezer.o
@@ -96,7 +95,6 @@ obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra , the -fno-omit-frame-pointer is
diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c
index 3f45e3cf931d..f7864ac2ecc1 100644
--- a/trunk/kernel/exit.c
+++ b/trunk/kernel/exit.c
@@ -49,7 +49,6 @@
#include
#include
#include
-#include <linux/hw_breakpoint.h>
#include
#include
@@ -978,10 +977,6 @@ NORET_TYPE void do_exit(long code)
proc_exit_connector(tsk);
- /*
- * FIXME: do that only when needed, using sched_exit tracepoint
- */
- flush_ptrace_hw_breakpoint(tsk);
/*
* Flush inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
diff --git a/trunk/kernel/hw_breakpoint.c b/trunk/kernel/hw_breakpoint.c
deleted file mode 100644
index cf5ee1628411..000000000000
--- a/trunk/kernel/hw_breakpoint.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) 2007 Alan Stern
- * Copyright (C) IBM Corporation, 2009
- * Copyright (C) 2009, Frederic Weisbecker
- *
- * Thanks to Ingo Molnar for his many suggestions.
- *
- * Authors: Alan Stern
- * K.Prasad
- * Frederic Weisbecker
- */
-
-/*
- * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
- * using the CPU's debug registers.
- * This file contains the arch-independent routines.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include <linux/hw_breakpoint.h>
-
-/*
- * Constraints data
- */
-
-/* Number of pinned cpu breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
-
-/* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
-
-/* Number of non-pinned cpu/task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
-
-/* Gather the number of total pinned and un-pinned bp in a cpuset */
-struct bp_busy_slots {
- unsigned int pinned;
- unsigned int flexible;
-};
-
-/* Serialize accesses to the above constraints */
-static DEFINE_MUTEX(nr_bp_mutex);
-
-/*
- * Report the maximum number of pinned breakpoints a task
- * have in this cpu
- */
-static unsigned int max_task_bp_pinned(int cpu)
-{
- int i;
- unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
-
- for (i = HBP_NUM -1; i >= 0; i--) {
- if (tsk_pinned[i] > 0)
- return i + 1;
- }
-
- return 0;
-}
-
-/*
- * Report the number of pinned/un-pinned breakpoints we have in
- * a given cpu (cpu > -1) or in all of them (cpu = -1).
- */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
-{
- if (cpu >= 0) {
- slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
- slots->pinned += max_task_bp_pinned(cpu);
- slots->flexible = per_cpu(nr_bp_flexible, cpu);
-
- return;
- }
-
- for_each_online_cpu(cpu) {
- unsigned int nr;
-
- nr = per_cpu(nr_cpu_bp_pinned, cpu);
- nr += max_task_bp_pinned(cpu);
-
- if (nr > slots->pinned)
- slots->pinned = nr;
-
- nr = per_cpu(nr_bp_flexible, cpu);
-
- if (nr > slots->flexible)
- slots->flexible = nr;
- }
-}
-
-/*
- * Add a pinned breakpoint for the given task in our constraint table
- */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
-{
- int count = 0;
- struct perf_event *bp;
- struct perf_event_context *ctx = tsk->perf_event_ctxp;
- unsigned int *tsk_pinned;
- struct list_head *list;
- unsigned long flags;
-
- if (WARN_ONCE(!ctx, "No perf context for this task"))
- return;
-
- list = &ctx->event_list;
-
- spin_lock_irqsave(&ctx->lock, flags);
-
- /*
- * The current breakpoint counter is not included in the list
- * at the open() callback time
- */
- list_for_each_entry(bp, list, event_entry) {
- if (bp->attr.type == PERF_TYPE_BREAKPOINT)
- count++;
- }
-
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
- return;
-
- tsk_pinned = per_cpu(task_bp_pinned, cpu);
- if (enable) {
- tsk_pinned[count]++;
- if (count > 0)
- tsk_pinned[count-1]--;
- } else {
- tsk_pinned[count]--;
- if (count > 0)
- tsk_pinned[count-1]++;
- }
-}
-
-/*
- * Add/remove the given breakpoint in our constraint table
- */
-static void toggle_bp_slot(struct perf_event *bp, bool enable)
-{
- int cpu = bp->cpu;
- struct task_struct *tsk = bp->ctx->task;
-
- /* Pinned counter task profiling */
- if (tsk) {
- if (cpu >= 0) {
- toggle_bp_task_slot(tsk, cpu, enable);
- return;
- }
-
- for_each_online_cpu(cpu)
- toggle_bp_task_slot(tsk, cpu, enable);
- return;
- }
-
- /* Pinned counter cpu profiling */
- if (enable)
- per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
- else
- per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
-}
-
-/*
- * Contraints to check before allowing this new breakpoint counter:
- *
- * == Non-pinned counter == (Considered as pinned for now)
- *
- * - If attached to a single cpu, check:
- *
- * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
- *
- * -> If there are already non-pinned counters in this cpu, it means
- * there is already a free slot for them.
- * Otherwise, we check that the maximum number of per task
- * breakpoints (for this cpu) plus the number of per cpu breakpoint
- * (for this cpu) doesn't cover every registers.
- *
- * - If attached to every cpus, check:
- *
- * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
- *
- * -> This is roughly the same, except we check the number of per cpu
- * bp for every cpu and we keep the max one. Same for the per tasks
- * breakpoints.
- *
- *
- * == Pinned counter ==
- *
- * - If attached to a single cpu, check:
- *
- * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
- *
- * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
- * one register at least (or they will never be fed).
- *
- * - If attached to every cpus, check:
- *
- * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
- */
-int reserve_bp_slot(struct perf_event *bp)
-{
- struct bp_busy_slots slots = {0};
- int ret = 0;
-
- mutex_lock(&nr_bp_mutex);
-
- fetch_bp_busy_slots(&slots, bp->cpu);
-
- /* Flexible counters need to keep at least one slot */
- if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
- ret = -ENOSPC;
- goto end;
- }
-
- toggle_bp_slot(bp, true);
-
-end:
- mutex_unlock(&nr_bp_mutex);
-
- return ret;
-}
-
-void release_bp_slot(struct perf_event *bp)
-{
- mutex_lock(&nr_bp_mutex);
-
- toggle_bp_slot(bp, false);
-
- mutex_unlock(&nr_bp_mutex);
-}
-
-
-int __register_perf_hw_breakpoint(struct perf_event *bp)
-{
- int ret;
-
- ret = reserve_bp_slot(bp);
- if (ret)
- return ret;
-
- /*
- * Ptrace breakpoints can be temporary perf events only
- * meant to reserve a slot. In this case, it is created disabled and
- * we don't want to check the params right now (as we put a null addr)
- * But perf tools create events as disabled and we want to check
- * the params for them.
- * This is a quick hack that will be removed soon, once we remove
- * the tmp breakpoints from ptrace
- */
- if (!bp->attr.disabled || bp->callback == perf_bp_event)
- ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
-
- return ret;
-}
-
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
- bp->callback = perf_bp_event;
-
- return __register_perf_hw_breakpoint(bp);
-}
-
-/**
- * register_user_hw_breakpoint - register a hardware breakpoint for user space
- * @attr: breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
- */
-struct perf_event *
-register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk)
-{
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
-}
-EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
-
-/**
- * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
- * @bp: the breakpoint structure to modify
- * @attr: new breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
- */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk)
-{
- /*
- * FIXME: do it without unregistering
- * - We don't want to lose our slot
- * - If the new bp is incorrect, don't lose the older one
- */
- unregister_hw_breakpoint(bp);
-
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
-}
-EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
-
-/**
- * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
- * @bp: the breakpoint structure to unregister
- */
-void unregister_hw_breakpoint(struct perf_event *bp)
-{
- if (!bp)
- return;
- perf_event_release_kernel(bp);
-}
-EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
-
-/**
- * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
- * @attr: breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- *
- * @return a set of per_cpu pointers to perf events
- */
-struct perf_event **
-register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered)
-{
- struct perf_event **cpu_events, **pevent, *bp;
- long err;
- int cpu;
-
- cpu_events = alloc_percpu(typeof(*cpu_events));
- if (!cpu_events)
- return ERR_PTR(-ENOMEM);
-
- for_each_possible_cpu(cpu) {
- pevent = per_cpu_ptr(cpu_events, cpu);
- bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
-
- *pevent = bp;
-
- if (IS_ERR(bp)) {
- err = PTR_ERR(bp);
- goto fail;
- }
- }
-
- return cpu_events;
-
-fail:
- for_each_possible_cpu(cpu) {
- pevent = per_cpu_ptr(cpu_events, cpu);
- if (IS_ERR(*pevent))
- break;
- unregister_hw_breakpoint(*pevent);
- }
- free_percpu(cpu_events);
- /* return the error if any */
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
-
-/**
- * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
- * @cpu_events: the per cpu set of events to unregister
- */
-void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
-{
- int cpu;
- struct perf_event **pevent;
-
- for_each_possible_cpu(cpu) {
- pevent = per_cpu_ptr(cpu_events, cpu);
- unregister_hw_breakpoint(*pevent);
- }
- free_percpu(cpu_events);
-}
-EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
-
-static struct notifier_block hw_breakpoint_exceptions_nb = {
- .notifier_call = hw_breakpoint_exceptions_notify,
- /* we need to be notified first */
- .priority = 0x7fffffff
-};
-
-static int __init init_hw_breakpoint(void)
-{
- return register_die_notifier(&hw_breakpoint_exceptions_nb);
-}
-core_initcall(init_hw_breakpoint);
-
-
-struct pmu perf_ops_bp = {
- .enable = arch_install_hw_breakpoint,
- .disable = arch_uninstall_hw_breakpoint,
- .read = hw_breakpoint_pmu_read,
- .unthrottle = hw_breakpoint_pmu_unthrottle
-};
diff --git a/trunk/kernel/kallsyms.c b/trunk/kernel/kallsyms.c
index 8e5288a8a355..8b6b8b697c68 100644
--- a/trunk/kernel/kallsyms.c
+++ b/trunk/kernel/kallsyms.c
@@ -181,7 +181,6 @@ unsigned long kallsyms_lookup_name(const char *name)
}
return module_kallsyms_lookup_name(name);
}
-EXPORT_SYMBOL_GPL(kallsyms_lookup_name);
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
diff --git a/trunk/kernel/perf_event.c b/trunk/kernel/perf_event.c
index 6b7ddba1dd64..3256e36ad251 100644
--- a/trunk/kernel/perf_event.c
+++ b/trunk/kernel/perf_event.c
@@ -29,7 +29,6 @@
#include
#include
#include
-#include <linux/hw_breakpoint.h>
#include
@@ -246,49 +245,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
put_ctx(ctx);
}
-static inline u64 perf_clock(void)
-{
- return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
- u64 now = perf_clock();
-
- ctx->time += now - ctx->timestamp;
- ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
- struct perf_event_context *ctx = event->ctx;
- u64 run_end;
-
- if (event->state < PERF_EVENT_STATE_INACTIVE ||
- event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
- return;
-
- if (ctx->is_active)
- run_end = ctx->time;
- else
- run_end = event->tstamp_stopped;
-
- event->total_time_enabled = run_end - event->tstamp_enabled;
-
- if (event->state == PERF_EVENT_STATE_INACTIVE)
- run_end = event->tstamp_stopped;
- else
- run_end = ctx->time;
-
- event->total_time_running = run_end - event->tstamp_running;
-}
-
/*
* Add a event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
@@ -337,18 +293,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
if (event->group_leader != event)
event->group_leader->nr_siblings--;
- update_event_times(event);
-
- /*
- * If event was in error state, then keep it
- * that way, otherwise bogus counts will be
- * returned on read(). The only way to get out
- * of error state is by explicit re-enabling
- * of the event
- */
- if (event->state > PERF_EVENT_STATE_OFF)
- event->state = PERF_EVENT_STATE_OFF;
-
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
@@ -502,11 +446,50 @@ static void perf_event_remove_from_context(struct perf_event *event)
* can remove the event safely, if the call above did not
* succeed.
*/
- if (!list_empty(&event->group_entry))
+ if (!list_empty(&event->group_entry)) {
list_del_event(event, ctx);
+ }
spin_unlock_irq(&ctx->lock);
}
+static inline u64 perf_clock(void)
+{
+ return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+ u64 now = perf_clock();
+
+ ctx->time += now - ctx->timestamp;
+ ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+ struct perf_event_context *ctx = event->ctx;
+ u64 run_end;
+
+ if (event->state < PERF_EVENT_STATE_INACTIVE ||
+ event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+ return;
+
+ event->total_time_enabled = ctx->time - event->tstamp_enabled;
+
+ if (event->state == PERF_EVENT_STATE_INACTIVE)
+ run_end = event->tstamp_stopped;
+ else
+ run_end = ctx->time;
+
+ event->total_time_running = run_end - event->tstamp_running;
+}
+
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
@@ -1049,10 +1032,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
update_context_time(ctx);
perf_disable();
- if (ctx->nr_active) {
+ if (ctx->nr_active)
list_for_each_entry(event, &ctx->group_list, group_entry)
group_sched_out(event, cpuctx, ctx);
- }
+
perf_enable();
out:
spin_unlock(&ctx->lock);
@@ -1077,6 +1060,8 @@ static int context_equiv(struct perf_event_context *ctx1,
&& !ctx1->pin_count && !ctx2->pin_count;
}
+static void __perf_event_read(void *event);
+
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
@@ -1094,8 +1079,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
*/
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
- event->pmu->read(event);
- /* fall-through */
+ __perf_event_read(event);
+ break;
case PERF_EVENT_STATE_INACTIVE:
update_event_times(event);
@@ -1134,8 +1119,6 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
if (!ctx->nr_stat)
return;
- update_context_time(ctx);
-
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
@@ -1179,6 +1162,8 @@ void perf_event_task_sched_out(struct task_struct *task,
if (likely(!ctx || !cpuctx->task_ctx))
return;
+ update_context_time(ctx);
+
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
next_ctx = next->perf_event_ctxp;
@@ -1531,6 +1516,7 @@ static void __perf_event_read(void *info)
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
+ unsigned long flags;
/*
* If this is a task context, we need to check whether it is
@@ -1542,12 +1528,12 @@ static void __perf_event_read(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
- update_context_time(ctx);
- update_event_times(event);
- spin_unlock(&ctx->lock);
-
+ local_irq_save(flags);
+ if (ctx->is_active)
+ update_context_time(ctx);
event->pmu->read(event);
+ update_event_times(event);
+ local_irq_restore(flags);
}
static u64 perf_event_read(struct perf_event *event)
@@ -1560,13 +1546,7 @@ static u64 perf_event_read(struct perf_event *event)
smp_call_function_single(event->oncpu,
__perf_event_read, event, 1);
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
- struct perf_event_context *ctx = event->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->lock, flags);
- update_context_time(ctx);
update_event_times(event);
- spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);
@@ -1720,10 +1700,16 @@ static void free_event(struct perf_event *event)
call_rcu(&event->rcu_head, free_event_rcu);
}
-int perf_event_release_kernel(struct perf_event *event)
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
{
+ struct perf_event *event = file->private_data;
struct perf_event_context *ctx = event->ctx;
+ file->private_data = NULL;
+
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_event_remove_from_context(event);
@@ -1738,19 +1724,6 @@ int perf_event_release_kernel(struct perf_event *event)
return 0;
}
-EXPORT_SYMBOL_GPL(perf_event_release_kernel);
-
-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
-{
- struct perf_event *event = file->private_data;
-
- file->private_data = NULL;
-
- return perf_event_release_kernel(event);
-}
static int perf_event_read_size(struct perf_event *event)
{
@@ -1777,94 +1750,91 @@ static int perf_event_read_size(struct perf_event *event)
return size;
}
-u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+static u64 perf_event_read_value(struct perf_event *event)
{
struct perf_event *child;
u64 total = 0;
- *enabled = 0;
- *running = 0;
-
- mutex_lock(&event->child_mutex);
total += perf_event_read(event);
- *enabled += event->total_time_enabled +
- atomic64_read(&event->child_total_time_enabled);
- *running += event->total_time_running +
- atomic64_read(&event->child_total_time_running);
-
- list_for_each_entry(child, &event->child_list, child_list) {
+ list_for_each_entry(child, &event->child_list, child_list)
total += perf_event_read(child);
- *enabled += child->total_time_enabled;
- *running += child->total_time_running;
- }
- mutex_unlock(&event->child_mutex);
return total;
}
-EXPORT_SYMBOL_GPL(perf_event_read_value);
+
+static int perf_event_read_entry(struct perf_event *event,
+ u64 read_format, char __user *buf)
+{
+ int n = 0, count = 0;
+ u64 values[2];
+
+ values[n++] = perf_event_read_value(event);
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
+
+ count = n * sizeof(u64);
+
+ if (copy_to_user(buf, values, count))
+ return -EFAULT;
+
+ return count;
+}
static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
- int n = 0, size = 0, ret = -EFAULT;
- struct perf_event_context *ctx = leader->ctx;
- u64 values[5];
- u64 count, enabled, running;
-
- mutex_lock(&ctx->mutex);
- count = perf_event_read_value(leader, &enabled, &running);
+ int n = 0, size = 0, err = -EFAULT;
+ u64 values[3];
values[n++] = 1 + leader->nr_siblings;
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- values[n++] = enabled;
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- values[n++] = running;
- values[n++] = count;
- if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_event_id(leader);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] = leader->total_time_enabled +
+ atomic64_read(&leader->child_total_time_enabled);
+ }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] = leader->total_time_running +
+ atomic64_read(&leader->child_total_time_running);
+ }
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
- goto unlock;
-
- ret = size;
-
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- n = 0;
+ return -EFAULT;
- values[n++] = perf_event_read_value(sub, &enabled, &running);
- if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_event_id(sub);
+ err = perf_event_read_entry(leader, read_format, buf + size);
+ if (err < 0)
+ return err;
- size = n * sizeof(u64);
+ size += err;
- if (copy_to_user(buf + ret, values, size)) {
- ret = -EFAULT;
- goto unlock;
- }
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+ err = perf_event_read_entry(sub, read_format,
+ buf + size);
+ if (err < 0)
+ return err;
- ret += size;
+ size += err;
}
-unlock:
- mutex_unlock(&ctx->mutex);
- return ret;
+ return size;
}
static int perf_event_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
- u64 enabled, running;
u64 values[4];
int n = 0;
- values[n++] = perf_event_read_value(event, &enabled, &running);
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- values[n++] = enabled;
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- values[n++] = running;
+ values[n++] = perf_event_read_value(event);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] = event->total_time_enabled +
+ atomic64_read(&event->child_total_time_enabled);
+ }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] = event->total_time_running +
+ atomic64_read(&event->child_total_time_running);
+ }
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
@@ -1895,10 +1865,12 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
return -ENOSPC;
WARN_ON_ONCE(event->ctx->parent_ctx);
+ mutex_lock(&event->child_mutex);
if (read_format & PERF_FORMAT_GROUP)
ret = perf_event_read_group(event, read_format, buf);
else
ret = perf_event_read_one(event, read_format, buf);
+ mutex_unlock(&event->child_mutex);
return ret;
}
@@ -2210,7 +2182,6 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
perf_mmap_free_page((unsigned long)data->user_page);
for (i = 0; i < data->nr_pages; i++)
perf_mmap_free_page((unsigned long)data->data_pages[i]);
- kfree(data);
}
#else
@@ -2251,7 +2222,6 @@ static void perf_mmap_data_free_work(struct work_struct *work)
perf_mmap_unmark_page(base + (i * PAGE_SIZE));
vfree(base);
- kfree(data);
}
static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2345,7 +2315,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
}
if (!data->watermark)
- data->watermark = max_size / 2;
+ data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
rcu_assign_pointer(event->data, data);
@@ -2357,6 +2327,7 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
perf_mmap_data_free(data);
+ kfree(data);
}
static void perf_mmap_data_release(struct perf_event *event)
@@ -3274,10 +3245,15 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
{
struct perf_event *event;
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_task_match(event))
perf_event_task_output(event, task_event);
}
+ rcu_read_unlock();
}
static void perf_event_task_event(struct perf_task_event *task_event)
@@ -3285,11 +3261,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx = task_event->task_ctx;
- rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event);
put_cpu_var(perf_cpu_context);
+ rcu_read_lock();
if (!ctx)
ctx = rcu_dereference(task_event->task->perf_event_ctxp);
if (ctx)
@@ -3381,10 +3357,15 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx,
{
struct perf_event *event;
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_comm_match(event))
perf_event_comm_output(event, comm_event);
}
+ rcu_read_unlock();
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
@@ -3395,7 +3376,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
char comm[TASK_COMM_LEN];
memset(comm, 0, sizeof(comm));
- strlcpy(comm, comm_event->task->comm, sizeof(comm));
+ strncpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
@@ -3403,11 +3384,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
- rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
put_cpu_var(perf_cpu_context);
+ rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
* events ends up in.
@@ -3500,10 +3481,15 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx,
{
struct perf_event *event;
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_mmap_match(event, mmap_event))
perf_event_mmap_output(event, mmap_event);
}
+ rcu_read_unlock();
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -3559,11 +3545,11 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
- rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
put_cpu_var(perf_cpu_context);
+ rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
* events ends up in.
@@ -3702,11 +3688,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
perf_event_disable(event);
}
- if (event->overflow_handler)
- event->overflow_handler(event, nmi, data, regs);
- else
- perf_event_output(event, nmi, data, regs);
-
+ perf_event_output(event, nmi, data, regs);
return ret;
}
@@ -3751,16 +3733,16 @@ static u64 perf_swevent_set_period(struct perf_event *event)
return nr;
}
-static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
+static void perf_swevent_overflow(struct perf_event *event,
int nmi, struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
+ u64 overflow;
data->period = event->hw.last_period;
- if (!overflow)
- overflow = perf_swevent_set_period(event);
+ overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
@@ -3793,19 +3775,14 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
atomic64_add(nr, &event->count);
- if (!regs)
- return;
-
if (!hwc->sample_period)
return;
- if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
- return perf_swevent_overflow(event, 1, nmi, data, regs);
-
- if (atomic64_add_negative(nr, &hwc->period_left))
+ if (!regs)
return;
- perf_swevent_overflow(event, 0, nmi, data, regs);
+ if (!atomic64_add_negative(nr, &hwc->period_left))
+ perf_swevent_overflow(event, nmi, data, regs);
}
static int perf_swevent_is_counting(struct perf_event *event)
@@ -3841,20 +3818,6 @@ static int perf_swevent_is_counting(struct perf_event *event)
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data);
-static int perf_exclude_event(struct perf_event *event,
- struct pt_regs *regs)
-{
- if (regs) {
- if (event->attr.exclude_user && user_mode(regs))
- return 1;
-
- if (event->attr.exclude_kernel && !user_mode(regs))
- return 1;
- }
-
- return 0;
-}
-
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
u32 event_id,
@@ -3866,12 +3829,16 @@ static int perf_swevent_match(struct perf_event *event,
if (event->attr.type != type)
return 0;
-
if (event->attr.config != event_id)
return 0;
- if (perf_exclude_event(event, regs))
- return 0;
+ if (regs) {
+ if (event->attr.exclude_user && user_mode(regs))
+ return 0;
+
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return 0;
+ }
if (event->attr.type == PERF_TYPE_TRACEPOINT &&
!perf_tp_event_match(event, data))
@@ -3888,59 +3855,49 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
{
struct perf_event *event;
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_add(event, nr, nmi, data, regs);
}
+ rcu_read_unlock();
}
-int perf_swevent_get_recursion_context(void)
+static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
{
- struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
- int rctx;
-
if (in_nmi())
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
+ return &cpuctx->recursion[3];
- if (cpuctx->recursion[rctx]) {
- put_cpu_var(perf_cpu_context);
- return -1;
- }
+ if (in_irq())
+ return &cpuctx->recursion[2];
- cpuctx->recursion[rctx]++;
- barrier();
+ if (in_softirq())
+ return &cpuctx->recursion[1];
- return rctx;
+ return &cpuctx->recursion[0];
}
-EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
-
-void perf_swevent_put_recursion_context(int rctx)
-{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- barrier();
- cpuctx->recursion[rctx]--;
- put_cpu_var(perf_cpu_context);
-}
-EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct perf_cpu_context *cpuctx;
+ struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+ int *recursion = perf_swevent_recursion_context(cpuctx);
struct perf_event_context *ctx;
- cpuctx = &__get_cpu_var(perf_cpu_context);
- rcu_read_lock();
+ if (*recursion)
+ goto out;
+
+ (*recursion)++;
+ barrier();
+
perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
nr, nmi, data, regs);
+ rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
* events ends up in.
@@ -3949,24 +3906,23 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
if (ctx)
perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
rcu_read_unlock();
+
+ barrier();
+ (*recursion)--;
+
+out:
+ put_cpu_var(perf_cpu_context);
}
void __perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr)
{
- struct perf_sample_data data;
- int rctx;
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- return;
-
- data.addr = addr;
- data.raw = NULL;
-
- do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+ struct perf_sample_data data = {
+ .addr = addr,
+ };
- perf_swevent_put_recursion_context(rctx);
+ do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
+ &data, regs);
}
static void perf_swevent_read(struct perf_event *event)
@@ -4011,7 +3967,6 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
event->pmu->read(event);
data.addr = 0;
- data.period = event->hw.last_period;
regs = get_irq_regs();
/*
* In case we exclude kernel IPs or are somehow not in interrupt
@@ -4190,7 +4145,6 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
if (!regs)
regs = task_pt_regs(current);
- /* Trace events already protected against recursion */
do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
&data, regs);
}
@@ -4277,53 +4231,6 @@ static void perf_event_free_filter(struct perf_event *event)
#endif /* CONFIG_EVENT_PROFILE */
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-static void bp_perf_event_destroy(struct perf_event *event)
-{
- release_bp_slot(event);
-}
-
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
- int err;
- /*
- * The breakpoint is already filled if we haven't created the counter
- * through perf syscall
- * FIXME: manage to get trigerred to NULL if it comes from syscalls
- */
- if (!bp->callback)
- err = register_perf_hw_breakpoint(bp);
- else
- err = __register_perf_hw_breakpoint(bp);
- if (err)
- return ERR_PTR(err);
-
- bp->destroy = bp_perf_event_destroy;
-
- return &perf_ops_bp;
-}
-
-void perf_bp_event(struct perf_event *bp, void *data)
-{
- struct perf_sample_data sample;
- struct pt_regs *regs = data;
-
- sample.addr = bp->attr.bp_addr;
-
- if (!perf_exclude_event(bp, regs))
- perf_swevent_add(bp, 1, 1, &sample, regs);
-}
-#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
- return NULL;
-}
-
-void perf_bp_event(struct perf_event *bp, void *regs)
-{
-}
-#endif
-
atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event)
@@ -4390,7 +4297,6 @@ perf_event_alloc(struct perf_event_attr *attr,
struct perf_event_context *ctx,
struct perf_event *group_leader,
struct perf_event *parent_event,
- perf_callback_t callback,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -4433,11 +4339,6 @@ perf_event_alloc(struct perf_event_attr *attr,
event->state = PERF_EVENT_STATE_INACTIVE;
- if (!callback && parent_event)
- callback = parent_event->callback;
-
- event->callback = callback;
-
if (attr->disabled)
event->state = PERF_EVENT_STATE_OFF;
@@ -4472,11 +4373,6 @@ perf_event_alloc(struct perf_event_attr *attr,
pmu = tp_perf_event_init(event);
break;
- case PERF_TYPE_BREAKPOINT:
- pmu = bp_perf_event_init(event);
- break;
-
-
default:
break;
}
@@ -4719,7 +4615,7 @@ SYSCALL_DEFINE5(perf_event_open,
}
event = perf_event_alloc(&attr, cpu, ctx, group_leader,
- NULL, NULL, GFP_KERNEL);
+ NULL, GFP_KERNEL);
err = PTR_ERR(event);
if (IS_ERR(event))
goto err_put_context;
@@ -4767,60 +4663,6 @@ SYSCALL_DEFINE5(perf_event_open,
return err;
}
-/**
- * perf_event_create_kernel_counter
- *
- * @attr: attributes of the counter to create
- * @cpu: cpu in which the counter is bound
- * @pid: task to profile
- */
-struct perf_event *
-perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
- pid_t pid, perf_callback_t callback)
-{
- struct perf_event *event;
- struct perf_event_context *ctx;
- int err;
-
- /*
- * Get the target context (task or percpu):
- */
-
- ctx = find_get_context(pid, cpu);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto err_exit;
- }
-
- event = perf_event_alloc(attr, cpu, ctx, NULL,
- NULL, callback, GFP_KERNEL);
- if (IS_ERR(event)) {
- err = PTR_ERR(event);
- goto err_put_context;
- }
-
- event->filp = NULL;
- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
- perf_install_in_context(ctx, event, cpu);
- ++ctx->generation;
- mutex_unlock(&ctx->mutex);
-
- event->owner = current;
- get_task_struct(current);
- mutex_lock(¤t->perf_event_mutex);
- list_add_tail(&event->owner_entry, ¤t->perf_event_list);
- mutex_unlock(¤t->perf_event_mutex);
-
- return event;
-
- err_put_context:
- put_ctx(ctx);
- err_exit:
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
-
/*
* inherit a event from parent task to child task:
*/
@@ -4846,7 +4688,7 @@ inherit_event(struct perf_event *parent_event,
child_event = perf_event_alloc(&parent_event->attr,
parent_event->cpu, child_ctx,
group_leader, parent_event,
- NULL, GFP_KERNEL);
+ GFP_KERNEL);
if (IS_ERR(child_event))
return child_event;
get_ctx(child_ctx);
@@ -4864,8 +4706,6 @@ inherit_event(struct perf_event *parent_event,
if (parent_event->attr.freq)
child_event->hw.sample_period = parent_event->hw.sample_period;
- child_event->overflow_handler = parent_event->overflow_handler;
-
/*
* Link it up in the child's context:
*/
@@ -4955,6 +4795,7 @@ __perf_event_exit_task(struct perf_event *child_event,
{
struct perf_event *parent_event;
+ update_event_times(child_event);
perf_event_remove_from_context(child_event);
parent_event = child_event->parent;
@@ -5006,7 +4847,6 @@ void perf_event_exit_task(struct task_struct *child)
* the events from it.
*/
unclone_ctx(child_ctx);
- update_context_time(child_ctx);
spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c
index 93e72e5feae6..6705320784fd 100644
--- a/trunk/kernel/signal.c
+++ b/trunk/kernel/signal.c
@@ -27,8 +27,7 @@
#include
#include
#include
-#define CREATE_TRACE_POINTS
-#include <trace/events/signal.h>
+#include <trace/events/sched.h>
#include
#include
@@ -835,7 +834,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
struct sigqueue *q;
int override_rlimit;
- trace_signal_generate(sig, info, t);
+ trace_sched_signal_send(sig, t);
assert_spin_locked(&t->sighand->siglock);
@@ -897,21 +896,12 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
break;
}
} else if (!is_si_special(info)) {
- if (sig >= SIGRTMIN && info->si_code != SI_USER) {
- /*
- * Queue overflow, abort. We may abort if the
- * signal was rt and sent by user using something
- * other than kill().
- */
- trace_signal_overflow_fail(sig, group, info);
+ if (sig >= SIGRTMIN && info->si_code != SI_USER)
+ /*
+ * Queue overflow, abort. We may abort if the signal was rt
+ * and sent by user using something other than kill().
+ */
return -EAGAIN;
- } else {
- /*
- * This is a silent loss of information. We still
- * send the signal, but the *info bits are lost.
- */
- trace_signal_lose_info(sig, group, info);
- }
}
out_set:
@@ -1849,9 +1839,6 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
ka = &sighand->action[signr-1];
}
- /* Trace actually delivered signals. */
- trace_signal_deliver(signr, info, ka);
-
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
continue;
if (ka->sa.sa_handler != SIG_DFL) {
diff --git a/trunk/kernel/trace/Kconfig b/trunk/kernel/trace/Kconfig
index d006554888dc..f05671609a89 100644
--- a/trunk/kernel/trace/Kconfig
+++ b/trunk/kernel/trace/Kconfig
@@ -339,27 +339,6 @@ config POWER_TRACER
power management decisions, specifically the C-state and P-state
behavior.
-config KSYM_TRACER
- bool "Trace read and write access on kernel memory locations"
- depends on HAVE_HW_BREAKPOINT
- select TRACING
- help
- This tracer helps find read and write operations on any given kernel
- symbol i.e. /proc/kallsyms.
-
-config PROFILE_KSYM_TRACER
- bool "Profile all kernel memory accesses on 'watched' variables"
- depends on KSYM_TRACER
- help
- This tracer profiles kernel accesses on variables watched through the
- ksym tracer ftrace plugin. Depending upon the hardware, all read
- and write operations on kernel variables can be monitored for
- accesses.
-
- The results will be displayed in:
- /debugfs/tracing/profile_ksym
-
- Say N if unsure.
config STACK_TRACER
bool "Trace max stack"
diff --git a/trunk/kernel/trace/Makefile b/trunk/kernel/trace/Makefile
index cd9ecd89ec77..edc3a3cca1a1 100644
--- a/trunk/kernel/trace/Makefile
+++ b/trunk/kernel/trace/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
-obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
obj-$(CONFIG_EVENT_TRACING) += power-traces.o
libftrace-y := ftrace.o
diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h
index 1d7f4830a80d..b4e4212e66d7 100644
--- a/trunk/kernel/trace/trace.h
+++ b/trunk/kernel/trace/trace.h
@@ -11,7 +11,6 @@
#include
#include
#include
-#include
#include
#include
@@ -38,7 +37,6 @@ enum trace_type {
TRACE_KMEM_ALLOC,
TRACE_KMEM_FREE,
TRACE_BLK,
- TRACE_KSYM,
__TRACE_LAST_TYPE,
};
@@ -100,7 +98,7 @@ struct syscall_trace_enter {
struct syscall_trace_exit {
struct trace_entry ent;
int nr;
- long ret;
+ unsigned long ret;
};
struct kprobe_trace_entry {
@@ -234,7 +232,6 @@ extern void __ftrace_bad_type(void);
TRACE_KMEM_ALLOC); \
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
TRACE_KMEM_FREE); \
- IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
__ftrace_bad_type(); \
} while (0)
@@ -390,8 +387,6 @@ int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
-extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
-
extern unsigned long nsecs_to_usecs(unsigned long nsecs);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -466,8 +461,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
struct trace_array *tr);
-extern int trace_selftest_startup_ksym(struct tracer *trace,
- struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
diff --git a/trunk/kernel/trace/trace_entries.h b/trunk/kernel/trace/trace_entries.h
index c16a08f399df..ead3d724599d 100644
--- a/trunk/kernel/trace/trace_entries.h
+++ b/trunk/kernel/trace/trace_entries.h
@@ -364,19 +364,3 @@ FTRACE_ENTRY(kmem_free, kmemtrace_free_entry,
F_printk("type:%u call_site:%lx ptr:%p",
__entry->type_id, __entry->call_site, __entry->ptr)
);
-
-FTRACE_ENTRY(ksym_trace, ksym_trace_entry,
-
- TRACE_KSYM,
-
- F_STRUCT(
- __field( unsigned long, ip )
- __field( unsigned char, type )
- __array( char , cmd, TASK_COMM_LEN )
- __field( unsigned long, addr )
- ),
-
- F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s",
- (void *)__entry->ip, (unsigned int)__entry->type,
- (void *)__entry->addr, __entry->cmd)
-);
diff --git a/trunk/kernel/trace/trace_event_profile.c b/trunk/kernel/trace/trace_event_profile.c
index d9c60f80aa0d..e0d351b01f5a 100644
--- a/trunk/kernel/trace/trace_event_profile.c
+++ b/trunk/kernel/trace/trace_event_profile.c
@@ -9,33 +9,31 @@
#include "trace.h"
-char *perf_trace_buf;
+struct perf_trace_buf *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);
-char *perf_trace_buf_nmi;
+struct perf_trace_buf *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
-
/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
- char *buf;
+ struct perf_trace_buf *buf;
int ret = -ENOMEM;
if (atomic_inc_return(&event->profile_count))
return 0;
if (!total_profile_count) {
- buf = (char *)alloc_percpu(perf_trace_t);
+ buf = alloc_percpu(struct perf_trace_buf);
if (!buf)
goto fail_buf;
rcu_assign_pointer(perf_trace_buf, buf);
- buf = (char *)alloc_percpu(perf_trace_t);
+ buf = alloc_percpu(struct perf_trace_buf);
if (!buf)
goto fail_buf_nmi;
@@ -81,7 +79,7 @@ int ftrace_profile_enable(int event_id)
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
- char *buf, *nmi_buf;
+ struct perf_trace_buf *buf, *nmi_buf;
if (!atomic_add_negative(-1, &event->profile_count))
return;
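
Note: the trace_event_profile.c hunks above switch the profile buffers from flat char arrays to a dedicated per-cpu structure whose definition is not shown in this diff. A minimal sketch of the layout the callers appear to rely on follows; the buf and recursion member names are taken from the later hunks, while the array size constant and the overall shape are assumptions, not the authoritative definition:

struct perf_trace_buf {
	char	buf[FTRACE_MAX_PROFILE_SIZE];	/* scratch area for one raw sample */
	int	recursion;			/* per-cpu re-entrancy guard */
};

Each CPU gets one instance via alloc_percpu(), and the recursion counter takes over the role previously played by the perf_swevent recursion context.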
diff --git a/trunk/kernel/trace/trace_kprobe.c b/trunk/kernel/trace/trace_kprobe.c
index aff5f80b59b8..3696476f307d 100644
--- a/trunk/kernel/trace/trace_kprobe.c
+++ b/trunk/kernel/trace/trace_kprobe.c
@@ -243,11 +243,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
ret = snprintf(buf, n, "@0x%p", ff->data);
else if (ff->func == fetch_symbol) {
struct symbol_cache *sc = ff->data;
- if (sc->offset)
- ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
- sc->offset);
- else
- ret = snprintf(buf, n, "@%s", sc->symbol);
+ ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
} else if (ff->func == fetch_retvalue)
ret = snprintf(buf, n, "$retval");
else if (ff->func == fetch_stack_address)
@@ -483,8 +479,7 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
return ret;
}
-/* Recursive argument parser */
-static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
+static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
int ret = 0;
unsigned long param;
@@ -544,7 +539,7 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
if (!id)
return -ENOMEM;
id->offset = offset;
- ret = __parse_probe_arg(arg, &id->orig, is_return);
+ ret = parse_probe_arg(arg, &id->orig, is_return);
if (ret)
kfree(id);
else {
@@ -561,16 +556,6 @@ static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
return ret;
}
-/* String length checking wrapper */
-static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
-{
- if (strlen(arg) > MAX_ARGSTR_LEN) {
- pr_info("Argument is too long.: %s\n", arg);
- return -ENOSPC;
- }
- return __parse_probe_arg(arg, ff, is_return);
-}
-
/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
struct probe_arg *args, int narg)
@@ -709,23 +694,20 @@ static int create_trace_probe(int argc, char **argv)
}
tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
- if (!tp->args[i].name) {
- pr_info("Failed to allocate argument%d name '%s'.\n",
- i, argv[i]);
- ret = -ENOMEM;
- goto error;
- }
/* Parse fetch argument */
+ if (strlen(arg) > MAX_ARGSTR_LEN) {
+ pr_info("Argument%d(%s) is too long.\n", i, arg);
+ ret = -ENOSPC;
+ goto error;
+ }
ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
if (ret) {
pr_info("Parse error at argument%d. (%d)\n", i, ret);
- kfree(tp->args[i].name);
goto error;
}
-
- tp->nr_args++;
}
+ tp->nr_args = i;
ret = register_trace_probe(tp);
if (ret)
@@ -776,14 +758,12 @@ static int probes_seq_show(struct seq_file *m, void *v)
char buf[MAX_ARGSTR_LEN + 1];
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
- seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
+ seq_printf(m, ":%s", tp->call.name);
- if (!tp->symbol)
- seq_printf(m, " 0x%p", tp->rp.kp.addr);
- else if (tp->rp.kp.offset)
+ if (tp->symbol)
seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
else
- seq_printf(m, " %s", probe_symbol(tp));
+ seq_printf(m, " 0x%p", tp->rp.kp.addr);
for (i = 0; i < tp->nr_args; i++) {
ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
@@ -1228,12 +1208,11 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
struct kprobe_trace_entry *entry;
+ struct perf_trace_buf *trace_buf;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
- char *trace_buf;
char *raw_data;
- int rctx;
pc = preempt_count();
__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1248,11 +1227,6 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
* This also protects the rcu read side
*/
local_irq_save(irq_flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
__cpu = smp_processor_id();
if (in_nmi())
@@ -1263,7 +1237,18 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
if (!trace_buf)
goto end;
- raw_data = per_cpu_ptr(trace_buf, __cpu);
+ trace_buf = per_cpu_ptr(trace_buf, __cpu);
+
+ if (trace_buf->recursion++)
+ goto end_recursion;
+
+ /*
+ * Make recursion update visible before entering perf_tp_event
+ * so that we protect from perf recursions.
+ */
+ barrier();
+
+ raw_data = trace_buf->buf;
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1278,9 +1263,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ip, 1, entry, size);
-end:
- perf_swevent_put_recursion_context(rctx);
end_recursion:
+ trace_buf->recursion--;
+end:
local_irq_restore(irq_flags);
return 0;
@@ -1293,12 +1278,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
struct kretprobe_trace_entry *entry;
+ struct perf_trace_buf *trace_buf;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
- char *trace_buf;
char *raw_data;
- int rctx;
pc = preempt_count();
__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1313,11 +1297,6 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
* This also protects the rcu read side
*/
local_irq_save(irq_flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
__cpu = smp_processor_id();
if (in_nmi())
@@ -1328,7 +1307,18 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
if (!trace_buf)
goto end;
- raw_data = per_cpu_ptr(trace_buf, __cpu);
+ trace_buf = per_cpu_ptr(trace_buf, __cpu);
+
+ if (trace_buf->recursion++)
+ goto end_recursion;
+
+ /*
+ * Make recursion update visible before entering perf_tp_event
+ * so that we protect from perf recursions.
+ */
+ barrier();
+
+ raw_data = trace_buf->buf;
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1344,9 +1334,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-end:
- perf_swevent_put_recursion_context(rctx);
end_recursion:
+ trace_buf->recursion--;
+end:
local_irq_restore(irq_flags);
return 0;
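
Both kprobe profile handlers above guard the per-cpu buffer with that recursion counter before calling perf_tp_event(). Stripped of the kprobe-specific record building, the control flow is roughly the following sketch (it assumes the struct perf_trace_buf layout sketched earlier and is an illustration of the pattern in the added comments, not the final code):

static int profile_handler_sketch(void)
{
	struct perf_trace_buf *tbuf;
	unsigned long flags;

	/* Disabling IRQs protects the per-cpu buffer and acts as the RCU read side. */
	local_irq_save(flags);

	tbuf = in_nmi() ? rcu_dereference(perf_trace_buf_nmi)
			: rcu_dereference(perf_trace_buf);
	if (!tbuf)
		goto out;

	tbuf = per_cpu_ptr(tbuf, smp_processor_id());
	if (tbuf->recursion++)		/* already active on this CPU: bail out */
		goto out_recursion;

	/* Publish the recursion update before perf_tp_event() can recurse. */
	barrier();

	/* ... build the raw record in tbuf->buf and call perf_tp_event() ... */

out_recursion:
	tbuf->recursion--;
out:
	local_irq_restore(flags);
	return 0;
}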
diff --git a/trunk/kernel/trace/trace_ksym.c b/trunk/kernel/trace/trace_ksym.c
deleted file mode 100644
index ddfa0fd43bc0..000000000000
--- a/trunk/kernel/trace/trace_ksym.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * trace_ksym.c - Kernel Symbol Tracer
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2009
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "trace_output.h"
-#include "trace_stat.h"
-#include "trace.h"
-
-#include
-#include
-
-/*
- * For now, let us restrict the no. of symbols traced simultaneously to number
- * of available hardware breakpoint registers.
- */
-#define KSYM_TRACER_MAX HBP_NUM
-
-#define KSYM_TRACER_OP_LEN 3 /* rw- */
-
-struct trace_ksym {
- struct perf_event **ksym_hbp;
- struct perf_event_attr attr;
-#ifdef CONFIG_PROFILE_KSYM_TRACER
- unsigned long counter;
-#endif
- struct hlist_node ksym_hlist;
-};
-
-static struct trace_array *ksym_trace_array;
-
-static unsigned int ksym_filter_entry_count;
-static unsigned int ksym_tracing_enabled;
-
-static HLIST_HEAD(ksym_filter_head);
-
-static DEFINE_MUTEX(ksym_tracer_mutex);
-
-#ifdef CONFIG_PROFILE_KSYM_TRACER
-
-#define MAX_UL_INT 0xffffffff
-
-void ksym_collect_stats(unsigned long hbp_hit_addr)
-{
- struct hlist_node *node;
- struct trace_ksym *entry;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
- if ((entry->attr.bp_addr == hbp_hit_addr) &&
- (entry->counter <= MAX_UL_INT)) {
- entry->counter++;
- break;
- }
- }
- rcu_read_unlock();
-}
-#endif /* CONFIG_PROFILE_KSYM_TRACER */
-
-void ksym_hbp_handler(struct perf_event *hbp, void *data)
-{
- struct ring_buffer_event *event;
- struct ksym_trace_entry *entry;
- struct pt_regs *regs = data;
- struct ring_buffer *buffer;
- int pc;
-
- if (!ksym_tracing_enabled)
- return;
-
- buffer = ksym_trace_array->buffer;
-
- pc = preempt_count();
-
- event = trace_buffer_lock_reserve(buffer, TRACE_KSYM,
- sizeof(*entry), 0, pc);
- if (!event)
- return;
-
- entry = ring_buffer_event_data(event);
- entry->ip = instruction_pointer(regs);
- entry->type = hw_breakpoint_type(hbp);
- entry->addr = hw_breakpoint_addr(hbp);
- strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);
-
-#ifdef CONFIG_PROFILE_KSYM_TRACER
- ksym_collect_stats(hw_breakpoint_addr(hbp));
-#endif /* CONFIG_PROFILE_KSYM_TRACER */
-
- trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-/* Valid access types are represented as
- *
- * rw- : Set Read/Write Access Breakpoint
- * -w- : Set Write Access Breakpoint
- * --- : Clear Breakpoints
- * --x : Set Execution Break points (Not available yet)
- *
- */
-static int ksym_trace_get_access_type(char *str)
-{
- int access = 0;
-
- if (str[0] == 'r')
- access |= HW_BREAKPOINT_R;
-
- if (str[1] == 'w')
- access |= HW_BREAKPOINT_W;
-
- if (str[2] == 'x')
- access |= HW_BREAKPOINT_X;
-
- switch (access) {
- case HW_BREAKPOINT_R:
- case HW_BREAKPOINT_W:
- case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- return access;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * There can be several possible malformed requests and we attempt to capture
- * all of them. We enumerate some of the rules
- * 1. We will not allow kernel symbols with ':' since it is used as a delimiter.
- * i.e. multiple ':' symbols disallowed. Possible uses are of the form
- * ::.
- * 2. No delimiter symbol ':' in the input string
- * 3. Spurious operator symbols or symbols not in their respective positions
- * 4. :--- i.e. clear breakpoint request when ksym_name not in file
- * 5. Kernel symbol not a part of /proc/kallsyms
- * 6. Duplicate requests
- */
-static int parse_ksym_trace_str(char *input_string, char **ksymname,
- unsigned long *addr)
-{
- int ret;
-
- *ksymname = strsep(&input_string, ":");
- *addr = kallsyms_lookup_name(*ksymname);
-
- /* Check for malformed request: (2), (1) and (5) */
- if ((!input_string) ||
- (strlen(input_string) != KSYM_TRACER_OP_LEN) ||
- (*addr == 0))
-		return -EINVAL;
-
- ret = ksym_trace_get_access_type(input_string);
-
- return ret;
-}
-
-int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
-{
- struct trace_ksym *entry;
- int ret = -ENOMEM;
-
- if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
- printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
- " new requests for tracing can be accepted now.\n",
- KSYM_TRACER_MAX);
- return -ENOSPC;
- }
-
- entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- hw_breakpoint_init(&entry->attr);
-
- entry->attr.bp_type = op;
- entry->attr.bp_addr = addr;
- entry->attr.bp_len = HW_BREAKPOINT_LEN_4;
-
- ret = -EAGAIN;
- entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr,
- ksym_hbp_handler);
-
- if (IS_ERR(entry->ksym_hbp)) {
- ret = PTR_ERR(entry->ksym_hbp);
- printk(KERN_INFO "ksym_tracer request failed. Try again"
- " later!!\n");
- goto err;
- }
-
- hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
- ksym_filter_entry_count++;
-
- return 0;
-
-err:
- kfree(entry);
-
- return ret;
-}
-
-static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct trace_ksym *entry;
- struct hlist_node *node;
- struct trace_seq *s;
- ssize_t cnt = 0;
- int ret;
-
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
- trace_seq_init(s);
-
- mutex_lock(&ksym_tracer_mutex);
-
- hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
- ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr);
- if (entry->attr.bp_type == HW_BREAKPOINT_R)
- ret = trace_seq_puts(s, "r--\n");
- else if (entry->attr.bp_type == HW_BREAKPOINT_W)
- ret = trace_seq_puts(s, "-w-\n");
- else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R))
- ret = trace_seq_puts(s, "rw-\n");
- WARN_ON_ONCE(!ret);
- }
-
- cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
-
- mutex_unlock(&ksym_tracer_mutex);
-
- kfree(s);
-
- return cnt;
-}
-
-static void __ksym_trace_reset(void)
-{
- struct trace_ksym *entry;
- struct hlist_node *node, *node1;
-
- mutex_lock(&ksym_tracer_mutex);
- hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
- ksym_hlist) {
- unregister_wide_hw_breakpoint(entry->ksym_hbp);
- ksym_filter_entry_count--;
- hlist_del_rcu(&(entry->ksym_hlist));
- synchronize_rcu();
- kfree(entry);
- }
- mutex_unlock(&ksym_tracer_mutex);
-}
-
-static ssize_t ksym_trace_filter_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct trace_ksym *entry;
- struct hlist_node *node;
- char *input_string, *ksymname = NULL;
- unsigned long ksym_addr = 0;
- int ret, op, changed = 0;
-
- input_string = kzalloc(count + 1, GFP_KERNEL);
- if (!input_string)
- return -ENOMEM;
-
- if (copy_from_user(input_string, buffer, count)) {
- kfree(input_string);
- return -EFAULT;
- }
- input_string[count] = '\0';
-
- strstrip(input_string);
-
- /*
- * Clear all breakpoints if:
- * 1: echo > ksym_trace_filter
- * 2: echo 0 > ksym_trace_filter
- * 3: echo "*:---" > ksym_trace_filter
- */
- if (!input_string[0] || !strcmp(input_string, "0") ||
- !strcmp(input_string, "*:---")) {
- __ksym_trace_reset();
- kfree(input_string);
- return count;
- }
-
- ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
- if (ret < 0) {
- kfree(input_string);
- return ret;
- }
-
- mutex_lock(&ksym_tracer_mutex);
-
- ret = -EINVAL;
- hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
- if (entry->attr.bp_addr == ksym_addr) {
- /* Check for malformed request: (6) */
- if (entry->attr.bp_type != op)
- changed = 1;
- else
- goto out;
- break;
- }
- }
- if (changed) {
- unregister_wide_hw_breakpoint(entry->ksym_hbp);
- entry->attr.bp_type = op;
- ret = 0;
- if (op > 0) {
- entry->ksym_hbp =
- register_wide_hw_breakpoint(&entry->attr,
- ksym_hbp_handler);
- if (IS_ERR(entry->ksym_hbp))
- ret = PTR_ERR(entry->ksym_hbp);
- else
- goto out;
- }
- /* Error or "symbol:---" case: drop it */
- ksym_filter_entry_count--;
- hlist_del_rcu(&(entry->ksym_hlist));
- synchronize_rcu();
- kfree(entry);
- goto out;
- } else {
- /* Check for malformed request: (4) */
- if (op == 0)
- goto out;
- ret = process_new_ksym_entry(ksymname, op, ksym_addr);
- }
-out:
- mutex_unlock(&ksym_tracer_mutex);
-
- kfree(input_string);
-
- if (!ret)
- ret = count;
- return ret;
-}
-
-static const struct file_operations ksym_tracing_fops = {
- .open = tracing_open_generic,
- .read = ksym_trace_filter_read,
- .write = ksym_trace_filter_write,
-};
-
-static void ksym_trace_reset(struct trace_array *tr)
-{
- ksym_tracing_enabled = 0;
- __ksym_trace_reset();
-}
-
-static int ksym_trace_init(struct trace_array *tr)
-{
- int cpu, ret = 0;
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
- ksym_tracing_enabled = 1;
- ksym_trace_array = tr;
-
- return ret;
-}
-
-static void ksym_trace_print_header(struct seq_file *m)
-{
- seq_puts(m,
- "# TASK-PID CPU# Symbol "
- "Type Function\n");
- seq_puts(m,
- "# | | | "
- " | |\n");
-}
-
-static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
-{
- struct trace_entry *entry = iter->ent;
- struct trace_seq *s = &iter->seq;
- struct ksym_trace_entry *field;
- char str[KSYM_SYMBOL_LEN];
- int ret;
-
- if (entry->type != TRACE_KSYM)
- return TRACE_TYPE_UNHANDLED;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd,
- entry->pid, iter->cpu, (char *)field->addr);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- switch (field->type) {
- case HW_BREAKPOINT_R:
- ret = trace_seq_printf(s, " R ");
- break;
- case HW_BREAKPOINT_W:
- ret = trace_seq_printf(s, " W ");
- break;
- case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
- ret = trace_seq_printf(s, " RW ");
- break;
- default:
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- sprint_symbol(str, field->ip);
- ret = trace_seq_printf(s, "%s\n", str);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- return TRACE_TYPE_HANDLED;
-}
-
-struct tracer ksym_tracer __read_mostly =
-{
- .name = "ksym_tracer",
- .init = ksym_trace_init,
- .reset = ksym_trace_reset,
-#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_ksym,
-#endif
- .print_header = ksym_trace_print_header,
- .print_line = ksym_trace_output
-};
-
-__init static int init_ksym_trace(void)
-{
- struct dentry *d_tracer;
- struct dentry *entry;
-
- d_tracer = tracing_init_dentry();
- ksym_filter_entry_count = 0;
-
- entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer,
- NULL, &ksym_tracing_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'ksym_trace_filter' file\n");
-
- return register_tracer(&ksym_tracer);
-}
-device_initcall(init_ksym_trace);
-
-
-#ifdef CONFIG_PROFILE_KSYM_TRACER
-static int ksym_tracer_stat_headers(struct seq_file *m)
-{
- seq_puts(m, " Access Type ");
- seq_puts(m, " Symbol Counter\n");
- seq_puts(m, " ----------- ");
- seq_puts(m, " ------ -------\n");
- return 0;
-}
-
-static int ksym_tracer_stat_show(struct seq_file *m, void *v)
-{
- struct hlist_node *stat = v;
- struct trace_ksym *entry;
- int access_type = 0;
- char fn_name[KSYM_NAME_LEN];
-
- entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
-
- access_type = entry->attr.bp_type;
-
- switch (access_type) {
- case HW_BREAKPOINT_R:
- seq_puts(m, " R ");
- break;
- case HW_BREAKPOINT_W:
- seq_puts(m, " W ");
- break;
- case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
- seq_puts(m, " RW ");
- break;
- default:
- seq_puts(m, " NA ");
- }
-
- if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0)
- seq_printf(m, " %-36s", fn_name);
- else
- seq_printf(m, " %-36s", "");
- seq_printf(m, " %15lu\n", entry->counter);
-
- return 0;
-}
-
-static void *ksym_tracer_stat_start(struct tracer_stat *trace)
-{
- return ksym_filter_head.first;
-}
-
-static void *
-ksym_tracer_stat_next(void *v, int idx)
-{
- struct hlist_node *stat = v;
-
- return stat->next;
-}
-
-static struct tracer_stat ksym_tracer_stats = {
- .name = "ksym_tracer",
- .stat_start = ksym_tracer_stat_start,
- .stat_next = ksym_tracer_stat_next,
- .stat_headers = ksym_tracer_stat_headers,
- .stat_show = ksym_tracer_stat_show
-};
-
-__init static int ksym_tracer_stat_init(void)
-{
- int ret;
-
- ret = register_stat_tracer(&ksym_tracer_stats);
- if (ret) {
- printk(KERN_WARNING "Warning: could not register "
- "ksym tracer stats\n");
- return 1;
- }
-
- return 0;
-}
-fs_initcall(ksym_tracer_stat_init);
-#endif /* CONFIG_PROFILE_KSYM_TRACER */
diff --git a/trunk/kernel/trace/trace_selftest.c b/trunk/kernel/trace/trace_selftest.c
index dc98309e839a..d2cdbabb4ead 100644
--- a/trunk/kernel/trace/trace_selftest.c
+++ b/trunk/kernel/trace/trace_selftest.c
@@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
case TRACE_GRAPH_ENT:
case TRACE_GRAPH_RET:
case TRACE_HW_BRANCHES:
- case TRACE_KSYM:
return 1;
}
return 0;
@@ -809,57 +808,3 @@ trace_selftest_startup_hw_branches(struct tracer *trace,
return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */
-
-#ifdef CONFIG_KSYM_TRACER
-static int ksym_selftest_dummy;
-
-int
-trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
-{
- unsigned long count;
- int ret;
-
- /* start the tracing */
- ret = tracer_init(trace, tr);
- if (ret) {
- warn_failed_init_tracer(trace, ret);
- return ret;
- }
-
- ksym_selftest_dummy = 0;
- /* Register the read-write tracing request */
-
- ret = process_new_ksym_entry("ksym_selftest_dummy",
- HW_BREAKPOINT_R | HW_BREAKPOINT_W,
- (unsigned long)(&ksym_selftest_dummy));
-
- if (ret < 0) {
- printk(KERN_CONT "ksym_trace read-write startup test failed\n");
- goto ret_path;
- }
- /* Perform a read and a write operation over the dummy variable to
- * trigger the tracer
- */
- if (ksym_selftest_dummy == 0)
- ksym_selftest_dummy++;
-
- /* stop the tracing. */
- tracing_stop();
- /* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
- trace->reset(tr);
- tracing_start();
-
- /* read & write operations - one each is performed on the dummy variable
- * triggering two entries in the trace buffer
- */
- if (!ret && count != 2) {
- printk(KERN_CONT "Ksym tracer startup test failed");
- ret = -1;
- }
-
-ret_path:
- return ret;
-}
-#endif /* CONFIG_KSYM_TRACER */
-
diff --git a/trunk/kernel/trace/trace_syscalls.c b/trunk/kernel/trace/trace_syscalls.c
index 57501d90096a..51213b0aa81b 100644
--- a/trunk/kernel/trace/trace_syscalls.c
+++ b/trunk/kernel/trace/trace_syscalls.c
@@ -51,6 +51,32 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
return syscalls_metadata[nr];
}
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+
+ for (i = 0; i < NR_syscalls; i++) {
+ if (syscalls_metadata[i]) {
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ }
+ }
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
@@ -67,7 +93,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
if (!entry)
goto end;
- if (entry->enter_event->id != ent->type) {
+ if (entry->enter_id != ent->type) {
WARN_ON_ONCE(1);
goto end;
}
@@ -122,7 +148,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
return TRACE_TYPE_HANDLED;
}
- if (entry->exit_event->id != ent->type) {
+ if (entry->exit_id != ent->type) {
WARN_ON_ONCE(1);
return TRACE_TYPE_UNHANDLED;
}
@@ -146,11 +172,18 @@ extern char *__bad_type_size(void);
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
int i;
+ int nr;
int ret;
- struct syscall_metadata *entry = call->data;
+ struct syscall_metadata *entry;
struct syscall_trace_enter trace;
int offset = offsetof(struct syscall_trace_enter, args);
+ nr = syscall_name_to_nr(call->data);
+ entry = syscall_nr_to_meta(nr);
+
+ if (!entry)
+ return 0;
+
ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
"\tsigned:%u;\n",
SYSCALL_FIELD(int, nr));
@@ -212,16 +245,19 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
struct syscall_trace_enter trace;
- struct syscall_metadata *meta = call->data;
+ struct syscall_metadata *meta;
int ret;
+ int nr;
int i;
int offset = offsetof(typeof(trace), args);
- ret = trace_define_common_fields(call);
- if (ret)
- return ret;
+ nr = syscall_name_to_nr(call->data);
+ meta = syscall_nr_to_meta(nr);
- ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
+ if (!meta)
+ return 0;
+
+ ret = trace_define_common_fields(call);
if (ret)
return ret;
@@ -245,10 +281,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
- if (ret)
- return ret;
-
ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
FILTER_OTHER);
@@ -276,8 +308,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
- event = trace_current_buffer_lock_reserve(&buffer,
- sys_data->enter_event->id, size, 0, 0);
+ event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
+ size, 0, 0);
if (!event)
return;
@@ -308,8 +340,8 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
if (!sys_data)
return;
- event = trace_current_buffer_lock_reserve(&buffer,
- sys_data->exit_event->id, sizeof(*entry), 0, 0);
+ event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
+ sizeof(*entry), 0, 0);
if (!event)
return;
@@ -326,8 +358,10 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
{
int ret = 0;
int num;
+ char *name;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ name = (char *)call->data;
+ num = syscall_name_to_nr(name);
if (num < 0 || num >= NR_syscalls)
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
@@ -347,8 +381,10 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
int num;
+ char *name;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ name = (char *)call->data;
+ num = syscall_name_to_nr(name);
if (num < 0 || num >= NR_syscalls)
return;
mutex_lock(&syscall_trace_lock);
@@ -363,8 +399,10 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
{
int ret = 0;
int num;
+ char *name;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ name = call->data;
+ num = syscall_name_to_nr(name);
if (num < 0 || num >= NR_syscalls)
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
@@ -384,8 +422,10 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
int num;
+ char *name;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ name = call->data;
+ num = syscall_name_to_nr(name);
if (num < 0 || num >= NR_syscalls)
return;
mutex_lock(&syscall_trace_lock);
@@ -396,17 +436,13 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
-int init_syscall_trace(struct ftrace_event_call *call)
-{
- int id;
+struct trace_event event_syscall_enter = {
+ .trace = print_syscall_enter,
+};
- id = register_ftrace_event(call->event);
- if (!id)
- return -ENODEV;
- call->id = id;
- INIT_LIST_HEAD(&call->fields);
- return 0;
-}
+struct trace_event event_syscall_exit = {
+ .trace = print_syscall_exit,
+};
int __init init_ftrace_syscalls(void)
{
@@ -424,10 +460,6 @@ int __init init_ftrace_syscalls(void)
for (i = 0; i < NR_syscalls; i++) {
addr = arch_syscall_addr(i);
meta = find_syscall_meta(addr);
- if (!meta)
- continue;
-
- meta->syscall_nr = i;
syscalls_metadata[i] = meta;
}
@@ -445,12 +477,11 @@ static int sys_prof_refcount_exit;
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
struct syscall_metadata *sys_data;
+ struct perf_trace_buf *trace_buf;
struct syscall_trace_enter *rec;
unsigned long flags;
- char *trace_buf;
char *raw_data;
int syscall_nr;
- int rctx;
int size;
int cpu;
@@ -474,42 +505,54 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
/* Protect the per cpu buffer, begin the rcu read side */
local_irq_save(flags);
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
cpu = smp_processor_id();
- trace_buf = rcu_dereference(perf_trace_buf);
+ if (in_nmi())
+ trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ else
+ trace_buf = rcu_dereference(perf_trace_buf);
if (!trace_buf)
goto end;
- raw_data = per_cpu_ptr(trace_buf, cpu);
+ trace_buf = per_cpu_ptr(trace_buf, cpu);
+
+ if (trace_buf->recursion++)
+ goto end_recursion;
+
+ /*
+ * Make recursion update visible before entering perf_tp_event
+ * so that we protect from perf recursions.
+ */
+ barrier();
+
+ raw_data = trace_buf->buf;
/* zero the dead bytes from align to not leak stack to user */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
rec = (struct syscall_trace_enter *) raw_data;
tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->enter_event->id;
+ rec->ent.type = sys_data->enter_id;
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
- perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
+ perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
-end:
- perf_swevent_put_recursion_context(rctx);
end_recursion:
+ trace_buf->recursion--;
+end:
local_irq_restore(flags);
}
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int reg_prof_syscall_enter(char *name)
{
int ret = 0;
int num;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
mutex_lock(&syscall_trace_lock);
if (!sys_prof_refcount_enter)
@@ -525,11 +568,13 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void unreg_prof_syscall_enter(char *name)
{
int num;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
mutex_lock(&syscall_trace_lock);
sys_prof_refcount_enter--;
@@ -543,11 +588,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec;
+ struct perf_trace_buf *trace_buf;
unsigned long flags;
int syscall_nr;
- char *trace_buf;
char *raw_data;
- int rctx;
int size;
int cpu;
@@ -573,19 +617,28 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
/* Protect the per cpu buffer, begin the rcu read side */
local_irq_save(flags);
-
- rctx = perf_swevent_get_recursion_context();
- if (rctx < 0)
- goto end_recursion;
-
cpu = smp_processor_id();
- trace_buf = rcu_dereference(perf_trace_buf);
+ if (in_nmi())
+ trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ else
+ trace_buf = rcu_dereference(perf_trace_buf);
if (!trace_buf)
goto end;
- raw_data = per_cpu_ptr(trace_buf, cpu);
+ trace_buf = per_cpu_ptr(trace_buf, cpu);
+
+ if (trace_buf->recursion++)
+ goto end_recursion;
+
+ /*
+ * Make recursion update visible before entering perf_tp_event
+ * so that we protect from perf recursions.
+ */
+ barrier();
+
+ raw_data = trace_buf->buf;
/* zero the dead bytes from align to not leak stack to user */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -593,24 +646,26 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
rec = (struct syscall_trace_exit *)raw_data;
tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->exit_event->id;
+ rec->ent.type = sys_data->exit_id;
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
- perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
+ perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
-end:
- perf_swevent_put_recursion_context(rctx);
end_recursion:
+ trace_buf->recursion--;
+end:
local_irq_restore(flags);
}
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int reg_prof_syscall_exit(char *name)
{
int ret = 0;
int num;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
mutex_lock(&syscall_trace_lock);
if (!sys_prof_refcount_exit)
@@ -626,11 +681,13 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void unreg_prof_syscall_exit(char *name)
{
int num;
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
mutex_lock(&syscall_trace_lock);
sys_prof_refcount_exit--;
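
The trace_syscalls.c changes above stop caching a metadata pointer in call->data and instead resolve it by name through syscall_name_to_nr() and syscall_nr_to_meta(). For orientation, the metadata fields those hunks touch look roughly like this hedged sketch; the real struct syscall_metadata lives in the syscall tracing headers and may carry more members, and the *_sketch name is ours:

/* Illustrative subset only; not the authoritative definition. */
struct syscall_metadata_sketch {
	const char	*name;		/* compared by syscall_name_to_nr() */
	int		nb_args;	/* number of arguments recorded per event */
	int		enter_id;	/* filled in by set_syscall_enter_id() */
	int		exit_id;	/* filled in by set_syscall_exit_id() */
};

reg_event_syscall_enter() and its siblings then translate call->data (the syscall name string) into a syscall number before touching the per-syscall enable state.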
diff --git a/trunk/samples/Kconfig b/trunk/samples/Kconfig
index e4be84ac3d38..b92bde3c6a89 100644
--- a/trunk/samples/Kconfig
+++ b/trunk/samples/Kconfig
@@ -40,11 +40,5 @@ config SAMPLE_KRETPROBES
default m
depends on SAMPLE_KPROBES && KRETPROBES
-config SAMPLE_HW_BREAKPOINT
- tristate "Build kernel hardware breakpoint examples -- loadable module only"
- depends on HAVE_HW_BREAKPOINT && m
- help
- This builds kernel hardware breakpoint example modules.
-
endif # SAMPLES
diff --git a/trunk/samples/Makefile b/trunk/samples/Makefile
index 0f15e6d77fd6..43343a03b1f4 100644
--- a/trunk/samples/Makefile
+++ b/trunk/samples/Makefile
@@ -1,4 +1,3 @@
# Makefile for Linux samples code
-obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \
- hw_breakpoint/
+obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/
diff --git a/trunk/samples/hw_breakpoint/Makefile b/trunk/samples/hw_breakpoint/Makefile
deleted file mode 100644
index 0f5c31c2fc47..000000000000
--- a/trunk/samples/hw_breakpoint/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMPLE_HW_BREAKPOINT) += data_breakpoint.o
diff --git a/trunk/samples/hw_breakpoint/data_breakpoint.c b/trunk/samples/hw_breakpoint/data_breakpoint.c
deleted file mode 100644
index 29525500df00..000000000000
--- a/trunk/samples/hw_breakpoint/data_breakpoint.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * data_breakpoint.c - Sample HW Breakpoint file to watch kernel data address
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * usage: insmod data_breakpoint.ko ksym=<ksym_name>
- *
- * This file is a kernel module that places a breakpoint on the ksym_name
- * kernel variable using a hardware breakpoint register. The corresponding
- * handler, which prints a backtrace, is invoked every time a write operation
- * is performed on that variable.
- *
- * Copyright (C) IBM Corporation, 2009
- *
- * Author: K.Prasad
- */
-#include <linux/module.h>	/* Needed by all modules */
-#include <linux/kernel.h>	/* Needed for KERN_INFO */
-#include <linux/init.h>		/* Needed for the macros */
-#include <linux/kallsyms.h>
-
-#include <linux/perf_event.h>
-#include <linux/hw_breakpoint.h>
-
-struct perf_event **sample_hbp;
-
-static char ksym_name[KSYM_NAME_LEN] = "pid_max";
-module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
-MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
- " write operations on the kernel symbol");
-
-static void sample_hbp_handler(struct perf_event *temp, void *data)
-{
- printk(KERN_INFO "%s value is changed\n", ksym_name);
- dump_stack();
- printk(KERN_INFO "Dump stack from sample_hbp_handler\n");
-}
-
-static int __init hw_break_module_init(void)
-{
- int ret;
- DEFINE_BREAKPOINT_ATTR(attr);
-
- attr.bp_addr = kallsyms_lookup_name(ksym_name);
- attr.bp_len = HW_BREAKPOINT_LEN_4;
- attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
-
- sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler);
- if (IS_ERR(sample_hbp)) {
- ret = PTR_ERR(sample_hbp);
- goto fail;
- }
-
- printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name);
-
- return 0;
-
-fail:
- printk(KERN_INFO "Breakpoint registration failed\n");
-
- return ret;
-}
-
-static void __exit hw_break_module_exit(void)
-{
- unregister_wide_hw_breakpoint(sample_hbp);
- printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
-}
-
-module_init(hw_break_module_init);
-module_exit(hw_break_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("K.Prasad");
-MODULE_DESCRIPTION("ksym breakpoint");
diff --git a/trunk/scripts/kernel-doc b/trunk/scripts/kernel-doc
index 241310e59cd6..ea9f8a58678f 100755
--- a/trunk/scripts/kernel-doc
+++ b/trunk/scripts/kernel-doc
@@ -1852,17 +1852,10 @@ sub tracepoint_munge($) {
my $tracepointname = 0;
my $tracepointargs = 0;
- if ($prototype =~ m/TRACE_EVENT\((.*?),/) {
+ if($prototype =~ m/TRACE_EVENT\((.*?),/) {
$tracepointname = $1;
}
- if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) {
- $tracepointname = $1;
- }
- if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) {
- $tracepointname = $2;
- }
- $tracepointname =~ s/^\s+//; #strip leading whitespace
- if ($prototype =~ m/TP_PROTO\((.*?)\)/) {
+ if($prototype =~ m/TP_PROTO\((.*?)\)/) {
$tracepointargs = $1;
}
if (($tracepointname eq 0) || ($tracepointargs eq 0)) {
@@ -1927,9 +1920,7 @@ sub process_state3_function($$) {
if ($prototype =~ /SYSCALL_DEFINE/) {
syscall_munge();
}
- if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ ||
- $prototype =~ /DEFINE_SINGLE_EVENT/)
- {
+ if ($prototype =~ /TRACE_EVENT/) {
tracepoint_munge($file);
}
dump_function($prototype, $file);
diff --git a/trunk/tools/perf/.gitignore b/trunk/tools/perf/.gitignore
index fe08660ce0bd..0854f110bf7f 100644
--- a/trunk/tools/perf/.gitignore
+++ b/trunk/tools/perf/.gitignore
@@ -12,7 +12,6 @@ perf*.1
perf*.xml
perf*.html
common-cmds.h
-perf.data
tags
TAGS
cscope*
diff --git a/trunk/tools/perf/Documentation/perf-kmem.txt b/trunk/tools/perf/Documentation/perf-kmem.txt
deleted file mode 100644
index 44b0ce35c28a..000000000000
--- a/trunk/tools/perf/Documentation/perf-kmem.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-perf-kmem(1)
-==============
-
-NAME
-----
-perf-kmem - Tool to trace/measure kernel memory(slab) properties
-
-SYNOPSIS
---------
-[verse]
-'perf kmem' {record} [<options>]
-
-DESCRIPTION
------------
-There are two variants of perf kmem:
-
-  'perf kmem record <command>' to record the kmem events
- of an arbitrary workload.
-
- 'perf kmem' to report kernel memory statistics.
-
-OPTIONS
--------
--i <file>::
---input=<file>::
- Select the input file (default: perf.data)
-
---stat=::
- Select per callsite or per allocation statistics
-
--s ::
---sort=::
- Sort the output (default: frag,hit,bytes)
-
--l ::
---line=::
- Print n lines only
-
---raw-ip::
- Print raw ip instead of symbol
-
-SEE ALSO
---------
-linkperf:perf-record[1]
diff --git a/trunk/tools/perf/Documentation/perf-record.txt b/trunk/tools/perf/Documentation/perf-record.txt
index fc46c0b40f6e..0ff23de9e453 100644
--- a/trunk/tools/perf/Documentation/perf-record.txt
+++ b/trunk/tools/perf/Documentation/perf-record.txt
@@ -26,19 +26,11 @@ OPTIONS
-e::
--event=::
- Select the PMU event. Selection can be:
+ Select the PMU event. Selection can be a symbolic event name
+ (use 'perf list' to list all events) or a raw PMU
+ event (eventsel+umask) in the form of rNNN where NNN is a
+ hexadecimal event descriptor.
- - a symbolic event name (use 'perf list' to list all events)
-
- - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
- hexadecimal event descriptor.
-
- - a hardware breakpoint event in the form of '\mem:addr[:access]'
- where addr is the address in memory you want to break in.
-          Access is the memory access type (read, write, execute); it can
-          be passed as follows: '\mem:addr[:[r][w][x]]'.
-          If you want to profile read-write accesses at 0x1000, just set
- 'mem:0x1000:rw'.
-a::
System-wide collection.
diff --git a/trunk/tools/perf/Documentation/perf-trace-perl.txt b/trunk/tools/perf/Documentation/perf-trace-perl.txt
deleted file mode 100644
index c5f55f439091..000000000000
--- a/trunk/tools/perf/Documentation/perf-trace-perl.txt
+++ /dev/null
@@ -1,219 +0,0 @@
-perf-trace-perl(1)
-==================
-
-NAME
-----
-perf-trace-perl - Process trace data with a Perl script
-
-SYNOPSIS
---------
-[verse]
-'perf trace' [-s [lang]:script[.ext] ]
-
-DESCRIPTION
------------
-
-This perf trace option is used to process perf trace data using perf's
-built-in Perl interpreter. It reads and processes the input file and
-displays the results of the trace analysis implemented in the given
-Perl script, if any.
-
-STARTER SCRIPTS
----------------
-
-You can avoid reading the rest of this document by running 'perf trace
--g perl' in the same directory as an existing perf.data trace file.
-That will generate a starter script containing a handler for each of
-the event types in the trace file; it simply prints every available
-field for each event in the trace file.
-
-You can also look at the existing scripts in
-~/libexec/perf-core/scripts/perl for typical examples showing how to
-do basic things like aggregate event data, print results, etc. Also,
-the check-perf-trace.pl script, while not interesting for its results,
-attempts to exercise all of the main scripting features.
-
-EVENT HANDLERS
---------------
-
-When perf trace is invoked using a trace script, a user-defined
-'handler function' is called for each event in the trace. If there's
-no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_unhandled' function, see below) and the
-next event is processed.
-
-Most of the event's field values are passed as arguments to the
-handler function; some of the less common ones aren't - those are
-available as calls back into the perf executable (see below).
-
-As an example, the following perf record command can be used to record
-all sched_wakeup events in the system:
-
- # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
-
-Traces meant to be processed using a script should be recorded with
-the above options: -c 1 says to sample every event, -a to enable
-system-wide collection, -M to multiplex the output, and -R to collect
-raw samples.
-
-The format file for the sched_wakeup event defines the following fields
-(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
-
-----
- format:
- field:unsigned short common_type;
- field:unsigned char common_flags;
- field:unsigned char common_preempt_count;
- field:int common_pid;
- field:int common_lock_depth;
-
- field:char comm[TASK_COMM_LEN];
- field:pid_t pid;
- field:int prio;
- field:int success;
- field:int target_cpu;
-----
-
-The handler function for this event would be defined as:
-
-----
-sub sched::sched_wakeup
-{
- my ($event_name, $context, $common_cpu, $common_secs,
- $common_nsecs, $common_pid, $common_comm,
- $comm, $pid, $prio, $success, $target_cpu) = @_;
-}
-----
-
-The handler function takes the form subsystem::event_name.
-
-The $common_* arguments in the handler's argument list are the set of
-arguments passed to all event handlers; some of the fields correspond
-to the common_* fields in the format file, but some are synthesized,
-and some of the common_* fields aren't common enough to be passed
-to every event as arguments but are available as library functions.
-
-Here's a brief description of each of the invariant event args:
-
- $event_name the name of the event as text
- $context an opaque 'cookie' used in calls back into perf
- $common_cpu the cpu the event occurred on
- $common_secs the secs portion of the event timestamp
- $common_nsecs the nsecs portion of the event timestamp
- $common_pid the pid of the current task
- $common_comm the name of the current process
-
-All of the remaining fields in the event's format file have
-counterparts as handler function arguments of the same name, as can be
-seen in the example above.
-
-The above provides the basics needed to directly access every field of
-every event in a trace, which covers 90% of what you need to know to
-write a useful trace script. The sections below cover the rest.
-
-SCRIPT LAYOUT
--------------
-
-Every perf trace Perl script should start by setting up a Perl module
-search path and 'use'ing a few support modules (see module
-descriptions below):
-
-----
- use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
- use lib "./Perf-Trace-Util/lib";
- use Perf::Trace::Core;
- use Perf::Trace::Context;
- use Perf::Trace::Util;
-----
-
-The rest of the script can contain handler functions and support
-functions in any order.
-
-Aside from the event handler functions discussed above, every script
-can implement a set of optional functions:
-
-*trace_begin*, if defined, is called before any event is processed and
-gives scripts a chance to do setup tasks:
-
-----
- sub trace_begin
- {
- }
-----
-
-*trace_end*, if defined, is called after all events have been
- processed and gives scripts a chance to do end-of-script tasks, such
- as display results:
-
-----
-sub trace_end
-{
-}
-----
-
-*trace_unhandled*, if defined, is called for any event that doesn't
- have a handler explicitly defined for it. The standard set of
- common arguments is passed into it:
-
-----
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs,
- $common_nsecs, $common_pid, $common_comm) = @_;
-}
-----
-
-The remaining sections provide descriptions of each of the available
-built-in perf trace Perl modules and their associated functions.
-
-AVAILABLE MODULES AND FUNCTIONS
--------------------------------
-
-The following sections describe the functions and variables available
-via the various Perf::Trace::* Perl modules. To use the functions and
-variables from the given module, add the corresponding 'use
-Perf::Trace::XXX' line to your perf trace script.
-
-Perf::Trace::Core Module
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-These functions provide some essential functions to user scripts.
-
-The *flag_str* and *symbol_str* functions provide human-readable
-strings for flag and symbolic fields. These correspond to the strings
-and values parsed from the 'print fmt' fields of the event format
-files:
-
-  flag_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the flag field $field_name of event $event_name
-  symbol_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the symbolic field $field_name of event $event_name
-
-Perf::Trace::Context Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Some of the 'common' fields in the event format file aren't all that
-common, but need to be made accessible to user scripts nonetheless.
-
-Perf::Trace::Context defines a set of functions that can be used to
-access this data in the context of the current event. Each of these
-functions expects a $context variable, which is the same as the
-$context variable passed into every event handler as the second
-argument.
-
- common_pc($context) - returns common_preempt count for the current event
- common_flags($context) - returns common_flags for the current event
- common_lock_depth($context) - returns common_lock_depth for the current event
-
-Perf::Trace::Util Module
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Various utility functions for use with perf trace:
-
- nsecs($secs, $nsecs) - returns total nsecs given secs/nsecs pair
- nsecs_secs($nsecs) - returns whole secs portion given nsecs
- nsecs_nsecs($nsecs) - returns nsecs remainder given nsecs
- nsecs_str($nsecs) - returns printable string in the form secs.nsecs
- avg($total, $n) - returns average given a sum and a total number of values
-
-SEE ALSO
---------
-linkperf:perf-trace[1]
diff --git a/trunk/tools/perf/Documentation/perf-trace.txt b/trunk/tools/perf/Documentation/perf-trace.txt
index 07065efa60e0..41ed75398ca9 100644
--- a/trunk/tools/perf/Documentation/perf-trace.txt
+++ b/trunk/tools/perf/Documentation/perf-trace.txt
@@ -20,15 +20,6 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the trace data.
--s::
---script=::
- Process trace data with the given script ([lang]:script[.ext]).
-
--g::
---gen-script=::
- Generate perf-trace.[ext] starter script for given language,
- using current perf.data.
-
SEE ALSO
--------
-linkperf:perf-record[1], linkperf:perf-trace-perl[1]
+linkperf:perf-record[1]
diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile
index 23ec66098bdc..5d1a8b0dff8f 100644
--- a/trunk/tools/perf/Makefile
+++ b/trunk/tools/perf/Makefile
@@ -2,7 +2,6 @@
all::
# Define V=1 to have a more verbose compile.
-# Define V=2 to have an even more verbose compile.
#
# Define SNPRINTF_RETURNS_BOGUS if you are on a system which snprintf()
# or vsnprintf() return -1 instead of number of characters which would
@@ -148,8 +147,6 @@ all::
# broken, or spawning external process is slower than built-in grep perf has).
#
# Define LDFLAGS=-static to build a static binary.
-#
-# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN
@@ -162,6 +159,20 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+#
+# Add -m32 for cross-builds:
+#
+ifdef NO_64BIT
+ MBITS := -m32
+else
+ #
+ # If we're on a 64-bit kernel, use -m64:
+ #
+ ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M))
+ MBITS := -m64
+ endif
+endif
+
# CFLAGS and LDFLAGS are for the users to override from the command line.
#
@@ -198,7 +209,7 @@ ifndef PERF_DEBUG
CFLAGS_OPTIMIZE = -O6
endif
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
@@ -250,7 +261,7 @@ PTHREAD_LIBS = -lpthread
# explicitly what architecture to check for. Fix this up for yours..
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null >/dev/null 2>&1 && echo y"), y)
CFLAGS := $(CFLAGS) -fstack-protector-all
endif
@@ -369,8 +380,6 @@ LIB_H += util/sort.h
LIB_H += util/hist.h
LIB_H += util/thread.h
LIB_H += util/data_map.h
-LIB_H += util/probe-finder.h
-LIB_H += util/probe-event.h
LIB_OBJS += util/abspath.o
LIB_OBJS += util/alias.o
@@ -409,12 +418,10 @@ LIB_OBJS += util/thread.o
LIB_OBJS += util/trace-event-parse.o
LIB_OBJS += util/trace-event-read.o
LIB_OBJS += util/trace-event-info.o
-LIB_OBJS += util/trace-event-perl.o
LIB_OBJS += util/svghelper.o
LIB_OBJS += util/sort.o
LIB_OBJS += util/hist.o
LIB_OBJS += util/data_map.o
-LIB_OBJS += util/probe-event.o
BUILTIN_OBJS += builtin-annotate.o
@@ -423,7 +430,6 @@ BUILTIN_OBJS += builtin-bench.o
# Benchmark modules
BUILTIN_OBJS += bench/sched-messaging.o
BUILTIN_OBJS += bench/sched-pipe.o
-BUILTIN_OBJS += bench/mem-memcpy.o
BUILTIN_OBJS += builtin-help.o
BUILTIN_OBJS += builtin-sched.o
@@ -436,15 +442,9 @@ BUILTIN_OBJS += builtin-timechart.o
BUILTIN_OBJS += builtin-top.o
BUILTIN_OBJS += builtin-trace.o
BUILTIN_OBJS += builtin-probe.o
-BUILTIN_OBJS += builtin-kmem.o
PERFLIBS = $(LIB_FILE)
-ifeq ($(V), 2)
- QUIET_STDERR = ">/dev/null"
-else
- QUIET_STDERR = ">/dev/null 2>&1"
-endif
#
# Platform specific tweaks
#
@@ -472,58 +472,49 @@ ifeq ($(uname_S),Darwin)
PTHREAD_LIBS =
endif
-ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
- msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y)
+ msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]);
endif
- ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y)
 	msg := $(warning No libdwarf.h found or old libdwarf.h found, disabling dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231);
BASIC_CFLAGS += -DNO_LIBDWARF
else
EXTLIBS += -lelf -ldwarf
+ LIB_H += util/probe-finder.h
LIB_OBJS += util/probe-finder.o
endif
-PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
-
-ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
- BASIC_CFLAGS += -DNO_LIBPERL
-else
- ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
- LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
-endif
-
ifdef NO_DEMANGLE
BASIC_CFLAGS += -DNO_DEMANGLE
else
- has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
+ has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
else
- has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd_iberty),y)
EXTLIBS += -lbfd -liberty
else
- has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+ has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y")
ifeq ($(has_bfd_iberty_z),y)
EXTLIBS += -lbfd -liberty -lz
else
- has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+ has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty > /dev/null 2>&1 && echo y")
ifeq ($(has_cplus_demangle),y)
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
- msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+ msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling)
BASIC_CFLAGS += -DNO_DEMANGLE
endif
endif
@@ -873,12 +864,6 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
$(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
-util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
-
-scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
- $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
-
perf-%$X: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
@@ -986,13 +971,6 @@ export perfexec_instdir
install: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
- $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
- $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util'
- $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util'
ifdef BUILT_INS
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
@@ -1078,7 +1056,7 @@ distclean: clean
# $(RM) configure
clean:
- $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
+ $(RM) *.o */*.o $(LIB_FILE)
$(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
$(RM) $(TEST_PROGRAMS)
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope*
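
The library checks in this Makefile all follow one idiom: pipe a throwaway C program into $(CC) and test whether it links. A minimal sketch of the demangling probe, assuming bfd.h from binutils-dev is installed; the file name test-bfd.c is only for illustration, since the Makefile actually feeds the program on stdin:

    /* test-bfd.c - sketch of the link test the Makefile runs for libbfd.
     * It only has to compile and link; it is never executed. */
    #include <bfd.h>

    int main(void)
    {
        bfd_demangle(0, 0, 0);
        return 0;
    }

If 'cc test-bfd.c -lbfd' links, EXTLIBS gains -lbfd; otherwise the Makefile retries with -liberty and then -liberty -lz, falls back to a bare cplus_demangle() prototype, and finally defines NO_DEMANGLE.
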
diff --git a/trunk/tools/perf/bench/bench.h b/trunk/tools/perf/bench/bench.h
index f7781c6267c0..9fbd8d745fa1 100644
--- a/trunk/tools/perf/bench/bench.h
+++ b/trunk/tools/perf/bench/bench.h
@@ -3,7 +3,6 @@
extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
-extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0
diff --git a/trunk/tools/perf/bench/mem-memcpy.c b/trunk/tools/perf/bench/mem-memcpy.c
deleted file mode 100644
index 89773178e894..000000000000
--- a/trunk/tools/perf/bench/mem-memcpy.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * mem-memcpy.c
- *
- * memcpy: Simple memory copy in various ways
- *
- * Written by Hitoshi Mitake
- */
-#include
-
-#include "../perf.h"
-#include "../util/util.h"
-#include "../util/parse-options.h"
-#include "../util/string.h"
-#include "../util/header.h"
-#include "bench.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-#include <errno.h>
-
-#define K 1024
-
-static const char *length_str = "1MB";
-static const char *routine = "default";
-static int use_clock = 0;
-static int clock_fd;
-
-static const struct option options[] = {
- OPT_STRING('l', "length", &length_str, "1MB",
- "Specify length of memory to copy. "
- "available unit: B, MB, GB (upper and lower)"),
- OPT_STRING('r', "routine", &routine, "default",
- "Specify routine to copy"),
- OPT_BOOLEAN('c', "clock", &use_clock,
- "Use CPU clock for measuring"),
- OPT_END()
-};
-
-struct routine {
- const char *name;
- const char *desc;
- void * (*fn)(void *dst, const void *src, size_t len);
-};
-
-struct routine routines[] = {
- { "default",
- "Default memcpy() provided by glibc",
- memcpy },
- { NULL,
- NULL,
- NULL }
-};
-
-static const char * const bench_mem_memcpy_usage[] = {
-	"perf bench mem memcpy <options>",
- NULL
-};
-
-static struct perf_event_attr clock_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES
-};
-
-static void init_clock(void)
-{
- clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
-
- if (clock_fd < 0 && errno == ENOSYS)
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- else
- BUG_ON(clock_fd < 0);
-}
-
-static u64 get_clock(void)
-{
- int ret;
- u64 clk;
-
- ret = read(clock_fd, &clk, sizeof(u64));
- BUG_ON(ret != sizeof(u64));
-
- return clk;
-}
-
-static double timeval2double(struct timeval *ts)
-{
- return (double)ts->tv_sec +
- (double)ts->tv_usec / (double)1000000;
-}
-
-int bench_mem_memcpy(int argc, const char **argv,
- const char *prefix __used)
-{
- int i;
- void *dst, *src;
- size_t length;
- double bps = 0.0;
- struct timeval tv_start, tv_end, tv_diff;
- u64 clock_start, clock_end, clock_diff;
-
- clock_start = clock_end = clock_diff = 0ULL;
- argc = parse_options(argc, argv, options,
- bench_mem_memcpy_usage, 0);
-
- tv_diff.tv_sec = 0;
- tv_diff.tv_usec = 0;
- length = (size_t)perf_atoll((char *)length_str);
-
- if ((s64)length <= 0) {
- fprintf(stderr, "Invalid length:%s\n", length_str);
- return 1;
- }
-
- for (i = 0; routines[i].name; i++) {
- if (!strcmp(routines[i].name, routine))
- break;
- }
- if (!routines[i].name) {
- printf("Unknown routine:%s\n", routine);
- printf("Available routines...\n");
- for (i = 0; routines[i].name; i++) {
- printf("\t%s ... %s\n",
- routines[i].name, routines[i].desc);
- }
- return 1;
- }
-
- dst = zalloc(length);
- if (!dst)
- die("memory allocation failed - maybe length is too large?\n");
-
- src = zalloc(length);
- if (!src)
- die("memory allocation failed - maybe length is too large?\n");
-
- if (bench_format == BENCH_FORMAT_DEFAULT) {
- printf("# Copying %s Bytes from %p to %p ...\n\n",
- length_str, src, dst);
- }
-
- if (use_clock) {
- init_clock();
- clock_start = get_clock();
- } else {
- BUG_ON(gettimeofday(&tv_start, NULL));
- }
-
- routines[i].fn(dst, src, length);
-
- if (use_clock) {
- clock_end = get_clock();
- clock_diff = clock_end - clock_start;
- } else {
- BUG_ON(gettimeofday(&tv_end, NULL));
- timersub(&tv_end, &tv_start, &tv_diff);
- bps = (double)((double)length / timeval2double(&tv_diff));
- }
-
- switch (bench_format) {
- case BENCH_FORMAT_DEFAULT:
- if (use_clock) {
- printf(" %14lf Clock/Byte\n",
- (double)clock_diff / (double)length);
- } else {
- if (bps < K)
- printf(" %14lf B/Sec\n", bps);
- else if (bps < K * K)
-				printf(" %14lf KB/Sec\n", bps / 1024);
- else if (bps < K * K * K)
- printf(" %14lf MB/Sec\n", bps / 1024 / 1024);
- else {
- printf(" %14lf GB/Sec\n",
- bps / 1024 / 1024 / 1024);
- }
- }
- break;
- case BENCH_FORMAT_SIMPLE:
- if (use_clock) {
- printf("%14lf\n",
- (double)clock_diff / (double)length);
- } else
- printf("%lf\n", bps);
- break;
- default:
- /* reaching this means there's some disaster: */
- die("unknown format: %d\n", bench_format);
- break;
- }
-
- return 0;
-}
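
The benchmark deleted above times a single memcpy() of the requested length and reports either cycles per byte (read from a PERF_COUNT_HW_CPU_CYCLES counter) or bytes per second computed from a gettimeofday() delta. A self-contained sketch of just the wall-clock bandwidth arithmetic, with a hard-coded 1 MB copy standing in for the option parsing:

    /* Sketch of the removed benchmark's timing math:
     * bytes/sec = length / (tv_sec + tv_usec / 1e6). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/time.h>

    int main(void)
    {
        size_t length = 1024 * 1024;        /* 1 MB, stands in for -l */
        void *src = calloc(1, length);
        void *dst = calloc(1, length);
        struct timeval start, end, diff;
        double secs;

        if (!src || !dst)
            return 1;

        gettimeofday(&start, NULL);
        memcpy(dst, src, length);
        gettimeofday(&end, NULL);
        timersub(&end, &start, &diff);

        secs = diff.tv_sec + diff.tv_usec / 1e6;
        if (secs <= 0.0)                    /* copy finished below timer resolution */
            secs = 1e-6;
        printf("%f MB/sec\n", length / secs / (1024 * 1024));

        free(src);
        free(dst);
        return 0;
    }
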
diff --git a/trunk/tools/perf/builtin-annotate.c b/trunk/tools/perf/builtin-annotate.c
index 0bf2e8f9af57..77d50a6d6802 100644
--- a/trunk/tools/perf/builtin-annotate.c
+++ b/trunk/tools/perf/builtin-annotate.c
@@ -19,22 +19,24 @@
#include "perf.h"
#include "util/debug.h"
-#include "util/event.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
-#include "util/data_map.h"
static char const *input_name = "perf.data";
static int force;
+static int input;
static int full_paths;
static int print_line;
+static unsigned long page_size;
+static unsigned long mmap_window = 32;
+
struct sym_hist {
u64 sum;
u64 ip[0];
@@ -51,11 +53,6 @@ struct sym_priv {
struct sym_ext *ext;
};
-static struct symbol_conf symbol_conf = {
- .priv_size = sizeof(struct sym_priv),
- .try_vmlinux_path = true,
-};
-
static const char *sym_hist_filter;
static int symbol_filter(struct map *map __used, struct symbol *sym)
@@ -121,34 +118,186 @@ static void hist_hit(struct hist_entry *he, u64 ip)
h->ip[offset]);
}
-static int hist_entry__add(struct addr_location *al, u64 count)
+static int hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym, u64 ip, u64 count, char level)
{
bool hit;
- struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit);
+ struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip,
+ count, level, &hit);
if (he == NULL)
return -ENOMEM;
- hist_hit(he, al->addr);
+ hist_hit(he, ip);
return 0;
}
-static int process_sample_event(event_t *event)
+static int
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct addr_location al;
-
- dump_printf("(IP, %d): %d: %p\n", event->header.misc,
- event->ip.pid, (void *)(long)event->ip.ip);
-
- if (event__preprocess_sample(event, &al, symbol_filter) < 0) {
+ char level;
+ u64 ip = event->ip.ip;
+ struct map *map = NULL;
+ struct symbol *sym = NULL;
+ struct thread *thread = threads__findnew(event->ip.pid);
+
+ dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.misc,
+ event->ip.pid,
+ (void *)(long)ip);
+
+ if (thread == NULL) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (hist_entry__add(&al, 1)) {
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
+ level = 'k';
+ sym = kernel_maps__find_symbol(ip, &map);
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "");
+ } else if (event->header.misc & PERF_RECORD_MISC_USER) {
+ level = '.';
+ map = thread__find_map(thread, ip);
+ if (map != NULL) {
+got_map:
+ ip = map->map_ip(map, ip);
+ sym = map__find_symbol(map, ip, symbol_filter);
+ } else {
+ /*
+ * If this is outside of all known maps,
+ * and is a negative address, try to look it
+ * up in the kernel dso, as it might be a
+ * vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in
+			 * the "[vdso]" dso, but for now let's use the old
+ * trick of looking in the whole kernel symbol list.
+ */
+ if ((long long)ip < 0) {
+ map = kernel_map;
+ goto got_map;
+ }
+ }
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "");
+ } else {
+ level = 'H';
+ dump_printf(" ...... dso: [hypervisor]\n");
+ }
+
+ if (hist_entry__add(thread, map, sym, ip, 1, level)) {
fprintf(stderr, "problem incrementing symbol count, "
"skipping event\n");
return -1;
}
+ total++;
+
+ return 0;
+}
+
+static int
+process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct map *map = map__new(&event->mmap, NULL, 0);
+ struct thread *thread = threads__findnew(event->mmap.pid);
+
+ dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->mmap.pid,
+ (void *)(long)event->mmap.start,
+ (void *)(long)event->mmap.len,
+ (void *)(long)event->mmap.pgoff,
+ event->mmap.filename);
+
+ if (thread == NULL || map == NULL) {
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
+ return 0;
+ }
+
+ thread__insert_map(thread, map);
+ total_mmap++;
+
+ return 0;
+}
+
+static int
+process_comm_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct thread *thread = threads__findnew(event->comm.pid);
+
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->comm.comm, event->comm.pid);
+
+ if (thread == NULL ||
+ thread__set_comm(thread, event->comm.comm)) {
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
+ return -1;
+ }
+ total_comm++;
+
+ return 0;
+}
+
+static int
+process_fork_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct thread *thread = threads__findnew(event->fork.pid);
+ struct thread *parent = threads__findnew(event->fork.ppid);
+
+ dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->fork.pid, event->fork.ppid);
+
+ /*
+ * A thread clone will have the same PID for both
+ * parent and child.
+ */
+ if (thread == parent)
+ return 0;
+
+ if (!thread || !parent || thread__fork(thread, parent)) {
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
+ return -1;
+ }
+ total_fork++;
+
+ return 0;
+}
+
+static int
+process_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ return process_sample_event(event, offset, head);
+
+ case PERF_RECORD_MMAP:
+ return process_mmap_event(event, offset, head);
+
+ case PERF_RECORD_COMM:
+ return process_comm_event(event, offset, head);
+
+ case PERF_RECORD_FORK:
+ return process_fork_event(event, offset, head);
+ /*
+	 * We don't process them right now but they are fine:
+ */
+
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
+ return 0;
+
+ default:
+ return -1;
+ }
return 0;
}
@@ -453,32 +602,116 @@ static void find_annotations(void)
}
}
-static struct perf_file_handler file_handler = {
- .process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_fork_event = event__process_task,
-};
-
static int __cmd_annotate(void)
{
- struct perf_header *header;
- struct thread *idle;
- int ret;
+ int ret, rc = EXIT_FAILURE;
+ unsigned long offset = 0;
+ unsigned long head = 0;
+ struct stat input_stat;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+
+ register_idle_thread();
+
+ input = open(input_name, O_RDONLY);
+ if (input < 0) {
+ perror("failed to open file");
+ exit(-1);
+ }
- idle = register_idle_thread();
- register_perf_file_handler(&file_handler);
+ ret = fstat(input, &input_stat);
+ if (ret < 0) {
+ perror("failed to stat file");
+ exit(-1);
+ }
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
- if (ret)
- return ret;
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
+ exit(-1);
+ }
- if (dump_trace) {
- event__print_totals();
- return 0;
+ if (!input_stat.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
+ }
+
+ if (load_kernel(symbol_filter) < 0) {
+ perror("failed to load kernel symbols");
+ return EXIT_FAILURE;
+ }
+
+remap:
+ buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+ MAP_SHARED, input, offset);
+ if (buf == MAP_FAILED) {
+ perror("failed to mmap file");
+ exit(-1);
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ size = event->header.size;
+ if (!size)
+ size = 8;
+
+ if (head + event->header.size >= page_size * mmap_window) {
+ unsigned long shift = page_size * (head / page_size);
+ int munmap_ret;
+
+ munmap_ret = munmap(buf, page_size * mmap_window);
+ assert(munmap_ret == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
}
+ size = event->header.size;
+
+ dump_printf("%p [%p]: event: %d\n",
+ (void *)(offset + head),
+ (void *)(long)event->header.size,
+ event->header.type);
+
+ if (!size || process_event(event, offset, head) < 0) {
+
+ dump_printf("%p [%p]: skipping unknown header type: %d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type);
+
+ total_unknown++;
+
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope to catch on again 'soon'.
+ */
+
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head < (unsigned long)input_stat.st_size)
+ goto more;
+
+ rc = EXIT_SUCCESS;
+ close(input);
+
+ dump_printf(" IP events: %10ld\n", total);
+ dump_printf(" mmap events: %10ld\n", total_mmap);
+ dump_printf(" comm events: %10ld\n", total_comm);
+ dump_printf(" fork events: %10ld\n", total_fork);
+ dump_printf(" unknown events: %10ld\n", total_unknown);
+
+ if (dump_trace)
+ return 0;
+
if (verbose > 3)
threads__fprintf(stdout);
@@ -486,11 +719,11 @@ static int __cmd_annotate(void)
dsos__fprintf(stdout);
collapse__resort();
- output__resort(event__total[0]);
+ output__resort(total);
find_annotations();
- return ret;
+ return rc;
}
static const char * const annotate_usage[] = {
@@ -508,9 +741,8 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
- "file", "vmlinux pathname"),
- OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &print_line,
"print matching source lines (may be slow)"),
@@ -536,8 +768,9 @@ static void setup_sorting(void)
int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
- if (symbol__init(&symbol_conf) < 0)
- return -1;
+ symbol__init(sizeof(struct sym_priv));
+
+ page_size = getpagesize();
argc = parse_options(argc, argv, options, annotate_usage, 0);
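
The restored __cmd_annotate() walks perf.data through a sliding mmap window: it maps mmap_window pages at a page-aligned offset, advances head record by record, and when a record would cross the end of the window it unmaps, moves offset forward by whole pages, and rebases head. A stripped-down sketch of that arithmetic; the u16 length prefix and the walk_records() name are stand-ins, only the offset/head/shift handling mirrors the code above:

    /* Sketch of the sliding-mmap record walk used by __cmd_annotate(). */
    #include <assert.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #define MMAP_WINDOW 32UL    /* pages per mapping, as in the annotate code */

    static void walk_records(int fd, unsigned long file_size)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        unsigned long offset = 0, head = 0;
        uint16_t size;
        char *buf;

        if (!file_size)
            return;
    remap:
        buf = mmap(NULL, page_size * MMAP_WINDOW, PROT_READ, MAP_SHARED,
                   fd, offset);
        assert(buf != MAP_FAILED);
    more:
        size = *(uint16_t *)(buf + head);   /* hypothetical record-length prefix */
        if (!size)
            size = 8;                       /* minimal skip if the stream looks bad */

        /* Record would cross the window: slide the mapping forward. */
        if (head + size >= page_size * MMAP_WINDOW) {
            unsigned long shift = page_size * (head / page_size);

            munmap(buf, page_size * MMAP_WINDOW);
            offset += shift;                /* stays page aligned for mmap() */
            head -= shift;
            goto remap;
        }

        head += size;                       /* the real code processes the event here */
        if (offset + head < file_size)
            goto more;

        munmap(buf, page_size * MMAP_WINDOW);
    }

    int main(int argc, char **argv)
    {
        struct stat st;
        int fd;

        if (argc < 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || fstat(fd, &st) < 0)
            return 1;
        walk_records(fd, st.st_size);
        close(fd);
        return 0;
    }

The shift is computed in whole pages so the new offset stays valid for mmap(), and head is rebased so it still points at the same record inside the new window.
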
diff --git a/trunk/tools/perf/builtin-bench.c b/trunk/tools/perf/builtin-bench.c
index e043eb83092a..90c39baae0de 100644
--- a/trunk/tools/perf/builtin-bench.c
+++ b/trunk/tools/perf/builtin-bench.c
@@ -12,7 +12,6 @@
*
* Available subsystem list:
* sched ... scheduler and IPC mechanism
- * mem ... memory access performance
*
*/
@@ -44,15 +43,6 @@ static struct bench_suite sched_suites[] = {
NULL }
};
-static struct bench_suite mem_suites[] = {
- { "memcpy",
- "Simple memory copy in various ways",
- bench_mem_memcpy },
- { NULL,
- NULL,
- NULL }
-};
-
struct bench_subsys {
const char *name;
const char *summary;
@@ -63,12 +53,9 @@ static struct bench_subsys subsystems[] = {
{ "sched",
"scheduler and IPC mechanism",
sched_suites },
- { "mem",
- "memory access performance",
- mem_suites },
{ NULL,
NULL,
- NULL }
+ NULL }
};
static void dump_suites(int subsys_index)
diff --git a/trunk/tools/perf/builtin-help.c b/trunk/tools/perf/builtin-help.c
index 9f810b17c25c..768f9c826312 100644
--- a/trunk/tools/perf/builtin-help.c
+++ b/trunk/tools/perf/builtin-help.c
@@ -179,7 +179,7 @@ static void add_man_viewer(const char *name)
while (*p)
p = &((*p)->next);
- *p = zalloc(sizeof(**p) + len + 1);
+ *p = calloc(1, (sizeof(**p) + len + 1));
strncpy((*p)->name, name, len);
}
@@ -194,7 +194,7 @@ static void do_add_man_viewer_info(const char *name,
size_t len,
const char *value)
{
- struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1);
+ struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1);
strncpy(new->name, name, len);
new->info = strdup(value);
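
Both hunks above swap zalloc() for a plain calloc(1, ...) when allocating a struct that keeps its name in a trailing buffer. A hedged sketch of that struct-plus-string allocation pattern; struct viewer and viewer_new() are invented names, not the perf structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct viewer {
        struct viewer *next;
        char name[];            /* flexible array member holds the name */
    };

    static struct viewer *viewer_new(const char *name)
    {
        size_t len = strlen(name);
        struct viewer *v = calloc(1, sizeof(*v) + len + 1);

        if (v)
            memcpy(v->name, name, len);     /* calloc already zeroed the NUL */
        return v;
    }

    int main(void)
    {
        struct viewer *v = viewer_new("man");

        if (v)
            printf("%s\n", v->name);
        free(v);
        return 0;
    }
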
diff --git a/trunk/tools/perf/builtin-kmem.c b/trunk/tools/perf/builtin-kmem.c
deleted file mode 100644
index 047fef74bd52..000000000000
--- a/trunk/tools/perf/builtin-kmem.c
+++ /dev/null
@@ -1,807 +0,0 @@
-#include "builtin.h"
-#include "perf.h"
-
-#include "util/util.h"
-#include "util/cache.h"
-#include "util/symbol.h"
-#include "util/thread.h"
-#include "util/header.h"
-
-#include "util/parse-options.h"
-#include "util/trace-event.h"
-
-#include "util/debug.h"
-#include "util/data_map.h"
-
-#include <linux/rbtree.h>
-
-struct alloc_stat;
-typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
-
-static char const *input_name = "perf.data";
-
-static struct perf_header *header;
-static u64 sample_type;
-
-static int alloc_flag;
-static int caller_flag;
-
-static int alloc_lines = -1;
-static int caller_lines = -1;
-
-static bool raw_ip;
-
-static char default_sort_order[] = "frag,hit,bytes";
-
-static int *cpunode_map;
-static int max_cpu_num;
-
-struct alloc_stat {
- u64 call_site;
- u64 ptr;
- u64 bytes_req;
- u64 bytes_alloc;
- u32 hit;
- u32 pingpong;
-
- short alloc_cpu;
-
- struct rb_node node;
-};
-
-static struct rb_root root_alloc_stat;
-static struct rb_root root_alloc_sorted;
-static struct rb_root root_caller_stat;
-static struct rb_root root_caller_sorted;
-
-static unsigned long total_requested, total_allocated;
-static unsigned long nr_allocs, nr_cross_allocs;
-
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
-#define PATH_SYS_NODE "/sys/devices/system/node"
-
-static void init_cpunode_map(void)
-{
- FILE *fp;
- int i;
-
- fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
- if (!fp) {
- max_cpu_num = 4096;
- return;
- }
-
- if (fscanf(fp, "%d", &max_cpu_num) < 1)
- die("Failed to read 'kernel_max' from sysfs");
- max_cpu_num++;
-
- cpunode_map = calloc(max_cpu_num, sizeof(int));
- if (!cpunode_map)
- die("calloc");
- for (i = 0; i < max_cpu_num; i++)
- cpunode_map[i] = -1;
- fclose(fp);
-}
-
-static void setup_cpunode_map(void)
-{
- struct dirent *dent1, *dent2;
- DIR *dir1, *dir2;
- unsigned int cpu, mem;
- char buf[PATH_MAX];
-
- init_cpunode_map();
-
- dir1 = opendir(PATH_SYS_NODE);
- if (!dir1)
- return;
-
- while (true) {
- dent1 = readdir(dir1);
- if (!dent1)
- break;
-
- if (sscanf(dent1->d_name, "node%u", &mem) < 1)
- continue;
-
- snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
- dir2 = opendir(buf);
- if (!dir2)
- continue;
- while (true) {
- dent2 = readdir(dir2);
- if (!dent2)
- break;
- if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
- continue;
- cpunode_map[cpu] = mem;
- }
- }
-}
-
-static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
- int bytes_req, int bytes_alloc, int cpu)
-{
- struct rb_node **node = &root_alloc_stat.rb_node;
- struct rb_node *parent = NULL;
- struct alloc_stat *data = NULL;
-
- while (*node) {
- parent = *node;
- data = rb_entry(*node, struct alloc_stat, node);
-
- if (ptr > data->ptr)
- node = &(*node)->rb_right;
- else if (ptr < data->ptr)
- node = &(*node)->rb_left;
- else
- break;
- }
-
- if (data && data->ptr == ptr) {
- data->hit++;
- data->bytes_req += bytes_req;
- data->bytes_alloc += bytes_req;
- } else {
- data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
- data->ptr = ptr;
- data->pingpong = 0;
- data->hit = 1;
- data->bytes_req = bytes_req;
- data->bytes_alloc = bytes_alloc;
-
- rb_link_node(&data->node, parent, node);
- rb_insert_color(&data->node, &root_alloc_stat);
- }
- data->call_site = call_site;
- data->alloc_cpu = cpu;
-}
-
-static void insert_caller_stat(unsigned long call_site,
- int bytes_req, int bytes_alloc)
-{
- struct rb_node **node = &root_caller_stat.rb_node;
- struct rb_node *parent = NULL;
- struct alloc_stat *data = NULL;
-
- while (*node) {
- parent = *node;
- data = rb_entry(*node, struct alloc_stat, node);
-
- if (call_site > data->call_site)
- node = &(*node)->rb_right;
- else if (call_site < data->call_site)
- node = &(*node)->rb_left;
- else
- break;
- }
-
- if (data && data->call_site == call_site) {
- data->hit++;
- data->bytes_req += bytes_req;
- data->bytes_alloc += bytes_req;
- } else {
- data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
- data->call_site = call_site;
- data->pingpong = 0;
- data->hit = 1;
- data->bytes_req = bytes_req;
- data->bytes_alloc = bytes_alloc;
-
- rb_link_node(&data->node, parent, node);
- rb_insert_color(&data->node, &root_caller_stat);
- }
-}
-
-static void process_alloc_event(struct raw_event_sample *raw,
- struct event *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used,
- int node)
-{
- unsigned long call_site;
- unsigned long ptr;
- int bytes_req;
- int bytes_alloc;
- int node1, node2;
-
- ptr = raw_field_value(event, "ptr", raw->data);
- call_site = raw_field_value(event, "call_site", raw->data);
- bytes_req = raw_field_value(event, "bytes_req", raw->data);
- bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
-
- insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
- insert_caller_stat(call_site, bytes_req, bytes_alloc);
-
- total_requested += bytes_req;
- total_allocated += bytes_alloc;
-
- if (node) {
- node1 = cpunode_map[cpu];
- node2 = raw_field_value(event, "node", raw->data);
- if (node1 != node2)
- nr_cross_allocs++;
- }
- nr_allocs++;
-}
-
-static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
-static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
-
-static struct alloc_stat *search_alloc_stat(unsigned long ptr,
- unsigned long call_site,
- struct rb_root *root,
- sort_fn_t sort_fn)
-{
- struct rb_node *node = root->rb_node;
- struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
-
- while (node) {
- struct alloc_stat *data;
- int cmp;
-
- data = rb_entry(node, struct alloc_stat, node);
-
- cmp = sort_fn(&key, data);
- if (cmp < 0)
- node = node->rb_left;
- else if (cmp > 0)
- node = node->rb_right;
- else
- return data;
- }
- return NULL;
-}
-
-static void process_free_event(struct raw_event_sample *raw,
- struct event *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used)
-{
- unsigned long ptr;
- struct alloc_stat *s_alloc, *s_caller;
-
- ptr = raw_field_value(event, "ptr", raw->data);
-
- s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
- if (!s_alloc)
- return;
-
- if (cpu != s_alloc->alloc_cpu) {
- s_alloc->pingpong++;
-
- s_caller = search_alloc_stat(0, s_alloc->call_site,
- &root_caller_stat, callsite_cmp);
- assert(s_caller);
- s_caller->pingpong++;
- }
- s_alloc->alloc_cpu = -1;
-}
-
-static void
-process_raw_event(event_t *raw_event __used, void *more_data,
- int cpu, u64 timestamp, struct thread *thread)
-{
- struct raw_event_sample *raw = more_data;
- struct event *event;
- int type;
-
- type = trace_parse_common_type(raw->data);
- event = trace_find_event(type);
-
- if (!strcmp(event->name, "kmalloc") ||
- !strcmp(event->name, "kmem_cache_alloc")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 0);
- return;
- }
-
- if (!strcmp(event->name, "kmalloc_node") ||
- !strcmp(event->name, "kmem_cache_alloc_node")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 1);
- return;
- }
-
- if (!strcmp(event->name, "kfree") ||
- !strcmp(event->name, "kmem_cache_free")) {
- process_free_event(raw, event, cpu, timestamp, thread);
- return;
- }
-}
-
-static int process_sample_event(event_t *event)
-{
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct thread *thread = threads__findnew(event->ip.pid);
-
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
-
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
-
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
-
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
- event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
-
- if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
- event->header.type);
- return -1;
- }
-
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
- process_raw_event(event, more_data, cpu, timestamp, thread);
-
- return 0;
-}
-
-static int sample_type_check(u64 type)
-{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
-static struct perf_file_handler file_handler = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .sample_type_check = sample_type_check,
-};
-
-static int read_events(void)
-{
- register_idle_thread();
- register_perf_file_handler(&file_handler);
-
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
-}
-
-static double fragmentation(unsigned long n_req, unsigned long n_alloc)
-{
- if (n_alloc == 0)
- return 0.0;
- else
- return 100.0 - (100.0 * n_req / n_alloc);
-}
-
-static void __print_result(struct rb_root *root, int n_lines, int is_caller)
-{
- struct rb_node *next;
-
- printf("%.102s\n", graph_dotted_line);
- printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
- printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
- printf("%.102s\n", graph_dotted_line);
-
- next = rb_first(root);
-
- while (next && n_lines--) {
- struct alloc_stat *data = rb_entry(next, struct alloc_stat,
- node);
- struct symbol *sym = NULL;
- char buf[BUFSIZ];
- u64 addr;
-
- if (is_caller) {
- addr = data->call_site;
- if (!raw_ip)
- sym = thread__find_function(kthread, addr, NULL);
- } else
- addr = data->ptr;
-
- if (sym != NULL)
- snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
- addr - sym->start);
- else
- snprintf(buf, sizeof(buf), "%#Lx", addr);
- printf(" %-34s |", buf);
-
- printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
- (unsigned long long)data->bytes_alloc,
- (unsigned long)data->bytes_alloc / data->hit,
- (unsigned long long)data->bytes_req,
- (unsigned long)data->bytes_req / data->hit,
- (unsigned long)data->hit,
- (unsigned long)data->pingpong,
- fragmentation(data->bytes_req, data->bytes_alloc));
-
- next = rb_next(next);
- }
-
- if (n_lines == -1)
- printf(" ... | ... | ... | ... | ... | ... \n");
-
- printf("%.102s\n", graph_dotted_line);
-}
-
-static void print_summary(void)
-{
- printf("\nSUMMARY\n=======\n");
- printf("Total bytes requested: %lu\n", total_requested);
- printf("Total bytes allocated: %lu\n", total_allocated);
- printf("Total bytes wasted on internal fragmentation: %lu\n",
- total_allocated - total_requested);
- printf("Internal fragmentation: %f%%\n",
- fragmentation(total_requested, total_allocated));
- printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
-}
-
-static void print_result(void)
-{
- if (caller_flag)
- __print_result(&root_caller_sorted, caller_lines, 1);
- if (alloc_flag)
- __print_result(&root_alloc_sorted, alloc_lines, 0);
- print_summary();
-}
-
-struct sort_dimension {
- const char name[20];
- sort_fn_t cmp;
- struct list_head list;
-};
-
-static LIST_HEAD(caller_sort);
-static LIST_HEAD(alloc_sort);
-
-static void sort_insert(struct rb_root *root, struct alloc_stat *data,
- struct list_head *sort_list)
-{
- struct rb_node **new = &(root->rb_node);
- struct rb_node *parent = NULL;
- struct sort_dimension *sort;
-
- while (*new) {
- struct alloc_stat *this;
- int cmp = 0;
-
- this = rb_entry(*new, struct alloc_stat, node);
- parent = *new;
-
- list_for_each_entry(sort, sort_list, list) {
- cmp = sort->cmp(data, this);
- if (cmp)
- break;
- }
-
- if (cmp > 0)
- new = &((*new)->rb_left);
- else
- new = &((*new)->rb_right);
- }
-
- rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
-}
-
-static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
- struct list_head *sort_list)
-{
- struct rb_node *node;
- struct alloc_stat *data;
-
- for (;;) {
- node = rb_first(root);
- if (!node)
- break;
-
- rb_erase(node, root);
- data = rb_entry(node, struct alloc_stat, node);
- sort_insert(root_sorted, data, sort_list);
- }
-}
-
-static void sort_result(void)
-{
- __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
- __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
-}
-
-static int __cmd_kmem(void)
-{
- setup_pager();
- read_events();
- sort_result();
- print_result();
-
- return 0;
-}
-
-static const char * const kmem_usage[] = {
-	"perf kmem [<options>] {record}",
- NULL
-};
-
-static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- if (l->ptr < r->ptr)
- return -1;
- else if (l->ptr > r->ptr)
- return 1;
- return 0;
-}
-
-static struct sort_dimension ptr_sort_dimension = {
- .name = "ptr",
- .cmp = ptr_cmp,
-};
-
-static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- if (l->call_site < r->call_site)
- return -1;
- else if (l->call_site > r->call_site)
- return 1;
- return 0;
-}
-
-static struct sort_dimension callsite_sort_dimension = {
- .name = "callsite",
- .cmp = callsite_cmp,
-};
-
-static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- if (l->hit < r->hit)
- return -1;
- else if (l->hit > r->hit)
- return 1;
- return 0;
-}
-
-static struct sort_dimension hit_sort_dimension = {
- .name = "hit",
- .cmp = hit_cmp,
-};
-
-static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- if (l->bytes_alloc < r->bytes_alloc)
- return -1;
- else if (l->bytes_alloc > r->bytes_alloc)
- return 1;
- return 0;
-}
-
-static struct sort_dimension bytes_sort_dimension = {
- .name = "bytes",
- .cmp = bytes_cmp,
-};
-
-static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- double x, y;
-
- x = fragmentation(l->bytes_req, l->bytes_alloc);
- y = fragmentation(r->bytes_req, r->bytes_alloc);
-
- if (x < y)
- return -1;
- else if (x > y)
- return 1;
- return 0;
-}
-
-static struct sort_dimension frag_sort_dimension = {
- .name = "frag",
- .cmp = frag_cmp,
-};
-
-static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
-{
- if (l->pingpong < r->pingpong)
- return -1;
- else if (l->pingpong > r->pingpong)
- return 1;
- return 0;
-}
-
-static struct sort_dimension pingpong_sort_dimension = {
- .name = "pingpong",
- .cmp = pingpong_cmp,
-};
-
-static struct sort_dimension *avail_sorts[] = {
- &ptr_sort_dimension,
- &callsite_sort_dimension,
- &hit_sort_dimension,
- &bytes_sort_dimension,
- &frag_sort_dimension,
- &pingpong_sort_dimension,
-};
-
-#define NUM_AVAIL_SORTS \
- (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
-
-static int sort_dimension__add(const char *tok, struct list_head *list)
-{
- struct sort_dimension *sort;
- int i;
-
- for (i = 0; i < NUM_AVAIL_SORTS; i++) {
- if (!strcmp(avail_sorts[i]->name, tok)) {
- sort = malloc(sizeof(*sort));
- if (!sort)
- die("malloc");
- memcpy(sort, avail_sorts[i], sizeof(*sort));
- list_add_tail(&sort->list, list);
- return 0;
- }
- }
-
- return -1;
-}
-
-static int setup_sorting(struct list_head *sort_list, const char *arg)
-{
- char *tok;
- char *str = strdup(arg);
-
- if (!str)
- die("strdup");
-
- while (true) {
- tok = strsep(&str, ",");
- if (!tok)
- break;
- if (sort_dimension__add(tok, sort_list) < 0) {
- error("Unknown --sort key: '%s'", tok);
- return -1;
- }
- }
-
- free(str);
- return 0;
-}
-
-static int parse_sort_opt(const struct option *opt __used,
- const char *arg, int unset __used)
-{
- if (!arg)
- return -1;
-
- if (caller_flag > alloc_flag)
- return setup_sorting(&caller_sort, arg);
- else
- return setup_sorting(&alloc_sort, arg);
-
- return 0;
-}
-
-static int parse_stat_opt(const struct option *opt __used,
- const char *arg, int unset __used)
-{
- if (!arg)
- return -1;
-
- if (strcmp(arg, "alloc") == 0)
- alloc_flag = (caller_flag + 1);
- else if (strcmp(arg, "caller") == 0)
- caller_flag = (alloc_flag + 1);
- else
- return -1;
- return 0;
-}
-
-static int parse_line_opt(const struct option *opt __used,
- const char *arg, int unset __used)
-{
- int lines;
-
- if (!arg)
- return -1;
-
- lines = strtoul(arg, NULL, 10);
-
- if (caller_flag > alloc_flag)
- caller_lines = lines;
- else
- alloc_lines = lines;
-
- return 0;
-}
-
-static const struct option kmem_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
-	OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
- "stat selector, Pass 'alloc' or 'caller'.",
- parse_stat_opt),
- OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
- "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
- parse_sort_opt),
- OPT_CALLBACK('l', "line", NULL, "num",
-		     "show n lines",
- parse_line_opt),
- OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
- OPT_END()
-};
-
-static const char *record_args[] = {
- "record",
- "-a",
- "-R",
- "-M",
- "-f",
- "-c", "1",
- "-e", "kmem:kmalloc",
- "-e", "kmem:kmalloc_node",
- "-e", "kmem:kfree",
- "-e", "kmem:kmem_cache_alloc",
- "-e", "kmem:kmem_cache_alloc_node",
- "-e", "kmem:kmem_cache_free",
-};
-
-static int __cmd_record(int argc, const char **argv)
-{
- unsigned int rec_argc, i, j;
- const char **rec_argv;
-
- rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- rec_argv = calloc(rec_argc + 1, sizeof(char *));
-
- for (i = 0; i < ARRAY_SIZE(record_args); i++)
- rec_argv[i] = strdup(record_args[i]);
-
- for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
-
- return cmd_record(i, rec_argv, NULL);
-}
-
-int cmd_kmem(int argc, const char **argv, const char *prefix __used)
-{
- symbol__init(0);
-
- argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
-
- if (argc && !strncmp(argv[0], "rec", 3))
- return __cmd_record(argc, argv);
- else if (argc)
- usage_with_options(kmem_usage, kmem_options);
-
- if (list_empty(&caller_sort))
- setup_sorting(&caller_sort, default_sort_order);
- if (list_empty(&alloc_sort))
- setup_sorting(&alloc_sort, default_sort_order);
-
- setup_cpunode_map();
-
- return __cmd_kmem();
-}
-
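
The tool deleted above keeps per-pointer and per-callsite allocation statistics in rbtrees and summarises slab overhead with its fragmentation() helper: the share of allocated bytes the callers never asked for. A tiny worked example of that metric; the 100/128 figures are made up:

    #include <stdio.h>

    /* Same formula as the removed tool:
     * fragmentation = 100 - 100 * bytes_requested / bytes_allocated */
    static double fragmentation(unsigned long n_req, unsigned long n_alloc)
    {
        if (n_alloc == 0)
            return 0.0;
        return 100.0 - (100.0 * n_req / n_alloc);
    }

    int main(void)
    {
        /* e.g. a kmalloc(100) served from the 128-byte slab cache */
        printf("%.3f%%\n", fragmentation(100, 128));    /* prints 21.875% */
        return 0;
    }
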
diff --git a/trunk/tools/perf/builtin-probe.c b/trunk/tools/perf/builtin-probe.c
index a58e11b7ea80..d78a3d945492 100644
--- a/trunk/tools/perf/builtin-probe.c
+++ b/trunk/tools/perf/builtin-probe.c
@@ -40,7 +40,6 @@
#include "util/parse-options.h"
#include "util/parse-events.h" /* For debugfs_path */
#include "util/probe-finder.h"
-#include "util/probe-event.h"
/* Default vmlinux search paths */
#define NR_SEARCH_PATH 3
@@ -52,6 +51,8 @@ const char *default_search_path[NR_SEARCH_PATH] = {
#define MAX_PATH_LEN 256
#define MAX_PROBES 128
+#define MAX_PROBE_ARGS 128
+#define PERFPROBE_GROUP "probe"
/* Session management structure */
static struct {
@@ -62,19 +63,152 @@ static struct {
struct probe_point probes[MAX_PROBES];
} session;
-static bool listing;
+#define semantic_error(msg ...) die("Semantic error: " msg)
+
+/* Parse probe point. Return 1 if return probe */
+static void parse_probe_point(char *arg, struct probe_point *pp)
+{
+ char *ptr, *tmp;
+ char c, nc = 0;
+ /*
+ *
+ * perf probe SRC:LN
+ * perf probe FUNC[+OFFS|%return][@SRC]
+ */
+
+ ptr = strpbrk(arg, ":+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+
+ /* Check arg is function or file and copy it */
+ if (strchr(arg, '.')) /* File */
+ pp->file = strdup(arg);
+ else /* Function */
+ pp->function = strdup(arg);
+ DIE_IF(pp->file == NULL && pp->function == NULL);
+
+ /* Parse other options */
+ while (ptr) {
+ arg = ptr;
+ c = nc;
+ ptr = strpbrk(arg, ":+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+ switch (c) {
+ case ':': /* Line number */
+ pp->line = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0')
+				semantic_error("There is non-digit character"
+ " in line number.");
+ break;
+ case '+': /* Byte offset from a symbol */
+ pp->offset = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0')
+				semantic_error("There is non-digit character"
+ " in offset.");
+ break;
+ case '@': /* File name */
+ if (pp->file)
+ semantic_error("SRC@SRC is not allowed.");
+ pp->file = strdup(arg);
+ DIE_IF(pp->file == NULL);
+ if (ptr)
+ semantic_error("@SRC must be the last "
+ "option.");
+ break;
+ case '%': /* Probe places */
+ if (strcmp(arg, "return") == 0) {
+ pp->retprobe = 1;
+ } else /* Others not supported yet */
+ semantic_error("%%%s is not supported.", arg);
+ break;
+ default:
+ DIE_IF("Program has a bug.");
+ break;
+ }
+ }
+
+ /* Exclusion check */
+ if (pp->line && pp->offset)
+ semantic_error("Offset can't be used with line number.");
+ if (!pp->line && pp->file && !pp->function)
+ semantic_error("File always requires line number.");
+ if (pp->offset && !pp->function)
+ semantic_error("Offset requires an entry function.");
+ if (pp->retprobe && !pp->function)
+ semantic_error("Return probe requires an entry function.");
+ if ((pp->offset || pp->line) && pp->retprobe)
+ semantic_error("Offset/Line can't be used with return probe.");
+
+ pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n",
+ pp->function, pp->file, pp->line, pp->offset, pp->retprobe);
+}
/* Parse an event definition. Note that any error must die. */
static void parse_probe_event(const char *str)
{
+ char *argv[MAX_PROBE_ARGS + 2]; /* Event + probe + args */
+ int argc, i;
struct probe_point *pp = &session.probes[session.nr_probe];
pr_debug("probe-definition(%d): %s\n", session.nr_probe, str);
if (++session.nr_probe == MAX_PROBES)
- die("Too many probes (> %d) are specified.", MAX_PROBES);
+ semantic_error("Too many probes");
+
+ /* Separate arguments, similar to argv_split */
+ argc = 0;
+ do {
+ /* Skip separators */
+ while (isspace(*str))
+ str++;
+
+ /* Add an argument */
+ if (*str != '\0') {
+ const char *s = str;
+
+ /* Skip the argument */
+ while (!isspace(*str) && *str != '\0')
+ str++;
+
+ /* Duplicate the argument */
+ argv[argc] = strndup(s, str - s);
+ if (argv[argc] == NULL)
+ die("strndup");
+ if (++argc == MAX_PROBE_ARGS)
+ semantic_error("Too many arguments");
+ pr_debug("argv[%d]=%s\n", argc, argv[argc - 1]);
+ }
+ } while (*str != '\0');
+ if (!argc)
+ semantic_error("An empty argument.");
+
+ /* Parse probe point */
+ parse_probe_point(argv[0], pp);
+ free(argv[0]);
+ if (pp->file || pp->line)
+ session.need_dwarf = 1;
+
+ /* Copy arguments */
+ pp->nr_args = argc - 1;
+ if (pp->nr_args > 0) {
+ pp->args = (char **)malloc(sizeof(char *) * pp->nr_args);
+ if (!pp->args)
+ die("malloc");
+ memcpy(pp->args, &argv[1], sizeof(char *) * pp->nr_args);
+ }
- /* Parse perf-probe event into probe_point */
- session.need_dwarf = parse_perf_probe_event(str, pp);
+ /* Ensure return probe has no C argument */
+ for (i = 0; i < pp->nr_args; i++)
+ if (is_c_varname(pp->args[i])) {
+ if (pp->retprobe)
+ semantic_error("You can't specify local"
+ " variable for kretprobe");
+ session.need_dwarf = 1;
+ }
pr_debug("%d arguments\n", pp->nr_args);
}
@@ -121,7 +255,6 @@ static int open_default_vmlinux(void)
 static const char * const probe_usage[] = {
 	"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
 	"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
- "perf probe --list",
NULL
};
@@ -132,7 +265,6 @@ static const struct option options[] = {
OPT_STRING('k', "vmlinux", &session.vmlinux, "file",
"vmlinux/module pathname"),
#endif
- OPT_BOOLEAN('l', "list", &listing, "list up current probes"),
OPT_CALLBACK('a', "add", NULL,
#ifdef NO_LIBDWARF
"FUNC[+OFFS|%return] [ARG ...]",
@@ -153,38 +285,73 @@ static const struct option options[] = {
"\t\tALN:\tAbsolute line number in file.\n"
"\t\tARG:\tProbe argument (local variable name or\n"
#endif
- "\t\t\tkprobe-tracer argument format.)\n",
+ "\t\t\tkprobe-tracer argument format is supported.)\n",
opt_add_probe_event),
OPT_END()
};
+static int write_new_event(int fd, const char *buf)
+{
+ int ret;
+
+ ret = write(fd, buf, strlen(buf));
+ if (ret <= 0)
+ die("Failed to create event.");
+ else
+ printf("Added new event: %s\n", buf);
+
+ return ret;
+}
+
+#define MAX_CMDLEN 256
+
+static int synthesize_probe_event(struct probe_point *pp)
+{
+ char *buf;
+ int i, len, ret;
+ pp->probes[0] = buf = (char *)calloc(MAX_CMDLEN, sizeof(char));
+ if (!buf)
+ die("Failed to allocate memory by calloc.");
+ ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset);
+ if (ret <= 0 || ret >= MAX_CMDLEN)
+ goto error;
+ len = ret;
+
+ for (i = 0; i < pp->nr_args; i++) {
+ ret = snprintf(&buf[len], MAX_CMDLEN - len, " %s",
+ pp->args[i]);
+ if (ret <= 0 || ret >= MAX_CMDLEN - len)
+ goto error;
+ len += ret;
+ }
+ pp->found = 1;
+ return pp->found;
+error:
+ free(pp->probes[0]);
+ if (ret > 0)
+ ret = -E2BIG;
+ return ret;
+}
+
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
- int i, j, ret;
-#ifndef NO_LIBDWARF
- int fd;
-#endif
+ int i, j, fd, ret;
struct probe_point *pp;
+ char buf[MAX_CMDLEN];
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
for (i = 0; i < argc; i++)
parse_probe_event(argv[i]);
- if ((session.nr_probe == 0 && !listing) ||
- (session.nr_probe != 0 && listing))
+ if (session.nr_probe == 0)
usage_with_options(probe_usage, options);
- if (listing) {
- show_perf_probe_events();
- return 0;
- }
-
if (session.need_dwarf)
#ifdef NO_LIBDWARF
- die("Debuginfo-analysis is not supported");
+ semantic_error("Debuginfo-analysis is not supported");
#else /* !NO_LIBDWARF */
- pr_debug("Some probes require debuginfo.\n");
+ pr_info("Some probes require debuginfo.\n");
if (session.vmlinux)
fd = open(session.vmlinux, O_RDONLY);
@@ -228,15 +395,41 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
if (pp->found) /* This probe is already found. */
continue;
- ret = synthesize_trace_kprobe_event(pp);
+ ret = synthesize_probe_event(pp);
if (ret == -E2BIG)
- die("probe point definition becomes too long.");
+ semantic_error("probe point is too long.");
else if (ret < 0)
die("Failed to synthesize a probe point.");
}
 	/* Setting up probe points */
- add_trace_kprobe_events(session.probes, session.nr_probe);
+ snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path);
+ fd = open(buf, O_WRONLY, O_APPEND);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ die("kprobe_events file does not exist - please rebuild with CONFIG_KPROBE_TRACER.");
+ else
+ die("Could not open kprobe_events file: %s",
+ strerror(errno));
+ }
+ for (j = 0; j < session.nr_probe; j++) {
+ pp = &session.probes[j];
+ if (pp->found == 1) {
+ snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x %s\n",
+ pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP,
+ pp->function, pp->offset, pp->probes[0]);
+ write_new_event(fd, buf);
+ } else
+ for (i = 0; i < pp->found; i++) {
+ snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x_%d %s\n",
+ pp->retprobe ? 'r' : 'p',
+ PERFPROBE_GROUP,
+ pp->function, pp->offset, i,
+ pp->probes[0]);
+ write_new_event(fd, buf);
+ }
+ }
+ close(fd);
return 0;
}
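
cmd_probe() now appends its synthesized definitions straight to the ftrace kprobe_events file, one "p:group/event symbol+offset args" line per probe ('r:' for return probes, group PERFPROBE_GROUP = "probe"). A minimal sketch of the same write from a standalone program; the debugfs mount point and the schedule+0 example probe are assumptions, not something the patch hard-codes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* What cmd_probe() would synthesize for "perf probe schedule" */
        const char *def = "p:probe/schedule_0 schedule+0\n";
        int fd = open("/sys/kernel/debug/tracing/kprobe_events",
                      O_WRONLY | O_APPEND);

        if (fd < 0) {
            perror("open kprobe_events");
            return 1;
        }
        if (write(fd, def, strlen(def)) <= 0) {
            perror("write");
            close(fd);
            return 1;
        }
        printf("Added new event: %s", def);
        close(fd);
        return 0;
    }

Reading the same file back lists the registered probes, and writing "-:probe/schedule_0" removes the event again.
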
diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c
index 0e519c667e3a..82260c56db3d 100644
--- a/trunk/tools/perf/builtin-record.c
+++ b/trunk/tools/perf/builtin-record.c
@@ -307,12 +307,6 @@ static void create_counter(int counter, int cpu, pid_t pid)
printf("\n");
error("perfcounter syscall returned with %d (%s)\n",
fd[nr_cpu][counter], strerror(err));
-
-#if defined(__i386__) || defined(__x86_64__)
- if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
- die("No hardware sampling interrupt available. No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.\n");
-#endif
-
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1);
}
@@ -406,7 +400,7 @@ static int __cmd_record(int argc, const char **argv)
struct stat st;
pid_t pid = 0;
int flags;
- int err;
+ int ret;
unsigned long waking = 0;
page_size = sysconf(_SC_PAGE_SIZE);
@@ -440,18 +434,16 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- header = perf_header__new();
+ if (!file_new)
+ header = perf_header__read(output);
+ else
+ header = perf_header__new();
+
if (header == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
- if (!file_new) {
- err = perf_header__read(header, output);
- if (err < 0)
- return err;
- }
-
if (raw_samples) {
perf_header__set_feat(header, HEADER_TRACE_INFO);
} else {
@@ -480,11 +472,8 @@ static int __cmd_record(int argc, const char **argv)
}
}
- if (file_new) {
- err = perf_header__write(header, output, false);
- if (err < 0)
- return err;
- }
+ if (file_new)
+ perf_header__write(header, output, false);
if (!system_wide)
event__synthesize_thread(pid, process_synthesized_event);
@@ -538,7 +527,7 @@ static int __cmd_record(int argc, const char **argv)
if (hits == samples) {
if (done)
break;
- err = poll(event_array, nr_poll, -1);
+ ret = poll(event_array, nr_poll, -1);
waking++;
}
diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c
index 383c4ab4f9af..1a806d5f05cf 100644
--- a/trunk/tools/perf/builtin-report.c
+++ b/trunk/tools/perf/builtin-report.c
@@ -52,12 +52,13 @@ static int exclude_other = 1;
static char callchain_default_opt[] = "fractal,0.5";
+static char *cwd;
+static int cwdlen;
+
static struct perf_header *header;
static u64 sample_type;
-struct symbol_conf symbol_conf;
-
static size_t
callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -408,6 +409,55 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm)
return 0;
}
+
+static struct symbol *
+resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp)
+{
+ struct map *map = mapp ? *mapp : NULL;
+ u64 ip = *ipp;
+
+ if (map)
+ goto got_map;
+
+ if (!thread)
+ return NULL;
+
+ map = thread__find_map(thread, ip);
+ if (map != NULL) {
+ /*
+ * We have to do this here as we may have a dso
+ * with no symbol hit that has a name longer than
+ * the ones with symbols sampled.
+ */
+ if (!sort_dso.elide && !map->dso->slen_calculated)
+ dso__calc_col_width(map->dso);
+
+ if (mapp)
+ *mapp = map;
+got_map:
+ ip = map->map_ip(map, ip);
+ } else {
+ /*
+ * If this is outside of all known maps,
+ * and is a negative address, try to look it
+ * up in the kernel dso, as it might be a
+ * vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in
+ * the "[vdso]" dso, but for now lets use the old
+ * trick of looking in the whole kernel symbol list.
+ */
+ if ((long long)ip < 0)
+ return kernel_maps__find_symbol(ip, mapp);
+ }
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "");
+ dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
+ *ipp = ip;
+
+ return map ? map__find_symbol(map, ip, NULL) : NULL;
+}
+
static int call__match(struct symbol *sym)
{
if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
@@ -416,11 +466,11 @@ static int call__match(struct symbol *sym)
return 0;
}
-static struct symbol **resolve_callchain(struct thread *thread,
+static struct symbol **resolve_callchain(struct thread *thread, struct map *map,
struct ip_callchain *chain,
struct symbol **parent)
{
- u8 cpumode = PERF_RECORD_MISC_USER;
+ u64 context = PERF_CONTEXT_MAX;
struct symbol **syms = NULL;
unsigned int i;
@@ -434,31 +484,30 @@ static struct symbol **resolve_callchain(struct thread *thread,
for (i = 0; i < chain->nr; i++) {
u64 ip = chain->ips[i];
- struct addr_location al;
+ struct symbol *sym = NULL;
if (ip >= PERF_CONTEXT_MAX) {
- switch (ip) {
- case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
- case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL; break;
- case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER; break;
- default:
- break;
- }
+ context = ip;
continue;
}
- thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
- ip, &al, NULL);
- if (al.sym != NULL) {
- if (sort__has_parent && !*parent &&
- call__match(al.sym))
- *parent = al.sym;
+ switch (context) {
+ case PERF_CONTEXT_HV:
+ break;
+ case PERF_CONTEXT_KERNEL:
+ sym = kernel_maps__find_symbol(ip, &map);
+ break;
+ default:
+ sym = resolve_symbol(thread, &map, &ip);
+ break;
+ }
+
+ if (sym) {
+ if (sort__has_parent && !*parent && call__match(sym))
+ *parent = sym;
if (!callchain)
break;
- syms[i] = al.sym;
+ syms[i] = sym;
}
}
@@ -469,17 +518,20 @@ static struct symbol **resolve_callchain(struct thread *thread,
* collect histogram counts
*/
-static int hist_entry__add(struct addr_location *al,
- struct ip_callchain *chain, u64 count)
+static int
+hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym, u64 ip, struct ip_callchain *chain,
+ char level, u64 count)
{
struct symbol **syms = NULL, *parent = NULL;
bool hit;
struct hist_entry *he;
if ((sort__has_parent || callchain) && chain)
- syms = resolve_callchain(al->thread, chain, &parent);
+ syms = resolve_callchain(thread, map, chain, &parent);
- he = __hist_entry__add(al, parent, count, &hit);
+ he = __hist_entry__add(thread, map, sym, parent,
+ ip, count, level, &hit);
if (he == NULL)
return -ENOMEM;
@@ -603,14 +655,17 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
return 0;
}
-static int process_sample_event(event_t *event)
+static int
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
+ char level;
+ struct symbol *sym = NULL;
u64 ip = event->ip.ip;
u64 period = 1;
+ struct map *map = NULL;
void *more_data = event->ip.__more_data;
struct ip_callchain *chain = NULL;
int cpumode;
- struct addr_location al;
struct thread *thread = threads__findnew(event->ip.pid);
if (sample_type & PERF_SAMPLE_PERIOD) {
@@ -618,7 +673,9 @@ static int process_sample_event(event_t *event)
more_data += sizeof(u64);
}
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
event->header.misc,
event->ip.pid, event->ip.tid,
(void *)(long)ip,
@@ -656,51 +713,136 @@ static int process_sample_event(event_t *event)
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- thread__find_addr_location(thread, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
- /*
- * We have to do this here as we may have a dso with no symbol hit that
- * has a name longer than the ones with symbols sampled.
- */
- if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated)
- dso__calc_col_width(al.map->dso);
+ if (cpumode == PERF_RECORD_MISC_KERNEL) {
+ level = 'k';
+ sym = kernel_maps__find_symbol(ip, &map);
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "");
+ } else if (cpumode == PERF_RECORD_MISC_USER) {
+ level = '.';
+ sym = resolve_symbol(thread, &map, &ip);
+
+ } else {
+ level = 'H';
+ dump_printf(" ...... dso: [hypervisor]\n");
+ }
if (dso_list &&
- (!al.map || !al.map->dso ||
- !(strlist__has_entry(dso_list, al.map->dso->short_name) ||
- (al.map->dso->short_name != al.map->dso->long_name &&
- strlist__has_entry(dso_list, al.map->dso->long_name)))))
+ (!map || !map->dso ||
+ !(strlist__has_entry(dso_list, map->dso->short_name) ||
+ (map->dso->short_name != map->dso->long_name &&
+ strlist__has_entry(dso_list, map->dso->long_name)))))
return 0;
- if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name))
+ if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
return 0;
- if (hist_entry__add(&al, chain, period)) {
+ if (hist_entry__add(thread, map, sym, ip,
+ chain, level, period)) {
pr_debug("problem incrementing symbol count, skipping event\n");
return -1;
}
- event__stats.total += period;
+ total += period;
return 0;
}
-static int process_comm_event(event_t *event)
+static int
+process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct map *map = map__new(&event->mmap, cwd, cwdlen);
+ struct thread *thread = threads__findnew(event->mmap.pid);
+
+ dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->mmap.pid,
+ event->mmap.tid,
+ (void *)(long)event->mmap.start,
+ (void *)(long)event->mmap.len,
+ (void *)(long)event->mmap.pgoff,
+ event->mmap.filename);
+
+ if (thread == NULL || map == NULL) {
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
+ return 0;
+ }
+
+ thread__insert_map(thread, map);
+ total_mmap++;
+
+ return 0;
+}
+
+static int
+process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
struct thread *thread = threads__findnew(event->comm.pid);
- dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid);
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->comm.comm, event->comm.pid);
if (thread == NULL ||
thread__set_comm_adjust(thread, event->comm.comm)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
+ total_comm++;
+
+ return 0;
+}
+
+static int
+process_task_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct thread *thread = threads__findnew(event->fork.pid);
+ struct thread *parent = threads__findnew(event->fork.ppid);
+
+ dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+
+ /*
+ * A thread clone will have the same PID for both
+ * parent and child.
+ */
+ if (thread == parent)
+ return 0;
+
+ if (event->header.type == PERF_RECORD_EXIT)
+ return 0;
+
+ if (!thread || !parent || thread__fork(thread, parent)) {
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
+ return -1;
+ }
+ total_fork++;
+
+ return 0;
+}
+
+static int
+process_lost_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->lost.id,
+ event->lost.lost);
+
+ total_lost += event->lost.lost;
return 0;
}
-static int process_read_event(event_t *event)
+static int
+process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
struct perf_event_attr *attr;
@@ -716,9 +858,14 @@ static int process_read_event(event_t *event)
event->read.value);
}
- dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
- attr ? __event_name(attr->type, attr->config) : "FAIL",
- event->read.value);
+ dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->read.pid,
+ event->read.tid,
+ attr ? __event_name(attr->type, attr->config)
+ : "FAIL",
+ event->read.value);
return 0;
}
@@ -754,11 +901,11 @@ static int sample_type_check(u64 type)
static struct perf_file_handler file_handler = {
.process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
+ .process_mmap_event = process_mmap_event,
.process_comm_event = process_comm_event,
- .process_exit_event = event__process_task,
- .process_fork_event = event__process_task,
- .process_lost_event = event__process_lost,
+ .process_exit_event = process_task_event,
+ .process_fork_event = process_task_event,
+ .process_lost_event = process_lost_event,
.process_read_event = process_read_event,
.sample_type_check = sample_type_check,
};
@@ -777,15 +924,20 @@ static int __cmd_report(void)
register_perf_file_handler(&file_handler);
- ret = mmap_dispatch_perf_file(&header, input_name, force,
- full_paths, &event__cwdlen, &event__cwd);
+ ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths,
+ &cwdlen, &cwd);
if (ret)
return ret;
- if (dump_trace) {
- event__print_totals();
+ dump_printf(" IP events: %10ld\n", total);
+ dump_printf(" mmap events: %10ld\n", total_mmap);
+ dump_printf(" comm events: %10ld\n", total_comm);
+ dump_printf(" fork events: %10ld\n", total_fork);
+ dump_printf(" lost events: %10ld\n", total_lost);
+ dump_printf(" unknown events: %10ld\n", file_handler.total_unknown);
+
+ if (dump_trace)
return 0;
- }
if (verbose > 3)
threads__fprintf(stdout);
@@ -794,8 +946,8 @@ static int __cmd_report(void)
dsos__fprintf(stdout);
collapse__resort();
- output__resort(event__stats.total);
- output__fprintf(stdout, event__stats.total);
+ output__resort(total);
+ output__fprintf(stdout, total);
if (show_threads)
perf_read_values_destroy(&show_threads_values);
@@ -869,10 +1021,9 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
- "file", "vmlinux pathname"),
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
- OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ OPT_BOOLEAN('m', "modules", &modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
"Show a column with the number of samples"),
@@ -942,8 +1093,7 @@ static void setup_list(struct strlist **list, const char *list_str,
int cmd_report(int argc, const char **argv, const char *prefix __used)
{
- if (symbol__init(&symbol_conf) < 0)
- return -1;
+ symbol__init(0);
argc = parse_options(argc, argv, options, report_usage, 0);
diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c
index 26b782f26ee1..df44b756cecc 100644
--- a/trunk/tools/perf/builtin-sched.c
+++ b/trunk/tools/perf/builtin-sched.c
@@ -22,6 +22,8 @@
static char const *input_name = "perf.data";
+static unsigned long total_comm = 0;
+
static struct perf_header *header;
static u64 sample_type;
@@ -30,6 +32,9 @@ static char *sort_order = default_sort_order;
static int profile_cpu = -1;
+static char *cwd;
+static int cwdlen;
+
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
@@ -220,7 +225,7 @@ static void calibrate_sleep_measurement_overhead(void)
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
- struct sched_atom *event = zalloc(sizeof(*event));
+ struct sched_atom *event = calloc(1, sizeof(*event));
unsigned long idx = task->nr_events;
size_t size;
@@ -288,7 +293,7 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
return;
}
- wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
+ wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
sem_init(wakee_event->wait_sem, 0, 0);
wakee_event->specific_wait = 1;
event->wait_sem = wakee_event->wait_sem;
@@ -318,7 +323,7 @@ static struct task_desc *register_pid(unsigned long pid, const char *comm)
if (task)
return task;
- task = zalloc(sizeof(*task));
+ task = calloc(1, sizeof(*task));
task->pid = pid;
task->nr = nr_tasks;
strcpy(task->comm, comm);
@@ -628,6 +633,27 @@ static void test_calibrations(void)
printf("the sleep test took %Ld nsecs\n", T1-T0);
}
+static int
+process_comm_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct thread *thread = threads__findnew(event->comm.tid);
+
+ dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->comm.comm, event->comm.pid);
+
+ if (thread == NULL ||
+ thread__set_comm(thread, event->comm.comm)) {
+ dump_printf("problem processing perf_event_comm, skipping event.\n");
+ return -1;
+ }
+ total_comm++;
+
+ return 0;
+}
+
+
struct raw_event_sample {
u32 size;
char data[0];
@@ -936,7 +962,9 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
static void thread_atoms_insert(struct thread *thread)
{
- struct work_atoms *atoms = zalloc(sizeof(*atoms));
+ struct work_atoms *atoms;
+
+ atoms = calloc(sizeof(*atoms), 1);
if (!atoms)
die("No memory");
@@ -968,7 +996,9 @@ add_sched_out_event(struct work_atoms *atoms,
char run_state,
u64 timestamp)
{
- struct work_atom *atom = zalloc(sizeof(*atom));
+ struct work_atom *atom;
+
+ atom = calloc(sizeof(*atom), 1);
if (!atom)
die("Non memory");
@@ -1596,7 +1626,8 @@ process_raw_event(event_t *raw_event __used, void *more_data,
process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
}
-static int process_sample_event(event_t *event)
+static int
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
struct thread *thread;
u64 ip = event->ip.ip;
@@ -1626,7 +1657,9 @@ static int process_sample_event(event_t *event)
more_data += sizeof(u64);
}
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
event->header.misc,
event->ip.pid, event->ip.tid,
(void *)(long)ip,
@@ -1648,7 +1681,10 @@ static int process_sample_event(event_t *event)
return 0;
}
-static int process_lost_event(event_t *event __used)
+static int
+process_lost_event(event_t *event __used,
+ unsigned long offset __used,
+ unsigned long head __used)
{
nr_lost_chunks++;
nr_lost_events += event->lost.lost;
@@ -1672,7 +1708,7 @@ static int sample_type_check(u64 type)
static struct perf_file_handler file_handler = {
.process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
+ .process_comm_event = process_comm_event,
.process_lost_event = process_lost_event,
.sample_type_check = sample_type_check,
};
@@ -1682,8 +1718,7 @@ static int read_events(void)
register_idle_thread();
register_perf_file_handler(&file_handler);
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
static void print_bad_events(void)
diff --git a/trunk/tools/perf/builtin-timechart.c b/trunk/tools/perf/builtin-timechart.c
index cb58b6605fcc..665877e4a944 100644
--- a/trunk/tools/perf/builtin-timechart.c
+++ b/trunk/tools/perf/builtin-timechart.c
@@ -29,14 +29,14 @@
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
-#include "util/event.h"
-#include "util/data_map.h"
#include "util/svghelper.h"
static char const *input_name = "perf.data";
static char const *output_name = "output.svg";
+static unsigned long page_size;
+static unsigned long mmap_window = 32;
static u64 sample_type;
static unsigned int numcpus;
@@ -49,6 +49,8 @@ static u64 first_time, last_time;
static int power_only;
+static struct perf_header *header;
+
struct per_pid;
struct per_pidcomm;
@@ -154,9 +156,9 @@ struct sample_wrapper *all_samples;
struct process_filter;
struct process_filter {
- char *name;
- int pid;
- struct process_filter *next;
+ char *name;
+ int pid;
+ struct process_filter *next;
};
static struct process_filter *process_filter;
@@ -1043,6 +1045,36 @@ static void write_svg_file(const char *filename)
svg_close();
}
+static int
+process_event(event_t *event)
+{
+
+ switch (event->header.type) {
+
+ case PERF_RECORD_COMM:
+ return process_comm_event(event);
+ case PERF_RECORD_FORK:
+ return process_fork_event(event);
+ case PERF_RECORD_EXIT:
+ return process_exit_event(event);
+ case PERF_RECORD_SAMPLE:
+ return queue_sample_event(event);
+
+ /*
+	 * We don't process them right now but they are fine:
+ */
+ case PERF_RECORD_MMAP:
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
+ return 0;
+
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
static void process_samples(void)
{
struct sample_wrapper *cursor;
@@ -1058,38 +1090,105 @@ static void process_samples(void)
}
}
-static int sample_type_check(u64 type)
+
+static int __cmd_timechart(void)
{
- sample_type = type;
+ int ret, rc = EXIT_FAILURE;
+ unsigned long offset = 0;
+ unsigned long head, shift;
+ struct stat statbuf;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+ int input;
+
+ input = open(input_name, O_RDONLY);
+ if (input < 0) {
+ fprintf(stderr, " failed to open file: %s", input_name);
+ if (!strcmp(input_name, "perf.data"))
+ fprintf(stderr, " (try 'perf record' first)");
+ fprintf(stderr, "\n");
+ exit(-1);
+ }
- if (!(sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr, "No trace samples found in the file.\n"
- "Have you used 'perf timechart record' to record it?\n");
- return -1;
+ ret = fstat(input, &statbuf);
+ if (ret < 0) {
+ perror("failed to stat file");
+ exit(-1);
}
- return 0;
-}
+ if (!statbuf.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
+ }
-static struct perf_file_handler file_handler = {
- .process_comm_event = process_comm_event,
- .process_fork_event = process_fork_event,
- .process_exit_event = process_exit_event,
- .process_sample_event = queue_sample_event,
- .sample_type_check = sample_type_check,
-};
+ header = perf_header__read(input);
+ head = header->data_offset;
-static int __cmd_timechart(void)
-{
- struct perf_header *header;
- int ret;
+ sample_type = perf_header__sample_type(header);
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
+remap:
+ buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+ MAP_SHARED, input, offset);
+ if (buf == MAP_FAILED) {
+ perror("failed to mmap file");
+ exit(-1);
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ size = event->header.size;
+ if (!size)
+ size = 8;
- register_perf_file_handler(&file_handler);
+ if (head + event->header.size >= page_size * mmap_window) {
+ int ret2;
+
+ shift = page_size * (head / page_size);
+
+ ret2 = munmap(buf, page_size * mmap_window);
+ assert(ret2 == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ if (!size || process_event(event) < 0) {
+ pr_warning("%p [%p]: skipping unknown header type: %d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+		 * increment a single u64 in the hope of catching on again 'soon'.
+ */
+
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head >= header->data_offset + header->data_size)
+ goto done;
+
+ if (offset + head < (unsigned long)statbuf.st_size)
+ goto more;
+
+done:
+ rc = EXIT_SUCCESS;
+ close(input);
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
- if (ret)
- return EXIT_FAILURE;
process_samples();
@@ -1102,7 +1201,7 @@ static int __cmd_timechart(void)
pr_info("Written %2.1f seconds of trace to %s.\n",
(last_time - first_time) / 1000000000.0, output_name);
- return EXIT_SUCCESS;
+ return rc;
}
static const char * const timechart_usage[] = {
@@ -1169,6 +1268,8 @@ int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
symbol__init(0);
+ page_size = getpagesize();
+
argc = parse_options(argc, argv, options, timechart_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
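The restored __cmd_timechart() reads perf.data through a sliding mmap window instead of the data_map dispatcher. A rough standalone sketch of that pattern follows; the record_header struct, the helper names, and the main() that walks the whole file from offset 0 are simplifications for illustration only — the real code takes data_offset and data_size from the perf file header.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

struct record_header {		/* stand-in for perf's event header */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

static void walk(int fd, unsigned long data_offset, unsigned long data_size)
{
	unsigned long page_size = sysconf(_SC_PAGE_SIZE);
	unsigned long window = page_size * 32;
	unsigned long offset = 0, head = data_offset, shift;
	char *buf;

	/* mmap offsets must be page aligned, so split head into offset+head */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;
remap:
	buf = mmap(NULL, window, PROT_READ, MAP_SHARED, fd, offset);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return;
	}
more:
	{
		struct record_header *hdr = (void *)(buf + head);
		uint16_t size = hdr->size ? hdr->size : 8;

		if (head + size >= window) {
			/* record crosses the window: slide the window forward */
			shift = page_size * (head / page_size);
			munmap(buf, window);
			offset += shift;
			head -= shift;
			goto remap;
		}

		printf("record type %u, %u bytes\n", hdr->type, (unsigned int)size);
		head += size;
		if (offset + head < data_offset + data_size)
			goto more;
	}
	munmap(buf, window);
}

int main(int argc, char **argv)
{
	struct stat st;
	int fd = open(argc > 1 ? argv[1] : "perf.data", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("perf.data");
		return 1;
	}
	/* a real reader takes data_offset/data_size from the file header;
	 * walking from 0 over the whole file is only for illustration */
	walk(fd, 0, (unsigned long)st.st_size);
	close(fd);
	return 0;
}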
diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c
index e0a374d0e43a..89b7f68a1799 100644
--- a/trunk/tools/perf/builtin-top.c
+++ b/trunk/tools/perf/builtin-top.c
@@ -78,8 +78,6 @@ static int dump_symtab = 0;
static bool hide_kernel_symbols = false;
static bool hide_user_symbols = false;
-static struct winsize winsize;
-struct symbol_conf symbol_conf;
/*
* Source
@@ -102,75 +100,58 @@ static int display_weighted = -1;
* Symbols
*/
-struct sym_entry_source {
- struct source_line *source;
- struct source_line *lines;
- struct source_line **lines_tail;
- pthread_mutex_t lock;
-};
-
struct sym_entry {
struct rb_node rb_node;
struct list_head node;
+ unsigned long count[MAX_COUNTERS];
unsigned long snap_count;
double weight;
int skip;
- u16 name_len;
u8 origin;
struct map *map;
- struct sym_entry_source *src;
- unsigned long count[0];
+ struct source_line *source;
+ struct source_line *lines;
+ struct source_line **lines_tail;
+ pthread_mutex_t source_lock;
};
/*
* Source functions
*/
-static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
+/* most GUI terminals set LINES (although some don't export it) */
+static int term_rows(void)
{
- return ((void *)self) + symbol_conf.priv_size;
-}
+ char *lines_string = getenv("LINES");
+ int n_lines;
-static void get_term_dimensions(struct winsize *ws)
-{
- char *s = getenv("LINES");
-
- if (s != NULL) {
- ws->ws_row = atoi(s);
- s = getenv("COLUMNS");
- if (s != NULL) {
- ws->ws_col = atoi(s);
- if (ws->ws_row && ws->ws_col)
- return;
- }
- }
+ if (lines_string && (n_lines = atoi(lines_string)) > 0)
+ return n_lines;
#ifdef TIOCGWINSZ
- if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
- ws->ws_row && ws->ws_col)
- return;
+ else {
+ struct winsize ws;
+ if (!ioctl(1, TIOCGWINSZ, &ws) && ws.ws_row)
+ return ws.ws_row;
+ }
#endif
- ws->ws_row = 25;
- ws->ws_col = 80;
+ return 25;
}
-static void update_print_entries(struct winsize *ws)
+static void update_print_entries(void)
{
- print_entries = ws->ws_row;
-
+ print_entries = term_rows();
if (print_entries > 9)
print_entries -= 9;
}
static void sig_winch_handler(int sig __used)
{
- get_term_dimensions(&winsize);
- update_print_entries(&winsize);
+ update_print_entries();
}
static void parse_source(struct sym_entry *syme)
{
struct symbol *sym;
- struct sym_entry_source *source;
struct map *map;
FILE *file;
char command[PATH_MAX*2];
@@ -180,21 +161,12 @@ static void parse_source(struct sym_entry *syme)
if (!syme)
return;
- if (syme->src == NULL) {
- syme->src = zalloc(sizeof(*source));
- if (syme->src == NULL)
- return;
- pthread_mutex_init(&syme->src->lock, NULL);
- }
-
- source = syme->src;
-
- if (source->lines) {
- pthread_mutex_lock(&source->lock);
+ if (syme->lines) {
+ pthread_mutex_lock(&syme->source_lock);
goto out_assign;
}
- sym = sym_entry__symbol(syme);
+ sym = (struct symbol *)(syme + 1);
map = syme->map;
path = map->dso->long_name;
@@ -210,8 +182,8 @@ static void parse_source(struct sym_entry *syme)
if (!file)
return;
- pthread_mutex_lock(&source->lock);
- source->lines_tail = &source->lines;
+ pthread_mutex_lock(&syme->source_lock);
+ syme->lines_tail = &syme->lines;
while (!feof(file)) {
struct source_line *src;
size_t dummy = 0;
@@ -231,8 +203,8 @@ static void parse_source(struct sym_entry *syme)
*c = 0;
src->next = NULL;
- *source->lines_tail = src;
- source->lines_tail = &src->next;
+ *syme->lines_tail = src;
+ syme->lines_tail = &src->next;
if (strlen(src->line)>8 && src->line[8] == ':') {
src->eip = strtoull(src->line, NULL, 16);
@@ -246,7 +218,7 @@ static void parse_source(struct sym_entry *syme)
pclose(file);
out_assign:
sym_filter_entry = syme;
- pthread_mutex_unlock(&source->lock);
+ pthread_mutex_unlock(&syme->source_lock);
}
static void __zero_source_counters(struct sym_entry *syme)
@@ -254,7 +226,7 @@ static void __zero_source_counters(struct sym_entry *syme)
int i;
struct source_line *line;
- line = syme->src->lines;
+ line = syme->lines;
while (line) {
for (i = 0; i < nr_counters; i++)
line->count[i] = 0;
@@ -269,13 +241,13 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
if (syme != sym_filter_entry)
return;
- if (pthread_mutex_trylock(&syme->src->lock))
+ if (pthread_mutex_trylock(&syme->source_lock))
return;
- if (syme->src == NULL || syme->src->source == NULL)
+ if (!syme->source)
goto out_unlock;
- for (line = syme->src->lines; line; line = line->next) {
+ for (line = syme->lines; line; line = line->next) {
if (line->eip == ip) {
line->count[counter]++;
break;
@@ -284,25 +256,25 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
break;
}
out_unlock:
- pthread_mutex_unlock(&syme->src->lock);
+ pthread_mutex_unlock(&syme->source_lock);
}
static void lookup_sym_source(struct sym_entry *syme)
{
- struct symbol *symbol = sym_entry__symbol(syme);
+ struct symbol *symbol = (struct symbol *)(syme + 1);
struct source_line *line;
char pattern[PATH_MAX];
sprintf(pattern, "<%s>:", symbol->name);
- pthread_mutex_lock(&syme->src->lock);
- for (line = syme->src->lines; line; line = line->next) {
+ pthread_mutex_lock(&syme->source_lock);
+ for (line = syme->lines; line; line = line->next) {
if (strstr(line->line, pattern)) {
- syme->src->source = line;
+ syme->source = line;
break;
}
}
- pthread_mutex_unlock(&syme->src->lock);
+ pthread_mutex_unlock(&syme->source_lock);
}
static void show_lines(struct source_line *queue, int count, int total)
@@ -332,24 +304,24 @@ static void show_details(struct sym_entry *syme)
if (!syme)
return;
- if (!syme->src->source)
+ if (!syme->source)
lookup_sym_source(syme);
- if (!syme->src->source)
+ if (!syme->source)
return;
- symbol = sym_entry__symbol(syme);
+ symbol = (struct symbol *)(syme + 1);
printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
- pthread_mutex_lock(&syme->src->lock);
- line = syme->src->source;
+ pthread_mutex_lock(&syme->source_lock);
+ line = syme->source;
while (line) {
total += line->count[sym_counter];
line = line->next;
}
- line = syme->src->source;
+ line = syme->source;
while (line) {
float pcnt = 0.0;
@@ -374,7 +346,7 @@ static void show_details(struct sym_entry *syme)
line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
line = line->next;
}
- pthread_mutex_unlock(&syme->src->lock);
+ pthread_mutex_unlock(&syme->source_lock);
if (more)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
@@ -451,8 +423,6 @@ static void print_sym_table(void)
struct sym_entry *syme, *n;
struct rb_root tmp = RB_ROOT;
struct rb_node *nd;
- int sym_width = 0, dso_width = 0, max_dso_width;
- const int win_width = winsize.ws_col - 1;
samples = userspace_samples = 0;
@@ -464,7 +434,6 @@ static void print_sym_table(void)
list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
syme->snap_count = syme->count[snap];
if (syme->snap_count != 0) {
-
if ((hide_user_symbols &&
syme->origin == PERF_RECORD_MISC_USER) ||
(hide_kernel_symbols &&
@@ -484,7 +453,8 @@ static void print_sym_table(void)
puts(CONSOLE_CLEAR);
- printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
+ printf(
+"------------------------------------------------------------------------------\n");
printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
samples_per_sec,
100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
@@ -522,35 +492,13 @@ static void print_sym_table(void)
printf(", %d CPUs)\n", nr_cpus);
}
- printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
+ printf("------------------------------------------------------------------------------\n\n");
if (sym_filter_entry) {
show_details(sym_filter_entry);
return;
}
- /*
- * Find the longest symbol name that will be displayed
- */
- for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
- syme = rb_entry(nd, struct sym_entry, rb_node);
- if (++printed > print_entries ||
- (int)syme->snap_count < count_filter)
- continue;
-
- if (syme->map->dso->long_name_len > dso_width)
- dso_width = syme->map->dso->long_name_len;
-
- if (syme->name_len > sym_width)
- sym_width = syme->name_len;
- }
-
- printed = 0;
-
- max_dso_width = winsize.ws_col - sym_width - 29;
- if (dso_width > max_dso_width)
- dso_width = max_dso_width;
- putchar('\n');
if (nr_counters == 1)
printf(" samples pcnt");
else
@@ -558,21 +506,19 @@ static void print_sym_table(void)
if (verbose)
printf(" RIP ");
- printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
+ printf(" function DSO\n");
printf(" %s _______ _____",
nr_counters == 1 ? " " : "______");
if (verbose)
printf(" ________________");
- printf(" %-*.*s", sym_width, sym_width, graph_line);
- printf(" %-*.*s", dso_width, dso_width, graph_line);
- puts("\n");
+ printf(" ________________________________ ________________\n\n");
for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
struct symbol *sym;
double pcnt;
syme = rb_entry(nd, struct sym_entry, rb_node);
- sym = sym_entry__symbol(syme);
+ sym = (struct symbol *)(syme + 1);
if (++printed > print_entries || (int)syme->snap_count < count_filter)
continue;
@@ -588,11 +534,9 @@ static void print_sym_table(void)
percent_color_fprintf(stdout, "%4.1f%%", pcnt);
if (verbose)
printf(" %016llx", sym->start);
- printf(" %-*.*s", sym_width, sym_width, sym->name);
- printf(" %-*.*s\n", dso_width, dso_width,
- dso_width >= syme->map->dso->long_name_len ?
- syme->map->dso->long_name :
- syme->map->dso->short_name);
+ printf(" %-32s", sym->name);
+ printf(" %s", syme->map->dso->short_name);
+ printf("\n");
}
}
@@ -639,10 +583,10 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
/* zero counters of active symbol */
if (syme) {
- pthread_mutex_lock(&syme->src->lock);
+ pthread_mutex_lock(&syme->source_lock);
__zero_source_counters(syme);
*target = NULL;
- pthread_mutex_unlock(&syme->src->lock);
+ pthread_mutex_unlock(&syme->source_lock);
}
fprintf(stdout, "\n%s: ", msg);
@@ -658,7 +602,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
pthread_mutex_unlock(&active_symbols_lock);
list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
- struct symbol *sym = sym_entry__symbol(syme);
+ struct symbol *sym = (struct symbol *)(syme + 1);
if (!strcmp(buf, sym->name)) {
found = syme;
@@ -682,7 +626,7 @@ static void print_mapped_keys(void)
char *name = NULL;
if (sym_filter_entry) {
- struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+ struct symbol *sym = (struct symbol *)(sym_filter_entry+1);
name = sym->name;
}
@@ -695,7 +639,7 @@ static void print_mapped_keys(void)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
- if (symbol_conf.vmlinux_name) {
+ if (vmlinux_name) {
fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
@@ -732,7 +676,7 @@ static int key_mapped(int c)
case 'F':
case 's':
case 'S':
- return symbol_conf.vmlinux_name ? 1 : 0;
+ return vmlinux_name ? 1 : 0;
default:
break;
}
@@ -774,7 +718,7 @@ static void handle_keypress(int c)
case 'e':
prompt_integer(&print_entries, "Enter display entries (lines)");
if (print_entries == 0) {
- sig_winch_handler(SIGWINCH);
+ update_print_entries();
signal(SIGWINCH, sig_winch_handler);
} else
signal(SIGWINCH, SIG_DFL);
@@ -808,8 +752,6 @@ static void handle_keypress(int c)
case 'q':
case 'Q':
printf("exiting.\n");
- if (dump_symtab)
- dsos__fprintf(stderr);
exit(0);
case 's':
prompt_symbol(&sym_filter_entry, "Enter details symbol");
@@ -820,10 +762,10 @@ static void handle_keypress(int c)
else {
struct sym_entry *syme = sym_filter_entry;
- pthread_mutex_lock(&syme->src->lock);
+ pthread_mutex_lock(&syme->source_lock);
sym_filter_entry = NULL;
__zero_source_counters(syme);
- pthread_mutex_unlock(&syme->src->lock);
+ pthread_mutex_unlock(&syme->source_lock);
}
break;
case 'U':
@@ -909,7 +851,7 @@ static int symbol_filter(struct map *map, struct symbol *sym)
syme = symbol__priv(sym);
syme->map = map;
- syme->src = NULL;
+ pthread_mutex_init(&syme->source_lock, NULL);
if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
sym_filter_entry = syme;
@@ -920,8 +862,16 @@ static int symbol_filter(struct map *map, struct symbol *sym)
}
}
- if (!syme->skip)
- syme->name_len = strlen(sym->name);
+ return 0;
+}
+
+static int parse_symbols(void)
+{
+ if (dsos__load_kernel(vmlinux_name, symbol_filter, 1) <= 0)
+ return -1;
+
+ if (dump_symtab)
+ dsos__fprintf(stderr);
return 0;
}
@@ -929,28 +879,55 @@ static int symbol_filter(struct map *map, struct symbol *sym)
static void event__process_sample(const event_t *self, int counter)
{
u64 ip = self->ip.ip;
+ struct map *map;
struct sym_entry *syme;
- struct addr_location al;
+ struct symbol *sym;
u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
switch (origin) {
- case PERF_RECORD_MISC_USER:
+ case PERF_RECORD_MISC_USER: {
+ struct thread *thread;
+
if (hide_user_symbols)
return;
- break;
+
+ thread = threads__findnew(self->ip.pid);
+ if (thread == NULL)
+ return;
+
+ map = thread__find_map(thread, ip);
+ if (map != NULL) {
+ ip = map->map_ip(map, ip);
+ sym = map__find_symbol(map, ip, symbol_filter);
+ if (sym == NULL)
+ return;
+ userspace_samples++;
+ break;
+ }
+ }
+ /*
+ * If this is outside of all known maps,
+ * and is a negative address, try to look it
+ * up in the kernel dso, as it might be a
+ * vsyscall or vdso (which executes in user-mode).
+ */
+ if ((long long)ip >= 0)
+ return;
+ /* Fall thru */
case PERF_RECORD_MISC_KERNEL:
if (hide_kernel_symbols)
return;
+
+ sym = kernel_maps__find_symbol(ip, &map);
+ if (sym == NULL)
+ return;
break;
default:
return;
}
- if (event__preprocess_sample(self, &al, symbol_filter) < 0 ||
- al.sym == NULL)
- return;
+ syme = symbol__priv(sym);
- syme = symbol__priv(al.sym);
if (!syme->skip) {
syme->count[counter]++;
syme->origin = origin;
@@ -959,12 +936,30 @@ static void event__process_sample(const event_t *self, int counter)
if (list_empty(&syme->node) || !syme->node.next)
__list_insert_active_sym(syme);
pthread_mutex_unlock(&active_symbols_lock);
- if (origin == PERF_RECORD_MISC_USER)
- ++userspace_samples;
++samples;
+ return;
+ }
+}
+
+static void event__process_mmap(event_t *self)
+{
+ struct thread *thread = threads__findnew(self->mmap.pid);
+
+ if (thread != NULL) {
+ struct map *map = map__new(&self->mmap, NULL, 0);
+ if (map != NULL)
+ thread__insert_map(thread, map);
}
}
+static void event__process_comm(event_t *self)
+{
+ struct thread *thread = threads__findnew(self->comm.pid);
+
+ if (thread != NULL)
+ thread__set_comm(thread, self->comm.comm);
+}
+
static int event__process(event_t *event)
{
switch (event->header.type) {
@@ -1216,8 +1211,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
- OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
- "file", "vmlinux pathname"),
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
"hide kernel symbols"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
@@ -1253,6 +1247,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
{
int counter;
+ symbol__init(sizeof(struct sym_entry));
+
page_size = sysconf(_SC_PAGE_SIZE);
argc = parse_options(argc, argv, options, top_usage, 0);
@@ -1269,18 +1265,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
if (!nr_counters)
nr_counters = 1;
- symbol_conf.priv_size = (sizeof(struct sym_entry) +
- (nr_counters + 1) * sizeof(unsigned long));
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
- if (symbol__init(&symbol_conf) < 0)
- return -1;
-
if (delay_secs < 1)
delay_secs = 1;
+ parse_symbols();
parse_source(sym_filter_entry);
+
/*
* User specified count overrides default frequency.
*/
@@ -1310,9 +1301,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
if (target_pid != -1 || profile_cpu != -1)
nr_cpus = 1;
- get_term_dimensions(&winsize);
if (print_entries == 0) {
- update_print_entries(&winsize);
+ update_print_entries();
signal(SIGWINCH, sig_winch_handler);
}
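The term_rows() helper that replaces get_term_dimensions() in builtin-top.c boils down to the following standalone sketch (same logic, local names only): prefer the LINES environment variable, fall back to the TIOCGWINSZ ioctl, then to a 25-row default.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Number of rows available for display: prefer $LINES, fall back to the
 * tty ioctl, then a fixed default. */
static int term_rows(void)
{
	char *lines = getenv("LINES");
	int n;

	if (lines && (n = atoi(lines)) > 0)
		return n;
#ifdef TIOCGWINSZ
	{
		struct winsize ws;

		if (!ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) && ws.ws_row)
			return ws.ws_row;
	}
#endif
	return 25;
}

int main(void)
{
	printf("display height: %d rows\n", term_rows());
	return 0;
}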
diff --git a/trunk/tools/perf/builtin-trace.c b/trunk/tools/perf/builtin-trace.c
index abb914aa7be6..d042d656c561 100644
--- a/trunk/tools/perf/builtin-trace.c
+++ b/trunk/tools/perf/builtin-trace.c
@@ -5,66 +5,49 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
-#include "util/exec_cmd.h"
-#include "util/trace-event.h"
-
-static char const *script_name;
-static char const *generate_script_lang;
-static int default_start_script(const char *script __attribute((unused)))
-{
- return 0;
-}
+#include "util/parse-options.h"
-static int default_stop_script(void)
-{
- return 0;
-}
+#include "perf.h"
+#include "util/debug.h"
-static int default_generate_script(const char *outfile __attribute ((unused)))
-{
- return 0;
-}
+#include "util/trace-event.h"
+#include "util/data_map.h"
-static struct scripting_ops default_scripting_ops = {
- .start_script = default_start_script,
- .stop_script = default_stop_script,
- .process_event = print_event,
- .generate_script = default_generate_script,
-};
+static char const *input_name = "perf.data";
-static struct scripting_ops *scripting_ops;
+static unsigned long total = 0;
+static unsigned long total_comm = 0;
-static void setup_scripting(void)
-{
- /* make sure PERF_EXEC_PATH is set for scripts */
- perf_set_argv_exec_path(perf_exec_path());
+static struct perf_header *header;
+static u64 sample_type;
- setup_perl_scripting();
+static char *cwd;
+static int cwdlen;
- scripting_ops = &default_scripting_ops;
-}
-static int cleanup_scripting(void)
+static int
+process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- return scripting_ops->stop_script();
-}
-
-#include "util/parse-options.h"
-
-#include "perf.h"
-#include "util/debug.h"
+ struct thread *thread = threads__findnew(event->comm.pid);
-#include "util/trace-event.h"
-#include "util/data_map.h"
-#include "util/exec_cmd.h"
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->comm.comm, event->comm.pid);
-static char const *input_name = "perf.data";
+ if (thread == NULL ||
+ thread__set_comm(thread, event->comm.comm)) {
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
+ return -1;
+ }
+ total_comm++;
-static struct perf_header *header;
-static u64 sample_type;
+ return 0;
+}
-static int process_sample_event(event_t *event)
+static int
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
u64 ip = event->ip.ip;
u64 timestamp = -1;
@@ -89,7 +72,9 @@ static int process_sample_event(event_t *event)
more_data += sizeof(u64);
}
- dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
event->header.misc,
event->ip.pid, event->ip.tid,
(void *)(long)ip,
@@ -114,10 +99,9 @@ static int process_sample_event(event_t *event)
 	 * field, although it should be the same as this perf
* event pid
*/
- scripting_ops->process_event(cpu, raw->data, raw->size,
- timestamp, thread->comm);
+ print_event(cpu, raw->data, raw->size, timestamp, thread->comm);
}
- event__stats.total += period;
+ total += period;
return 0;
}
@@ -138,7 +122,7 @@ static int sample_type_check(u64 type)
static struct perf_file_handler file_handler = {
.process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
+ .process_comm_event = process_comm_event,
.sample_type_check = sample_type_check,
};
@@ -147,156 +131,7 @@ static int __cmd_trace(void)
register_idle_thread();
register_perf_file_handler(&file_handler);
- return mmap_dispatch_perf_file(&header, input_name,
- 0, 0, &event__cwdlen, &event__cwd);
-}
-
-struct script_spec {
- struct list_head node;
- struct scripting_ops *ops;
- char spec[0];
-};
-
-LIST_HEAD(script_specs);
-
-static struct script_spec *script_spec__new(const char *spec,
- struct scripting_ops *ops)
-{
- struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1);
-
- if (s != NULL) {
- strcpy(s->spec, spec);
- s->ops = ops;
- }
-
- return s;
-}
-
-static void script_spec__delete(struct script_spec *s)
-{
- free(s->spec);
- free(s);
-}
-
-static void script_spec__add(struct script_spec *s)
-{
- list_add_tail(&s->node, &script_specs);
-}
-
-static struct script_spec *script_spec__find(const char *spec)
-{
- struct script_spec *s;
-
- list_for_each_entry(s, &script_specs, node)
- if (strcasecmp(s->spec, spec) == 0)
- return s;
- return NULL;
-}
-
-static struct script_spec *script_spec__findnew(const char *spec,
- struct scripting_ops *ops)
-{
- struct script_spec *s = script_spec__find(spec);
-
- if (s)
- return s;
-
- s = script_spec__new(spec, ops);
- if (!s)
- goto out_delete_spec;
-
- script_spec__add(s);
-
- return s;
-
-out_delete_spec:
- script_spec__delete(s);
-
- return NULL;
-}
-
-int script_spec_register(const char *spec, struct scripting_ops *ops)
-{
- struct script_spec *s;
-
- s = script_spec__find(spec);
- if (s)
- return -1;
-
- s = script_spec__findnew(spec, ops);
- if (!s)
- return -1;
-
- return 0;
-}
-
-static struct scripting_ops *script_spec__lookup(const char *spec)
-{
- struct script_spec *s = script_spec__find(spec);
- if (!s)
- return NULL;
-
- return s->ops;
-}
-
-static void list_available_languages(void)
-{
- struct script_spec *s;
-
- fprintf(stderr, "\n");
- fprintf(stderr, "Scripting language extensions (used in "
- "perf trace -s [spec:]script.[spec]):\n\n");
-
- list_for_each_entry(s, &script_specs, node)
- fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name);
-
- fprintf(stderr, "\n");
-}
-
-static int parse_scriptname(const struct option *opt __used,
- const char *str, int unset __used)
-{
- char spec[PATH_MAX];
- const char *script, *ext;
- int len;
-
- if (strcmp(str, "list") == 0) {
- list_available_languages();
- return 0;
- }
-
- script = strchr(str, ':');
- if (script) {
- len = script - str;
- if (len >= PATH_MAX) {
- fprintf(stderr, "invalid language specifier");
- return -1;
- }
- strncpy(spec, str, len);
- spec[len] = '\0';
- scripting_ops = script_spec__lookup(spec);
- if (!scripting_ops) {
- fprintf(stderr, "invalid language specifier");
- return -1;
- }
- script++;
- } else {
- script = str;
- ext = strchr(script, '.');
- if (!ext) {
- fprintf(stderr, "invalid script extension");
- return -1;
- }
- scripting_ops = script_spec__lookup(++ext);
- if (!scripting_ops) {
- fprintf(stderr, "invalid script extension");
- return -1;
- }
- }
-
- script_name = strdup(script);
-
- return 0;
+ return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
static const char * const annotate_usage[] = {
@@ -311,23 +146,13 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('l', "latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
- OPT_CALLBACK('s', "script", NULL, "name",
- "script file name (lang:script name, script name, or *)",
- parse_scriptname),
- OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
- "generate perf-trace.xx script in specified language"),
-
OPT_END()
};
int cmd_trace(int argc, const char **argv, const char *prefix __used)
{
- int err;
-
symbol__init(0);
- setup_scripting();
-
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
/*
@@ -340,50 +165,5 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
setup_pager();
- if (generate_script_lang) {
- struct stat perf_stat;
-
- int input = open(input_name, O_RDONLY);
- if (input < 0) {
- perror("failed to open file");
- exit(-1);
- }
-
- err = fstat(input, &perf_stat);
- if (err < 0) {
- perror("failed to stat file");
- exit(-1);
- }
-
- if (!perf_stat.st_size) {
- fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
- }
-
- scripting_ops = script_spec__lookup(generate_script_lang);
- if (!scripting_ops) {
- fprintf(stderr, "invalid language specifier");
- return -1;
- }
-
- header = perf_header__new();
- if (header == NULL)
- return -1;
-
- perf_header__read(header, input);
- err = scripting_ops->generate_script("perf-trace");
- goto out;
- }
-
- if (script_name) {
- err = scripting_ops->start_script(script_name);
- if (err)
- goto out;
- }
-
- err = __cmd_trace();
-
- cleanup_scripting();
-out:
- return err;
+ return __cmd_trace();
}
diff --git a/trunk/tools/perf/builtin.h b/trunk/tools/perf/builtin.h
index a3d8bf65f26c..9b02d85091fe 100644
--- a/trunk/tools/perf/builtin.h
+++ b/trunk/tools/perf/builtin.h
@@ -28,6 +28,5 @@ extern int cmd_top(int argc, const char **argv, const char *prefix);
extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
-extern int cmd_kmem(int argc, const char **argv, const char *prefix);
#endif
diff --git a/trunk/tools/perf/command-list.txt b/trunk/tools/perf/command-list.txt
index 02b09ea17a3e..d3a6e18e4a5e 100644
--- a/trunk/tools/perf/command-list.txt
+++ b/trunk/tools/perf/command-list.txt
@@ -14,4 +14,3 @@ perf-timechart mainporcelain common
perf-top mainporcelain common
perf-trace mainporcelain common
perf-probe mainporcelain common
-perf-kmem mainporcelain common
diff --git a/trunk/tools/perf/perf.c b/trunk/tools/perf/perf.c
index cf64049bc9bd..89b82acac7d9 100644
--- a/trunk/tools/perf/perf.c
+++ b/trunk/tools/perf/perf.c
@@ -285,21 +285,20 @@ static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
+ { "help", cmd_help, 0 },
+ { "list", cmd_list, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
- { "help", cmd_help, 0 },
- { "list", cmd_list, 0 },
- { "record", cmd_record, 0 },
- { "report", cmd_report, 0 },
- { "bench", cmd_bench, 0 },
- { "stat", cmd_stat, 0 },
- { "timechart", cmd_timechart, 0 },
- { "top", cmd_top, 0 },
- { "annotate", cmd_annotate, 0 },
- { "version", cmd_version, 0 },
- { "trace", cmd_trace, 0 },
- { "sched", cmd_sched, 0 },
- { "probe", cmd_probe, 0 },
- { "kmem", cmd_kmem, 0 },
+ { "record", cmd_record, 0 },
+ { "report", cmd_report, 0 },
+ { "bench", cmd_bench, 0 },
+ { "stat", cmd_stat, 0 },
+ { "timechart", cmd_timechart, 0 },
+ { "top", cmd_top, 0 },
+ { "annotate", cmd_annotate, 0 },
+ { "version", cmd_version, 0 },
+ { "trace", cmd_trace, 0 },
+ { "sched", cmd_sched, 0 },
+ { "probe", cmd_probe, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
diff --git a/trunk/tools/perf/perf.h b/trunk/tools/perf/perf.h
index 454d5d55f32d..216bdb223f63 100644
--- a/trunk/tools/perf/perf.h
+++ b/trunk/tools/perf/perf.h
@@ -53,12 +53,6 @@
#define cpu_relax() asm volatile("" ::: "memory")
#endif
-#ifdef __ia64__
-#include "../../arch/ia64/include/asm/unistd.h"
-#define rmb() asm volatile ("mf" ::: "memory")
-#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
-#endif
-
#include
#include
#include
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
deleted file mode 100644
index af78d9a52a7d..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the
- * contents of Context.xs. Do not edit this file, edit Context.xs instead.
- *
- * ANY CHANGES MADE HERE WILL BE LOST!
- *
- */
-
-#line 1 "Context.xs"
-/*
- * Context.xs. XS interfaces for perf trace.
- *
- * Copyright (C) 2009 Tom Zanussi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include "EXTERN.h"
-#include "perl.h"
-#include "XSUB.h"
-#include "../../../util/trace-event-perl.h"
-
-#ifndef PERL_UNUSED_VAR
-# define PERL_UNUSED_VAR(var) if (0) var = var
-#endif
-
-#line 41 "Context.c"
-
-XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
-XS(XS_Perf__Trace__Context_common_pc)
-{
-#ifdef dVAR
- dVAR; dXSARGS;
-#else
- dXSARGS;
-#endif
- if (items != 1)
- Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context");
- PERL_UNUSED_VAR(cv); /* -W */
- {
- struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
- int RETVAL;
- dXSTARG;
-
- RETVAL = common_pc(context);
- XSprePUSH; PUSHi((IV)RETVAL);
- }
- XSRETURN(1);
-}
-
-
-XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */
-XS(XS_Perf__Trace__Context_common_flags)
-{
-#ifdef dVAR
- dVAR; dXSARGS;
-#else
- dXSARGS;
-#endif
- if (items != 1)
- Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context");
- PERL_UNUSED_VAR(cv); /* -W */
- {
- struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
- int RETVAL;
- dXSTARG;
-
- RETVAL = common_flags(context);
- XSprePUSH; PUSHi((IV)RETVAL);
- }
- XSRETURN(1);
-}
-
-
-XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */
-XS(XS_Perf__Trace__Context_common_lock_depth)
-{
-#ifdef dVAR
- dVAR; dXSARGS;
-#else
- dXSARGS;
-#endif
- if (items != 1)
- Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context");
- PERL_UNUSED_VAR(cv); /* -W */
- {
- struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
- int RETVAL;
- dXSTARG;
-
- RETVAL = common_lock_depth(context);
- XSprePUSH; PUSHi((IV)RETVAL);
- }
- XSRETURN(1);
-}
-
-#ifdef __cplusplus
-extern "C"
-#endif
-XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */
-XS(boot_Perf__Trace__Context)
-{
-#ifdef dVAR
- dVAR; dXSARGS;
-#else
- dXSARGS;
-#endif
- const char* file = __FILE__;
-
- PERL_UNUSED_VAR(cv); /* -W */
- PERL_UNUSED_VAR(items); /* -W */
- XS_VERSION_BOOTCHECK ;
-
- newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$");
- newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$");
- newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$");
- if (PL_unitcheckav)
- call_list(PL_scopestack_ix, PL_unitcheckav);
- XSRETURN_YES;
-}
-
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
deleted file mode 100644
index fb78006c165e..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Context.xs. XS interfaces for perf trace.
- *
- * Copyright (C) 2009 Tom Zanussi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include "EXTERN.h"
-#include "perl.h"
-#include "XSUB.h"
-#include "../../../util/trace-event-perl.h"
-
-MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
-PROTOTYPES: ENABLE
-
-int
-common_pc(context)
- struct scripting_context * context
-
-int
-common_flags(context)
- struct scripting_context * context
-
-int
-common_lock_depth(context)
- struct scripting_context * context
-
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
deleted file mode 100644
index decdeb0f6789..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
+++ /dev/null
@@ -1,17 +0,0 @@
-use 5.010000;
-use ExtUtils::MakeMaker;
-# See lib/ExtUtils/MakeMaker.pm for details of how to influence
-# the contents of the Makefile that is written.
-WriteMakefile(
- NAME => 'Perf::Trace::Context',
- VERSION_FROM => 'lib/Perf/Trace/Context.pm', # finds $VERSION
- PREREQ_PM => {}, # e.g., Module::Name => 1.1
- ($] >= 5.005 ? ## Add these new keywords supported since 5.005
- (ABSTRACT_FROM => 'lib/Perf/Trace/Context.pm', # retrieve abstract from module
- AUTHOR => 'Tom Zanussi ') : ()),
- LIBS => [''], # e.g., '-lm'
- DEFINE => '-I ../..', # e.g., '-DHAVE_SOMETHING'
- INC => '-I.', # e.g., '-I. -I/usr/include/other'
- # Un-comment this if you add C files to link with later:
- OBJECT => 'Context.o', # link all the C files too
-);
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/README b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/README
deleted file mode 100644
index 9a9707630791..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/README
+++ /dev/null
@@ -1,59 +0,0 @@
-Perf-Trace-Util version 0.01
-============================
-
-This module contains utility functions for use with perf trace.
-
-Core.pm and Util.pm are pure Perl modules; Core.pm contains routines
-that the core perf support for Perl calls on and should always be
-'used', while Util.pm contains useful but optional utility functions
-that scripts may want to use. Context.pm contains the Perl->C
-interface that allows scripts to access data in the embedding perf
-executable; scripts wishing to do that should 'use Context.pm'.
-
-The Perl->C perf interface is completely driven by Context.xs. If you
-want to add new Perl functions that end up accessing C data in the
-perf executable, you add desciptions of the new functions here.
-scripting_context is a pointer to the perf data in the perf executable
-that you want to access - it's passed as the second parameter,
-$context, to all handler functions.
-
-After you do that:
-
- perl Makefile.PL # to create a Makefile for the next step
- make # to create Context.c
-
- edit Context.c to add const to the char* file = __FILE__ line in
- XS(boot_Perf__Trace__Context) to silence a warning/error.
-
- You can delete the Makefile, object files and anything else that was
- generated e.g. blib and shared library, etc, except for of course
- Context.c
-
- You should then be able to run the normal perf make as usual.
-
-INSTALLATION
-
-Building perf with perf trace Perl scripting should install this
-module in the right place.
-
-You should make sure libperl and ExtUtils/Embed.pm are installed first
-e.g. apt-get install libperl-dev or yum install perl-ExtUtils-Embed.
-
-DEPENDENCIES
-
-This module requires these other modules and libraries:
-
- None
-
-COPYRIGHT AND LICENCE
-
-Copyright (C) 2009 by Tom Zanussi
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself, either Perl version 5.10.0 or,
-at your option, any later version of Perl 5 you may have available.
-
-Alternatively, this software may be distributed under the terms of the
-GNU General Public License ("GPL") version 2 as published by the Free
-Software Foundation.
-
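
To make the Context.pm interface described in the README above concrete, a minimal handler sketch follows, assuming the "subsystem::event" naming and argument order used by the example scripts further down; it only calls functions exported by Context.pm and Core.pm:

    use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
    use Perf::Trace::Context;   # Perl->C bridge: common_pc() etc.
    use Perf::Trace::Core;      # trace_flag_str() and friends

    # $context is the scripting_context pointer passed as the second
    # argument to every handler; the Context functions hand it back to C.
    sub irq::softirq_entry
    {
        my ($event_name, $context, $cpu, $secs, $nsecs, $pid, $comm,
            $vec) = @_;

        printf("%s: preempt_count=%d flags=%s lock_depth=%d\n",
               $event_name, common_pc($context),
               trace_flag_str(common_flags($context)),
               common_lock_depth($context));
    }
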
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
deleted file mode 100644
index 6c7f3659cb17..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
+++ /dev/null
@@ -1,55 +0,0 @@
-package Perf::Trace::Context;
-
-use 5.010000;
-use strict;
-use warnings;
-
-require Exporter;
-
-our @ISA = qw(Exporter);
-
-our %EXPORT_TAGS = ( 'all' => [ qw(
-) ] );
-
-our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
-
-our @EXPORT = qw(
- common_pc common_flags common_lock_depth
-);
-
-our $VERSION = '0.01';
-
-require XSLoader;
-XSLoader::load('Perf::Trace::Context', $VERSION);
-
-1;
-__END__
-=head1 NAME
-
-Perf::Trace::Context - Perl extension for accessing functions in perf.
-
-=head1 SYNOPSIS
-
- use Perf::Trace::Context;
-
-=head1 SEE ALSO
-
-Perf (trace) documentation
-
-=head1 AUTHOR
-
-Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Tom Zanussi
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself, either Perl version 5.10.0 or,
-at your option, any later version of Perl 5 you may have available.
-
-Alternatively, this software may be distributed under the terms of the
-GNU General Public License ("GPL") version 2 as published by the Free
-Software Foundation.
-
-=cut
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
deleted file mode 100644
index 9df376a9f629..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
+++ /dev/null
@@ -1,192 +0,0 @@
-package Perf::Trace::Core;
-
-use 5.010000;
-use strict;
-use warnings;
-
-require Exporter;
-
-our @ISA = qw(Exporter);
-
-our %EXPORT_TAGS = ( 'all' => [ qw(
-) ] );
-
-our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
-
-our @EXPORT = qw(
-define_flag_field define_flag_value flag_str dump_flag_fields
-define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields
-trace_flag_str
-);
-
-our $VERSION = '0.01';
-
-my %trace_flags = (0x00 => "NONE",
- 0x01 => "IRQS_OFF",
- 0x02 => "IRQS_NOSUPPORT",
- 0x04 => "NEED_RESCHED",
- 0x08 => "HARDIRQ",
- 0x10 => "SOFTIRQ");
-
-sub trace_flag_str
-{
- my ($value) = @_;
-
- my $string;
-
- my $print_delim = 0;
-
- foreach my $idx (sort {$a <=> $b} keys %trace_flags) {
- if (!$value && !$idx) {
- $string .= "NONE";
- last;
- }
-
- if ($idx && ($value & $idx) == $idx) {
- if ($print_delim) {
- $string .= " | ";
- }
- $string .= "$trace_flags{$idx}";
- $print_delim = 1;
- $value &= ~$idx;
- }
- }
-
- return $string;
-}
-
-my %flag_fields;
-my %symbolic_fields;
-
-sub flag_str
-{
- my ($event_name, $field_name, $value) = @_;
-
- my $string;
-
- if ($flag_fields{$event_name}{$field_name}) {
- my $print_delim = 0;
- foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event_name}{$field_name}{"values"}}) {
- if (!$value && !$idx) {
- $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
- last;
- }
- if ($idx && ($value & $idx) == $idx) {
- if ($print_delim && $flag_fields{$event_name}{$field_name}{'delim'}) {
- $string .= " $flag_fields{$event_name}{$field_name}{'delim'} ";
- }
- $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
- $print_delim = 1;
- $value &= ~$idx;
- }
- }
- }
-
- return $string;
-}
-
-sub define_flag_field
-{
- my ($event_name, $field_name, $delim) = @_;
-
- $flag_fields{$event_name}{$field_name}{"delim"} = $delim;
-}
-
-sub define_flag_value
-{
- my ($event_name, $field_name, $value, $field_str) = @_;
-
- $flag_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
-}
-
-sub dump_flag_fields
-{
- for my $event (keys %flag_fields) {
- print "event $event:\n";
- for my $field (keys %{$flag_fields{$event}}) {
- print " field: $field:\n";
- print " delim: $flag_fields{$event}{$field}{'delim'}\n";
- foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) {
- print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
- }
- }
- }
-}
-
-sub symbol_str
-{
- my ($event_name, $field_name, $value) = @_;
-
- if ($symbolic_fields{$event_name}{$field_name}) {
- foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event_name}{$field_name}{"values"}}) {
- if (!$value && !$idx) {
- return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
- last;
- }
- if ($value == $idx) {
- return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
- }
- }
- }
-
- return undef;
-}
-
-sub define_symbolic_field
-{
- my ($event_name, $field_name) = @_;
-
- # nothing to do, really
-}
-
-sub define_symbolic_value
-{
- my ($event_name, $field_name, $value, $field_str) = @_;
-
- $symbolic_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
-}
-
-sub dump_symbolic_fields
-{
- for my $event (keys %symbolic_fields) {
- print "event $event:\n";
- for my $field (keys %{$symbolic_fields{$event}}) {
- print " field: $field:\n";
- foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) {
- print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n";
- }
- }
- }
-}
-
-1;
-__END__
-=head1 NAME
-
-Perf::Trace::Core - Perl extension for perf trace
-
-=head1 SYNOPSIS
-
- use Perf::Trace::Core
-
-=head1 SEE ALSO
-
-Perf (trace) documentation
-
-=head1 AUTHOR
-
-Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Tom Zanussi
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself, either Perl version 5.10.0 or,
-at your option, any later version of Perl 5 you may have available.
-
-Alternatively, this software may be distributed under the terms of the
-GNU General Public License ("GPL") version 2 as published by the Free
-Software Foundation.
-
-=cut
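
A small sketch of the flag helpers in Core.pm above; the event name and flag values here are made up for illustration, and in practice the define_*() registrations would come from perf itself rather than being hand-written:

    use Perf::Trace::Core;

    # Hypothetical registration of two gfp_flags bits for kmem::kmalloc.
    define_flag_field("kmem::kmalloc", "gfp_flags", "|");
    define_flag_value("kmem::kmalloc", "gfp_flags", 0x10, "GFP_WAIT");
    define_flag_value("kmem::kmalloc", "gfp_flags", 0x40, "GFP_IO");

    # Decodes the bits registered above: prints "GFP_WAIT | GFP_IO".
    print flag_str("kmem::kmalloc", "gfp_flags", 0x50), "\n";

    # The common trace flags use a fixed table: prints "IRQS_OFF | NEED_RESCHED".
    print trace_flag_str(0x05), "\n";
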
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
deleted file mode 100644
index 052f132ced24..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
+++ /dev/null
@@ -1,88 +0,0 @@
-package Perf::Trace::Util;
-
-use 5.010000;
-use strict;
-use warnings;
-
-require Exporter;
-
-our @ISA = qw(Exporter);
-
-our %EXPORT_TAGS = ( 'all' => [ qw(
-) ] );
-
-our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
-
-our @EXPORT = qw(
-avg nsecs nsecs_secs nsecs_nsecs nsecs_usecs print_nsecs
-);
-
-our $VERSION = '0.01';
-
-sub avg
-{
- my ($total, $n) = @_;
-
- return $total / $n;
-}
-
-my $NSECS_PER_SEC = 1000000000;
-
-sub nsecs
-{
- my ($secs, $nsecs) = @_;
-
- return $secs * $NSECS_PER_SEC + $nsecs;
-}
-
-sub nsecs_secs {
- my ($nsecs) = @_;
-
- return $nsecs / $NSECS_PER_SEC;
-}
-
-sub nsecs_nsecs {
- my ($nsecs) = @_;
-
- return $nsecs - nsecs_secs($nsecs);
-}
-
-sub nsecs_str {
- my ($nsecs) = @_;
-
- my $str = sprintf("%5u.%09u", nsecs_secs($nsecs), nsecs_nsecs($nsecs));
-
- return $str;
-}
-
-1;
-__END__
-=head1 NAME
-
-Perf::Trace::Util - Perl extension for perf trace
-
-=head1 SYNOPSIS
-
- use Perf::Trace::Util;
-
-=head1 SEE ALSO
-
-Perf (trace) documentation
-
-=head1 AUTHOR
-
-Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Tom Zanussi
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself, either Perl version 5.10.0 or,
-at your option, any later version of Perl 5 you may have available.
-
-Alternatively, this software may be distributed under the terms of the
-GNU General Public License ("GPL") version 2 as published by the Free
-Software Foundation.
-
-=cut
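
A quick sketch of the Util.pm time helpers above, in the same spirit as what wakeup-latency.pl later in this patch does with them; the timestamps are invented:

    use Perf::Trace::Util;

    # (secs, nsecs) pairs as a handler would receive them.
    my $wakeup_ts = nsecs(1234, 500_000_000);
    my $switch_ts = nsecs(1234, 500_120_000);

    my $latency = $switch_ts - $wakeup_ts;   # 120000 ns
    printf("one wakeup: %u ns, avg over 3 identical wakeups: %u ns\n",
           $latency, avg(3 * $latency, 3));
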
diff --git a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/typemap b/trunk/tools/perf/scripts/perl/Perf-Trace-Util/typemap
deleted file mode 100644
index 840836804aa7..000000000000
--- a/trunk/tools/perf/scripts/perl/Perf-Trace-Util/typemap
+++ /dev/null
@@ -1 +0,0 @@
-struct scripting_context * T_PTR
diff --git a/trunk/tools/perf/scripts/perl/bin/check-perf-trace-record b/trunk/tools/perf/scripts/perl/bin/check-perf-trace-record
deleted file mode 100644
index c7ec5de2f535..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/check-perf-trace-record
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry
-
-
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/check-perf-trace-report b/trunk/tools/perf/scripts/perl/bin/check-perf-trace-report
deleted file mode 100644
index 89948b015020..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/check-perf-trace-report
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/rw-by-file-record b/trunk/tools/perf/scripts/perl/bin/rw-by-file-record
deleted file mode 100644
index b25056ebf963..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/rw-by-file-record
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write
diff --git a/trunk/tools/perf/scripts/perl/bin/rw-by-file-report b/trunk/tools/perf/scripts/perl/bin/rw-by-file-report
deleted file mode 100644
index f5dcf9cb5bd2..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/rw-by-file-report
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/rw-by-pid-record b/trunk/tools/perf/scripts/perl/bin/rw-by-pid-record
deleted file mode 100644
index 8903979c5b6c..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write
diff --git a/trunk/tools/perf/scripts/perl/bin/rw-by-pid-report b/trunk/tools/perf/scripts/perl/bin/rw-by-pid-report
deleted file mode 100644
index cea16f78a3a2..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/rw-by-pid-report
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/wakeup-latency-record b/trunk/tools/perf/scripts/perl/bin/wakeup-latency-record
deleted file mode 100644
index 6abedda911a4..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-perf record -c 1 -f -a -M -R -e sched:sched_switch -e sched:sched_wakeup
-
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/wakeup-latency-report b/trunk/tools/perf/scripts/perl/bin/wakeup-latency-report
deleted file mode 100644
index 85769dc456eb..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/wakeup-latency-report
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/bin/workqueue-stats-record b/trunk/tools/perf/scripts/perl/bin/workqueue-stats-record
deleted file mode 100644
index fce6637b19ba..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-perf record -c 1 -f -a -M -R -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion
diff --git a/trunk/tools/perf/scripts/perl/bin/workqueue-stats-report b/trunk/tools/perf/scripts/perl/bin/workqueue-stats-report
deleted file mode 100644
index aa68435be926..000000000000
--- a/trunk/tools/perf/scripts/perl/bin/workqueue-stats-report
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
-
-
-
-
diff --git a/trunk/tools/perf/scripts/perl/check-perf-trace.pl b/trunk/tools/perf/scripts/perl/check-perf-trace.pl
deleted file mode 100644
index 4e7dc0a407a5..000000000000
--- a/trunk/tools/perf/scripts/perl/check-perf-trace.pl
+++ /dev/null
@@ -1,106 +0,0 @@
-# perf trace event handlers, generated by perf trace -g perl
-# (c) 2009, Tom Zanussi
-# Licensed under the terms of the GNU GPL License version 2
-
-# This script tests basic functionality such as flag and symbol
-# strings, common_xxx() calls back into perf, begin, end, unhandled
-# events, etc. Basically, if this script runs successfully and
-# displays expected results, perl scripting support should be ok.
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Context;
-use Perf::Trace::Util;
-
-sub trace_begin
-{
- print "trace_begin\n";
-}
-
-sub trace_end
-{
- print "trace_end\n";
-
- print_unhandled();
-}
-
-sub irq::softirq_entry
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $vec) = @_;
-
- print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm);
-
- print_uncommon($context);
-
- printf("vec=%s\n",
- symbol_str("irq::softirq_entry", "vec", $vec));
-}
-
-sub kmem::kmalloc
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $call_site, $ptr, $bytes_req, $bytes_alloc,
- $gfp_flags) = @_;
-
- print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm);
-
- print_uncommon($context);
-
- printf("call_site=%p, ptr=%p, bytes_req=%u, bytes_alloc=%u, ".
- "gfp_flags=%s\n",
- $call_site, $ptr, $bytes_req, $bytes_alloc,
-
- flag_str("kmem::kmalloc", "gfp_flags", $gfp_flags));
-}
-
-# print trace fields not included in handler args
-sub print_uncommon
-{
- my ($context) = @_;
-
- printf("common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, ",
- common_pc($context), trace_flag_str(common_flags($context)),
- common_lock_depth($context));
-
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
- if ((scalar keys %unhandled) == 0) {
- return;
- }
-
- print "\nunhandled events:\n\n";
-
- printf("%-40s %10s\n", "event", "count");
- printf("%-40s %10s\n", "----------------------------------------",
- "-----------");
-
- foreach my $event_name (keys %unhandled) {
- printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
- }
-}
-
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm) = @_;
-
- $unhandled{$event_name}++;
-}
-
-sub print_header
-{
- my ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;
-
- printf("%-20s %5u %05u.%09u %8u %-20s ",
- $event_name, $cpu, $secs, $nsecs, $pid, $comm);
-}
diff --git a/trunk/tools/perf/scripts/perl/rw-by-file.pl b/trunk/tools/perf/scripts/perl/rw-by-file.pl
deleted file mode 100644
index 61f91561d848..000000000000
--- a/trunk/tools/perf/scripts/perl/rw-by-file.pl
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2009, Tom Zanussi
-# Licensed under the terms of the GNU GPL License version 2
-
-# Display r/w activity for files read/written to for a given program
-
-# The common_* event handler fields are the most useful fields common to
-# all events. They don't necessarily correspond to the 'common_*' fields
-# in the status files. Those fields not available as handler params can
-# be retrieved via script functions of the form get_common_*().
-
-use 5.010000;
-use strict;
-use warnings;
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Util;
-
-# change this to the comm of the program you're interested in
-my $for_comm = "perf";
-
-my %reads;
-my %writes;
-
-sub syscalls::sys_enter_read
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_;
-
- if ($common_comm eq $for_comm) {
- $reads{$fd}{bytes_requested} += $count;
- $reads{$fd}{total_reads}++;
- }
-}
-
-sub syscalls::sys_enter_write
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_;
-
- if ($common_comm eq $for_comm) {
- $writes{$fd}{bytes_written} += $count;
- $writes{$fd}{total_writes}++;
- }
-}
-
-sub trace_end
-{
- printf("file read counts for $for_comm:\n\n");
-
- printf("%6s %10s %10s\n", "fd", "# reads", "bytes_requested");
- printf("%6s %10s %10s\n", "------", "----------", "-----------");
-
- foreach my $fd (sort {$reads{$b}{bytes_requested} <=>
- $reads{$a}{bytes_requested}} keys %reads) {
- my $total_reads = $reads{$fd}{total_reads};
- my $bytes_requested = $reads{$fd}{bytes_requested};
- printf("%6u %10u %10u\n", $fd, $total_reads, $bytes_requested);
- }
-
- printf("\nfile write counts for $for_comm:\n\n");
-
- printf("%6s %10s %10s\n", "fd", "# writes", "bytes_written");
- printf("%6s %10s %10s\n", "------", "----------", "-----------");
-
- foreach my $fd (sort {$writes{$b}{bytes_written} <=>
- $writes{$a}{bytes_written}} keys %writes) {
- my $total_writes = $writes{$fd}{total_writes};
- my $bytes_written = $writes{$fd}{bytes_written};
- printf("%6u %10u %10u\n", $fd, $total_writes, $bytes_written);
- }
-
- print_unhandled();
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
- if ((scalar keys %unhandled) == 0) {
- return;
- }
-
- print "\nunhandled events:\n\n";
-
- printf("%-40s %10s\n", "event", "count");
- printf("%-40s %10s\n", "----------------------------------------",
- "-----------");
-
- foreach my $event_name (keys %unhandled) {
- printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
- }
-}
-
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm) = @_;
-
- $unhandled{$event_name}++;
-}
-
-
diff --git a/trunk/tools/perf/scripts/perl/rw-by-pid.pl b/trunk/tools/perf/scripts/perl/rw-by-pid.pl
deleted file mode 100644
index da601fae1a00..000000000000
--- a/trunk/tools/perf/scripts/perl/rw-by-pid.pl
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2009, Tom Zanussi
-# Licensed under the terms of the GNU GPL License version 2
-
-# Display r/w activity for all processes
-
-# The common_* event handler fields are the most useful fields common to
-# all events. They don't necessarily correspond to the 'common_*' fields
-# in the status files. Those fields not available as handler params can
-# be retrieved via script functions of the form get_common_*().
-
-use 5.010000;
-use strict;
-use warnings;
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Util;
-
-my %reads;
-my %writes;
-
-sub syscalls::sys_exit_read
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $nr, $ret) = @_;
-
- if ($ret > 0) {
- $reads{$common_pid}{bytes_read} += $ret;
- } else {
- if (!defined ($reads{$common_pid}{bytes_read})) {
- $reads{$common_pid}{bytes_read} = 0;
- }
- $reads{$common_pid}{errors}{$ret}++;
- }
-}
-
-sub syscalls::sys_enter_read
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $nr, $fd, $buf, $count) = @_;
-
- $reads{$common_pid}{bytes_requested} += $count;
- $reads{$common_pid}{total_reads}++;
- $reads{$common_pid}{comm} = $common_comm;
-}
-
-sub syscalls::sys_exit_write
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $nr, $ret) = @_;
-
- if ($ret <= 0) {
- $writes{$common_pid}{errors}{$ret}++;
- }
-}
-
-sub syscalls::sys_enter_write
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $nr, $fd, $buf, $count) = @_;
-
- $writes{$common_pid}{bytes_written} += $count;
- $writes{$common_pid}{total_writes}++;
- $writes{$common_pid}{comm} = $common_comm;
-}
-
-sub trace_end
-{
- printf("read counts by pid:\n\n");
-
- printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
- "# reads", "bytes_requested", "bytes_read");
- printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
- "-----------", "----------", "----------");
-
- foreach my $pid (sort {$reads{$b}{bytes_read} <=>
- $reads{$a}{bytes_read}} keys %reads) {
- my $comm = $reads{$pid}{comm};
- my $total_reads = $reads{$pid}{total_reads};
- my $bytes_requested = $reads{$pid}{bytes_requested};
- my $bytes_read = $reads{$pid}{bytes_read};
-
- printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
- $total_reads, $bytes_requested, $bytes_read);
- }
-
- printf("\nfailed reads by pid:\n\n");
-
- printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
- printf("%6s %20s %6s %10s\n", "------", "--------------------",
- "------", "----------");
-
- foreach my $pid (keys %reads) {
- my $comm = $reads{$pid}{comm};
- foreach my $err (sort {$reads{$b}{comm} cmp $reads{$a}{comm}}
- keys %{$reads{$pid}{errors}}) {
- my $errors = $reads{$pid}{errors}{$err};
-
- printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors);
- }
- }
-
- printf("\nwrite counts by pid:\n\n");
-
- printf("%6s %20s %10s %10s\n", "pid", "comm",
- "# writes", "bytes_written");
- printf("%6s %-20s %10s %10s\n", "------", "--------------------",
- "-----------", "----------");
-
- foreach my $pid (sort {$writes{$b}{bytes_written} <=>
- $writes{$a}{bytes_written}} keys %writes) {
- my $comm = $writes{$pid}{comm};
- my $total_writes = $writes{$pid}{total_writes};
- my $bytes_written = $writes{$pid}{bytes_written};
-
- printf("%6s %-20s %10s %10s\n", $pid, $comm,
- $total_writes, $bytes_written);
- }
-
- printf("\nfailed writes by pid:\n\n");
-
- printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
- printf("%6s %20s %6s %10s\n", "------", "--------------------",
- "------", "----------");
-
- foreach my $pid (keys %writes) {
- my $comm = $writes{$pid}{comm};
- foreach my $err (sort {$writes{$b}{comm} cmp $writes{$a}{comm}}
- keys %{$writes{$pid}{errors}}) {
- my $errors = $writes{$pid}{errors}{$err};
-
- printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors);
- }
- }
-
- print_unhandled();
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
- if ((scalar keys %unhandled) == 0) {
- return;
- }
-
- print "\nunhandled events:\n\n";
-
- printf("%-40s %10s\n", "event", "count");
- printf("%-40s %10s\n", "----------------------------------------",
- "-----------");
-
- foreach my $event_name (keys %unhandled) {
- printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
- }
-}
-
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm) = @_;
-
- $unhandled{$event_name}++;
-}
diff --git a/trunk/tools/perf/scripts/perl/wakeup-latency.pl b/trunk/tools/perf/scripts/perl/wakeup-latency.pl
deleted file mode 100644
index ed58ef284e23..000000000000
--- a/trunk/tools/perf/scripts/perl/wakeup-latency.pl
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2009, Tom Zanussi
-# Licensed under the terms of the GNU GPL License version 2
-
-# Display avg/min/max wakeup latency
-
-# The common_* event handler fields are the most useful fields common to
-# all events. They don't necessarily correspond to the 'common_*' fields
-# in the status files. Those fields not available as handler params can
-# be retrieved via script functions of the form get_common_*().
-
-use 5.010000;
-use strict;
-use warnings;
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Util;
-
-my %last_wakeup;
-
-my $max_wakeup_latency;
-my $min_wakeup_latency;
-my $total_wakeup_latency;
-my $total_wakeups;
-
-sub sched::sched_switch
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $prev_comm, $prev_pid, $prev_prio, $prev_state, $next_comm, $next_pid,
- $next_prio) = @_;
-
- my $wakeup_ts = $last_wakeup{$common_cpu}{ts};
- if ($wakeup_ts) {
- my $switch_ts = nsecs($common_secs, $common_nsecs);
- my $wakeup_latency = $switch_ts - $wakeup_ts;
- if ($wakeup_latency > $max_wakeup_latency) {
- $max_wakeup_latency = $wakeup_latency;
- }
- if ($wakeup_latency < $min_wakeup_latency) {
- $min_wakeup_latency = $wakeup_latency;
- }
- $total_wakeup_latency += $wakeup_latency;
- $total_wakeups++;
- }
- $last_wakeup{$common_cpu}{ts} = 0;
-}
-
-sub sched::sched_wakeup
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $comm, $pid, $prio, $success, $target_cpu) = @_;
-
- $last_wakeup{$target_cpu}{ts} = nsecs($common_secs, $common_nsecs);
-}
-
-sub trace_begin
-{
- $min_wakeup_latency = 1000000000;
- $max_wakeup_latency = 0;
-}
-
-sub trace_end
-{
- printf("wakeup_latency stats:\n\n");
- print "total_wakeups: $total_wakeups\n";
- printf("avg_wakeup_latency (ns): %u\n",
- avg($total_wakeup_latency, $total_wakeups));
- printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency);
- printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency);
-
- print_unhandled();
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
- if ((scalar keys %unhandled) == 0) {
- return;
- }
-
- print "\nunhandled events:\n\n";
-
- printf("%-40s %10s\n", "event", "count");
- printf("%-40s %10s\n", "----------------------------------------",
- "-----------");
-
- foreach my $event_name (keys %unhandled) {
- printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
- }
-}
-
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm) = @_;
-
- $unhandled{$event_name}++;
-}
diff --git a/trunk/tools/perf/scripts/perl/workqueue-stats.pl b/trunk/tools/perf/scripts/perl/workqueue-stats.pl
deleted file mode 100644
index 511302c8a494..000000000000
--- a/trunk/tools/perf/scripts/perl/workqueue-stats.pl
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2009, Tom Zanussi
-# Licensed under the terms of the GNU GPL License version 2
-
-# Displays workqueue stats
-#
-# Usage:
-#
-# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e
-# workqueue:workqueue_destruction -e workqueue:workqueue_execution
-# -e workqueue:workqueue_insertion
-#
-# perf trace -p -s tools/perf/scripts/perl/workqueue-stats.pl
-
-use 5.010000;
-use strict;
-use warnings;
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Util;
-
-my @cpus;
-
-sub workqueue::workqueue_destruction
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $thread_comm, $thread_pid) = @_;
-
- $cpus[$common_cpu]{$thread_pid}{destroyed}++;
- $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_creation
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $thread_comm, $thread_pid, $cpu) = @_;
-
- $cpus[$common_cpu]{$thread_pid}{created}++;
- $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_execution
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $thread_comm, $thread_pid, $func) = @_;
-
- $cpus[$common_cpu]{$thread_pid}{executed}++;
- $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_insertion
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm,
- $thread_comm, $thread_pid, $func) = @_;
-
- $cpus[$common_cpu]{$thread_pid}{inserted}++;
- $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub trace_end
-{
- print "workqueue work stats:\n\n";
- my $cpu = 0;
- printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name");
- printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----");
- foreach my $pidhash (@cpus) {
- while ((my $pid, my $wqhash) = each %$pidhash) {
- my $ins = $$wqhash{'inserted'};
- my $exe = $$wqhash{'executed'};
- my $comm = $$wqhash{'comm'};
- if ($ins || $exe) {
- printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm);
- }
- }
- $cpu++;
- }
-
- $cpu = 0;
- print "\nworkqueue lifecycle stats:\n\n";
- printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name");
- printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----");
- foreach my $pidhash (@cpus) {
- while ((my $pid, my $wqhash) = each %$pidhash) {
- my $created = $$wqhash{'created'};
- my $destroyed = $$wqhash{'destroyed'};
- my $comm = $$wqhash{'comm'};
- if ($created || $destroyed) {
- printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed,
- $comm);
- }
- }
- $cpu++;
- }
-
- print_unhandled();
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
- if ((scalar keys %unhandled) == 0) {
- return;
- }
-
- print "\nunhandled events:\n\n";
-
- printf("%-40s %10s\n", "event", "count");
- printf("%-40s %10s\n", "----------------------------------------",
- "-----------");
-
- foreach my $event_name (keys %unhandled) {
- printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
- }
-}
-
-sub trace_unhandled
-{
- my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
- $common_pid, $common_comm) = @_;
-
- $unhandled{$event_name}++;
-}
diff --git a/trunk/tools/perf/util/ctype.c b/trunk/tools/perf/util/ctype.c
index 35073621e5de..0b791bd346bc 100644
--- a/trunk/tools/perf/util/ctype.c
+++ b/trunk/tools/perf/util/ctype.c
@@ -29,11 +29,3 @@ unsigned char sane_ctype[256] = {
A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */
/* Nothing in the 128.. range */
};
-
-const char *graph_line =
- "_____________________________________________________________________"
- "_____________________________________________________________________";
-const char *graph_dotted_line =
- "---------------------------------------------------------------------"
- "---------------------------------------------------------------------"
- "---------------------------------------------------------------------";
diff --git a/trunk/tools/perf/util/data_map.c b/trunk/tools/perf/util/data_map.c
index ca0bedf637c2..14cb8465eb08 100644
--- a/trunk/tools/perf/util/data_map.c
+++ b/trunk/tools/perf/util/data_map.c
@@ -8,9 +8,11 @@ static struct perf_file_handler *curr_handler;
static unsigned long mmap_window = 32;
static char __cwd[PATH_MAX];
-static int process_event_stub(event_t *event __used)
+static int
+process_event_stub(event_t *event __used,
+ unsigned long offset __used,
+ unsigned long head __used)
{
- dump_printf(": unhandled!\n");
return 0;
}
@@ -38,62 +40,30 @@ void register_perf_file_handler(struct perf_file_handler *handler)
curr_handler = handler;
}
-static const char *event__name[] = {
- [0] = "TOTAL",
- [PERF_RECORD_MMAP] = "MMAP",
- [PERF_RECORD_LOST] = "LOST",
- [PERF_RECORD_COMM] = "COMM",
- [PERF_RECORD_EXIT] = "EXIT",
- [PERF_RECORD_THROTTLE] = "THROTTLE",
- [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
- [PERF_RECORD_FORK] = "FORK",
- [PERF_RECORD_READ] = "READ",
- [PERF_RECORD_SAMPLE] = "SAMPLE",
-};
-
-unsigned long event__total[PERF_RECORD_MAX];
-
-void event__print_totals(void)
-{
- int i;
- for (i = 0; i < PERF_RECORD_MAX; ++i)
- pr_info("%10s events: %10ld\n",
- event__name[i], event__total[i]);
-}
-
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
trace_event(event);
- if (event->header.type < PERF_RECORD_MAX) {
- dump_printf("%p [%p]: PERF_RECORD_%s",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event__name[event->header.type]);
- ++event__total[0];
- ++event__total[event->header.type];
- }
-
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
- return curr_handler->process_sample_event(event);
+ return curr_handler->process_sample_event(event, offset, head);
case PERF_RECORD_MMAP:
- return curr_handler->process_mmap_event(event);
+ return curr_handler->process_mmap_event(event, offset, head);
case PERF_RECORD_COMM:
- return curr_handler->process_comm_event(event);
+ return curr_handler->process_comm_event(event, offset, head);
case PERF_RECORD_FORK:
- return curr_handler->process_fork_event(event);
+ return curr_handler->process_fork_event(event, offset, head);
case PERF_RECORD_EXIT:
- return curr_handler->process_exit_event(event);
+ return curr_handler->process_exit_event(event, offset, head);
case PERF_RECORD_LOST:
- return curr_handler->process_lost_event(event);
+ return curr_handler->process_lost_event(event, offset, head);
case PERF_RECORD_READ:
- return curr_handler->process_read_event(event);
+ return curr_handler->process_read_event(event, offset, head);
case PERF_RECORD_THROTTLE:
- return curr_handler->process_throttle_event(event);
+ return curr_handler->process_throttle_event(event, offset, head);
case PERF_RECORD_UNTHROTTLE:
- return curr_handler->process_unthrottle_event(event);
+ return curr_handler->process_unthrottle_event(event, offset, head);
default:
curr_handler->total_unknown++;
return -1;
@@ -136,7 +106,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
int *cwdlen,
char **cwd)
{
- int err;
+ int ret, rc = EXIT_FAILURE;
struct perf_header *header;
unsigned long head, shift;
unsigned long offset = 0;
@@ -148,63 +118,56 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
int input;
char *buf;
- if (curr_handler == NULL) {
- pr_debug("Forgot to register perf file handler\n");
- return -EINVAL;
- }
+ if (!curr_handler)
+ die("Forgot to register perf file handler");
page_size = getpagesize();
input = open(input_name, O_RDONLY);
if (input < 0) {
- pr_err("Failed to open file: %s", input_name);
+ fprintf(stderr, " failed to open file: %s", input_name);
if (!strcmp(input_name, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- return -errno;
+ fprintf(stderr, " (try 'perf record' first)");
+ fprintf(stderr, "\n");
+ exit(-1);
}
- if (fstat(input, &input_stat) < 0) {
- pr_err("failed to stat file");
- err = -errno;
- goto out_close;
+ ret = fstat(input, &input_stat);
+ if (ret < 0) {
+ perror("failed to stat file");
+ exit(-1);
}
- err = -EACCES;
if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file: %s not owned by current user or root\n",
+ fprintf(stderr, "file: %s not owned by current user or root\n",
input_name);
- goto out_close;
+ exit(-1);
}
- if (input_stat.st_size == 0) {
- pr_info("zero-sized file, nothing to do!\n");
- goto done;
+ if (!input_stat.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
}
- err = -ENOMEM;
- header = perf_header__new();
- if (header == NULL)
- goto out_close;
-
- err = perf_header__read(header, input);
- if (err < 0)
- goto out_delete;
- *pheader = header;
+ *pheader = perf_header__read(input);
+ header = *pheader;
head = header->data_offset;
sample_type = perf_header__sample_type(header);
- err = -EINVAL;
- if (curr_handler->sample_type_check &&
- curr_handler->sample_type_check(sample_type) < 0)
- goto out_delete;
+ if (curr_handler->sample_type_check)
+ if (curr_handler->sample_type_check(sample_type) < 0)
+ exit(-1);
+
+ if (load_kernel(NULL) < 0) {
+ perror("failed to load kernel symbols");
+ return EXIT_FAILURE;
+ }
if (!full_paths) {
if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
- pr_err("failed to get the current directory\n");
- err = -errno;
- goto out_delete;
+ perror("failed to get the current directory");
+ return EXIT_FAILURE;
}
*cwd = __cwd;
*cwdlen = strlen(*cwd);
@@ -218,12 +181,11 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
head -= shift;
remap:
- buf = mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
+ buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+ MAP_SHARED, input, offset);
if (buf == MAP_FAILED) {
- pr_err("failed to mmap file\n");
- err = -errno;
- goto out_delete;
+ perror("failed to mmap file");
+ exit(-1);
}
more:
@@ -280,12 +242,10 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
goto more;
done:
- err = 0;
-out_close:
+ rc = EXIT_SUCCESS;
close(input);
- return err;
-out_delete:
- perf_header__delete(header);
- goto out_close;
+ return rc;
}
+
+
diff --git a/trunk/tools/perf/util/data_map.h b/trunk/tools/perf/util/data_map.h
index 3180ff7e3633..ae036ecd7625 100644
--- a/trunk/tools/perf/util/data_map.h
+++ b/trunk/tools/perf/util/data_map.h
@@ -4,7 +4,7 @@
#include "event.h"
#include "header.h"
-typedef int (*event_type_handler_t)(event_t *);
+typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long);
struct perf_file_handler {
event_type_handler_t process_sample_event;
diff --git a/trunk/tools/perf/util/event.c b/trunk/tools/perf/util/event.c
index 414b89d1bde9..1dae7e3b400d 100644
--- a/trunk/tools/perf/util/event.c
+++ b/trunk/tools/perf/util/event.c
@@ -2,7 +2,6 @@
#include "event.h"
#include "debug.h"
#include "string.h"
-#include "thread.h"
static pid_t event__synthesize_comm(pid_t pid, int full,
int (*process)(event_t *event))
@@ -176,137 +175,3 @@ void event__synthesize_threads(int (*process)(event_t *event))
closedir(proc);
}
-
-char *event__cwd;
-int event__cwdlen;
-
-struct events_stats event__stats;
-
-int event__process_comm(event_t *self)
-{
- struct thread *thread = threads__findnew(self->comm.pid);
-
- dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);
-
- if (thread == NULL || thread__set_comm(thread, self->comm.comm)) {
- dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
- return -1;
- }
-
- return 0;
-}
-
-int event__process_lost(event_t *self)
-{
- dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
- event__stats.lost += self->lost.lost;
- return 0;
-}
-
-int event__process_mmap(event_t *self)
-{
- struct thread *thread = threads__findnew(self->mmap.pid);
- struct map *map = map__new(&self->mmap, MAP__FUNCTION,
- event__cwd, event__cwdlen);
-
- dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n",
- self->mmap.pid, self->mmap.tid,
- (void *)(long)self->mmap.start,
- (void *)(long)self->mmap.len,
- (void *)(long)self->mmap.pgoff,
- self->mmap.filename);
-
- if (thread == NULL || map == NULL)
- dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
- else
- thread__insert_map(thread, map);
-
- return 0;
-}
-
-int event__process_task(event_t *self)
-{
- struct thread *thread = threads__findnew(self->fork.pid);
- struct thread *parent = threads__findnew(self->fork.ppid);
-
- dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
- self->fork.ppid, self->fork.ptid);
- /*
- * A thread clone will have the same PID for both parent and child.
- */
- if (thread == parent)
- return 0;
-
- if (self->header.type == PERF_RECORD_EXIT)
- return 0;
-
- if (thread == NULL || parent == NULL ||
- thread__fork(thread, parent) < 0) {
- dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
- return -1;
- }
-
- return 0;
-}
-
-void thread__find_addr_location(struct thread *self, u8 cpumode,
- enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter)
-{
- struct thread *thread = al->thread = self;
-
- al->addr = addr;
-
- if (cpumode & PERF_RECORD_MISC_KERNEL) {
- al->level = 'k';
- thread = kthread;
- } else if (cpumode & PERF_RECORD_MISC_USER)
- al->level = '.';
- else {
- al->level = 'H';
- al->map = NULL;
- al->sym = NULL;
- return;
- }
-try_again:
- al->map = thread__find_map(thread, type, al->addr);
- if (al->map == NULL) {
- /*
- * If this is outside of all known maps, and is a negative
- * address, try to look it up in the kernel dso, as it might be
- * a vsyscall or vdso (which executes in user-mode).
- *
- * XXX This is nasty, we should have a symbol list in the
- * "[vdso]" dso, but for now lets use the old trick of looking
- * in the whole kernel symbol list.
- */
- if ((long long)al->addr < 0 && thread != kthread) {
- thread = kthread;
- goto try_again;
- }
- al->sym = NULL;
- } else {
- al->addr = al->map->map_ip(al->map, al->addr);
- al->sym = map__find_symbol(al->map, al->addr, filter);
- }
-}
-
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
- symbol_filter_t filter)
-{
- u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = threads__findnew(self->ip.pid);
-
- if (thread == NULL)
- return -1;
-
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
- thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
- self->ip.ip, al, filter);
- dump_printf(" ...... dso: %s\n",
- al->map ? al->map->dso->long_name :
- al->level == 'H' ? "[hypervisor]" : "");
- return 0;
-}
diff --git a/trunk/tools/perf/util/event.h b/trunk/tools/perf/util/event.h
index a4cc8105cf67..1f771ce3a957 100644
--- a/trunk/tools/perf/util/event.h
+++ b/trunk/tools/perf/util/event.h
@@ -69,6 +69,13 @@ struct build_id_event {
char filename[];
};
+struct build_id_list {
+ struct build_id_event event;
+ struct list_head list;
+ const char *dso_name;
+ int len;
+};
+
typedef union event_union {
struct perf_event_header header;
struct ip_event ip;
@@ -80,19 +87,6 @@ typedef union event_union {
struct sample_event sample;
} event_t;
-struct events_stats {
- unsigned long total;
- unsigned long lost;
-};
-
-void event__print_totals(void);
-
-enum map_type {
- MAP__FUNCTION = 0,
-
- MAP__NR_TYPES,
-};
-
struct map {
union {
struct rb_node rb_node;
@@ -100,7 +94,6 @@ struct map {
};
u64 start;
u64 end;
- enum map_type type;
u64 pgoff;
u64 (*map_ip)(struct map *, u64);
u64 (*unmap_ip)(struct map *, u64);
@@ -126,34 +119,15 @@ struct symbol;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
-void map__init(struct map *self, enum map_type type,
- u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct mmap_event *event, enum map_type,
- char *cwd, int cwdlen);
-void map__delete(struct map *self);
+void map__init(struct map *self, u64 start, u64 end, u64 pgoff,
+ struct dso *dso);
+struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
-struct symbol *map__find_symbol(struct map *self, u64 addr,
- symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
+struct symbol *map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter);
int event__synthesize_thread(pid_t pid, int (*process)(event_t *event));
void event__synthesize_threads(int (*process)(event_t *event));
-extern char *event__cwd;
-extern int event__cwdlen;
-extern struct events_stats event__stats;
-extern unsigned long event__total[PERF_RECORD_MAX];
-
-int event__process_comm(event_t *self);
-int event__process_lost(event_t *self);
-int event__process_mmap(event_t *self);
-int event__process_task(event_t *self);
-
-struct addr_location;
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
- symbol_filter_t filter);
-
#endif /* __PERF_RECORD_H */
diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c
index 4805e6dfd23c..b01a9537977f 100644
--- a/trunk/tools/perf/util/header.c
+++ b/trunk/tools/perf/util/header.c
@@ -63,7 +63,7 @@ int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
*/
struct perf_header *perf_header__new(void)
{
- struct perf_header *self = zalloc(sizeof(*self));
+ struct perf_header *self = calloc(sizeof(*self), 1);
if (self != NULL) {
self->size = 1;
@@ -78,24 +78,16 @@ struct perf_header *perf_header__new(void)
return self;
}
-void perf_header__delete(struct perf_header *self)
-{
- int i;
-
- for (i = 0; i < self->attrs; ++i)
- perf_header_attr__delete(self->attr[i]);
-
- free(self->attr);
- free(self);
-}
-
int perf_header__add_attr(struct perf_header *self,
struct perf_header_attr *attr)
{
+ int pos = self->attrs;
+
if (self->frozen)
return -1;
- if (self->attrs == self->size) {
+ self->attrs++;
+ if (self->attrs > self->size) {
int nsize = self->size * 2;
struct perf_header_attr **nattr;
@@ -106,8 +98,7 @@ int perf_header__add_attr(struct perf_header *self,
self->size = nsize;
self->attr = nattr;
}
-
- self->attr[self->attrs++] = attr;
+ self->attr[pos] = attr;
return 0;
}
@@ -176,7 +167,7 @@ static int do_write(int fd, const void *buf, size_t size)
int ret = write(fd, buf, size);
if (ret < 0)
- return -errno;
+ return -1;
size -= ret;
buf += ret;
@@ -185,59 +176,43 @@ static int do_write(int fd, const void *buf, size_t size)
return 0;
}
-static int __dsos__write_buildid_table(struct list_head *head, int fd)
+static int write_buildid_table(int fd, struct list_head *id_head)
{
- struct dso *pos;
-
- list_for_each_entry(pos, head, node) {
- int err;
- struct build_id_event b;
- size_t len;
-
- if (!pos->has_build_id)
- continue;
- len = pos->long_name_len + 1;
- len = ALIGN(len, 64);
- memset(&b, 0, sizeof(b));
- memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
- b.header.size = sizeof(b) + len;
- err = do_write(fd, &b, sizeof(b));
- if (err < 0)
- return err;
- err = do_write(fd, pos->long_name, len);
- if (err < 0)
- return err;
+ struct build_id_list *iter, *next;
+
+ list_for_each_entry_safe(iter, next, id_head, list) {
+ struct build_id_event *b = &iter->event;
+
+ if (do_write(fd, b, sizeof(*b)) < 0 ||
+ do_write(fd, iter->dso_name, iter->len) < 0)
+ return -1;
+ list_del(&iter->list);
+ free(iter);
}
return 0;
}
-static int dsos__write_buildid_table(int fd)
-{
- int err = __dsos__write_buildid_table(&dsos__kernel, fd);
- if (err == 0)
- err = __dsos__write_buildid_table(&dsos__user, fd);
- return err;
-}
-
-static int perf_header__adds_write(struct perf_header *self, int fd)
+static void
+perf_header__adds_write(struct perf_header *self, int fd)
{
+ LIST_HEAD(id_list);
int nr_sections;
struct perf_file_section *feat_sec;
int sec_size;
u64 sec_start;
- int idx = 0, err;
+ int idx = 0;
- if (dsos__read_build_ids())
+ if (fetch_build_id_table(&id_list))
perf_header__set_feat(self, HEADER_BUILD_ID);
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
- return 0;
+ return;
feat_sec = calloc(sizeof(*feat_sec), nr_sections);
- if (feat_sec == NULL)
- return -ENOMEM;
+ if (!feat_sec)
+ die("No memory");
sec_size = sizeof(*feat_sec) * nr_sections;
@@ -263,29 +238,23 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
/* Write build-ids */
buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
- err = dsos__write_buildid_table(fd);
- if (err < 0) {
- pr_debug("failed to write buildid table\n");
- goto out_free;
- }
+ if (write_buildid_table(fd, &id_list) < 0)
+ die("failed to write buildid table");
buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset;
}
lseek(fd, sec_start, SEEK_SET);
- err = do_write(fd, feat_sec, sec_size);
- if (err < 0)
- pr_debug("failed to write feature section\n");
-out_free:
+ if (do_write(fd, feat_sec, sec_size) < 0)
+ die("failed to write feature section");
free(feat_sec);
- return err;
}
-int perf_header__write(struct perf_header *self, int fd, bool at_exit)
+void perf_header__write(struct perf_header *self, int fd, bool at_exit)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
struct perf_header_attr *attr;
- int i, err;
+ int i;
lseek(fd, sizeof(f_header), SEEK_SET);
@@ -294,11 +263,8 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
attr = self->attr[i];
attr->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(fd, attr->id, attr->ids * sizeof(u64));
- if (err < 0) {
- pr_debug("failed to write perf header\n");
- return err;
- }
+ if (do_write(fd, attr->id, attr->ids * sizeof(u64)) < 0)
+ die("failed to write perf header");
}
@@ -314,30 +280,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
.size = attr->ids * sizeof(u64),
}
};
- err = do_write(fd, &f_attr, sizeof(f_attr));
- if (err < 0) {
- pr_debug("failed to write perf header attribute\n");
- return err;
- }
+ if (do_write(fd, &f_attr, sizeof(f_attr)) < 0)
+ die("failed to write perf header attribute");
}
self->event_offset = lseek(fd, 0, SEEK_CUR);
self->event_size = event_count * sizeof(struct perf_trace_event_type);
- if (events) {
- err = do_write(fd, events, self->event_size);
- if (err < 0) {
- pr_debug("failed to write perf header events\n");
- return err;
- }
- }
+ if (events)
+ if (do_write(fd, events, self->event_size) < 0)
+ die("failed to write perf header events");
self->data_offset = lseek(fd, 0, SEEK_CUR);
- if (at_exit) {
- err = perf_header__adds_write(self, fd);
- if (err < 0)
- return err;
- }
+ if (at_exit)
+ perf_header__adds_write(self, fd);
f_header = (struct perf_file_header){
.magic = PERF_MAGIC,
@@ -360,15 +316,11 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
lseek(fd, 0, SEEK_SET);
- err = do_write(fd, &f_header, sizeof(f_header));
- if (err < 0) {
- pr_debug("failed to write perf header\n");
- return err;
- }
+ if (do_write(fd, &f_header, sizeof(f_header)) < 0)
+ die("failed to write perf header");
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
self->frozen = 1;
- return 0;
}
static void do_read(int fd, void *buf, size_t size)
@@ -478,17 +430,19 @@ static int perf_file_section__process(struct perf_file_section *self,
return 0;
}
-int perf_header__read(struct perf_header *self, int fd)
+struct perf_header *perf_header__read(int fd)
{
+ struct perf_header *self = perf_header__new();
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
- if (perf_file_header__read(&f_header, self, fd) < 0) {
- pr_debug("incompatible file format\n");
- return -EINVAL;
- }
+ if (self == NULL)
+ die("nomem");
+
+ if (perf_file_header__read(&f_header, self, fd) < 0)
+ die("incompatible file format");
nr_attrs = f_header.attrs.size / sizeof(f_attr);
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -502,7 +456,7 @@ int perf_header__read(struct perf_header *self, int fd)
attr = perf_header_attr__new(&f_attr.attr);
if (attr == NULL)
- return -ENOMEM;
+ die("nomem");
nr_ids = f_attr.ids.size / sizeof(u64);
lseek(fd, f_attr.ids.offset, SEEK_SET);
@@ -510,15 +464,11 @@ int perf_header__read(struct perf_header *self, int fd)
for (j = 0; j < nr_ids; j++) {
do_read(fd, &f_id, sizeof(f_id));
- if (perf_header_attr__add_id(attr, f_id) < 0) {
- perf_header_attr__delete(attr);
- return -ENOMEM;
- }
- }
- if (perf_header__add_attr(self, attr) < 0) {
- perf_header_attr__delete(attr);
- return -ENOMEM;
+ if (perf_header_attr__add_id(attr, f_id) < 0)
+ die("nomem");
}
+ if (perf_header__add_attr(self, attr) < 0)
+ die("nomem");
lseek(fd, tmp, SEEK_SET);
}
@@ -526,8 +476,8 @@ int perf_header__read(struct perf_header *self, int fd)
if (f_header.event_types.size) {
lseek(fd, f_header.event_types.offset, SEEK_SET);
events = malloc(f_header.event_types.size);
- if (events == NULL)
- return -ENOMEM;
+ if (!events)
+ die("nomem");
do_read(fd, events, f_header.event_types.size);
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
@@ -537,7 +487,8 @@ int perf_header__read(struct perf_header *self, int fd)
lseek(fd, self->data_offset, SEEK_SET);
self->frozen = 1;
- return 0;
+
+ return self;
}
u64 perf_header__sample_type(struct perf_header *header)
diff --git a/trunk/tools/perf/util/header.h b/trunk/tools/perf/util/header.h
index d1dbe2b79c42..f46a94e09eea 100644
--- a/trunk/tools/perf/util/header.h
+++ b/trunk/tools/perf/util/header.h
@@ -55,11 +55,8 @@ struct perf_header {
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
-struct perf_header *perf_header__new(void);
-void perf_header__delete(struct perf_header *self);
-
-int perf_header__read(struct perf_header *self, int fd);
-int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+struct perf_header *perf_header__read(int fd);
+void perf_header__write(struct perf_header *self, int fd, bool at_exit);
int perf_header__add_attr(struct perf_header *self,
struct perf_header_attr *attr);
@@ -78,6 +75,8 @@ perf_header__find_attr(u64 id, struct perf_header *header);
void perf_header__set_feat(struct perf_header *self, int feat);
bool perf_header__has_feat(const struct perf_header *self, int feat);
+struct perf_header *perf_header__new(void);
+
int perf_header__process_sections(struct perf_header *self, int fd,
int (*process)(struct perf_file_section *self,
int feat, int fd));
diff --git a/trunk/tools/perf/util/hist.c b/trunk/tools/perf/util/hist.c
index 0ebf6ee16caa..7393a02fd8d4 100644
--- a/trunk/tools/perf/util/hist.c
+++ b/trunk/tools/perf/util/hist.c
@@ -10,23 +10,31 @@ struct callchain_param callchain_param = {
.min_percent = 0.5
};
+unsigned long total;
+unsigned long total_mmap;
+unsigned long total_comm;
+unsigned long total_fork;
+unsigned long total_unknown;
+unsigned long total_lost;
+
/*
* histogram, sorted on item, collects counts
*/
-struct hist_entry *__hist_entry__add(struct addr_location *al,
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym,
struct symbol *sym_parent,
- u64 count, bool *hit)
+ u64 ip, u64 count, char level, bool *hit)
{
struct rb_node **p = &hist.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct hist_entry entry = {
- .thread = al->thread,
- .map = al->map,
- .sym = al->sym,
- .ip = al->addr,
- .level = al->level,
+ .thread = thread,
+ .map = map,
+ .sym = sym,
+ .ip = ip,
+ .level = level,
.count = count,
.parent = sym_parent,
};
diff --git a/trunk/tools/perf/util/hist.h b/trunk/tools/perf/util/hist.h
index 3020db0c9292..ac2149c559b0 100644
--- a/trunk/tools/perf/util/hist.h
+++ b/trunk/tools/perf/util/hist.h
@@ -36,9 +36,9 @@ extern unsigned long total_fork;
extern unsigned long total_unknown;
extern unsigned long total_lost;
-struct hist_entry *__hist_entry__add(struct addr_location *al,
- struct symbol *parent,
- u64 count, bool *hit);
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym, struct symbol *parent,
+ u64 ip, u64 count, char level, bool *hit);
extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
extern void hist_entry__free(struct hist_entry *);
diff --git a/trunk/tools/perf/util/include/asm/bug.h b/trunk/tools/perf/util/include/asm/bug.h
deleted file mode 100644
index 7fcc6810adc2..000000000000
--- a/trunk/tools/perf/util/include/asm/bug.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _PERF_ASM_GENERIC_BUG_H
-#define _PERF_ASM_GENERIC_BUG_H
-
-#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
-
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
- unlikely(__ret_warn_on); \
-})
-
-#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once)) \
- if (WARN(!__warned, format)) \
- __warned = 1; \
- unlikely(__ret_warn_once); \
-})
-#endif
diff --git a/trunk/tools/perf/util/include/linux/bitops.h b/trunk/tools/perf/util/include/linux/bitops.h
index 8d63116e9435..ace57c36d1d0 100644
--- a/trunk/tools/perf/util/include/linux/bitops.h
+++ b/trunk/tools/perf/util/include/linux/bitops.h
@@ -7,8 +7,6 @@
#define CONFIG_GENERIC_FIND_FIRST_BIT
#include "../../../../include/linux/bitops.h"
-#undef __KERNEL__
-
static inline void set_bit(int nr, unsigned long *addr)
{
addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
diff --git a/trunk/tools/perf/util/map.c b/trunk/tools/perf/util/map.c
index 69f94fe9db20..94ca95073c40 100644
--- a/trunk/tools/perf/util/map.c
+++ b/trunk/tools/perf/util/map.c
@@ -20,10 +20,9 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
return n;
}
-void map__init(struct map *self, enum map_type type,
- u64 start, u64 end, u64 pgoff, struct dso *dso)
+void map__init(struct map *self, u64 start, u64 end, u64 pgoff,
+ struct dso *dso)
{
- self->type = type;
self->start = start;
self->end = end;
self->pgoff = pgoff;
@@ -33,8 +32,7 @@ void map__init(struct map *self, enum map_type type,
RB_CLEAR_NODE(&self->rb_node);
}
-struct map *map__new(struct mmap_event *event, enum map_type type,
- char *cwd, int cwdlen)
+struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
{
struct map *self = malloc(sizeof(*self));
@@ -65,7 +63,7 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
if (dso == NULL)
goto out_delete;
- map__init(self, type, event->start, event->start + event->len,
+ map__init(self, event->start, event->start + event->len,
event->pgoff, dso);
if (self->dso == vdso || anon)
@@ -77,37 +75,12 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
return NULL;
}
-void map__delete(struct map *self)
-{
- free(self);
-}
-
-void map__fixup_start(struct map *self)
-{
- struct rb_root *symbols = &self->dso->symbols[self->type];
- struct rb_node *nd = rb_first(symbols);
- if (nd != NULL) {
- struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
- self->start = sym->start;
- }
-}
-
-void map__fixup_end(struct map *self)
-{
- struct rb_root *symbols = &self->dso->symbols[self->type];
- struct rb_node *nd = rb_last(symbols);
- if (nd != NULL) {
- struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
- self->end = sym->end;
- }
-}
-
#define DSO__DELETED "(deleted)"
-struct symbol *map__find_symbol(struct map *self, u64 addr,
- symbol_filter_t filter)
+struct symbol *
+map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter)
{
- if (!dso__loaded(self->dso, self->type)) {
+ if (!self->dso->loaded) {
int nr = dso__load(self->dso, self, filter);
if (nr < 0) {
@@ -140,7 +113,7 @@ struct symbol *map__find_symbol(struct map *self, u64 addr,
}
}
- return self->dso->find_symbol(self->dso, self->type, addr);
+ return self->dso->find_symbol(self->dso, ip);
}
struct map *map__clone(struct map *self)
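
With the enum map_type parameter gone, every map describes functions again and map__init() takes only the address range, page offset and backing DSO. A hedged sketch of building a map by hand, assuming the perf-internal struct map, dsos__findnew() and map__init() from this patch (the pathname and addresses are made up); it is not a standalone program:

/* Construct a map for a region of a DSO, as map__new() does after the
 * revert: no map_type argument any more. */
struct dso *dso = dsos__findnew("/lib/libc-2.10.1.so");
struct map *map = malloc(sizeof(*map));

if (dso != NULL && map != NULL) {
        map__init(map, /*start*/ 0x7f0000000000ULL,
                       /*end*/   0x7f0000160000ULL,
                       /*pgoff*/ 0, dso);
        /* map->map_ip()/map->unmap_ip() now translate between runtime
         * addresses and DSO-relative addresses for symbol lookup. */
}
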
diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c
index 9e5dbd66d34d..0faf4f2bb5ca 100644
--- a/trunk/tools/perf/util/parse-events.c
+++ b/trunk/tools/perf/util/parse-events.c
@@ -1,4 +1,4 @@
-#include "../../../include/linux/hw_breakpoint.h"
+
#include "util.h"
#include "../perf.h"
#include "parse-options.h"
@@ -197,7 +197,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
if (id == config) {
closedir(evt_dir);
closedir(sys_dir);
- path = zalloc(sizeof(path));
+ path = calloc(1, sizeof(path));
path->system = malloc(MAX_EVENT_LENGTH);
if (!path->system) {
free(path);
@@ -540,81 +540,6 @@ static enum event_result parse_tracepoint_event(const char **strp,
attr, strp);
}
-static enum event_result
-parse_breakpoint_type(const char *type, const char **strp,
- struct perf_event_attr *attr)
-{
- int i;
-
- for (i = 0; i < 3; i++) {
- if (!type[i])
- break;
-
- switch (type[i]) {
- case 'r':
- attr->bp_type |= HW_BREAKPOINT_R;
- break;
- case 'w':
- attr->bp_type |= HW_BREAKPOINT_W;
- break;
- case 'x':
- attr->bp_type |= HW_BREAKPOINT_X;
- break;
- default:
- return EVT_FAILED;
- }
- }
- if (!attr->bp_type) /* Default */
- attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
-
- *strp = type + i;
-
- return EVT_HANDLED;
-}
-
-static enum event_result
-parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
-{
- const char *target;
- const char *type;
- char *endaddr;
- u64 addr;
- enum event_result err;
-
- target = strchr(*strp, ':');
- if (!target)
- return EVT_FAILED;
-
- if (strncmp(*strp, "mem", target - *strp) != 0)
- return EVT_FAILED;
-
- target++;
-
- addr = strtoull(target, &endaddr, 0);
- if (target == endaddr)
- return EVT_FAILED;
-
- attr->bp_addr = addr;
- *strp = endaddr;
-
- type = strchr(target, ':');
-
- /* If no type is defined, just rw as default */
- if (!type) {
- attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
- } else {
- err = parse_breakpoint_type(++type, strp, attr);
- if (err == EVT_FAILED)
- return EVT_FAILED;
- }
-
- /* We should find a nice way to override the access type */
- attr->bp_len = HW_BREAKPOINT_LEN_4;
- attr->type = PERF_TYPE_BREAKPOINT;
-
- return EVT_HANDLED;
-}
-
static int check_events(const char *str, unsigned int i)
{
int n;
@@ -748,10 +673,6 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr)
if (ret != EVT_FAILED)
goto modifier;
- ret = parse_breakpoint_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
-
fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
return EVT_FAILED;
@@ -938,9 +859,6 @@ void print_events(void)
"rNNN");
printf("\n");
- printf(" %-42s [hardware breakpoint]\n", "mem:<addr>[:access]");
- printf("\n");
-
print_tracepoint_events();
exit(129);
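
The hunks above drop the "mem:<addr>[:access]" syntax that turned a watchpoint specification into a PERF_TYPE_BREAKPOINT attribute. A standalone sketch of the same parsing idea, deliberately kept separate from perf's own parser; the access string stands in for the HW_BREAKPOINT_R/W/X attribute bits the removed code set:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "mem:<addr>[:access]" where access is any combination of r, w, x.
 * Purely illustrative of the removed logic. */
static int parse_mem_event(const char *str, unsigned long long *addr,
                           char *access /* at least 4 bytes */)
{
        char *end;
        size_t n;

        if (strncmp(str, "mem:", 4) != 0)
                return -1;
        *addr = strtoull(str + 4, &end, 0);
        if (end == str + 4)
                return -1;                      /* no address given */

        strcpy(access, "rw");                   /* default: read-write watchpoint */
        if (*end == ':') {
                n = strspn(end + 1, "rwx");
                if (n == 0 || n > 3 || end[1 + n] != '\0')
                        return -1;
                memcpy(access, end + 1, n);
                access[n] = '\0';
        } else if (*end != '\0') {
                return -1;
        }
        return 0;
}

int main(void)
{
        unsigned long long addr;
        char access[4];

        if (parse_mem_event("mem:0xdeadbeef:w", &addr, access) == 0)
                printf("breakpoint at %#llx, access '%s'\n", addr, access);
        return 0;
}
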
diff --git a/trunk/tools/perf/util/probe-event.c b/trunk/tools/perf/util/probe-event.c
deleted file mode 100644
index cd7fbda5e2a5..000000000000
--- a/trunk/tools/perf/util/probe-event.c
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * probe-event.c : perf-probe definition to kprobe_events format converter
- *
- * Written by Masami Hiramatsu
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-
-#define _GNU_SOURCE
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#undef _GNU_SOURCE
-#include "event.h"
-#include "string.h"
-#include "strlist.h"
-#include "debug.h"
-#include "parse-events.h" /* For debugfs_path */
-#include "probe-event.h"
-
-#define MAX_CMDLEN 256
-#define MAX_PROBE_ARGS 128
-#define PERFPROBE_GROUP "probe"
-
-#define semantic_error(msg ...) die("Semantic error :" msg)
-
-/* If there is no space to write, returns -E2BIG. */
-static int e_snprintf(char *str, size_t size, const char *format, ...)
-{
- int ret;
- va_list ap;
- va_start(ap, format);
- ret = vsnprintf(str, size, format, ap);
- va_end(ap);
- if (ret >= (int)size)
- ret = -E2BIG;
- return ret;
-}
-
-/* Parse probepoint definition. */
-static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
-{
- char *ptr, *tmp;
- char c, nc = 0;
- /*
- *
- * perf probe SRC:LN
- * perf probe FUNC[+OFFS|%return][@SRC]
- */
-
- ptr = strpbrk(arg, ":+@%");
- if (ptr) {
- nc = *ptr;
- *ptr++ = '\0';
- }
-
- /* Check arg is function or file and copy it */
- if (strchr(arg, '.')) /* File */
- pp->file = strdup(arg);
- else /* Function */
- pp->function = strdup(arg);
- DIE_IF(pp->file == NULL && pp->function == NULL);
-
- /* Parse other options */
- while (ptr) {
- arg = ptr;
- c = nc;
- ptr = strpbrk(arg, ":+@%");
- if (ptr) {
- nc = *ptr;
- *ptr++ = '\0';
- }
- switch (c) {
- case ':': /* Line number */
- pp->line = strtoul(arg, &tmp, 0);
- if (*tmp != '\0')
- semantic_error("There is non-digit charactor"
- " in line number.");
- break;
- case '+': /* Byte offset from a symbol */
- pp->offset = strtoul(arg, &tmp, 0);
- if (*tmp != '\0')
- semantic_error("There is non-digit charactor"
- " in offset.");
- break;
- case '@': /* File name */
- if (pp->file)
- semantic_error("SRC@SRC is not allowed.");
- pp->file = strdup(arg);
- DIE_IF(pp->file == NULL);
- if (ptr)
- semantic_error("@SRC must be the last "
- "option.");
- break;
- case '%': /* Probe places */
- if (strcmp(arg, "return") == 0) {
- pp->retprobe = 1;
- } else /* Others not supported yet */
- semantic_error("%%%s is not supported.", arg);
- break;
- default:
- DIE_IF("Program has a bug.");
- break;
- }
- }
-
- /* Exclusion check */
- if (pp->line && pp->offset)
- semantic_error("Offset can't be used with line number.");
-
- if (!pp->line && pp->file && !pp->function)
- semantic_error("File always requires line number.");
-
- if (pp->offset && !pp->function)
- semantic_error("Offset requires an entry function.");
-
- if (pp->retprobe && !pp->function)
- semantic_error("Return probe requires an entry function.");
-
- if ((pp->offset || pp->line) && pp->retprobe)
- semantic_error("Offset/Line can't be used with return probe.");
-
- pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n",
- pp->function, pp->file, pp->line, pp->offset, pp->retprobe);
-}
-
-/* Parse perf-probe event definition */
-int parse_perf_probe_event(const char *str, struct probe_point *pp)
-{
- char **argv;
- int argc, i, need_dwarf = 0;
-
- argv = argv_split(str, &argc);
- if (!argv)
- die("argv_split failed.");
- if (argc > MAX_PROBE_ARGS + 1)
- semantic_error("Too many arguments");
-
- /* Parse probe point */
- parse_perf_probe_probepoint(argv[0], pp);
- if (pp->file || pp->line)
- need_dwarf = 1;
-
- /* Copy arguments and ensure return probe has no C argument */
- pp->nr_args = argc - 1;
- pp->args = zalloc(sizeof(char *) * pp->nr_args);
- for (i = 0; i < pp->nr_args; i++) {
- pp->args[i] = strdup(argv[i + 1]);
- if (!pp->args[i])
- die("Failed to copy argument.");
- if (is_c_varname(pp->args[i])) {
- if (pp->retprobe)
- semantic_error("You can't specify local"
- " variable for kretprobe");
- need_dwarf = 1;
- }
- }
-
- argv_free(argv);
- return need_dwarf;
-}
-
-/* Parse kprobe_events event into struct probe_point */
-void parse_trace_kprobe_event(const char *str, char **group, char **event,
- struct probe_point *pp)
-{
- char pr;
- char *p;
- int ret, i, argc;
- char **argv;
-
- pr_debug("Parsing kprobe_events: %s\n", str);
- argv = argv_split(str, &argc);
- if (!argv)
- die("argv_split failed.");
- if (argc < 2)
- semantic_error("Too less arguments.");
-
- /* Scan event and group name. */
- ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
- &pr, (float *)(void *)group, (float *)(void *)event);
- if (ret != 3)
- semantic_error("Failed to parse event name: %s", argv[0]);
- pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr);
-
- if (!pp)
- goto end;
-
- pp->retprobe = (pr == 'r');
-
- /* Scan function name and offset */
- ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset);
- if (ret == 1)
- pp->offset = 0;
-
- /* kprobe_events doesn't have this information */
- pp->line = 0;
- pp->file = NULL;
-
- pp->nr_args = argc - 2;
- pp->args = zalloc(sizeof(char *) * pp->nr_args);
- for (i = 0; i < pp->nr_args; i++) {
- p = strchr(argv[i + 2], '=');
- if (p) /* We don't need which register is assigned. */
- *p = '\0';
- pp->args[i] = strdup(argv[i + 2]);
- if (!pp->args[i])
- die("Failed to copy argument.");
- }
-
-end:
- argv_free(argv);
-}
-
-int synthesize_perf_probe_event(struct probe_point *pp)
-{
- char *buf;
- char offs[64] = "", line[64] = "";
- int i, len, ret;
-
- pp->probes[0] = buf = zalloc(MAX_CMDLEN);
- if (!buf)
- die("Failed to allocate memory by zalloc.");
- if (pp->offset) {
- ret = e_snprintf(offs, 64, "+%d", pp->offset);
- if (ret <= 0)
- goto error;
- }
- if (pp->line) {
- ret = e_snprintf(line, 64, ":%d", pp->line);
- if (ret <= 0)
- goto error;
- }
-
- if (pp->function)
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function,
- offs, pp->retprobe ? "%return" : "", line);
- else
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->file, line);
- if (ret <= 0)
- goto error;
- len = ret;
-
- for (i = 0; i < pp->nr_args; i++) {
- ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
- pp->args[i]);
- if (ret <= 0)
- goto error;
- len += ret;
- }
- pp->found = 1;
-
- return pp->found;
-error:
- free(pp->probes[0]);
-
- return ret;
-}
-
-int synthesize_trace_kprobe_event(struct probe_point *pp)
-{
- char *buf;
- int i, len, ret;
-
- pp->probes[0] = buf = zalloc(MAX_CMDLEN);
- if (!buf)
- die("Failed to allocate memory by zalloc.");
- ret = e_snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset);
- if (ret <= 0)
- goto error;
- len = ret;
-
- for (i = 0; i < pp->nr_args; i++) {
- ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
- pp->args[i]);
- if (ret <= 0)
- goto error;
- len += ret;
- }
- pp->found = 1;
-
- return pp->found;
-error:
- free(pp->probes[0]);
-
- return ret;
-}
-
-static int open_kprobe_events(int flags, int mode)
-{
- char buf[PATH_MAX];
- int ret;
-
- ret = e_snprintf(buf, PATH_MAX, "%s/../kprobe_events", debugfs_path);
- if (ret < 0)
- die("Failed to make kprobe_events path.");
-
- ret = open(buf, flags, mode);
- if (ret < 0) {
- if (errno == ENOENT)
- die("kprobe_events file does not exist -"
- " please rebuild with CONFIG_KPROBE_TRACER.");
- else
- die("Could not open kprobe_events file: %s",
- strerror(errno));
- }
- return ret;
-}
-
-/* Get raw string list of current kprobe_events */
-static struct strlist *get_trace_kprobe_event_rawlist(int fd)
-{
- int ret, idx;
- FILE *fp;
- char buf[MAX_CMDLEN];
- char *p;
- struct strlist *sl;
-
- sl = strlist__new(true, NULL);
-
- fp = fdopen(dup(fd), "r");
- while (!feof(fp)) {
- p = fgets(buf, MAX_CMDLEN, fp);
- if (!p)
- break;
-
- idx = strlen(p) - 1;
- if (p[idx] == '\n')
- p[idx] = '\0';
- ret = strlist__add(sl, buf);
- if (ret < 0)
- die("strlist__add failed: %s", strerror(-ret));
- }
- fclose(fp);
-
- return sl;
-}
-
-/* Free and zero clear probe_point */
-static void clear_probe_point(struct probe_point *pp)
-{
- int i;
-
- if (pp->function)
- free(pp->function);
- if (pp->file)
- free(pp->file);
- for (i = 0; i < pp->nr_args; i++)
- free(pp->args[i]);
- if (pp->args)
- free(pp->args);
- for (i = 0; i < pp->found; i++)
- free(pp->probes[i]);
- memset(pp, 0, sizeof(pp));
-}
-
-/* List up current perf-probe events */
-void show_perf_probe_events(void)
-{
- unsigned int i;
- int fd;
- char *group, *event;
- struct probe_point pp;
- struct strlist *rawlist;
- struct str_node *ent;
-
- fd = open_kprobe_events(O_RDONLY, 0);
- rawlist = get_trace_kprobe_event_rawlist(fd);
- close(fd);
-
- for (i = 0; i < strlist__nr_entries(rawlist); i++) {
- ent = strlist__entry(rawlist, i);
- parse_trace_kprobe_event(ent->s, &group, &event, &pp);
- synthesize_perf_probe_event(&pp);
- printf("[%s:%s]\t%s\n", group, event, pp.probes[0]);
- free(group);
- free(event);
- clear_probe_point(&pp);
- }
-
- strlist__delete(rawlist);
-}
-
-/* Get current perf-probe event names */
-static struct strlist *get_perf_event_names(int fd)
-{
- unsigned int i;
- char *group, *event;
- struct strlist *sl, *rawlist;
- struct str_node *ent;
-
- rawlist = get_trace_kprobe_event_rawlist(fd);
-
- sl = strlist__new(false, NULL);
- for (i = 0; i < strlist__nr_entries(rawlist); i++) {
- ent = strlist__entry(rawlist, i);
- parse_trace_kprobe_event(ent->s, &group, &event, NULL);
- strlist__add(sl, event);
- free(group);
- }
-
- strlist__delete(rawlist);
-
- return sl;
-}
-
-static int write_trace_kprobe_event(int fd, const char *buf)
-{
- int ret;
-
- ret = write(fd, buf, strlen(buf));
- if (ret <= 0)
- die("Failed to create event.");
- else
- printf("Added new event: %s\n", buf);
-
- return ret;
-}
-
-static void get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist)
-{
- int i, ret;
- for (i = 0; i < MAX_EVENT_INDEX; i++) {
- ret = e_snprintf(buf, len, "%s_%d", base, i);
- if (ret < 0)
- die("snprintf() failed: %s", strerror(-ret));
- if (!strlist__has_entry(namelist, buf))
- break;
- }
- if (i == MAX_EVENT_INDEX)
- die("Too many events are on the same function.");
-}
-
-void add_trace_kprobe_events(struct probe_point *probes, int nr_probes)
-{
- int i, j, fd;
- struct probe_point *pp;
- char buf[MAX_CMDLEN];
- char event[64];
- struct strlist *namelist;
-
- fd = open_kprobe_events(O_RDWR, O_APPEND);
- /* Get current event names */
- namelist = get_perf_event_names(fd);
-
- for (j = 0; j < nr_probes; j++) {
- pp = probes + j;
- for (i = 0; i < pp->found; i++) {
- /* Get an unused new event name */
- get_new_event_name(event, 64, pp->function, namelist);
- snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n",
- pp->retprobe ? 'r' : 'p',
- PERFPROBE_GROUP, event,
- pp->probes[i]);
- write_trace_kprobe_event(fd, buf);
- /* Add added event name to namelist */
- strlist__add(namelist, event);
- }
- }
- close(fd);
-}
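
probe-event.c, deleted above, converted perf-probe definitions into the text format consumed by ftrace's kprobe_events file, writing lines such as "p:probe/schedule_0 schedule+0" (entry probe) or "r:probe/schedule_1 schedule+0" (return probe). A standalone sketch that only reproduces that line formatting; the group name and event-naming scheme come from the removed code, everything else is illustrative:

#include <stdio.h>

/* Format one kprobe_events line the way add_trace_kprobe_events() did:
 *   <p|r>:<group>/<event> <function>+<offset> [args...]            */
static int format_kprobe(char *buf, size_t len, int retprobe,
                         const char *event, const char *func, int offset)
{
        return snprintf(buf, len, "%c:probe/%s %s+%d\n",
                        retprobe ? 'r' : 'p', event, func, offset);
}

int main(void)
{
        char buf[256];

        format_kprobe(buf, sizeof(buf), 0, "schedule_0", "schedule", 0);
        /* The removed code wrote such strings to the tracing kprobe_events
         * file found relative to the debugfs mount. */
        fputs(buf, stdout);
        return 0;
}
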
diff --git a/trunk/tools/perf/util/probe-event.h b/trunk/tools/perf/util/probe-event.h
deleted file mode 100644
index 0c6fe56fe38a..000000000000
--- a/trunk/tools/perf/util/probe-event.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _PROBE_EVENT_H
-#define _PROBE_EVENT_H
-
-#include "probe-finder.h"
-#include "strlist.h"
-
-extern int parse_perf_probe_event(const char *str, struct probe_point *pp);
-extern int synthesize_perf_probe_event(struct probe_point *pp);
-extern void parse_trace_kprobe_event(const char *str, char **group,
- char **event, struct probe_point *pp);
-extern int synthesize_trace_kprobe_event(struct probe_point *pp);
-extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes);
-extern void show_perf_probe_events(void);
-
-/* Maximum index number of event-name postfix */
-#define MAX_EVENT_INDEX 1024
-
-#endif /*_PROBE_EVENT_H */
diff --git a/trunk/tools/perf/util/string.c b/trunk/tools/perf/util/string.c
index f24a8cc933d5..227043577e06 100644
--- a/trunk/tools/perf/util/string.c
+++ b/trunk/tools/perf/util/string.c
@@ -1,3 +1,5 @@
+#include
+#include
#include "string.h"
#include "util.h"
@@ -125,104 +127,3 @@ s64 perf_atoll(const char *str)
out:
return length;
}
-
-/*
- * Helper function for splitting a string into an argv-like array.
- * originaly copied from lib/argv_split.c
- */
-static const char *skip_sep(const char *cp)
-{
- while (*cp && isspace(*cp))
- cp++;
-
- return cp;
-}
-
-static const char *skip_arg(const char *cp)
-{
- while (*cp && !isspace(*cp))
- cp++;
-
- return cp;
-}
-
-static int count_argc(const char *str)
-{
- int count = 0;
-
- while (*str) {
- str = skip_sep(str);
- if (*str) {
- count++;
- str = skip_arg(str);
- }
- }
-
- return count;
-}
-
-/**
- * argv_free - free an argv
- * @argv - the argument vector to be freed
- *
- * Frees an argv and the strings it points to.
- */
-void argv_free(char **argv)
-{
- char **p;
- for (p = argv; *p; p++)
- free(*p);
-
- free(argv);
-}
-
-/**
- * argv_split - split a string at whitespace, returning an argv
- * @str: the string to be split
- * @argcp: returned argument count
- *
- * Returns an array of pointers to strings which are split out from
- * @str. This is performed by strictly splitting on white-space; no
- * quote processing is performed. Multiple whitespace characters are
- * considered to be a single argument separator. The returned array
- * is always NULL-terminated. Returns NULL on memory allocation
- * failure.
- */
-char **argv_split(const char *str, int *argcp)
-{
- int argc = count_argc(str);
- char **argv = zalloc(sizeof(*argv) * (argc+1));
- char **argvp;
-
- if (argv == NULL)
- goto out;
-
- if (argcp)
- *argcp = argc;
-
- argvp = argv;
-
- while (*str) {
- str = skip_sep(str);
-
- if (*str) {
- const char *p = str;
- char *t;
-
- str = skip_arg(str);
-
- t = strndup(p, str-p);
- if (t == NULL)
- goto fail;
- *argvp++ = t;
- }
- }
- *argvp = NULL;
-
-out:
- return argv;
-
-fail:
- argv_free(argv);
- return NULL;
-}
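
The argv_split()/argv_free() helpers removed above (a userspace copy of lib/argv_split.c) turned a command string into a NULL-terminated argv array for the probe-event parser. A short usage sketch, assuming the removed helpers were still linked in (not standalone); the input string is made up:

int argc, i;
char **argv = argv_split("schedule+4 prev cpu", &argc);

if (argv != NULL) {
        for (i = 0; i < argc; i++)              /* "schedule+4", "prev", "cpu" */
                printf("arg[%d] = '%s'\n", i, argv[i]);
        argv_free(argv);
}
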
diff --git a/trunk/tools/perf/util/string.h b/trunk/tools/perf/util/string.h
index bfecec265a1a..e50b07f80827 100644
--- a/trunk/tools/perf/util/string.h
+++ b/trunk/tools/perf/util/string.h
@@ -6,8 +6,6 @@
int hex2u64(const char *ptr, u64 *val);
char *strxfrchar(char *s, char from, char to);
s64 perf_atoll(const char *str);
-char **argv_split(const char *str, int *argcp);
-void argv_free(char **argv);
#define _STR(x) #x
#define STR(x) _STR(x)
diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c
index fffcb937cdcb..1b77e81b38de 100644
--- a/trunk/tools/perf/util/symbol.c
+++ b/trunk/tools/perf/util/symbol.c
@@ -6,17 +6,11 @@
#include "debug.h"
-#include
#include
#include
#include
-#include
#include
-#ifndef NT_GNU_BUILD_ID
-#define NT_GNU_BUILD_ID 3
-#endif
-
enum dso_origin {
DSO__ORIG_KERNEL = 0,
DSO__ORIG_JAVA_JIT,
@@ -28,37 +22,17 @@ enum dso_origin {
DSO__ORIG_NOT_FOUND,
};
-static void dsos__add(struct list_head *head, struct dso *dso);
-static struct map *thread__find_map_by_name(struct thread *self, char *name);
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
-static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter);
+static void dsos__add(struct dso *dso);
+static struct dso *dsos__find(const char *name);
+static struct map *map__new2(u64 start, struct dso *dso);
+static void kernel_maps__insert(struct map *map);
unsigned int symbol__priv_size;
-static int vmlinux_path__nr_entries;
-static char **vmlinux_path;
-
-static struct symbol_conf symbol_conf__defaults = {
- .use_modules = true,
- .try_vmlinux_path = true,
-};
-
-static struct thread kthread_mem;
-struct thread *kthread = &kthread_mem;
-
-bool dso__loaded(const struct dso *self, enum map_type type)
-{
- return self->loaded & (1 << type);
-}
-static void dso__set_loaded(struct dso *self, enum map_type type)
-{
- self->loaded |= (1 << type);
-}
+static struct rb_root kernel_maps;
-static void symbols__fixup_end(struct rb_root *self)
+static void dso__fixup_sym_end(struct dso *self)
{
- struct rb_node *nd, *prevnd = rb_first(self);
+ struct rb_node *nd, *prevnd = rb_first(&self->syms);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -79,10 +53,10 @@ static void symbols__fixup_end(struct rb_root *self)
curr->end = roundup(curr->start, 4096);
}
-static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
+static void kernel_maps__fixup_end(void)
{
struct map *prev, *curr;
- struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
+ struct rb_node *nd, *prevnd = rb_first(&kernel_maps);
if (prevnd == NULL)
return;
@@ -95,31 +69,25 @@ static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
prev->end = curr->start - 1;
}
- /*
- * We still haven't the actual symbols, so guess the
- * last map final address.
- */
- curr->end = ~0UL;
-}
-
-static void thread__fixup_maps_end(struct thread *self)
-{
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- __thread__fixup_maps_end(self, i);
+ nd = rb_last(&curr->dso->syms);
+ if (nd) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ curr->end = sym->end;
+ }
}
static struct symbol *symbol__new(u64 start, u64 len, const char *name)
{
size_t namelen = strlen(name) + 1;
- struct symbol *self = zalloc(symbol__priv_size +
- sizeof(*self) + namelen);
- if (self == NULL)
+ struct symbol *self = calloc(1, (symbol__priv_size +
+ sizeof(*self) + namelen));
+ if (!self)
return NULL;
- if (symbol__priv_size)
+ if (symbol__priv_size) {
+ memset(self, 0, symbol__priv_size);
self = ((void *)self) + symbol__priv_size;
-
+ }
self->start = start;
self->end = len ? start + len - 1 : start;
@@ -141,30 +109,15 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp)
self->start, self->end, self->name);
}
-static void dso__set_long_name(struct dso *self, char *name)
-{
- if (name == NULL)
- return;
- self->long_name = name;
- self->long_name_len = strlen(name);
-}
-
-static void dso__set_basename(struct dso *self)
-{
- self->short_name = basename(self->long_name);
-}
-
struct dso *dso__new(const char *name)
{
struct dso *self = malloc(sizeof(*self) + strlen(name) + 1);
if (self != NULL) {
- int i;
strcpy(self->name, name);
- dso__set_long_name(self, self->name);
+ self->long_name = self->name;
self->short_name = self->name;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- self->symbols[i] = RB_ROOT;
+ self->syms = RB_ROOT;
self->find_symbol = dso__find_symbol;
self->slen_calculated = 0;
self->origin = DSO__ORIG_NOT_FOUND;
@@ -175,24 +128,22 @@ struct dso *dso__new(const char *name)
return self;
}
-static void symbols__delete(struct rb_root *self)
+static void dso__delete_symbols(struct dso *self)
{
struct symbol *pos;
- struct rb_node *next = rb_first(self);
+ struct rb_node *next = rb_first(&self->syms);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, self);
+ rb_erase(&pos->rb_node, &self->syms);
symbol__delete(pos);
}
}
void dso__delete(struct dso *self)
{
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- symbols__delete(&self->symbols[i]);
+ dso__delete_symbols(self);
if (self->long_name != self->name)
free(self->long_name);
free(self);
@@ -204,9 +155,9 @@ void dso__set_build_id(struct dso *self, void *build_id)
self->has_build_id = 1;
}
-static void symbols__insert(struct rb_root *self, struct symbol *sym)
+static void dso__insert_symbol(struct dso *self, struct symbol *sym)
{
- struct rb_node **p = &self->rb_node;
+ struct rb_node **p = &self->syms.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
@@ -220,17 +171,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym)
p = &(*p)->rb_right;
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, self);
+ rb_insert_color(&sym->rb_node, &self->syms);
}
-static struct symbol *symbols__find(struct rb_root *self, u64 ip)
+struct symbol *dso__find_symbol(struct dso *self, u64 ip)
{
struct rb_node *n;
if (self == NULL)
return NULL;
- n = self->rb_node;
+ n = self->syms.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -246,11 +197,6 @@ static struct symbol *symbols__find(struct rb_root *self, u64 ip)
return NULL;
}
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr)
-{
- return symbols__find(&self->symbols[type], addr);
-}
-
int build_id__sprintf(u8 *self, int len, char *bf)
{
char *bid = bf;
@@ -274,14 +220,15 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
return fprintf(fp, "%s", sbuild_id);
}
-size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
+size_t dso__fprintf(struct dso *self, FILE *fp)
{
struct rb_node *nd;
size_t ret = fprintf(fp, "dso: %s (", self->short_name);
ret += dso__fprintf_buildid(self, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
+
+ for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
@@ -294,11 +241,10 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
* so that we can in the next step set the symbol ->end address and then
* call kernel_maps__split_kallsyms.
*/
-static int dso__load_all_kallsyms(struct dso *self, struct map *map)
+static int kernel_maps__load_all_kallsyms(void)
{
char *line = NULL;
size_t n;
- struct rb_root *root = &self->symbols[map->type];
FILE *file = fopen("/proc/kallsyms", "r");
if (file == NULL)
@@ -341,11 +287,13 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
if (sym == NULL)
goto out_delete_line;
+
/*
* We will pass the symbols to the filter later, in
- * map__split_kallsyms, when we have split the maps per module
+ * kernel_maps__split_kallsyms, when we have split the
+ * maps per module
*/
- symbols__insert(root, sym);
+ dso__insert_symbol(kernel_map->dso, sym);
}
free(line);
@@ -364,14 +312,12 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread *thread,
- symbol_filter_t filter)
+static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules)
{
- struct map *curr_map = map;
+ struct map *map = kernel_map;
struct symbol *pos;
int count = 0;
- struct rb_root *root = &self->symbols[map->type];
- struct rb_node *next = rb_first(root);
+ struct rb_node *next = rb_first(&kernel_map->dso->syms);
int kernel_range = 0;
while (next) {
@@ -382,16 +328,16 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
module = strchr(pos->name, '\t');
if (module) {
- if (!thread->use_modules)
- goto discard_symbol;
+ if (!use_modules)
+ goto delete_symbol;
*module++ = '\0';
- if (strcmp(self->name, module)) {
- curr_map = thread__find_map_by_name(thread, module);
- if (curr_map == NULL) {
- pr_debug("/proc/{kallsyms,modules} "
- "inconsistency!\n");
+ if (strcmp(map->dso->name, module)) {
+ map = kernel_maps__find_by_dso_name(module);
+ if (!map) {
+ pr_err("/proc/{kallsyms,modules} "
+ "inconsistency!\n");
return -1;
}
}
@@ -399,9 +345,9 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
* So that we look just like we get from .ko files,
* i.e. not prelinked, relative to map->start.
*/
- pos->start = curr_map->map_ip(curr_map, pos->start);
- pos->end = curr_map->map_ip(curr_map, pos->end);
- } else if (curr_map != map) {
+ pos->start = map->map_ip(map, pos->start);
+ pos->end = map->map_ip(map, pos->end);
+ } else if (map != kernel_map) {
char dso_name[PATH_MAX];
struct dso *dso;
@@ -412,24 +358,25 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
if (dso == NULL)
return -1;
- curr_map = map__new2(pos->start, dso, map->type);
+ map = map__new2(pos->start, dso);
if (map == NULL) {
dso__delete(dso);
return -1;
}
- curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- __thread__insert_map(thread, curr_map);
+ map->map_ip = map->unmap_ip = identity__map_ip;
+ kernel_maps__insert(map);
++kernel_range;
}
- if (filter && filter(curr_map, pos)) {
-discard_symbol: rb_erase(&pos->rb_node, root);
+ if (filter && filter(map, pos)) {
+delete_symbol:
+ rb_erase(&pos->rb_node, &kernel_map->dso->syms);
symbol__delete(pos);
} else {
- if (curr_map != map) {
- rb_erase(&pos->rb_node, root);
- symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+ if (map != kernel_map) {
+ rb_erase(&pos->rb_node, &kernel_map->dso->syms);
+ dso__insert_symbol(map->dso, pos);
}
count++;
}
@@ -439,22 +386,32 @@ discard_symbol: rb_erase(&pos->rb_node, root);
}
-static int dso__load_kallsyms(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+static int kernel_maps__load_kallsyms(symbol_filter_t filter, int use_modules)
{
- if (dso__load_all_kallsyms(self, map) < 0)
+ if (kernel_maps__load_all_kallsyms())
return -1;
- symbols__fixup_end(&self->symbols[map->type]);
- self->origin = DSO__ORIG_KERNEL;
+ dso__fixup_sym_end(kernel_map->dso);
- return dso__split_kallsyms(self, map, thread, filter);
+ return kernel_maps__split_kallsyms(filter, use_modules);
}
-size_t kernel_maps__fprintf(FILE *fp)
+static size_t kernel_maps__fprintf(FILE *fp)
{
size_t printed = fprintf(fp, "Kernel maps:\n");
- printed += thread__fprintf_maps(kthread, fp);
+ struct rb_node *nd;
+
+ for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 1) {
+ printed += dso__fprintf(pos->dso, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+
return printed + fprintf(fp, "END kernel maps\n");
}
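
The kallsyms path reshuffled above first reads every /proc/kallsyms entry into the kernel DSO and only afterwards splits the symbols across per-module maps. Each line has the form "<hex address> <type letter> <name>", with module symbols carrying a trailing tab plus "[modname]" that the splitting pass keys on. A standalone sketch of parsing one such line; the parsing details are illustrative, not perf's exact code:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* One line as read from /proc/kallsyms. */
        const char *line = "ffffffffa0001000 t e1000_clean\t[e1000e]";
        unsigned long long addr;
        char type, name[128], *module;

        if (sscanf(line, "%llx %c %127[^\n]", &addr, &type, name) != 3)
                return 1;

        module = strchr(name, '\t');            /* module suffix, if any */
        if (module)
                *module++ = '\0';

        printf("%#llx %c %s%s%s\n", addr, type, name,
               module ? " in " : "", module ? module : "");
        return 0;
}
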
@@ -504,7 +461,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map,
if (filter && filter(map, sym))
symbol__delete(sym);
else {
- symbols__insert(&self->symbols[map->type], sym);
+ dso__insert_symbol(self, sym);
nr_syms++;
}
}
@@ -702,7 +659,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
if (filter && filter(map, f))
symbol__delete(f);
else {
- symbols__insert(&self->symbols[map->type], f);
+ dso__insert_symbol(self, f);
++nr;
}
}
@@ -724,7 +681,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
if (filter && filter(map, f))
symbol__delete(f);
else {
- symbols__insert(&self->symbols[map->type], f);
+ dso__insert_symbol(self, f);
++nr;
}
}
@@ -744,9 +701,9 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
return 0;
}
-static int dso__load_sym(struct dso *self, struct map *map,
- struct thread *thread, const char *name, int fd,
- symbol_filter_t filter, int kernel, int kmodule)
+static int dso__load_sym(struct dso *self, struct map *map, const char *name,
+ int fd, symbol_filter_t filter, int kernel,
+ int kmodule)
{
struct map *curr_map = map;
struct dso *curr_dso = self;
@@ -849,7 +806,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
snprintf(dso_name, sizeof(dso_name),
"%s%s", self->short_name, section_name);
- curr_map = thread__find_map_by_name(thread, dso_name);
+ curr_map = kernel_maps__find_by_dso_name(dso_name);
if (curr_map == NULL) {
u64 start = sym.st_value;
@@ -859,8 +816,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
curr_dso = dso__new(dso_name);
if (curr_dso == NULL)
goto out_elf_end;
- curr_map = map__new2(start, curr_dso,
- MAP__FUNCTION);
+ curr_map = map__new2(start, curr_dso);
if (curr_map == NULL) {
dso__delete(curr_dso);
goto out_elf_end;
@@ -868,8 +824,8 @@ static int dso__load_sym(struct dso *self, struct map *map,
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
curr_dso->origin = DSO__ORIG_KERNEL;
- __thread__insert_map(kthread, curr_map);
- dsos__add(&dsos__kernel, curr_dso);
+ kernel_maps__insert(curr_map);
+ dsos__add(curr_dso);
} else
curr_dso = curr_map->dso;
@@ -899,7 +855,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
if (filter && filter(curr_map, f))
symbol__delete(f);
else {
- symbols__insert(&curr_dso->symbols[curr_map->type], f);
+ dso__insert_symbol(curr_dso, f);
nr++;
}
}
@@ -908,7 +864,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0)
- symbols__fixup_end(&self->symbols[map->type]);
+ dso__fixup_sym_end(self);
err = nr;
out_elf_end:
elf_end(elf);
@@ -916,46 +872,47 @@ static int dso__load_sym(struct dso *self, struct map *map,
return err;
}
-static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
+bool fetch_build_id_table(struct list_head *head)
{
- return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
-}
-
-static bool __dsos__read_build_ids(struct list_head *head)
-{
- bool have_build_id = false;
+ bool have_buildid = false;
struct dso *pos;
- list_for_each_entry(pos, head, node)
- if (filename__read_build_id(pos->long_name, pos->build_id,
- sizeof(pos->build_id)) > 0) {
- have_build_id = true;
- pos->has_build_id = true;
- }
+ list_for_each_entry(pos, &dsos, node) {
+ struct build_id_list *new;
+ struct build_id_event b;
+ size_t len;
- return have_build_id;
-}
+ if (filename__read_build_id(pos->long_name,
+ &b.build_id,
+ sizeof(b.build_id)) < 0)
+ continue;
+ have_buildid = true;
+ memset(&b.header, 0, sizeof(b.header));
+ len = strlen(pos->long_name) + 1;
+ len = ALIGN(len, 64);
+ b.header.size = sizeof(b) + len;
-bool dsos__read_build_ids(void)
-{
- return __dsos__read_build_ids(&dsos__kernel) ||
- __dsos__read_build_ids(&dsos__user);
-}
+ new = malloc(sizeof(*new));
+ if (!new)
+ die("No memory\n");
-/*
- * Align offset to 4 bytes as needed for note name and descriptor data.
- */
-#define NOTE_ALIGN(n) (((n) + 3) & -4U)
+ memcpy(&new->event, &b, sizeof(b));
+ new->dso_name = pos->long_name;
+ new->len = len;
+
+ list_add_tail(&new->list, head);
+ }
+
+ return have_buildid;
+}
int filename__read_build_id(const char *filename, void *bf, size_t size)
{
int fd, err = -1;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
- Elf_Data *data;
+ Elf_Data *build_id_data;
Elf_Scn *sec;
- Elf_Kind ek;
- void *ptr;
Elf *elf;
if (size < BUILD_ID_SIZE)
@@ -971,10 +928,6 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
goto out_close;
}
- ek = elf_kind(elf);
- if (ek != ELF_K_ELF)
- goto out_elf_end;
-
if (gelf_getehdr(elf, &ehdr) == NULL) {
pr_err("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
@@ -982,37 +935,14 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
sec = elf_section_by_name(elf, &ehdr, &shdr,
".note.gnu.build-id", NULL);
- if (sec == NULL) {
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".notes", NULL);
- if (sec == NULL)
- goto out_elf_end;
- }
-
- data = elf_getdata(sec, NULL);
- if (data == NULL)
+ if (sec == NULL)
goto out_elf_end;
- ptr = data->d_buf;
- while (ptr < (data->d_buf + data->d_size)) {
- GElf_Nhdr *nhdr = ptr;
- int namesz = NOTE_ALIGN(nhdr->n_namesz),
- descsz = NOTE_ALIGN(nhdr->n_descsz);
- const char *name;
-
- ptr += sizeof(*nhdr);
- name = ptr;
- ptr += namesz;
- if (nhdr->n_type == NT_GNU_BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU")) {
- if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
- memcpy(bf, ptr, BUILD_ID_SIZE);
- err = BUILD_ID_SIZE;
- break;
- }
- }
- ptr += descsz;
- }
+ build_id_data = elf_getdata(sec, NULL);
+ if (build_id_data == NULL)
+ goto out_elf_end;
+ memcpy(bf, build_id_data->d_buf + 16, BUILD_ID_SIZE);
+ err = BUILD_ID_SIZE;
out_elf_end:
elf_end(elf);
out_close:
@@ -1021,48 +951,23 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
return err;
}
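
The replacement code above copies the build-id straight from offset 16 of .note.gnu.build-id, relying on the standard note layout: a 12-byte header (namesz, descsz, type), the 4-byte "GNU\0" owner name, then the 20-byte SHA-1 descriptor. The deleted version instead walked the note headers and also accepted a ".notes" section. A standalone sketch of that layout and of hex-formatting the descriptor the way build_id__sprintf() does; the descriptor bytes are invented:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define BUILD_ID_SIZE 20                /* SHA-1 descriptor length */

struct gnu_build_id_note {
        uint32_t namesz;                /* 4: "GNU" plus NUL           */
        uint32_t descsz;                /* 20: SHA-1                   */
        uint32_t type;                  /* 3: NT_GNU_BUILD_ID          */
        char     name[4];               /* "GNU\0"                     */
        uint8_t  desc[BUILD_ID_SIZE];   /* starts 12 + 4 = 16 bytes in */
};

int main(void)
{
        struct gnu_build_id_note note = {
                .namesz = 4, .descsz = BUILD_ID_SIZE, .type = 3, .name = "GNU",
                .desc   = { 0xde, 0xad, 0xbe, 0xef },   /* invented bytes */
        };
        char hex[BUILD_ID_SIZE * 2 + 1];
        int i;

        for (i = 0; i < BUILD_ID_SIZE; i++)
                sprintf(hex + i * 2, "%02x", note.desc[i]);

        printf("descriptor offset = %zu, build-id = %s\n",
               offsetof(struct gnu_build_id_note, desc), hex);
        return 0;
}
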
-int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+static char *dso__read_build_id(struct dso *self)
{
- int fd, err = -1;
+ int len;
+ char *build_id = NULL;
+ unsigned char rawbf[BUILD_ID_SIZE];
- if (size < BUILD_ID_SIZE)
+ len = filename__read_build_id(self->long_name, rawbf, sizeof(rawbf));
+ if (len < 0)
goto out;
- fd = open(filename, O_RDONLY);
- if (fd < 0)
+ build_id = malloc(len * 2 + 1);
+ if (build_id == NULL)
goto out;
- while (1) {
- char bf[BUFSIZ];
- GElf_Nhdr nhdr;
- int namesz, descsz;
-
- if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
- break;
-
- namesz = NOTE_ALIGN(nhdr.n_namesz);
- descsz = NOTE_ALIGN(nhdr.n_descsz);
- if (nhdr.n_type == NT_GNU_BUILD_ID &&
- nhdr.n_namesz == sizeof("GNU")) {
- if (read(fd, bf, namesz) != namesz)
- break;
- if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
- if (read(fd, build_id,
- BUILD_ID_SIZE) == BUILD_ID_SIZE) {
- err = 0;
- break;
- }
- } else if (read(fd, bf, descsz) != descsz)
- break;
- } else {
- int n = namesz + descsz;
- if (read(fd, bf, n) != n)
- break;
- }
- }
- close(fd);
+ build_id__sprintf(rawbf, len, build_id);
out:
- return err;
+ return build_id;
}
char dso__symtab_origin(const struct dso *self)
@@ -1085,17 +990,12 @@ char dso__symtab_origin(const struct dso *self)
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
{
int size = PATH_MAX;
- char *name;
- u8 build_id[BUILD_ID_SIZE];
+ char *name = malloc(size), *build_id = NULL;
int ret = -1;
int fd;
- dso__set_loaded(self, map->type);
-
- if (self->kernel)
- return dso__load_kernel_sym(self, map, kthread, filter);
+ self->loaded = 1;
- name = malloc(size);
if (!name)
return -1;
@@ -1112,6 +1012,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
more:
do {
+ int berr = 0;
+
self->origin++;
switch (self->origin) {
case DSO__ORIG_FEDORA:
@@ -1123,18 +1025,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
self->long_name);
break;
case DSO__ORIG_BUILDID:
- if (filename__read_build_id(self->long_name, build_id,
- sizeof(build_id))) {
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(build_id, sizeof(build_id),
- build_id_hex);
+ build_id = dso__read_build_id(self);
+ if (build_id != NULL) {
snprintf(name, size,
"/usr/lib/debug/.build-id/%.2s/%s.debug",
- build_id_hex, build_id_hex + 2);
- if (self->has_build_id)
- goto compare_build_id;
- break;
+ build_id, build_id + 2);
+ goto compare_build_id;
}
self->origin++;
/* Fall thru */
@@ -1147,18 +1043,25 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
}
if (self->has_build_id) {
- if (filename__read_build_id(name, build_id,
- sizeof(build_id)) < 0)
+ bool match;
+ build_id = malloc(BUILD_ID_SIZE);
+ if (build_id == NULL)
goto more;
+ berr = filename__read_build_id(name, build_id,
+ BUILD_ID_SIZE);
compare_build_id:
- if (!dso__build_id_equal(self, build_id))
+ match = berr > 0 && memcmp(build_id, self->build_id,
+ sizeof(self->build_id)) == 0;
+ free(build_id);
+ build_id = NULL;
+ if (!match)
goto more;
}
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0);
+ ret = dso__load_sym(self, map, name, fd, filter, 0, 0);
close(fd);
/*
@@ -1179,11 +1082,33 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
return ret;
}
-static struct map *thread__find_map_by_name(struct thread *self, char *name)
+struct map *kernel_map;
+
+static void kernel_maps__insert(struct map *map)
+{
+ maps__insert(&kernel_maps, map);
+}
+
+struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp)
+{
+ struct map *map = maps__find(&kernel_maps, ip);
+
+ if (mapp)
+ *mapp = map;
+
+ if (map) {
+ ip = map->map_ip(map, ip);
+ return map->dso->find_symbol(map->dso, ip);
+ }
+
+ return NULL;
+}
+
+struct map *kernel_maps__find_by_dso_name(const char *name)
{
struct rb_node *nd;
- for (nd = rb_first(&self->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
if (map->dso && strcmp(map->dso->name, name) == 0)
@@ -1193,13 +1118,32 @@ static struct map *thread__find_map_by_name(struct thread *self, char *name)
return NULL;
}
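
kernel_maps__find_symbol(), added a little above, resolves a kernel instruction pointer in two steps: find the enclosing map in the global kernel_maps tree, translate the address with map->map_ip(), then search that map's DSO. A call sketch, assuming perf-internal types and a sampled u64 ip (not standalone):

struct map *map;
struct symbol *sym = kernel_maps__find_symbol(ip, &map);

if (sym != NULL)
        printf("%#llx is %s+%#llx in %s\n",
               (unsigned long long)ip, sym->name,
               (unsigned long long)(map->map_ip(map, ip) - sym->start),
               map->dso->short_name);
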
-static int dsos__set_modules_path_dir(char *dirname)
+static int dso__load_module_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int err = 0, fd = open(self->long_name, O_RDONLY);
+
+ self->loaded = 1;
+
+ if (fd < 0) {
+ pr_err("%s: cannot open %s\n", __func__, self->long_name);
+ return err;
+ }
+
+ err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1);
+ close(fd);
+
+ return err;
+}
+
+static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter)
{
struct dirent *dent;
+ int nr_symbols = 0, err;
DIR *dir = opendir(dirname);
if (!dir) {
- pr_debug("%s: cannot open %s dir\n", __func__, dirname);
+ pr_err("%s: cannot open %s dir\n", __func__, dirname);
return -1;
}
@@ -1213,13 +1157,14 @@ static int dsos__set_modules_path_dir(char *dirname)
snprintf(path, sizeof(path), "%s/%s",
dirname, dent->d_name);
- if (dsos__set_modules_path_dir(path) < 0)
+ err = dsos__load_modules_sym_dir(path, filter);
+ if (err < 0)
goto failure;
} else {
char *dot = strrchr(dent->d_name, '.'),
dso_name[PATH_MAX];
struct map *map;
- char *long_name;
+ struct rb_node *last;
if (dot == NULL || strcmp(dot, ".ko"))
continue;
@@ -1227,27 +1172,45 @@ static int dsos__set_modules_path_dir(char *dirname)
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
- map = thread__find_map_by_name(kthread, dso_name);
+ map = kernel_maps__find_by_dso_name(dso_name);
if (map == NULL)
continue;
snprintf(path, sizeof(path), "%s/%s",
dirname, dent->d_name);
- long_name = strdup(path);
- if (long_name == NULL)
+ map->dso->long_name = strdup(path);
+ if (map->dso->long_name == NULL)
+ goto failure;
+
+ err = dso__load_module_sym(map->dso, map, filter);
+ if (err < 0)
goto failure;
- dso__set_long_name(map->dso, long_name);
+ last = rb_last(&map->dso->syms);
+ if (last) {
+ struct symbol *sym;
+ /*
+ * We do this here as well, even having the
+ * symbol size found in the symtab because
+ * misannotated ASM symbols may have the size
+ * set to zero.
+ */
+ dso__fixup_sym_end(map->dso);
+
+ sym = rb_entry(last, struct symbol, rb_node);
+ map->end = map->start + sym->end;
+ }
}
+ nr_symbols += err;
}
- return 0;
+ return nr_symbols;
failure:
closedir(dir);
return -1;
}
-static int dsos__set_modules_path(void)
+static int dsos__load_modules_sym(symbol_filter_t filter)
{
struct utsname uts;
char modules_path[PATH_MAX];
@@ -1258,7 +1221,7 @@ static int dsos__set_modules_path(void)
snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
uts.release);
- return dsos__set_modules_path_dir(modules_path);
+ return dsos__load_modules_sym_dir(modules_path, filter);
}
/*
@@ -1266,7 +1229,7 @@ static int dsos__set_modules_path(void)
* they are loaded) and for vmlinux, where only after we load all the
* symbols we'll know where it starts and ends.
*/
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+static struct map *map__new2(u64 start, struct dso *dso)
{
struct map *self = malloc(sizeof(*self));
@@ -1274,13 +1237,13 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
/*
* ->end will be filled after we load all the symbols
*/
- map__init(self, type, start, 0, 0, dso);
+ map__init(self, start, 0, 0, dso);
}
return self;
}
-static int thread__create_module_maps(struct thread *self)
+static int dsos__load_modules(void)
{
char *line = NULL;
size_t n;
@@ -1324,27 +1287,21 @@ static int thread__create_module_maps(struct thread *self)
if (dso == NULL)
goto out_delete_line;
- map = map__new2(start, dso, MAP__FUNCTION);
+ map = map__new2(start, dso);
if (map == NULL) {
dso__delete(dso);
goto out_delete_line;
}
- snprintf(name, sizeof(name),
- "/sys/module/%s/notes/.note.gnu.build-id", line);
- if (sysfs__read_build_id(name, dso->build_id,
- sizeof(dso->build_id)) == 0)
- dso->has_build_id = true;
-
dso->origin = DSO__ORIG_KMODULE;
- __thread__insert_map(self, map);
- dsos__add(&dsos__kernel, dso);
+ kernel_maps__insert(map);
+ dsos__add(dso);
}
free(line);
fclose(file);
- return dsos__set_modules_path();
+ return 0;
out_delete_line:
free(line);
@@ -1352,106 +1309,106 @@ static int thread__create_module_maps(struct thread *self)
return -1;
}
-static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *thread,
+static int dso__load_vmlinux(struct dso *self, struct map *map,
const char *vmlinux, symbol_filter_t filter)
{
- int err = -1, fd;
-
- if (self->has_build_id) {
- u8 build_id[BUILD_ID_SIZE];
+ int err, fd = open(vmlinux, O_RDONLY);
- if (filename__read_build_id(vmlinux, build_id,
- sizeof(build_id)) < 0) {
- pr_debug("No build_id in %s, ignoring it\n", vmlinux);
- return -1;
- }
- if (!dso__build_id_equal(self, build_id)) {
- char expected_build_id[BUILD_ID_SIZE * 2 + 1],
- vmlinux_build_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(self->build_id,
- sizeof(self->build_id),
- expected_build_id);
- build_id__sprintf(build_id, sizeof(build_id),
- vmlinux_build_id);
- pr_debug("build_id in %s is %s while expected is %s, "
- "ignoring it\n", vmlinux, vmlinux_build_id,
- expected_build_id);
- return -1;
- }
- }
+ self->loaded = 1;
- fd = open(vmlinux, O_RDONLY);
if (fd < 0)
return -1;
- dso__set_loaded(self, map->type);
- err = dso__load_sym(self, map, thread, self->long_name, fd, filter, 1, 0);
+ err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0);
+
close(fd);
return err;
}
-static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter,
+ int use_modules)
{
- int err;
- bool is_kallsyms;
-
- if (vmlinux_path != NULL) {
- int i;
- pr_debug("Looking at the vmlinux_path (%d entries long)\n",
- vmlinux_path__nr_entries);
- for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(self, map, thread,
- vmlinux_path[i], filter);
- if (err > 0) {
- pr_debug("Using %s for symbols\n",
- vmlinux_path[i]);
- dso__set_long_name(self,
- strdup(vmlinux_path[i]));
- goto out_fixup;
- }
- }
+ int err = -1;
+ struct dso *dso = dso__new(vmlinux);
+
+ if (dso == NULL)
+ return -1;
+
+ dso->short_name = "[kernel]";
+ kernel_map = map__new2(0, dso);
+ if (kernel_map == NULL)
+ goto out_delete_dso;
+
+ kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip;
+
+ if (use_modules && dsos__load_modules() < 0) {
+ pr_warning("Failed to load list of modules in use! "
+ "Continuing...\n");
+ use_modules = 0;
}
- is_kallsyms = self->long_name[0] == '[';
- if (is_kallsyms)
- goto do_kallsyms;
-
- err = dso__load_vmlinux(self, map, thread, self->long_name, filter);
- if (err <= 0) {
- pr_info("The file %s cannot be used, "
- "trying to use /proc/kallsyms...", self->long_name);
-do_kallsyms:
- err = dso__load_kallsyms(self, map, thread, filter);
- if (err > 0 && !is_kallsyms)
- dso__set_long_name(self, strdup("[kernel.kallsyms]"));
+ if (vmlinux) {
+ err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter);
+ if (err > 0 && use_modules) {
+ int syms = dsos__load_modules_sym(filter);
+
+ if (syms < 0)
+ pr_warning("Failed to read module symbols!"
+ " Continuing...\n");
+ else
+ err += syms;
+ }
}
+ if (err <= 0)
+ err = kernel_maps__load_kallsyms(filter, use_modules);
+
if (err > 0) {
-out_fixup:
- map__fixup_start(map);
- map__fixup_end(map);
+ struct rb_node *node = rb_first(&dso->syms);
+ struct symbol *sym = rb_entry(node, struct symbol, rb_node);
+
+ kernel_map->start = sym->start;
+ node = rb_last(&dso->syms);
+ sym = rb_entry(node, struct symbol, rb_node);
+ kernel_map->end = sym->end;
+
+ dso->origin = DSO__ORIG_KERNEL;
+ kernel_maps__insert(kernel_map);
+ /*
+ * Now that we have all sorted out, just set the ->end of all
+ * maps:
+ */
+ kernel_maps__fixup_end();
+ dsos__add(dso);
+
+ if (verbose)
+ kernel_maps__fprintf(stderr);
}
return err;
+
+out_delete_dso:
+ dso__delete(dso);
+ return -1;
}
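
dsos__load_kernel() above now creates the single kernel_map, optionally loads the module maps, prefers the given vmlinux image and falls back to /proc/kallsyms, then fixes up the map ends and registers everything. A start-up sketch of how a tool drives it after the revert, assuming the perf-internal globals (vmlinux_name, modules) and passing no symbol filter; not standalone:

/* Tool start-up after the revert: fixed vmlinux_name, module handling
 * controlled by the global 'modules' flag set from the command line. */
symbol__init(0);                        /* no per-symbol private data */

if (load_kernel(NULL) < 0)              /* NULL filter: keep every symbol */
        fprintf(stderr, "failed to load kernel symbols\n");
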
-LIST_HEAD(dsos__user);
-LIST_HEAD(dsos__kernel);
-struct dso *vdso;
+LIST_HEAD(dsos);
+struct dso *vdso;
+
+const char *vmlinux_name = "vmlinux";
+int modules;
-static void dsos__add(struct list_head *head, struct dso *dso)
+static void dsos__add(struct dso *dso)
{
- list_add_tail(&dso->node, head);
+ list_add_tail(&dso->node, &dsos);
}
-static struct dso *dsos__find(struct list_head *head, const char *name)
+static struct dso *dsos__find(const char *name)
{
struct dso *pos;
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, &dsos, node)
if (strcmp(pos->name, name) == 0)
return pos;
return NULL;
@@ -1459,170 +1416,53 @@ static struct dso *dsos__find(struct list_head *head, const char *name)
struct dso *dsos__findnew(const char *name)
{
- struct dso *dso = dsos__find(&dsos__user, name);
+ struct dso *dso = dsos__find(name);
if (!dso) {
dso = dso__new(name);
- if (dso != NULL) {
- dsos__add(&dsos__user, dso);
- dso__set_basename(dso);
- }
+ if (dso != NULL)
+ dsos__add(dso);
}
return dso;
}
-static void __dsos__fprintf(struct list_head *head, FILE *fp)
+void dsos__fprintf(FILE *fp)
{
struct dso *pos;
- list_for_each_entry(pos, head, node) {
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- dso__fprintf(pos, i, fp);
- }
-}
-
-void dsos__fprintf(FILE *fp)
-{
- __dsos__fprintf(&dsos__kernel, fp);
- __dsos__fprintf(&dsos__user, fp);
+ list_for_each_entry(pos, &dsos, node)
+ dso__fprintf(pos, fp);
}
-static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp)
+size_t dsos__fprintf_buildid(FILE *fp)
{
struct dso *pos;
size_t ret = 0;
- list_for_each_entry(pos, head, node) {
+ list_for_each_entry(pos, &dsos, node) {
ret += dso__fprintf_buildid(pos, fp);
ret += fprintf(fp, " %s\n", pos->long_name);
}
return ret;
}
-size_t dsos__fprintf_buildid(FILE *fp)
-{
- return (__dsos__fprintf_buildid(&dsos__kernel, fp) +
- __dsos__fprintf_buildid(&dsos__user, fp));
-}
-
-static int thread__create_kernel_map(struct thread *self, const char *vmlinux)
+int load_kernel(symbol_filter_t filter)
{
- struct map *kmap;
- struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]");
-
- if (kernel == NULL)
+ if (dsos__load_kernel(vmlinux_name, filter, modules) <= 0)
return -1;
- kmap = map__new2(0, kernel, MAP__FUNCTION);
- if (kmap == NULL)
- goto out_delete_kernel_dso;
-
- kmap->map_ip = kmap->unmap_ip = identity__map_ip;
- kernel->short_name = "[kernel]";
- kernel->kernel = 1;
-
vdso = dso__new("[vdso]");
- if (vdso == NULL)
- goto out_delete_kernel_map;
- dso__set_loaded(vdso, MAP__FUNCTION);
-
- if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id,
- sizeof(kernel->build_id)) == 0)
- kernel->has_build_id = true;
-
- __thread__insert_map(self, kmap);
- dsos__add(&dsos__kernel, kernel);
- dsos__add(&dsos__user, vdso);
-
- return 0;
-
-out_delete_kernel_map:
- map__delete(kmap);
-out_delete_kernel_dso:
- dso__delete(kernel);
- return -1;
-}
-
-static void vmlinux_path__exit(void)
-{
- while (--vmlinux_path__nr_entries >= 0) {
- free(vmlinux_path[vmlinux_path__nr_entries]);
- vmlinux_path[vmlinux_path__nr_entries] = NULL;
- }
-
- free(vmlinux_path);
- vmlinux_path = NULL;
-}
-
-static int vmlinux_path__init(void)
-{
- struct utsname uts;
- char bf[PATH_MAX];
-
- if (uname(&uts) < 0)
+ if (!vdso)
return -1;
- vmlinux_path = malloc(sizeof(char *) * 5);
- if (vmlinux_path == NULL)
- return -1;
-
- vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
- uts.release);
- vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
- if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
- goto out_fail;
- ++vmlinux_path__nr_entries;
+ dsos__add(vdso);
return 0;
-
-out_fail:
- vmlinux_path__exit();
- return -1;
}
-int symbol__init(struct symbol_conf *conf)
+void symbol__init(unsigned int priv_size)
{
- const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults;
-
elf_version(EV_CURRENT);
- symbol__priv_size = pconf->priv_size;
- thread__init(kthread, 0);
-
- if (pconf->try_vmlinux_path && vmlinux_path__init() < 0)
- return -1;
-
- if (thread__create_kernel_map(kthread, pconf->vmlinux_name) < 0) {
- vmlinux_path__exit();
- return -1;
- }
-
- kthread->use_modules = pconf->use_modules;
- if (pconf->use_modules && thread__create_module_maps(kthread) < 0)
- pr_debug("Failed to load list of modules in use, "
- "continuing...\n");
- /*
- * Now that we have all the maps created, just set the ->end of them:
- */
- thread__fixup_maps_end(kthread);
- return 0;
+ symbol__priv_size = priv_size;
}
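
symbol__init() again just records a global symbol__priv_size; symbol__new() over-allocates by that many bytes in front of each struct symbol and symbol__priv() steps back to reach them. A standalone sketch of that "private area in front of the struct" trick using a stand-in struct, since the real struct symbol lives in symbol.h; the field layout and sizes here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct symbol {                         /* stand-in for perf's struct symbol */
        unsigned long long start, end;
        char name[0];
};

static unsigned int priv_size;          /* plays the role of symbol__priv_size */

static void *symbol__priv(struct symbol *self)
{
        return ((void *)self) - priv_size;      /* private bytes sit in front */
}

static struct symbol *symbol__new(unsigned long long start, const char *name)
{
        size_t namelen = strlen(name) + 1;
        struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);

        if (self == NULL)
                return NULL;
        self = ((void *)self) + priv_size;      /* callers see the public part */
        self->start = start;
        memcpy(self->name, name, namelen);
        return self;
}

int main(void)
{
        struct symbol *sym;

        priv_size = sizeof(unsigned int);       /* e.g. a per-symbol hit count */
        sym = symbol__new(0xffffffff810473c0ULL, "schedule");
        if (sym == NULL)
                return 1;

        *(unsigned int *)symbol__priv(sym) = 42;
        printf("%s@%#llx priv=%u\n", sym->name, sym->start,
               *(unsigned int *)symbol__priv(sym));
        free(symbol__priv(sym));                /* free the original allocation */
        return 0;
}
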
diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h
index 17003efa0b39..51c5a4a08133 100644
--- a/trunk/tools/perf/util/symbol.h
+++ b/trunk/tools/perf/util/symbol.h
@@ -49,13 +49,6 @@ struct symbol {
char name[0];
};
-struct symbol_conf {
- unsigned short priv_size;
- bool try_vmlinux_path,
- use_modules;
- const char *vmlinux_name;
-};
-
extern unsigned int symbol__priv_size;
static inline void *symbol__priv(struct symbol *self)
@@ -63,27 +56,16 @@ static inline void *symbol__priv(struct symbol *self)
return ((void *)self) - symbol__priv_size;
}
-struct addr_location {
- struct thread *thread;
- struct map *map;
- struct symbol *sym;
- u64 addr;
- char level;
-};
-
struct dso {
struct list_head node;
- struct rb_root symbols[MAP__NR_TYPES];
- struct symbol *(*find_symbol)(struct dso *self,
- enum map_type type, u64 addr);
+ struct rb_root syms;
+ struct symbol *(*find_symbol)(struct dso *, u64 ip);
u8 adjust_symbols:1;
u8 slen_calculated:1;
+ u8 loaded:1;
u8 has_build_id:1;
- u8 kernel:1;
unsigned char origin;
- u8 loaded;
u8 build_id[BUILD_ID_SIZE];
- u16 long_name_len;
const char *short_name;
char *long_name;
char name[0];
@@ -92,29 +74,30 @@ struct dso {
struct dso *dso__new(const char *name);
void dso__delete(struct dso *self);
-bool dso__loaded(const struct dso *self, enum map_type type);
+struct symbol *dso__find_symbol(struct dso *self, u64 ip);
+int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, int modules);
struct dso *dsos__findnew(const char *name);
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
void dsos__fprintf(FILE *fp);
size_t dsos__fprintf_buildid(FILE *fp);
size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
-size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
+size_t dso__fprintf(struct dso *self, FILE *fp);
char dso__symtab_origin(const struct dso *self);
void dso__set_build_id(struct dso *self, void *build_id);
int filename__read_build_id(const char *filename, void *bf, size_t size);
-int sysfs__read_build_id(const char *filename, void *bf, size_t size);
-bool dsos__read_build_ids(void);
+bool fetch_build_id_table(struct list_head *head);
int build_id__sprintf(u8 *self, int len, char *bf);
-size_t kernel_maps__fprintf(FILE *fp);
+int load_kernel(symbol_filter_t filter);
-int symbol__init(struct symbol_conf *conf);
+void symbol__init(unsigned int priv_size);
-struct thread;
-struct thread *kthread;
-extern struct list_head dsos__user, dsos__kernel;
+extern struct list_head dsos;
+extern struct map *kernel_map;
extern struct dso *vdso;
+extern const char *vmlinux_name;
+extern int modules;
#endif /* __PERF_SYMBOL */
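With the per-map-type symbol trees gone, each dso keeps a single ->syms rb_root sorted by address, and ->find_symbol() reduces to a plain binary-tree walk. A rough sketch of such a lookup follows, assuming the entries carry start/end fields and are keyed by start; the entry struct below is a placeholder, not the exact perf definition.

#include <linux/rbtree.h>	/* perf builds against its own copy of the kernel rbtree */

typedef unsigned long long u64;	/* as in perf's util/types.h */

struct sym_entry {		/* placeholder for struct symbol */
	struct rb_node	rb_node;
	u64		start, end;
};

/* hypothetical address lookup over an rb_root ordered by ->start */
static struct sym_entry *syms__find(struct rb_root *syms, u64 ip)
{
	struct rb_node *n = syms->rb_node;

	while (n) {
		struct sym_entry *s = rb_entry(n, struct sym_entry, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end)
			n = n->rb_right;
		else
			return s;	/* s->start <= ip <= s->end */
	}
	return NULL;
}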
diff --git a/trunk/tools/perf/util/thread.c b/trunk/tools/perf/util/thread.c
index 603f5610861b..0f6d78c9863a 100644
--- a/trunk/tools/perf/util/thread.c
+++ b/trunk/tools/perf/util/thread.c
@@ -9,26 +9,17 @@
static struct rb_root threads;
static struct thread *last_match;
-void thread__init(struct thread *self, pid_t pid)
-{
- int i;
- self->pid = pid;
- self->comm = NULL;
- for (i = 0; i < MAP__NR_TYPES; ++i) {
- self->maps[i] = RB_ROOT;
- INIT_LIST_HEAD(&self->removed_maps[i]);
- }
-}
-
static struct thread *thread__new(pid_t pid)
{
- struct thread *self = zalloc(sizeof(*self));
+ struct thread *self = calloc(1, sizeof(*self));
if (self != NULL) {
- thread__init(self, pid);
+ self->pid = pid;
self->comm = malloc(32);
if (self->comm)
snprintf(self->comm, 32, ":%d", self->pid);
+ self->maps = RB_ROOT;
+ INIT_LIST_HEAD(&self->removed_maps);
}
return self;
@@ -53,68 +44,24 @@ int thread__comm_len(struct thread *self)
return self->comm_len;
}
-static const char *map_type__name[MAP__NR_TYPES] = {
- [MAP__FUNCTION] = "Functions",
-};
-
-static size_t __thread__fprintf_maps(struct thread *self,
- enum map_type type, FILE *fp)
+static size_t thread__fprintf(struct thread *self, FILE *fp)
{
- size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
struct rb_node *nd;
+ struct map *pos;
+ size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
+ self->pid, self->comm);
- for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
- struct map *pos = rb_entry(nd, struct map, rb_node);
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos, fp);
- if (verbose > 1) {
- printed += dso__fprintf(pos->dso, type, fp);
- printed += fprintf(fp, "--\n");
- }
+ for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct map, rb_node);
+ ret += map__fprintf(pos, fp);
}
- return printed;
-}
+ ret = fprintf(fp, "Removed maps:\n");
-size_t thread__fprintf_maps(struct thread *self, FILE *fp)
-{
- size_t printed = 0, i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_maps(self, i, fp);
- return printed;
-}
+ list_for_each_entry(pos, &self->removed_maps, node)
+ ret += map__fprintf(pos, fp);
-static size_t __thread__fprintf_removed_maps(struct thread *self,
- enum map_type type, FILE *fp)
-{
- struct map *pos;
- size_t printed = 0;
-
- list_for_each_entry(pos, &self->removed_maps[type], node) {
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos, fp);
- if (verbose > 1) {
- printed += dso__fprintf(pos->dso, type, fp);
- printed += fprintf(fp, "--\n");
- }
- }
- return printed;
-}
-
-static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp)
-{
- size_t printed = 0, i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_removed_maps(self, i, fp);
- return printed;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
- size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
- printed += thread__fprintf_removed_maps(self, fp);
- printed += fprintf(fp, "Removed maps:\n");
- return printed + thread__fprintf_removed_maps(self, fp);
+ return ret;
}
struct thread *threads__findnew(pid_t pid)
@@ -170,8 +117,7 @@ struct thread *register_idle_thread(void)
static void thread__remove_overlappings(struct thread *self, struct map *map)
{
- struct rb_root *root = &self->maps[map->type];
- struct rb_node *next = rb_first(root);
+ struct rb_node *next = rb_first(&self->maps);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
@@ -186,13 +132,13 @@ static void thread__remove_overlappings(struct thread *self, struct map *map)
map__fprintf(pos, stderr);
}
- rb_erase(&pos->rb_node, root);
+ rb_erase(&pos->rb_node, &self->maps);
/*
* We may have references to this map, for instance in some
* hist_entry instances, so just move them to a separate
* list.
*/
- list_add_tail(&pos->node, &self->removed_maps[map->type]);
+ list_add_tail(&pos->node, &self->removed_maps);
}
}
@@ -239,26 +185,12 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
void thread__insert_map(struct thread *self, struct map *map)
{
thread__remove_overlappings(self, map);
- maps__insert(&self->maps[map->type], map);
-}
-
-static int thread__clone_maps(struct thread *self, struct thread *parent,
- enum map_type type)
-{
- struct rb_node *nd;
- for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
- struct map *map = rb_entry(nd, struct map, rb_node);
- struct map *new = map__clone(map);
- if (new == NULL)
- return -ENOMEM;
- thread__insert_map(self, new);
- }
- return 0;
+ maps__insert(&self->maps, map);
}
int thread__fork(struct thread *self, struct thread *parent)
{
- int i;
+ struct rb_node *nd;
if (self->comm)
free(self->comm);
@@ -266,9 +198,14 @@ int thread__fork(struct thread *self, struct thread *parent)
if (!self->comm)
return -ENOMEM;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- if (thread__clone_maps(self, parent, i) < 0)
+ for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+ struct map *new = map__clone(map);
+ if (!new)
return -ENOMEM;
+ thread__insert_map(self, new);
+ }
+
return 0;
}
@@ -285,15 +222,3 @@ size_t threads__fprintf(FILE *fp)
return ret;
}
-
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter)
-{
- struct map *map = thread__find_map(self, type, addr);
-
- if (map != NULL)
- return map__find_symbol(map, map->map_ip(map, addr), filter);
-
- return NULL;
-}
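thread__insert_map() and thread__fork() above both end up in maps__insert(), which keeps a thread's maps in one rb-tree ordered by start address (its companion maps__find() is declared in thread.h below). The following is a hedged sketch of such an insertion; the real function lives in thread.c, and the stub struct only mirrors the rb_node and start members that struct map is shown to have elsewhere in this patch.

#include <linux/rbtree.h>

typedef unsigned long long u64;	/* as in perf's util/types.h */

struct map_stub {		/* placeholder: the real struct map embeds these members */
	struct rb_node	rb_node;
	u64		start, end;
};

/* sketch of an insert keyed by ->start; overlaps are assumed to be gone already */
static void maps__insert_sketch(struct rb_root *maps, struct map_stub *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		struct map_stub *m;

		parent = *p;
		m = rb_entry(parent, struct map_stub, rb_node);
		if (map->start < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

A start-keyed tree is sufficient here because thread__insert_map() calls thread__remove_overlappings() first, so no two maps left in the tree cover the same address.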
diff --git a/trunk/tools/perf/util/thread.h b/trunk/tools/perf/util/thread.h
index 686d6e914d9e..53addd77ce8f 100644
--- a/trunk/tools/perf/util/thread.h
+++ b/trunk/tools/perf/util/thread.h
@@ -7,50 +7,31 @@
struct thread {
struct rb_node rb_node;
- struct rb_root maps[MAP__NR_TYPES];
- struct list_head removed_maps[MAP__NR_TYPES];
+ struct rb_root maps;
+ struct list_head removed_maps;
pid_t pid;
- bool use_modules;
char shortname[3];
char *comm;
int comm_len;
};
-void thread__init(struct thread *self, pid_t pid);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
struct thread *threads__findnew(pid_t pid);
struct thread *register_idle_thread(void);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t thread__fprintf_maps(struct thread *self, FILE *fp);
size_t threads__fprintf(FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
-struct map *maps__find(struct rb_root *maps, u64 addr);
+struct map *maps__find(struct rb_root *maps, u64 ip);
-static inline struct map *thread__find_map(struct thread *self,
- enum map_type type, u64 addr)
-{
- return self ? maps__find(&self->maps[type], addr) : NULL;
-}
+struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp);
+struct map *kernel_maps__find_by_dso_name(const char *name);
-static inline void __thread__insert_map(struct thread *self, struct map *map)
+static inline struct map *thread__find_map(struct thread *self, u64 ip)
{
- maps__insert(&self->maps[map->type], map);
+ return self ? maps__find(&self->maps, ip) : NULL;
}
-void thread__find_addr_location(struct thread *self, u8 cpumode,
- enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter);
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter);
-
-static inline struct symbol *
-thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter)
-{
- return thread__find_symbol(self, MAP__FUNCTION, addr, filter);
-}
#endif /* __PERF_THREAD_H */
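The inline thread__find_map() above, dso__find_symbol() from symbol.h, and the map_ip() translation visible in the removed thread__find_symbol() together give the two-step address resolution that callers now open-code. A small sketch of that flow, assuming the map->dso and map->map_ip() usage shown elsewhere in this patch; the kernel-address fallback is only indicated in a comment.

/* sketch: resolve a user-space sample address to a symbol for a given thread */
static struct symbol *resolve_addr_sketch(struct thread *thread, u64 ip)
{
	struct map *map = thread__find_map(thread, ip);

	if (map == NULL)
		return NULL;	/* kernel addresses would go through kernel_maps__find_symbol() instead */

	/* map_ip() turns the absolute ip into a dso-relative address */
	return dso__find_symbol(map->dso, map->map_ip(map, ip));
}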
diff --git a/trunk/tools/perf/util/trace-event-info.c b/trunk/tools/perf/util/trace-event-info.c
index cace35595530..831052d4b4fb 100644
--- a/trunk/tools/perf/util/trace-event-info.c
+++ b/trunk/tools/perf/util/trace-event-info.c
@@ -33,11 +33,11 @@
#include