Commit 7c4fa11
---
r: 121110
b: refs/heads/master
c: 24de386
h: refs/heads/master
v: v3
Ingo Molnar committed Nov 13, 2008
1 parent 5abde91 commit 7c4fa11
Showing 13 changed files with 550 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 1dc1c6adf38bc5799d1594681645ced40ced4b6b
+refs/heads/master: 24de38620dd3504782c90c0892eef888d0c351e9
3 changes: 3 additions & 0 deletions trunk/arch/x86/kernel/vsyscall_64.c
@@ -17,6 +17,9 @@
* want per guest time just set the kernel.vsyscall64 sysctl to 0.
*/

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
3 changes: 3 additions & 0 deletions trunk/arch/x86/vdso/vclock_gettime.c
@@ -9,6 +9,9 @@
* Also alternative() doesn't work.
*/

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
14 changes: 13 additions & 1 deletion trunk/include/asm-generic/vmlinux.lds.h
@@ -45,6 +45,17 @@
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_likely_profile) = .; \
*(_ftrace_likely) \
VMLINUX_SYMBOL(__stop_likely_profile) = .; \
VMLINUX_SYMBOL(__start_unlikely_profile) = .; \
*(_ftrace_unlikely) \
VMLINUX_SYMBOL(__stop_unlikely_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

/* .data section */
#define DATA_DATA \
*(.data) \
@@ -62,7 +73,8 @@
VMLINUX_SYMBOL(__stop___markers) = .; \
VMLINUX_SYMBOL(__start___tracepoints) = .; \
*(__tracepoints) \
-VMLINUX_SYMBOL(__stop___tracepoints) = .;
+VMLINUX_SYMBOL(__stop___tracepoints) = .; \
+LIKELY_PROFILE()

#define RO_DATA(align) \
. = ALIGN((align)); \
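The __start_likely_profile/__stop_likely_profile symbols (and their unlikely twins) bracket every record that instrumented call sites emit into the _ftrace_likely and _ftrace_unlikely sections, so C code can walk them as a flat array. A minimal sketch of the consuming side — the real reader lives in kernel/trace/trace_branch.c, which this page has not loaded, so the function below and its message format are illustrative only (struct ftrace_branch_data is defined in the include/linux/compiler.h hunk further down):

extern struct ftrace_branch_data __start_likely_profile[];
extern struct ftrace_branch_data __stop_likely_profile[];

/* Walk the linker-delimited array of per-call-site records. */
static void dump_likely_profile(void)
{
	struct ftrace_branch_data *p;

	for (p = __start_likely_profile; p < __stop_likely_profile; p++)
		printk("%s:%u %s correct=%lu incorrect=%lu\n",
		       p->file, p->line, p->func,
		       p->correct, p->incorrect);
}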
66 changes: 64 additions & 2 deletions trunk/include/linux/compiler.h
@@ -59,8 +59,70 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/

-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
unsigned long correct;
unsigned long incorrect;
};

/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)

#define likely_check(x) ({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_likely"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______f.line = __LINE__; \
______r = likely_notrace(x); \
ftrace_likely_update(&______f, ______r, 1); \
______r; \
})
#define unlikely_check(x) ({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_unlikely"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______f.line = __LINE__; \
______r = unlikely_notrace(x); \
ftrace_likely_update(&______f, ______r, 0); \
______r; \
})

/*
* Using __builtin_constant_p(x) to ignore cases where the return
* value is always the same. This idea is taken from a similar patch
* written by Daniel Walker.
*/
# ifndef likely
# define likely(x) (__builtin_constant_p(x) ? !!(x) : likely_check(x))
# endif
# ifndef unlikely
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : unlikely_check(x))
# endif
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
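The mechanism above is reproducible in plain userspace C: the statement expression plants one static record per call site into a named ELF section, counts each evaluation, and still returns through __builtin_expect() so the compiler keeps the branch hint. A self-contained sketch of the same technique — GCC or clang on an ELF target assumed; this demo is a re-creation for illustration, not code from the commit:

#include <stdio.h>

struct branch_data {
	const char *func;
	const char *file;
	unsigned line;
	unsigned long correct;
	unsigned long incorrect;
};

/* For section names that are valid C identifiers, the linker
 * automatically provides __start_<name> and __stop_<name> bounds. */
extern struct branch_data __start_branch_profile[];
extern struct branch_data __stop_branch_profile[];

#define likely_check(x) ({					\
	static struct branch_data				\
		__attribute__((section("branch_profile"), used))\
		_f = { __func__, __FILE__, __LINE__, 0, 0 };	\
	int _r = !!(x);						\
	if (_r)							\
		_f.correct++;					\
	else							\
		_f.incorrect++;					\
	__builtin_expect(_r, 1);				\
})

int main(void)
{
	struct branch_data *p;

	for (int i = 0; i < 100; i++)
		if (likely_check(i % 10 != 0))	/* true 90 times out of 100 */
			continue;

	/* One line per likely_check() call site in the binary. */
	for (p = __start_branch_profile; p < __stop_branch_profile; p++)
		printf("%s:%s:%u correct=%lu incorrect=%lu\n",
		       p->file, p->func, p->line, p->correct, p->incorrect);
	return 0;
}

Build with a plain gcc demo.c; the kernel version keeps the macro body lean by pushing the counter updates out to ftrace_likely_update() instead.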
38 changes: 38 additions & 0 deletions trunk/kernel/trace/Kconfig
@@ -159,6 +159,44 @@ config BOOT_TRACER
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )

config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
depends on DEBUG_KERNEL
select TRACING
help
This tracer profiles all the likely and unlikely macros
in the kernel. It will display the results in:

/debugfs/tracing/profile_likely
/debugfs/tracing/profile_unlikely

Note: this will add a significant overhead; only turn this
on if you need to profile the system's use of these macros.

Say N if unsure.

config TRACING_BRANCHES
bool
help
Selected by tracers that will trace the likely and unlikely
conditions. This prevents the tracers themselves from being
profiled. Profiling the tracing infrastructure can only happen
when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
bool "Trace likely/unlikely instances"
depends on TRACE_BRANCH_PROFILING
select TRACING_BRANCHES
help
This traces the events of likely and unlikely condition
calls in the kernel. The difference between this and the
"Trace likely/unlikely profiler" is that this is not a
histogram of the callers, but actually places the calling
events into a running trace buffer to see when and where the
events happened, as well as their results.

Say N if unsure.

config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
6 changes: 6 additions & 0 deletions trunk/kernel/trace/Makefile
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif

# If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_BRANCHES
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif

obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o

@@ -25,5 +30,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o

libftrace-y := ftrace.o
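Note how the opt-out knob introduced in compiler.h is used at two granularities: vsyscall_64.c and vclock_gettime.c define DISABLE_BRANCH_PROFILING per file (that code executes in userspace via the vsyscall/vDSO pages, where calling back into kernel tracing code is not possible), while this Makefile passes -DDISABLE_BRANCH_PROFILING for the whole kernel/trace/ directory so the profiler never instruments itself.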
8 changes: 7 additions & 1 deletion trunk/kernel/trace/ring_buffer.c
@@ -53,8 +53,14 @@ void tracing_off(void)
/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
u64 time;

preempt_disable_notrace();
/* shift to debug/test normalization and TIME_EXTENTS */
-return sched_clock() << DEBUG_SHIFT;
+time = sched_clock() << DEBUG_SHIFT;
preempt_enable_notrace();

return time;
}

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
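The likely reasoning behind this hunk: sched_clock() is a CPU-local clock, so a reader preempted and migrated to another CPU mid-call could get an inconsistent timestamp now that branch profiling makes this path reachable from many more contexts; the _notrace variants of the preempt macros are used so that taking the timestamp cannot recurse into the tracer itself.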
29 changes: 29 additions & 0 deletions trunk/kernel/trace/trace.c
@@ -258,6 +258,9 @@ static const char *trace_options[] = {
"sched-tree",
"ftrace_printk",
"ftrace_preempt",
#ifdef CONFIG_BRANCH_TRACER
"branch",
#endif
NULL
};

@@ -1648,6 +1651,18 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
trace_seq_print_cont(s, iter);
break;
}
case TRACE_BRANCH: {
struct trace_branch *field;

trace_assign_type(field, entry);

trace_seq_printf(s, "[%s] %s:%s:%d\n",
field->correct ? " ok " : " MISS ",
field->func,
field->file,
field->line);
break;
}
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
@@ -1787,6 +1802,18 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
return print_return_function(iter);
break;
}
case TRACE_BRANCH: {
struct trace_branch *field;

trace_assign_type(field, entry);

trace_seq_printf(s, "[%s] %s:%s:%d\n",
field->correct ? " ok " : " MISS ",
field->func,
field->file,
field->line);
break;
}
}
return TRACE_TYPE_HANDLED;
}
@@ -2592,13 +2619,15 @@ static int tracing_set_tracer(char *buf)
if (t == current_trace)
goto out;

trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);

current_trace = t;
if (t->init)
t->init(tr);

trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);

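With the two TRACE_BRANCH cases above, a branch event renders as "[ ok ]" or "[ MISS ]" followed by function, file, and line — for instance (hypothetical values):

[ MISS ] run_timer_softirq:timer.c:1027
[ ok ] pick_next_task:sched.c:4463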
41 changes: 41 additions & 0 deletions trunk/kernel/trace/trace.h
@@ -22,6 +22,7 @@ enum trace_type {
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
TRACE_BRANCH,
TRACE_BOOT_CALL,
TRACE_BOOT_RET,
TRACE_FN_RET,
@@ -134,6 +135,16 @@ struct trace_boot_ret {
struct boot_trace_ret boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
struct trace_entry ent;
unsigned line;
char func[TRACE_FUNC_SIZE+1];
char file[TRACE_FILE_SIZE+1];
char correct;
};

/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
@@ -236,6 +247,7 @@ extern void __ftrace_bad_type(void);
TRACE_MMIO_MAP); \
IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
__ftrace_bad_type(); \
} while (0)
@@ -408,6 +420,8 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
@@ -456,6 +470,9 @@ enum trace_iterator_flags {
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
#ifdef CONFIG_BRANCH_TRACER
TRACE_ITER_BRANCH = 0x1000,
#endif
};

/*
@@ -515,4 +532,28 @@ static inline void ftrace_preempt_enable(int resched)
preempt_enable_notrace();
}

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
if (trace_flags & TRACE_ITER_BRANCH)
return enable_branch_tracing(tr);
return 0;
}
static inline void trace_branch_disable(void)
{
/* due to races, always disable */
disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */
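ftrace_likely_update(), declared in the compiler.h hunk above and built via the Makefile's trace_branch.o rule, is in one of the changed files this page did not load. Judging from its signature and struct ftrace_branch_data, the core bookkeeping plausibly amounts to the following sketch (illustrative, not the commit's actual code):

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/* Did the annotation predict this evaluation correctly? */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}

When CONFIG_BRANCH_TRACER is also enabled, the same probe would additionally fill a struct trace_branch record — truncating func and file to TRACE_FUNC_SIZE/TRACE_FILE_SIZE — and insert it into the ring buffer, which is what the TRACE_BRANCH printers in trace.c above consume.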
