Commit 80fe54a

---
r: 121131
b: refs/heads/master
c: e7d3737
h: refs/heads/master
i:
  121129: c82b882
  121127: 4ec7ecb
v: v3
Frederic Weisbecker authored and Ingo Molnar committed Nov 16, 2008
1 parent d3be37b commit 80fe54a
Showing 7 changed files with 212 additions and 156 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b01c746617da5e260803eb10ed64ca043e9a1241
+refs/heads/master: e7d3737ea1b102030f44e96c97754101e41515f0
18 changes: 11 additions & 7 deletions trunk/arch/x86/kernel/entry_32.S
@@ -1190,7 +1190,7 @@ ENTRY(mcount)
         jnz trace
 #ifdef CONFIG_FUNCTION_RET_TRACER
         cmpl $ftrace_stub, ftrace_function_return
-        jnz trace_return
+        jnz ftrace_return_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1211,9 +1211,15 @@ trace:
         popl %ecx
         popl %eax
         jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-trace_return:
+ENTRY(ftrace_return_caller)
+        cmpl $0, function_trace_stop
+        jne ftrace_stub
+
         pushl %eax
         pushl %ecx
         pushl %edx
@@ -1223,7 +1229,8 @@ trace_return:
         popl %edx
         popl %ecx
         popl %eax
-        jmp ftrace_stub
+        ret
+END(ftrace_return_caller)
 
 .globl return_to_handler
 return_to_handler:
@@ -1237,10 +1244,7 @@ return_to_handler:
         popl %ecx
         popl %eax
         ret
-#endif /* CONFIG_FUNCTION_RET_TRACER */
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
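For context, the new control flow in entry_32.S is: mcount jumps to ftrace_return_caller, which calls prepare_ftrace_return(); that function swaps the traced function's return address on the stack for the return_to_handler trampoline and records the real one; when the traced function returns, the trampoline calls ftrace_return_to_handler() to timestamp the exit and recover the real address. A minimal user-space C sketch of that bookkeeping (hypothetical names, no real return-address patching):

#include <stdio.h>
#include <time.h>

#define RET_STACK_SIZE 50                 /* stand-in for FTRACE_RET_STACK_SIZE */

struct ret_entry {
        void *ret;                        /* saved real return target */
        long long calltime;               /* entry timestamp, ns      */
};

static struct ret_entry ret_stack[RET_STACK_SIZE];
static int curr_ret = -1;

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Plays the role of prepare_ftrace_return(): save the real return target. */
static int hook_entry(void *real_ret)
{
        if (curr_ret == RET_STACK_SIZE - 1)
                return -1;                /* stack full: skip this trace */
        curr_ret++;
        ret_stack[curr_ret].ret = real_ret;
        ret_stack[curr_ret].calltime = now_ns();
        return 0;
}

/* Plays the role of ftrace_return_to_handler(): emit trace, restore target. */
static void *hook_exit(void)
{
        struct ret_entry e = ret_stack[curr_ret--];
        printf("function ran for %lld ns\n", now_ns() - e.calltime);
        return e.ret;
}

int main(void)
{
        static int dummy;
        if (hook_entry(&dummy) == 0)      /* any address works for the demo */
                printf("real return target: %p\n", hook_exit());
        return 0;
}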
258 changes: 130 additions & 128 deletions trunk/arch/x86/kernel/ftrace.c
@@ -24,134 +24,6 @@
 #include <asm/nmi.h>
 
 
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-        atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-        atomic_dec(&in_nmi);
-}
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                                unsigned long func)
-{
-        int index;
-        struct thread_info *ti = current_thread_info();
-
-        /* The return trace stack is full */
-        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
-                return -EBUSY;
-
-        index = ++ti->curr_ret_stack;
-        barrier();
-        ti->ret_stack[index].ret = ret;
-        ti->ret_stack[index].func = func;
-        ti->ret_stack[index].calltime = time;
-
-        return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-                                unsigned long *func)
-{
-        int index;
-
-        struct thread_info *ti = current_thread_info();
-        index = ti->curr_ret_stack;
-        *ret = ti->ret_stack[index].ret;
-        *func = ti->ret_stack[index].func;
-        *time = ti->ret_stack[index].calltime;
-        ti->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-        struct ftrace_retfunc trace;
-        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
-        trace.rettime = cpu_clock(raw_smp_processor_id());
-        ftrace_function_return(&trace);
-
-        return trace.ret;
-}
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
-{
-        unsigned long old;
-        unsigned long long calltime;
-        int faulted;
-        unsigned long return_hooker = (unsigned long)
-                                &return_to_handler;
-
-        /* Nmi's are currently unsupported */
-        if (atomic_read(&in_nmi))
-                return;
-
-        /*
-         * Protect against fault, even if it shouldn't
-         * happen. This tool is too much intrusive to
-         * ignore such a protection.
-         */
-        asm volatile(
-                "1: movl (%[parent_old]), %[old]\n"
-                "2: movl %[return_hooker], (%[parent_replaced])\n"
-                "   movl $0, %[faulted]\n"
-
-                ".section .fixup, \"ax\"\n"
-                "3: movl $1, %[faulted]\n"
-                ".previous\n"
-
-                ".section __ex_table, \"a\"\n"
-                "   .long 1b, 3b\n"
-                "   .long 2b, 3b\n"
-                ".previous\n"
-
-                : [parent_replaced] "=r" (parent), [old] "=r" (old),
-                  [faulted] "=r" (faulted)
-                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
-                : "memory"
-        );
-
-        if (WARN_ON(faulted)) {
-                unregister_ftrace_return();
-                return;
-        }
-
-        if (WARN_ON(!__kernel_text_address(old))) {
-                unregister_ftrace_return();
-                *parent = old;
-                return;
-        }
-
-        calltime = cpu_clock(raw_smp_processor_id());
-
-        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
-                *parent = old;
-}
-
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
@@ -450,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data)
         return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+        atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+        atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info.*/
+static int push_return_trace(unsigned long ret, unsigned long long time,
+                                unsigned long func)
+{
+        int index;
+        struct thread_info *ti = current_thread_info();
+
+        /* The return trace stack is full */
+        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+                return -EBUSY;
+
+        index = ++ti->curr_ret_stack;
+        barrier();
+        ti->ret_stack[index].ret = ret;
+        ti->ret_stack[index].func = func;
+        ti->ret_stack[index].calltime = time;
+
+        return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+                                unsigned long *func)
+{
+        int index;
+
+        struct thread_info *ti = current_thread_info();
+        index = ti->curr_ret_stack;
+        *ret = ti->ret_stack[index].ret;
+        *func = ti->ret_stack[index].func;
+        *time = ti->ret_stack[index].calltime;
+        ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+        struct ftrace_retfunc trace;
+        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+        trace.rettime = cpu_clock(raw_smp_processor_id());
+        ftrace_function_return(&trace);
+
+        return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+        unsigned long old;
+        unsigned long long calltime;
+        int faulted;
+        unsigned long return_hooker = (unsigned long)
+                                &return_to_handler;
+
+        /* Nmi's are currently unsupported */
+        if (atomic_read(&in_nmi))
+                return;
+
+        /*
+         * Protect against fault, even if it shouldn't
+         * happen. This tool is too much intrusive to
+         * ignore such a protection.
+         */
+        asm volatile(
+                "1: movl (%[parent_old]), %[old]\n"
+                "2: movl %[return_hooker], (%[parent_replaced])\n"
+                "   movl $0, %[faulted]\n"
+
+                ".section .fixup, \"ax\"\n"
+                "3: movl $1, %[faulted]\n"
+                ".previous\n"
+
+                ".section __ex_table, \"a\"\n"
+                "   .long 1b, 3b\n"
+                "   .long 2b, 3b\n"
+                ".previous\n"
+
+                : [parent_replaced] "=r" (parent), [old] "=r" (old),
+                  [faulted] "=r" (faulted)
+                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+                : "memory"
+        );
+
+        if (WARN_ON(faulted)) {
+                unregister_ftrace_return();
+                return;
+        }
+
+        if (WARN_ON(!__kernel_text_address(old))) {
+                unregister_ftrace_return();
+                *parent = old;
+                return;
+        }
+
+        calltime = cpu_clock(raw_smp_processor_id());
+
+        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+                *parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
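One subtlety in the block above: the ftrace_nmi_enter/ftrace_nmi_exit pair is only compiled when dynamic ftrace is off (dynamic ftrace provides its own NMI hooks on this page), and prepare_ftrace_return() bails out whenever in_nmi is non-zero, so a trace never starts from NMI context, where the return hook could corrupt the interrupted task's ret_stack. A hedged C11 model of that guard (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int in_nmi;              /* counterpart of the kernel's atomic_t */

void nmi_enter_hook(void) { atomic_fetch_add(&in_nmi, 1); }
void nmi_exit_hook(void)  { atomic_fetch_sub(&in_nmi, 1); }

/* Counterpart of the check at the top of prepare_ftrace_return():
 * refuse to install the return hook while any NMI is being handled. */
bool may_hook_return(void)
{
        return atomic_load(&in_nmi) == 0;
}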
16 changes: 15 additions & 1 deletion trunk/include/linux/ftrace.h
@@ -25,6 +25,17 @@ struct ftrace_ops {
 
 extern int function_trace_stop;
 
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+        FTRACE_TYPE_RETURN, /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
 /**
  * ftrace_stop - stop function tracer.
  *
@@ -104,6 +115,9 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_RET_TRACER
+extern void ftrace_return_caller(void);
+#endif
 
 /**
  * ftrace_make_nop - convert code into top
@@ -310,7 +324,7 @@ struct ftrace_retfunc {
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
-extern void register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
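Because register_ftrace_return() now returns int, callers are expected to check for failure; the new ftrace_tracing_type enum suggests entry tracing and return tracing have become mutually exclusive, so registration can be refused. A hypothetical caller, assuming only the declarations visible in this header:

#include <linux/ftrace.h>

/* Hypothetical handler matching trace_function_return_t. */
static void my_ret_handler(struct ftrace_retfunc *trace)
{
        /* e.g. record trace->func and trace->rettime - trace->calltime */
}

static int my_tracer_start(void)
{
        int ret = register_ftrace_return(my_ret_handler);

        if (ret)
                return ret;     /* refused: another tracing type is active */
        return 0;
}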
1 change: 0 additions & 1 deletion trunk/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
         bool "Kernel Function return Tracer"
-        depends on !DYNAMIC_FTRACE
         depends on HAVE_FUNCTION_RET_TRACER
         depends on FUNCTION_TRACER
         help
(Diffs for the remaining 2 of the 7 changed files are not shown.)
