Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 121150
b: refs/heads/master
c: 3f8e402
h: refs/heads/master
v: v3
  • Loading branch information
Ingo Molnar committed Nov 17, 2008
1 parent 1e1415c commit 99224f6
Show file tree
Hide file tree
Showing 21 changed files with 510 additions and 329 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 227a837567e339c74d9d4243d03a29bd943a018c
refs/heads/master: 3f8e402f34ecc7d1d00b54703d3baa401b8bdd78
8 changes: 8 additions & 0 deletions trunk/arch/x86/include/asm/ftrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,14 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
*/
return addr - 1;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
/* No extra data needed for x86 */
};

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

Expand Down
18 changes: 11 additions & 7 deletions trunk/arch/x86/kernel/entry_32.S
Original file line number Diff line number Diff line change
Expand Up @@ -1190,7 +1190,7 @@ ENTRY(mcount)
jnz trace
#ifdef CONFIG_FUNCTION_RET_TRACER
cmpl $ftrace_stub, ftrace_function_return
jnz trace_return
jnz ftrace_return_caller
#endif
.globl ftrace_stub
ftrace_stub:
Expand All @@ -1211,9 +1211,15 @@ trace:
popl %ecx
popl %eax
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_return:
ENTRY(ftrace_return_caller)
cmpl $0, function_trace_stop
jne ftrace_stub

pushl %eax
pushl %ecx
pushl %edx
Expand All @@ -1223,7 +1229,8 @@ trace_return:
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
ret
END(ftrace_return_caller)

.globl return_to_handler
return_to_handler:
Expand All @@ -1237,10 +1244,7 @@ return_to_handler:
popl %ecx
popl %eax
ret
#endif /* CONFIG_FUNCTION_RET_TRACER */
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#endif

.section .rodata,"a"
#include "syscall_table_32.S"
Expand Down
286 changes: 156 additions & 130 deletions trunk/arch/x86/kernel/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,133 +24,6 @@
#include <asm/nmi.h>



#ifdef CONFIG_FUNCTION_RET_TRACER

/*
* These functions are picked from those used on
* this page for dynamic ftrace. They have been
* simplified to ignore all traces in NMI context.
*/
static atomic_t in_nmi;

/*
 * NMI entry hook: bump the nesting counter so that
 * prepare_ftrace_return() refuses to hook return addresses
 * while an NMI is being handled.
 */
void ftrace_nmi_enter(void)
{
atomic_inc(&in_nmi);
}

/* NMI exit hook: matching decrement for ftrace_nmi_enter(). */
void ftrace_nmi_exit(void)
{
atomic_dec(&in_nmi);
}

/*
 * Add a function return address to the trace stack on thread info.
 * Returns 0 on success, -EBUSY when the per-thread stack is full.
 */
static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func)
{
	int index;
	struct thread_info *ti = current_thread_info();

	/* The return trace stack is full */
	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
		return -EBUSY;

	index = ++ti->curr_ret_stack;
	/*
	 * Compiler barrier: keep the curr_ret_stack update and the
	 * entry stores below in program order, matching the hardened
	 * copy of this helper used by the dynamic-ftrace build of
	 * this file.
	 */
	barrier();
	ti->ret_stack[index].ret = ret;
	ti->ret_stack[index].func = func;
	ti->ret_stack[index].calltime = time;

	return 0;
}

/*
 * Retrieve a function return entry from the trace stack on thread
 * info: hand back the saved return address, entry timestamp and
 * traced function address through the out-parameters, then pop.
 */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
				unsigned long *func)
{
	struct thread_info *ti = current_thread_info();
	int top = ti->curr_ret_stack;

	*ret = ti->ret_stack[top].ret;
	*time = ti->ret_stack[top].calltime;
	*func = ti->ret_stack[top].func;
	ti->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * Called from the return trampoline (return_to_handler): pops the
 * entry pushed by prepare_ftrace_return(), stamps the return time
 * and hands the record to ftrace_function_return().
 * @return the original return address of the traced function.
 */
unsigned long ftrace_return_to_handler(void)
{
struct ftrace_retfunc trace;
pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
trace.rettime = cpu_clock(raw_smp_processor_id());
ftrace_function_return(&trace);

return trace.ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent points at the return-address slot of the traced function;
 * on success the slot is redirected to return_to_handler and the
 * original address is saved via push_return_trace().
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
unsigned long long calltime;
int faulted;
unsigned long return_hooker = (unsigned long)
&return_to_handler;

/* Nmi's are currently unsupported */
if (atomic_read(&in_nmi))
return;

/*
 * Protect against fault, even if it shouldn't
 * happen. This tool is too much intrusive to
 * ignore such a protection.
 *
 * Label 1 loads the original return address, label 2 overwrites
 * the slot with return_hooker; the __ex_table entries route a
 * fault at either to the fixup at label 3, which sets 'faulted'.
 */
asm volatile(
"1: movl (%[parent_old]), %[old]\n"
"2: movl %[return_hooker], (%[parent_replaced])\n"
" movl $0, %[faulted]\n"

".section .fixup, \"ax\"\n"
"3: movl $1, %[faulted]\n"
".previous\n"

".section __ex_table, \"a\"\n"
" .long 1b, 3b\n"
" .long 2b, 3b\n"
".previous\n"

: [parent_replaced] "=r" (parent), [old] "=r" (old),
[faulted] "=r" (faulted)
: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);

/* A fault while patching the slot: disable the tracer entirely. */
if (WARN_ON(faulted)) {
unregister_ftrace_return();
return;
}

/*
 * The saved address does not look like kernel text: restore it and
 * disable the tracer rather than corrupting the return path.
 */
if (WARN_ON(!__kernel_text_address(old))) {
unregister_ftrace_return();
*parent = old;
return;
}

calltime = cpu_clock(raw_smp_processor_id());

/* Per-thread return stack full: give up on tracing this return. */
if (push_return_trace(old, calltime, self_addr) == -EBUSY)
*parent = old;
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
Expand All @@ -166,7 +39,7 @@ static int ftrace_calc_offset(long ip, long addr)
return (int)(addr - ip);
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;

Expand Down Expand Up @@ -311,12 +184,12 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

unsigned char *ftrace_nop_replace(void)
static unsigned char *ftrace_nop_replace(void)
{
return ftrace_nop;
}

int
static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
Expand Down Expand Up @@ -349,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return 0;
}

/*
 * Patch the call site described by @rec from a call to @addr back
 * into a nop.  The call instruction's bytes are passed as the
 * expected old contents so ftrace_modify_code() can verify the
 * site before rewriting it.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned char *expected = ftrace_call_replace(ip, addr);
	unsigned char *replacement = ftrace_nop_replace();

	return ftrace_modify_code(ip, expected, replacement);
}

/*
 * Patch the nop at @rec->ip into a call to @addr.  The nop bytes
 * are passed as the expected old contents so ftrace_modify_code()
 * can verify the site before enabling tracing on it.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned char *expected = ftrace_nop_replace();
	unsigned char *replacement = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(ip, expected, replacement);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
Expand Down Expand Up @@ -426,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
#endif

#ifdef CONFIG_FUNCTION_RET_TRACER

#ifndef CONFIG_DYNAMIC_FTRACE

/*
* These functions are picked from those used on
* this page for dynamic ftrace. They have been
* simplified to ignore all traces in NMI context.
*/
static atomic_t in_nmi;

/*
 * NMI entry hook: bump the nesting counter so that
 * prepare_ftrace_return() refuses to hook return addresses
 * while an NMI is being handled.
 */
void ftrace_nmi_enter(void)
{
atomic_inc(&in_nmi);
}

/* NMI exit hook: matching decrement for ftrace_nmi_enter(). */
void ftrace_nmi_exit(void)
{
atomic_dec(&in_nmi);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */

/*
 * Add a function return address to the trace stack on thread info.
 * Returns 0 on success, -EBUSY when the per-thread stack is full.
 */
static int push_return_trace(unsigned long ret, unsigned long long time,
unsigned long func)
{
int index;
struct thread_info *ti = current_thread_info();

/* The return trace stack is full */
if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
return -EBUSY;

index = ++ti->curr_ret_stack;
/*
 * Compiler barrier: keep the curr_ret_stack update and the entry
 * stores below in program order.
 */
barrier();
ti->ret_stack[index].ret = ret;
ti->ret_stack[index].func = func;
ti->ret_stack[index].calltime = time;

return 0;
}

/*
 * Retrieve a function return entry from the trace stack on thread
 * info: hand back the saved return address, entry timestamp and
 * traced function address through the out-parameters, then pop.
 */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
				unsigned long *func)
{
	struct thread_info *ti = current_thread_info();
	int top = ti->curr_ret_stack;

	*ret = ti->ret_stack[top].ret;
	*time = ti->ret_stack[top].calltime;
	*func = ti->ret_stack[top].func;
	ti->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * Called from the return trampoline (return_to_handler): pops the
 * entry pushed by prepare_ftrace_return(), stamps the return time
 * and hands the record to ftrace_function_return().
 * @return the original return address of the traced function.
 */
unsigned long ftrace_return_to_handler(void)
{
struct ftrace_retfunc trace;
pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
trace.rettime = cpu_clock(raw_smp_processor_id());
ftrace_function_return(&trace);

return trace.ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent points at the return-address slot of the traced function;
 * on success the slot is redirected to return_to_handler and the
 * original address is saved via push_return_trace().
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
unsigned long long calltime;
int faulted;
unsigned long return_hooker = (unsigned long)
&return_to_handler;

/* Nmi's are currently unsupported */
if (atomic_read(&in_nmi))
return;

/*
 * Protect against fault, even if it shouldn't
 * happen. This tool is too much intrusive to
 * ignore such a protection.
 *
 * Label 1 loads the original return address, label 2 overwrites
 * the slot with return_hooker; the __ex_table entries route a
 * fault at either to the fixup at label 3, which sets 'faulted'.
 */
asm volatile(
"1: movl (%[parent_old]), %[old]\n"
"2: movl %[return_hooker], (%[parent_replaced])\n"
" movl $0, %[faulted]\n"

".section .fixup, \"ax\"\n"
"3: movl $1, %[faulted]\n"
".previous\n"

".section __ex_table, \"a\"\n"
" .long 1b, 3b\n"
" .long 2b, 3b\n"
".previous\n"

: [parent_replaced] "=r" (parent), [old] "=r" (old),
[faulted] "=r" (faulted)
: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);

/* A fault while patching the slot: disable the tracer entirely. */
if (WARN_ON(faulted)) {
unregister_ftrace_return();
return;
}

/*
 * The saved address does not look like kernel text: restore it and
 * disable the tracer rather than corrupting the return path.
 */
if (WARN_ON(!__kernel_text_address(old))) {
unregister_ftrace_return();
*parent = old;
return;
}

calltime = cpu_clock(raw_smp_processor_id());

/* Per-thread return stack full: give up on tracing this return. */
if (push_return_trace(old, calltime, self_addr) == -EBUSY)
*parent = old;
}

#endif /* CONFIG_FUNCTION_RET_TRACER */
Loading

0 comments on commit 99224f6

Please sign in to comment.