Merge branch 'tracing/function-return-tracer' into tracing/fastboot
Ingo Molnar committed Nov 12, 2008
2 parents d06bbd6 + 19b3e96 commit 60a011c
Showing 16 changed files with 506 additions and 16 deletions.
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_RET_TRACER if X86_32
	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
	select HAVE_ARCH_KGDB if !X86_VOYAGER
26 changes: 26 additions & 0 deletions arch/x86/include/asm/ftrace.h
@@ -20,4 +20,30 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_RET_TRACER
#define FTRACE_RET_STACK_SIZE 20

#ifndef __ASSEMBLY__

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32.S
 */
extern void return_to_handler(void);

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_RET_TRACER */

#endif /* _ASM_X86_FTRACE_H */
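With FTRACE_RET_STACK_SIZE fixed at 20, this return stack costs roughly 320 bytes per thread on a 32-bit build (16 bytes per entry, assuming 4-byte longs and an 8-byte calltime with 4-byte alignment). A stand-alone sketch of that arithmetic; the struct is copied from the header above purely for illustration:

/* Size check for the per-thread return stack (user-space sketch,
 * assuming an i386-like 32-bit ABI, not kernel code). */
#include <stdio.h>

#define FTRACE_RET_STACK_SIZE 20

struct ftrace_ret_stack {
	unsigned long ret;		/* hooked (original) return address */
	unsigned long func;		/* entry address of the traced function */
	unsigned long long calltime;	/* timestamp taken at function entry */
};

int main(void)
{
	/* On i386 this prints 16 and 320: every thread_info carries
	 * ~320 bytes for the tracer's shadow return stack. */
	printf("%zu bytes per entry, %zu bytes per thread\n",
	       sizeof(struct ftrace_ret_stack),
	       sizeof(struct ftrace_ret_stack) * FTRACE_RET_STACK_SIZE);
	return 0;
}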
24 changes: 24 additions & 0 deletions arch/x86/include/asm/thread_info.h
@@ -20,6 +20,7 @@
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
#include <asm/ftrace.h>

struct thread_info {
	struct task_struct *task;	/* main task structure */
@@ -38,8 +39,30 @@ struct thread_info {
 */
	__u8 supervisor_stack[0];
#endif

#ifdef CONFIG_FUNCTION_RET_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
#endif
};

#ifdef CONFIG_FUNCTION_RET_TRACER
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
	.addr_limit	= KERNEL_DS,		\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
	.curr_ret_stack = -1,			\
}
#else
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
@@ -52,6 +75,7 @@ struct thread_info {
		.fn = do_no_restart_syscall,	\
	},					\
}
#endif

#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
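Note the -1 initializer for curr_ret_stack: it is the empty-stack sentinel, so the first push in ftrace.c pre-increments to index 0 and the stack counts as full once the index reaches FTRACE_RET_STACK_SIZE - 1. A minimal user-space model of that index discipline (helper names are hypothetical, mirroring push_return_trace()/pop_return_trace() further down):

/* Toy model of the per-thread index discipline; not kernel code. */
#include <stdbool.h>

#define FTRACE_RET_STACK_SIZE 20

static int curr_ret_stack = -1;	/* -1 == empty, as in INIT_THREAD_INFO */

static bool ret_stack_push(void)
{
	if (curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
		return false;	/* full: deeper calls stay untraced */
	curr_ret_stack++;	/* first push lands on index 0 */
	return true;
}

static void ret_stack_pop(void)
{
	curr_ret_stack--;	/* back to -1 after the outermost return */
}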
6 changes: 6 additions & 0 deletions arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif

ifdef CONFIG_FUNCTION_RET_TRACER
# Don't trace __switch_to(): compile process_32.o without -pg
CFLAGS_REMOVE_process_32.o = -pg
endif

#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
33 changes: 33 additions & 0 deletions arch/x86/kernel/entry_32.S
@@ -1188,6 +1188,10 @@ ENTRY(mcount)

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_RET_TRACER
	cmpl $ftrace_stub, ftrace_function_return
	jnz trace_return
#endif
.globl ftrace_stub
ftrace_stub:
	ret
@@ -1206,8 +1210,37 @@ trace:
	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_return:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	pushl %eax
	lea 0x4(%ebp), %eax
	pushl %eax
	call prepare_ftrace_return
	addl $8, %esp
	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub

.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
#endif /* CONFIG_FUNCTION_RET_TRACER */
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
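To make the control flow above concrete: mcount branches to trace_return, which passes prepare_ftrace_return() the address of the caller's return-address slot (0x4(%ebp)); that C code swaps in return_to_handler, so the traced function's ret lands in the trampoline, which asks ftrace_return_to_handler() for the real return address and jumps there. A rough user-space analogue of the same entry/exit lifecycle (names and timing source are illustrative, not the kernel's; the kernel does the redirection by rewriting the on-stack return address):

/* User-space analogue of hook-at-entry / report-at-exit; illustrative only. */
#include <stdio.h>
#include <time.h>

struct ret_entry {
	const char *func;	/* traced function */
	long long calltime;	/* ns at entry */
};

static struct ret_entry stack[20];
static int curr = -1;

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* What prepare_ftrace_return() models: runs at function entry. */
static void hook_entry(const char *func)
{
	if (curr == 19)
		return;		/* stack full: give up on this call */
	stack[++curr] = (struct ret_entry){ func, now_ns() };
}

/* What return_to_handler/ftrace_return_to_handler model: runs at exit. */
static void hook_exit(void)
{
	struct ret_entry e = stack[curr--];
	printf("%s returned after %lld ns\n", e.func, now_ns() - e.calltime);
}

int main(void)
{
	hook_entry("example_func");
	hook_exit();
	return 0;
}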
182 changes: 175 additions & 7 deletions arch/x86/kernel/ftrace.c
@@ -14,14 +14,178 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <linux/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

#ifdef CONFIG_FUNCTION_RET_TRACER

/*
 * These functions are adapted from the dynamic ftrace code
 * later in this file. They have been simplified to ignore
 * all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
	atomic_dec(&in_nmi);
}

/*
 * Synchronize accesses to the return address stack with
 * interrupts.
 */
static raw_spinlock_t ret_stack_lock;

/* Add a function return address to the trace stack in thread_info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func)
{
	int index;
	struct thread_info *ti;
	unsigned long flags;
	int err = 0;

	raw_local_irq_save(flags);
	__raw_spin_lock(&ret_stack_lock);

	ti = current_thread_info();
	/* The return trace stack is full */
	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
		err = -EBUSY;
		goto out;
	}

	index = ++ti->curr_ret_stack;
	ti->ret_stack[index].ret = ret;
	ti->ret_stack[index].func = func;
	ti->ret_stack[index].calltime = time;

out:
	__raw_spin_unlock(&ret_stack_lock);
	raw_local_irq_restore(flags);
	return err;
}

/* Retrieve a function return address from the trace stack in thread_info. */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
				unsigned long *func)
{
	struct thread_info *ti;
	int index;
	unsigned long flags;

	raw_local_irq_save(flags);
	__raw_spin_lock(&ret_stack_lock);

	ti = current_thread_info();
	index = ti->curr_ret_stack;
	*ret = ti->ret_stack[index].ret;
	*func = ti->ret_stack[index].func;
	*time = ti->ret_stack[index].calltime;
	ti->curr_ret_stack--;

	__raw_spin_unlock(&ret_stack_lock);
	raw_local_irq_restore(flags);
}

/*
 * Send the trace to the ring buffer.
 * Returns the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_retfunc trace;

	pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_function_return(&trace);

	return trace.ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (atomic_read(&in_nmi))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * forgo such protection.
	 */
	asm volatile(
		"1: movl (%[parent_old]), %[old]\n"
		"2: movl %[return_hooker], (%[parent_replaced])\n"
		"   movl $0, %[faulted]\n"

		".section .fixup, \"ax\"\n"
		"3: movl $1, %[faulted]\n"
		".previous\n"

		".section __ex_table, \"a\"\n"
		"   .long 1b, 3b\n"
		"   .long 2b, 3b\n"
		".previous\n"

		: [parent_replaced] "=r" (parent), [old] "=r" (old),
		  [faulted] "=r" (faulted)
		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (WARN_ON(faulted)) {
		unregister_ftrace_return();
		return;
	}

	if (WARN_ON(!__kernel_text_address(old))) {
		unregister_ftrace_return();
		*parent = old;
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime, self_addr) == -EBUSY)
		*parent = old;
}

static int __init init_ftrace_function_return(void)
{
	ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	return 0;
}
device_initcall(init_ftrace_function_return);


#endif /* CONFIG_FUNCTION_RET_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
@@ -31,17 +195,11 @@ union ftrace_code_union {
	} __attribute__((packed));
};


static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
}

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data)

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
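For context on how this arch code gets used: ftrace_function_return is the hook the core tracer installs, and a client's handler receives a struct ftrace_retfunc whose func, ret, calltime and rettime fields are filled in by the code above. A sketch of such a client, assuming the registration API (register_ftrace_return and the full ftrace_retfunc layout) from this series' kernel/trace patches, which are among the 16 changed files but not shown on this page:

/* Hypothetical module consuming the return tracer; the registration
 * calls are assumed from the series' core patches, not this file. */
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>

static void my_return_handler(struct ftrace_retfunc *trace)
{
	/* calltime is stamped in prepare_ftrace_return(), rettime in
	 * ftrace_return_to_handler(), both via cpu_clock(). */
	printk(KERN_INFO "func %lx -> ret %lx took %llu ns\n",
	       trace->func, trace->ret, trace->rettime - trace->calltime);
}

static int __init my_init(void)
{
	return register_ftrace_return(my_return_handler);
}

static void __exit my_exit(void)
{
	unregister_ftrace_return();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");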