Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull ftrace updates from Steve Rostedt:

" This patch series extends ftrace function tracing utility to be
  more dynamic for its users. It allows for data passing to the callback
  functions, as well as reading regs as if a breakpoint were to trigger
  at function entry.

  The main goal of this patch series was to allow kprobes to use ftrace
  as an optimized probe point when a probe is placed on an ftrace nop.
  With lots of help from Masami Hiramatsu, and going through lots of
  iterations, we finally came up with a good solution. "
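
In code terms, the function-trace callback now takes four arguments instead of two. A minimal sketch of a module-side user (the signature and the FTRACE_OPS_FL_SAVE_REGS flag are from the series as merged; the callback body and all other names are illustrative):

    #include <linux/ftrace.h>
    #include <linux/module.h>

    /* The callback receives the ftrace_ops that registered it (so per-user
     * data can be reached via container_of()) and, when requested, a pt_regs
     * snapshot as if a breakpoint had fired at function entry. */
    static void notrace my_tracer(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
    {
            if (regs)
                    pr_info("%ps called from %ps, ip=%lx\n",
                            (void *)ip, (void *)parent_ip, regs->ip);
    }

    static struct ftrace_ops my_ops = {
            .func  = my_tracer,
            .flags = FTRACE_OPS_FL_SAVE_REGS,  /* ask for full pt_regs */
    };

    /* register_ftrace_function(&my_ops) attaches the callback;
     * ftrace_set_filter() can narrow it to specific functions. */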

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed Aug 21, 2012
2 parents 194f8dc + e525389 commit 26198c2
Showing 19 changed files with 1,199 additions and 216 deletions.
49 changes: 30 additions & 19 deletions arch/x86/include/asm/ftrace.h
@@ -3,27 +3,33 @@

 #ifdef __ASSEMBLY__
 
-.macro MCOUNT_SAVE_FRAME
-        /* taken from glibc */
-        subq $0x38, %rsp
-        movq %rax, (%rsp)
-        movq %rcx, 8(%rsp)
-        movq %rdx, 16(%rsp)
-        movq %rsi, 24(%rsp)
-        movq %rdi, 32(%rsp)
-        movq %r8, 40(%rsp)
-        movq %r9, 48(%rsp)
+/* skip is set if the stack was already partially adjusted */
+.macro MCOUNT_SAVE_FRAME skip=0
+        /*
+         * We add enough stack to save all regs.
+         */
+        subq $(SS+8-\skip), %rsp
+        movq %rax, RAX(%rsp)
+        movq %rcx, RCX(%rsp)
+        movq %rdx, RDX(%rsp)
+        movq %rsi, RSI(%rsp)
+        movq %rdi, RDI(%rsp)
+        movq %r8, R8(%rsp)
+        movq %r9, R9(%rsp)
+        /* Move RIP to its proper location */
+        movq SS+8(%rsp), %rdx
+        movq %rdx, RIP(%rsp)
 .endm
 
-.macro MCOUNT_RESTORE_FRAME
-        movq 48(%rsp), %r9
-        movq 40(%rsp), %r8
-        movq 32(%rsp), %rdi
-        movq 24(%rsp), %rsi
-        movq 16(%rsp), %rdx
-        movq 8(%rsp), %rcx
-        movq (%rsp), %rax
-        addq $0x38, %rsp
+.macro MCOUNT_RESTORE_FRAME skip=0
+        movq R9(%rsp), %r9
+        movq R8(%rsp), %r8
+        movq RDI(%rsp), %rdi
+        movq RSI(%rsp), %rsi
+        movq RDX(%rsp), %rdx
+        movq RCX(%rsp), %rcx
+        movq RAX(%rsp), %rax
+        addq $(SS+8-\skip), %rsp
 .endm
 
 #endif
@@ -32,6 +38,11 @@
 #define MCOUNT_ADDR             ((long)(mcount))
 #define MCOUNT_INSN_SIZE        5 /* sizeof mcount call */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
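
The named offsets above (RAX, RCX, ..., RIP, SS) are the x86-64 struct pt_regs slot offsets from <asm/ptrace-abi.h>, so the frame MCOUNT_SAVE_FRAME builds is laid out exactly like a struct pt_regs and can be handed to callbacks as one. A compile-time sanity sketch, not part of the patch, just to make the correspondence concrete:

    #include <linux/bug.h>
    #include <linux/stddef.h>
    #include <asm/ptrace.h>         /* struct pt_regs */
    #define __FRAME_OFFSETS         /* expose RAX/RIP/SS to C code */
    #include <asm/ptrace-abi.h>

    /* Illustrative only: the asm offsets are pt_regs field offsets. */
    static inline void mcount_frame_layout_check(void)
    {
            BUILD_BUG_ON(RAX != offsetof(struct pt_regs, ax));
            BUILD_BUG_ON(RIP != offsetof(struct pt_regs, ip));
            BUILD_BUG_ON(SS  != offsetof(struct pt_regs, ss));
    }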
1 change: 1 addition & 0 deletions arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
 #include <asm/insn.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
+#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
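
With ARCH_SUPPORTS_KPROBES_ON_FTRACE defined, a kprobe whose address lands on the ftrace nop at function entry can be dispatched through the ftrace callback rather than an int3 breakpoint. A hedged sketch of a module that would take this optimized path (the target symbol and all names are illustrative, not from the patch):

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* regs looks just like a breakpoint fired at function entry */
            pr_info("probed %s, ip=%lx\n", p->symbol_name, regs->ip);
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name = "do_fork",       /* illustrative target */
            .pre_handler = my_pre_handler,
    };

    static int __init probe_init(void)
    {
            return register_kprobe(&kp);
    }

    static void __exit probe_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(probe_init);
    module_exit(probe_exit);
    MODULE_LICENSE("GPL");

Because the probe sits on the ftrace nop, no instruction copy or single-step is needed; the pre-handler runs straight from the regs-saving ftrace caller added below.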
75 changes: 71 additions & 4 deletions arch/x86/kernel/entry_32.S
@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller)
         pushl %eax
         pushl %ecx
         pushl %edx
-        movl 0xc(%esp), %eax
+        pushl $0        /* Pass NULL as regs pointer */
+        movl 4*4(%esp), %eax
         movl 0x4(%ebp), %edx
+        leal function_trace_op, %ecx
         subl $MCOUNT_INSN_SIZE, %eax
 
 .globl ftrace_call
 ftrace_call:
         call ftrace_stub
 
+        addl $4,%esp    /* skip NULL pointer */
         popl %edx
         popl %ecx
         popl %eax
+ftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -1131,6 +1135,72 @@ ftrace_stub:
         ret
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+        pushf   /* push flags before compare (in cs location) */
+        cmpl $0, function_trace_stop
+        jne ftrace_restore_flags
+
+        /*
+         * i386 does not save SS and ESP when coming from kernel.
+         * Instead, to get sp, &regs->sp is used (see ptrace.h).
+         * Unfortunately, that means eflags must be at the same location
+         * as the current return ip is. We move the return ip into the
+         * ip location, and move flags into the return ip location.
+         */
+        pushl 4(%esp)   /* save return ip into ip slot */
+        subl $MCOUNT_INSN_SIZE, (%esp)  /* Adjust ip */
+
+        pushl $0        /* Load 0 into orig_ax */
+        pushl %gs
+        pushl %fs
+        pushl %es
+        pushl %ds
+        pushl %eax
+        pushl %ebp
+        pushl %edi
+        pushl %esi
+        pushl %edx
+        pushl %ecx
+        pushl %ebx
+
+        movl 13*4(%esp), %eax   /* Get the saved flags */
+        movl %eax, 14*4(%esp)   /* Move saved flags into regs->flags location */
+                                /* clobbering return ip */
+        movl $__KERNEL_CS,13*4(%esp)
+
+        movl 12*4(%esp), %eax   /* Load ip (1st parameter) */
+        movl 0x4(%ebp), %edx    /* Load parent ip (2nd parameter) */
+        leal function_trace_op, %ecx    /* Save ftrace_pos in 3rd parameter */
+        pushl %esp      /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+        call ftrace_stub
+
+        addl $4, %esp   /* Skip pt_regs */
+        movl 14*4(%esp), %eax   /* Move flags back into cs */
+        movl %eax, 13*4(%esp)   /* Needed to keep addl from modifying flags */
+        movl 12*4(%esp), %eax   /* Get return ip from regs->ip */
+        addl $MCOUNT_INSN_SIZE, %eax
+        movl %eax, 14*4(%esp)   /* Put return ip back for ret */
+
+        popl %ebx
+        popl %ecx
+        popl %edx
+        popl %esi
+        popl %edi
+        popl %ebp
+        popl %eax
+        popl %ds
+        popl %es
+        popl %fs
+        popl %gs
+        addl $8, %esp   /* Skip orig_ax and ip */
+        popf    /* Pop flags at end (no addl to corrupt flags) */
+        jmp ftrace_ret
+
+ftrace_restore_flags:
+        popf
+        jmp ftrace_stub
+
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
@@ -1171,9 +1241,6 @@ END(mcount)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-        cmpl $0, function_trace_stop
-        jne ftrace_stub
-
         pushl %eax
         pushl %ecx
         pushl %edx
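
For reference, the frame the 32-bit ftrace_regs_caller hand-builds mirrors struct pt_regs on i386; a sketch of the slot map behind the N*4(%esp) arithmetic above (field order as in arch/x86/include/asm/ptrace.h of this era; the comments are illustrative):

    struct pt_regs {                /* i386; each slot is 4 bytes */
            unsigned long bx;       /*  0*4  pushed last, so lowest */
            unsigned long cx;       /*  1*4 */
            unsigned long dx;       /*  2*4 */
            unsigned long si;       /*  3*4 */
            unsigned long di;       /*  4*4 */
            unsigned long bp;       /*  5*4 */
            unsigned long ax;       /*  6*4 */
            unsigned long ds;       /*  7*4 */
            unsigned long es;       /*  8*4 */
            unsigned long fs;       /*  9*4 */
            unsigned long gs;       /* 10*4 */
            unsigned long orig_ax;  /* 11*4  the pushl $0 */
            unsigned long ip;       /* 12*4  return ip, re-pushed */
            unsigned long cs;       /* 13*4  flags land here first, then fixed up */
            unsigned long flags;    /* 14*4 */
            /* sp/ss are not saved when coming from the kernel; &regs->sp is
             * used instead, which is why flags must share space with ip. */
    };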
96 changes: 86 additions & 10 deletions arch/x86/kernel/entry_64.S
@@ -73,20 +73,34 @@ ENTRY(mcount)
         retq
 END(mcount)
 
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+        MCOUNT_SAVE_FRAME \skip
+
+        /* Load the ftrace_ops into the 3rd parameter */
+        leaq function_trace_op, %rdx
+
+        /* Load ip into the first parameter */
+        movq RIP(%rsp), %rdi
+        subq $MCOUNT_INSN_SIZE, %rdi
+        /* Load the parent_ip into the second parameter */
+        movq 8(%rbp), %rsi
+.endm
+
 ENTRY(ftrace_caller)
+        /* Check if tracing was disabled (quick check) */
         cmpl $0, function_trace_stop
         jne ftrace_stub
 
-        MCOUNT_SAVE_FRAME
-
-        movq 0x38(%rsp), %rdi
-        movq 8(%rbp), %rsi
-        subq $MCOUNT_INSN_SIZE, %rdi
+        ftrace_caller_setup
+        /* regs go into 4th parameter (but make it NULL) */
+        movq $0, %rcx
 
 GLOBAL(ftrace_call)
         call ftrace_stub
 
         MCOUNT_RESTORE_FRAME
+ftrace_return:
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 GLOBAL(ftrace_graph_call)
@@ -97,6 +111,71 @@ GLOBAL(ftrace_stub)
         retq
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+        /* Save the current flags before compare (in SS location)*/
+        pushfq
+
+        /* Check if tracing was disabled (quick check) */
+        cmpl $0, function_trace_stop
+        jne ftrace_restore_flags
+
+        /* skip=8 to skip flags saved in SS */
+        ftrace_caller_setup 8
+
+        /* Save the rest of pt_regs */
+        movq %r15, R15(%rsp)
+        movq %r14, R14(%rsp)
+        movq %r13, R13(%rsp)
+        movq %r12, R12(%rsp)
+        movq %r11, R11(%rsp)
+        movq %r10, R10(%rsp)
+        movq %rbp, RBP(%rsp)
+        movq %rbx, RBX(%rsp)
+        /* Copy saved flags */
+        movq SS(%rsp), %rcx
+        movq %rcx, EFLAGS(%rsp)
+        /* Kernel segments */
+        movq $__KERNEL_DS, %rcx
+        movq %rcx, SS(%rsp)
+        movq $__KERNEL_CS, %rcx
+        movq %rcx, CS(%rsp)
+        /* Stack - skipping return address */
+        leaq SS+16(%rsp), %rcx
+        movq %rcx, RSP(%rsp)
+
+        /* regs go into 4th parameter */
+        leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+        call ftrace_stub
+
+        /* Copy flags back to SS, to restore them */
+        movq EFLAGS(%rsp), %rax
+        movq %rax, SS(%rsp)
+
+        /* restore the rest of pt_regs */
+        movq R15(%rsp), %r15
+        movq R14(%rsp), %r14
+        movq R13(%rsp), %r13
+        movq R12(%rsp), %r12
+        movq R10(%rsp), %r10
+        movq RBP(%rsp), %rbp
+        movq RBX(%rsp), %rbx
+
+        /* skip=8 to skip flags saved in SS */
+        MCOUNT_RESTORE_FRAME 8
+
+        /* Restore flags */
+        popfq
+
+        jmp ftrace_return
+ftrace_restore_flags:
+        popfq
+        jmp ftrace_stub
+
+END(ftrace_regs_caller)
+
+
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
         cmpl $0, function_trace_stop
@@ -119,7 +198,7 @@ GLOBAL(ftrace_stub)
 trace:
         MCOUNT_SAVE_FRAME
 
-        movq 0x38(%rsp), %rdi
+        movq RIP(%rsp), %rdi
         movq 8(%rbp), %rsi
         subq $MCOUNT_INSN_SIZE, %rdi
 
@@ -134,13 +213,10 @@ END(mcount)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-        cmpl $0, function_trace_stop
-        jne ftrace_stub
-
         MCOUNT_SAVE_FRAME
 
         leaq 8(%rbp), %rdi
-        movq 0x38(%rsp), %rsi
+        movq RIP(%rsp), %rsi
         movq (%rbp), %rdx
         subq $MCOUNT_INSN_SIZE, %rsi
 
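
The skip=8 bookkeeping in ftrace_regs_caller is worth spelling out; a worked sketch using the <asm/ptrace-abi.h> constants (on x86-64, SS = 160, so SS+8 = 168):

    /* ftrace_caller (no early pushfq):
     *      subq $(SS+8), %rsp      ->  168-byte frame; the return address
     *                                  is readable at SS+8(%rsp).
     *
     * ftrace_regs_caller: pushfq already moved %rsp down by 8, so
     *      subq $(SS+8-8), %rsp    ->  only 160 more bytes, and the pushed
     *                                  flags land exactly in the SS slot.
     *                                  That is why the code copies SS(%rsp)
     *                                  into EFLAGS(%rsp) before the call and
     *                                  back again before the final popfq.
     */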