powerpc/ftrace: Activate HAVE_DYNAMIC_FTRACE_WITH_REGS on PPC32
Unlike PPC64, PPC32 doesn't require any special compiler option
to get the _mcount() call to not clobber registers.

Provide ftrace_regs_caller() and ftrace_regs_call() and activate
HAVE_DYNAMIC_FTRACE_WITH_REGS.

That's heavily copied from ftrace_64_mprofile.S

For the time being, leave livepatching aside; it will come in a
following patch.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1862dc7719855cc2a4eec80920d94c955877557e.1635423081.git.christophe.leroy@csgroup.eu
Christophe Leroy authored and Michael Ellerman committed Nov 29, 2021
1 parent c93d4f6 commit 7dfbfb8
Showing 4 changed files with 125 additions and 12 deletions.
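For context, HAVE_DYNAMIC_FTRACE_WITH_REGS lets an ftrace_ops request a full pt_regs snapshot at the traced call site by setting FTRACE_OPS_FL_SAVE_REGS; the new ftrace_regs_caller added below is what builds that pt_regs on PPC32. A minimal sketch of such a user, assuming the generic ftrace module API of this kernel generation (the filter string, callback name and printed fields are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/ptrace.h>
#include <linux/string.h>

/* Called from ftrace_regs_caller with a full pt_regs because
 * FTRACE_OPS_FL_SAVE_REGS is set on the ops below. */
static void notrace regs_demo_callback(unsigned long ip, unsigned long parent_ip,
				       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);	/* NULL if only partial regs were saved */

	if (regs)
		pr_info_ratelimited("hit %pS, r1=%lx\n", (void *)ip, regs->gpr[1]);
}

static struct ftrace_ops regs_demo_ops = {
	.func	= regs_demo_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static char regs_demo_filter[] = "schedule";	/* any traceable function would do */

static int __init regs_demo_init(void)
{
	int ret = ftrace_set_filter(&regs_demo_ops, regs_demo_filter,
				    strlen(regs_demo_filter), 0);

	return ret ? ret : register_ftrace_function(&regs_demo_ops);
}

static void __exit regs_demo_exit(void)
{
	unregister_ftrace_function(&regs_demo_ops);
}

module_init(regs_demo_init);
module_exit(regs_demo_exit);
MODULE_LICENSE("GPL");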
4 changes: 2 additions & 2 deletions arch/powerpc/Kconfig
@@ -206,7 +206,7 @@ config PPC
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
@@ -230,7 +230,7 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS && PPC64
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_OPTPROBES
8 changes: 8 additions & 0 deletions arch/powerpc/kernel/module_32.c
@@ -306,6 +306,14 @@ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
if (!module->arch.tramp)
return -ENOENT;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
module->arch.tramp_regs = do_plt_call(module->core_layout.base,
(unsigned long)ftrace_regs_caller,
sechdrs, module);
if (!module->arch.tramp_regs)
return -ENOENT;
#endif

return 0;
}
#endif
16 changes: 14 additions & 2 deletions arch/powerpc/kernel/trace/ftrace.c
@@ -561,6 +561,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int err;
struct ppc_inst op;
u32 *ip = (u32 *)rec->ip;
struct module *mod = rec->arch.mod;
unsigned long tramp;

/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, ip))
@@ -573,13 +575,23 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}

/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
if (!mod->arch.tramp) {
#endif
pr_err("No ftrace trampoline\n");
return -EINVAL;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
#endif
tramp = mod->arch.tramp;
/* create the branch to the trampoline */
err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
109 changes: 101 additions & 8 deletions arch/powerpc/kernel/trace/ftrace_32.S
@@ -9,6 +9,7 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/ptrace.h>

_GLOBAL(mcount)
_GLOBAL(_mcount)
@@ -29,34 +30,114 @@ _GLOBAL(ftrace_caller)
MCOUNT_SAVE_FRAME
/* r3 ends up with link register */
subi r3, r3, MCOUNT_INSN_SIZE
lis r5,function_trace_op@ha
lwz r5,function_trace_op@l(r5)
li r6, 0
.globl ftrace_call
ftrace_call:
bl ftrace_stub
nop
MCOUNT_RESTORE_FRAME
ftrace_caller_common:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
b ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr


_GLOBAL(ftrace_stub)
blr

_GLOBAL(ftrace_regs_caller)
/* Save the original return address in A's stack frame */
stw r0,LRSAVE(r1)

/* Create our stack frame + pt_regs */
stwu r1,-INT_FRAME_SIZE(r1)

/* Save all gprs to pt_regs */
stw r0, GPR0(r1)
stmw r2, GPR2(r1)

/* Save previous stack pointer (r1) */
addi r8, r1, INT_FRAME_SIZE
stw r8, GPR1(r1)

/* Load special regs for save below */
mfmsr r8
mfctr r9
mfxer r10
mfcr r11

/* Get the _mcount() call site out of LR */
mflr r7
/* Save it as pt_regs->nip */
stw r7, _NIP(r1)
/* Save the read LR in pt_regs->link */
stw r0, _LINK(r1)

lis r3,function_trace_op@ha
lwz r5,function_trace_op@l(r3)

/* Calculate ip from nip-4 into r3 for call below */
subi r3, r7, MCOUNT_INSN_SIZE

/* Put the original return address in r4 as parent_ip */
mr r4, r0

/* Save special regs */
stw r8, _MSR(r1)
stw r9, _CTR(r1)
stw r10, _XER(r1)
stw r11, _CCR(r1)

/* Load &pt_regs in r6 for call below */
addi r6, r1, STACK_FRAME_OVERHEAD

/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub
nop

/* Load ctr with the possibly modified NIP */
lwz r3, _NIP(r1)
mtctr r3

/* Restore gprs */
lmw r2, GPR2(r1)

/* Restore possibly modified LR */
lwz r0, _LINK(r1)
mtlr r0

/* Pop our stack frame */
addi r1, r1, INT_FRAME_SIZE

b ftrace_caller_common

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
stwu r1,-48(r1)
stw r3, 12(r1)
stw r4, 16(r1)
stw r5, 20(r1)
stw r6, 24(r1)
stw r7, 28(r1)
stw r8, 32(r1)
stw r9, 36(r1)
stw r10,40(r1)

addi r5, r1, 48
/* load r4 with local address */
lwz r4, 44(r1)
mfctr r4 /* ftrace_caller has moved local addr here */
stw r4, 44(r1)
mflr r3 /* ftrace_caller has restored LR from stack */
subi r4, r4, MCOUNT_INSN_SIZE

/* Grab the LR out of the caller stack frame */
lwz r3,52(r1)

bl prepare_ftrace_return
nop

@@ -65,9 +146,21 @@ _GLOBAL(ftrace_graph_caller)
* Change the LR in the callers stack frame to this.
*/
stw r3,52(r1)
mtlr r3
lwz r0,44(r1)
mtctr r0

lwz r3, 12(r1)
lwz r4, 16(r1)
lwz r5, 20(r1)
lwz r6, 24(r1)
lwz r7, 28(r1)
lwz r8, 32(r1)
lwz r9, 36(r1)
lwz r10,40(r1)

addi r1, r1, 48

MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr

_GLOBAL(return_to_handler)
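The "possibly modified NIP" handling in ftrace_regs_caller above is what the follow-up livepatch support builds on: because the trampoline reloads pt_regs->nip into ctr before returning, a SAVE_REGS callback can redirect the traced function by rewriting regs->nip. A hedged sketch of that idea, again assuming the generic ftrace API (changing the ip also requires FTRACE_OPS_FL_IPMODIFY; my_replacement_func is a hypothetical symbol, not from this patch):

void my_replacement_func(void);	/* hypothetical replacement, provided elsewhere */

/* Sketch only: divert the traced function by editing pt_regs->nip, which
 * ftrace_regs_caller above turns into the return target via mtctr/bctr. */
static void notrace redirect_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		regs->nip = (unsigned long)my_replacement_func;
}

static struct ftrace_ops redirect_ops = {
	.func	= redirect_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};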
