x86/ibt,kprobes: Cure sym+0 equals fentry woes
In order to allow kprobes to skip the ENDBR instructions at sym+0 for
X86_KERNEL_IBT builds, change _kprobe_addr() to take an architecture
callback to inspect the function at hand and modify the offset if
needed.

This streamlines the existing interface to cover more cases and
requires fewer hooks. Once PowerPC gets fully converted there will
only be the one arch hook.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.405947704@infradead.org
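
To illustrate the x86 case the patch addresses: with IBT, functions begin with a 4-byte ENDBR64 landing pad, so a probe requested at sym+0 has to land on the first real instruction at sym+4. Below is a minimal user-space sketch of that adjustment, assuming ENDBR64 is the only prefix to skip; the constant and helper names are illustrative, and the kernel's actual is_endbr() is more involved than this:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* ENDBR64 is F3 0F 1E FA; read little-endian as a u32 it is 0xfa1e0ff3. */
#define ENDBR64_OPCODE	0xfa1e0ff3u

/* Simplified stand-in for the kernel's is_endbr(). */
static bool starts_with_endbr(const void *func)
{
	uint32_t insn;

	memcpy(&insn, func, sizeof(insn));
	return insn == ENDBR64_OPCODE;
}

/* Map a requested sym+offset onto the first "real" instruction. */
static uintptr_t adjust_probe_addr(uintptr_t sym, unsigned long offset,
				   bool *on_func_entry)
{
	if (starts_with_endbr((const void *)sym)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;
	} else {
		*on_func_entry = !offset;
	}
	return sym + offset;
}
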
Peter Zijlstra committed Mar 15, 2022
1 parent e52fc2c commit cc66bb9
Showing 4 changed files with 92 additions and 28 deletions.
34 changes: 21 additions & 13 deletions arch/powerpc/kernel/kprobes.c
@@ -105,6 +105,27 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
 	return addr;
 }

+static bool arch_kprobe_on_func_entry(unsigned long offset)
+{
+#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_KPROBES_ON_FTRACE
+	return offset <= 16;
+#else
+	return offset <= 8;
+#endif
+#else
+	return !offset;
+#endif
+}
+
+/* XXX try and fold the magic of kprobe_lookup_name() in this */
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+					 bool *on_func_entry)
+{
+	*on_func_entry = arch_kprobe_on_func_entry(offset);
+	return (kprobe_opcode_t *)(addr + offset);
+}
+
 void *alloc_insn_page(void)
 {
 	void *page;
@@ -218,19 +239,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
 	kcb->kprobe_saved_msr = regs->msr;
 }

-bool arch_kprobe_on_func_entry(unsigned long offset)
-{
-#ifdef PPC64_ELF_ABI_v2
-#ifdef CONFIG_KPROBES_ON_FTRACE
-	return offset <= 16;
-#else
-	return offset <= 8;
-#endif
-#else
-	return !offset;
-#endif
-}
-
 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	ri->ret_addr = (kprobe_opcode_t *)regs->link;
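
A note on the offsets hard-coded above: on ELFv2, the global entry point carries two TOC-setup instructions (8 bytes) ahead of the local entry point, and with CONFIG_KPROBES_ON_FTRACE the ftrace location sits a few instructions further in, so offsets up to 8 resp. 16 still count as function entry. A hedged stand-alone restatement of that rule, assuming the prologue layout sketched in the comment (not kernel code):

#include <stdbool.h>

/*
 * Illustrative ELFv2 layout (exact code depends on compiler flags):
 *   sym+0: addis r2,r12,...   global entry point, TOC setup
 *   sym+4: addi  r2,r2,...
 *   sym+8:                    local entry point, ftrace site follows
 */
static bool ppc_elfv2_on_func_entry(unsigned long offset, bool kprobes_on_ftrace)
{
	return offset <= (kprobes_on_ftrace ? 16UL : 8UL);
}
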
17 changes: 17 additions & 0 deletions arch/x86/kernel/kprobes/core.c
@@ -52,6 +52,7 @@
 #include <asm/insn.h>
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
+#include <asm/ibt.h>
 
 #include "common.h"

@@ -294,6 +295,22 @@ static int can_probe(unsigned long paddr)
 	return (addr == paddr);
 }

+/* If x86 supports IBT (ENDBR) it must be skipped. */
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+					 bool *on_func_entry)
+{
+	if (is_endbr(*(u32 *)addr)) {
+		*on_func_entry = !offset || offset == 4;
+		if (*on_func_entry)
+			offset = 4;
+
+	} else {
+		*on_func_entry = !offset;
+	}
+
+	return (kprobe_opcode_t *)(addr + offset);
+}
+
 /*
  * Copy an instruction with recovering modified instruction by kprobes
  * and adjust the displacement if the instruction uses the %rip-relative
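
To make the entry semantics above concrete, here is a hedged, self-contained demo of the resulting offset mapping for a function assumed to start with ENDBR; adjust() is a made-up stand-in for the arch hook with the is_endbr() test pre-evaluated to true:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for arch_adjust_kprobe_addr() when is_endbr() holds. */
static unsigned long adjust(unsigned long offset, bool *on_func_entry)
{
	*on_func_entry = !offset || offset == 4;
	if (*on_func_entry)
		offset = 4;
	return offset;
}

int main(void)
{
	unsigned long offs[] = { 0, 4, 10 };

	for (int i = 0; i < 3; i++) {
		bool entry;
		unsigned long o = adjust(offs[i], &entry);

		/* Prints: 0 -> 4 entry=1, 4 -> 4 entry=1, 10 -> 10 entry=0 */
		printf("sym+%lu -> sym+%lu, on_func_entry=%d\n",
		       offs[i], o, entry);
	}
	return 0;
}
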
3 changes: 2 additions & 1 deletion include/linux/kprobes.h
@@ -265,7 +265,6 @@ extern int arch_init_kprobes(void);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
 extern int arch_populate_kprobe_blacklist(void);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
 extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
 
 extern bool within_kprobe_blacklist(unsigned long addr);
@@ -384,6 +383,8 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 }
 
 kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int register_kprobes(struct kprobe **kps, int num);
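
The header can declare arch_adjust_kprobe_addr() unconditionally because kernel/kprobes.c (below) supplies a __weak default that an architecture may override with a strong definition. A hedged two-file sketch of that linkage pattern, with a hypothetical arch_hook() standing in:

/* generic.c -- generic fallback, overridable at link time. */
int __attribute__((weak)) arch_hook(void)
{
	return 0;
}

/* arch.c -- an arch's strong definition silently replaces the weak one. */
int arch_hook(void)
{
	return 1;
}
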
66 changes: 52 additions & 14 deletions kernel/kprobes.c
@@ -1488,25 +1488,69 @@ bool within_kprobe_blacklist(unsigned long addr)
 	return false;
 }

+/*
+ * arch_adjust_kprobe_addr - adjust the address
+ * @addr: symbol base address
+ * @offset: offset within the symbol
+ * @on_func_entry: was this @addr+@offset on the function entry
+ *
+ * Typically returns @addr + @offset, except for special cases where the
+ * function might be prefixed by a CFI landing pad, in that case any offset
+ * inside the landing pad is mapped to the first 'real' instruction of the
+ * symbol.
+ *
+ * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
+ * instruction at +0.
+ */
+kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
+						unsigned long offset,
+						bool *on_func_entry)
+{
+	*on_func_entry = !offset;
+	return (kprobe_opcode_t *)(addr + offset);
+}
+
 /*
  * If 'symbol_name' is specified, look it up and add the 'offset'
  * to it. This way, we can specify a relative address to a symbol.
  * This returns encoded errors if it fails to look up symbol or invalid
  * combination of parameters.
  */
-static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
-			const char *symbol_name, unsigned int offset)
+static kprobe_opcode_t *
+_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
+	     unsigned long offset, bool *on_func_entry)
 {
 	if ((symbol_name && addr) || (!symbol_name && !addr))
 		goto invalid;

 	if (symbol_name) {
+		/*
+		 * Input: @sym + @offset
+		 * Output: @addr + @offset
+		 *
+		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
+		 *       argument into it's output!
+		 */
 		addr = kprobe_lookup_name(symbol_name, offset);
 		if (!addr)
 			return ERR_PTR(-ENOENT);
 	}

-	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
+	/*
+	 * So here we have @addr + @offset, displace it into a new
+	 * @addr' + @offset' where @addr' is the symbol start address.
+	 */
+	addr = (void *)addr + offset;
+	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
+		return ERR_PTR(-ENOENT);
+	addr = (void *)addr - offset;
+
+	/*
+	 * Then ask the architecture to re-combine them, taking care of
+	 * magical function entry details while telling us if this was indeed
+	 * at the start of the function.
+	 */
+	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
 	if (addr)
 		return addr;

@@ -1516,7 +1560,8 @@ static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,

 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
 {
-	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
+	bool on_func_entry;
+	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 }

 /*
@@ -2043,11 +2088,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(pre_handler_kretprobe);

-bool __weak arch_kprobe_on_func_entry(unsigned long offset)
-{
-	return !offset;
-}
-
 /**
  * kprobe_on_func_entry() -- check whether given address is function entry
  * @addr: Target address
@@ -2063,15 +2103,13 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
  */
 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
 {
-	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
+	bool on_func_entry;
+	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);

 	if (IS_ERR(kp_addr))
 		return PTR_ERR(kp_addr);

-	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
-		return -ENOENT;
-
-	if (!arch_kprobe_on_func_entry(offset))
+	if (!on_func_entry)
 		return -EINVAL;
 
 	return 0;
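
Putting the new _kprobe_addr() flow together: fold the request into one absolute address, re-split it into symbol base plus offset against the symbol table, then let the architecture recombine the pieces. A hedged user-space model of those three steps; lookup_symbol() is a made-up stand-in for kallsyms_lookup_size_offset(), and arch_adjust() plays the IBT arch hook:

#include <stdbool.h>
#include <stdio.h>

/* Fake one-symbol "kallsyms": foo lives at [0x1000, 0x1040). */
static bool lookup_symbol(unsigned long addr, unsigned long *offset)
{
	if (addr < 0x1000 || addr >= 0x1040)
		return false;
	*offset = addr - 0x1000;
	return true;
}

/* Stand-in arch hook: pretend foo starts with a 4-byte landing pad. */
static unsigned long arch_adjust(unsigned long base, unsigned long offset,
				 bool *on_func_entry)
{
	*on_func_entry = !offset || offset == 4;
	if (*on_func_entry)
		offset = 4;
	return base + offset;
}

static unsigned long normalize_probe(unsigned long addr, unsigned long offset,
				     bool *on_func_entry)
{
	addr += offset;				/* 1) fold into one absolute address */
	if (!lookup_symbol(addr, &offset))	/* 2) re-split as base + offset */
		return 0;
	addr -= offset;
	return arch_adjust(addr, offset, on_func_entry); /* 3) arch recombines */
}

int main(void)
{
	bool entry;
	unsigned long a = normalize_probe(0x1000, 0, &entry);

	printf("0x%lx entry=%d\n", a, entry);	/* prints: 0x1004 entry=1 */
	return 0;
}
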
