x86/ibt: Clean up is_endbr()
Pretty much every caller of is_endbr() actually wants to test something at an
address and ends up doing get_kernel_nofault(). Fold the lot into a more
convenient helper.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Link: https://lore.kernel.org/r/20250207122546.181367417@infradead.org
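To make the shape of the change concrete, here is an illustrative before/after sketch of a typical call site (not taken verbatim from the hunks below; addr stands for whatever kernel text address the caller is probing, and handle_endbr() is a placeholder for whatever the caller does next):

    /* Before: each caller open-coded a fault-safe read of the opcode. */
    u32 insn;

    if (get_kernel_nofault(insn, (u32 *)addr))
            return false;
    if (is_endbr(insn))
            handle_endbr();

    /* After: is_endbr() takes the address and performs the nofault read itself. */
    if (is_endbr((u32 *)addr))
            handle_endbr();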
Peter Zijlstra committed Feb 14, 2025
1 parent 92d2da3 commit 72e213a
Showing 7 changed files with 27 additions and 52 deletions.
2 changes: 1 addition & 1 deletion arch/x86/events/core.c
@@ -2844,7 +2844,7 @@ static bool is_uprobe_at_func_entry(struct pt_regs *regs)
                 return true;
 
         /* endbr64 (64-bit only) */
-        if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn))
+        if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
                 return true;
 
         return false;
16 changes: 2 additions & 14 deletions arch/x86/include/asm/ftrace.h
@@ -36,21 +36,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 
 static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
 {
-#ifdef CONFIG_X86_KERNEL_IBT
-        u32 instr;
-
-        /* We want to be extra safe in case entry ip is on the page edge,
-         * but otherwise we need to avoid get_kernel_nofault()'s overhead.
-         */
-        if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
-                if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
-                        return fentry_ip;
-        } else {
-                instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
-        }
-        if (is_endbr(instr))
+        if (is_endbr((void*)(fentry_ip - ENDBR_INSN_SIZE)))
                 fentry_ip -= ENDBR_INSN_SIZE;
-#endif
+
         return fentry_ip;
 }
 #define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)
5 changes: 3 additions & 2 deletions arch/x86/include/asm/ibt.h
@@ -65,7 +65,7 @@ static inline __attribute_const__ u32 gen_endbr_poison(void)
         return 0x001f0f66; /* osp nopl (%rax) */
 }
 
-static inline bool is_endbr(u32 val)
+static inline bool __is_endbr(u32 val)
 {
         if (val == gen_endbr_poison())
                 return true;
@@ -74,6 +74,7 @@ static inline bool is_endbr(u32 val)
         return val == gen_endbr();
 }
 
+extern __noendbr bool is_endbr(u32 *val);
 extern __noendbr u64 ibt_save(bool disable);
 extern __noendbr void ibt_restore(u64 save);
 
@@ -98,7 +99,7 @@ extern __noendbr void ibt_restore(u64 save);
 
 #define __noendbr
 
-static inline bool is_endbr(u32 val) { return false; }
+static inline bool is_endbr(u32 *val) { return false; }
 
 static inline u64 ibt_save(bool disable) { return 0; }
 static inline void ibt_restore(u64 save) { }
20 changes: 14 additions & 6 deletions arch/x86/kernel/alternative.c
@@ -852,16 +852,24 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #ifdef CONFIG_X86_KERNEL_IBT
 
+__noendbr bool is_endbr(u32 *val)
+{
+        u32 endbr;
+
+        __get_kernel_nofault(&endbr, val, u32, Efault);
+        return __is_endbr(endbr);
+
+Efault:
+        return false;
+}
+
 static void poison_cfi(void *addr);
 
 static void __init_or_module poison_endbr(void *addr, bool warn)
 {
-        u32 endbr, poison = gen_endbr_poison();
-
-        if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
-                return;
+        u32 poison = gen_endbr_poison();
 
-        if (!is_endbr(endbr)) {
+        if (!is_endbr(addr)) {
                 WARN_ON_ONCE(warn);
                 return;
         }
@@ -988,7 +996,7 @@ static u32 cfi_seed __ro_after_init;
 static u32 cfi_rehash(u32 hash)
 {
         hash ^= cfi_seed;
-        while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
+        while (unlikely(__is_endbr(hash) || __is_endbr(-hash))) {
                 bool lsb = hash & 1;
                 hash >>= 1;
                 if (lsb)
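For orientation (an illustrative sketch, not part of the patch; text_addr is a hypothetical kernel text address): __is_endbr() stays a pure predicate on a u32 value with no memory access, which is why cfi_rehash() can keep using it on hash values, while the new out-of-line is_endbr() takes an address and does the fault-tolerant read itself, so probing an unmapped address simply returns false:

    /* Value form: checks a u32 already in hand; true for the poison encoding. */
    bool poisoned = __is_endbr(gen_endbr_poison());

    /* Address form: reads 4 bytes via __get_kernel_nofault() internally;
     * an unreadable address yields false instead of faulting.
     */
    bool has_endbr = is_endbr((u32 *)text_addr);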
11 changes: 1 addition & 10 deletions arch/x86/kernel/kprobes/core.c
@@ -373,16 +373,7 @@ static bool can_probe(unsigned long paddr)
 kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
                                          bool *on_func_entry)
 {
-        u32 insn;
-
-        /*
-         * Since 'addr' is not guaranteed to be safe to access, use
-         * copy_from_kernel_nofault() to read the instruction:
-         */
-        if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
-                return NULL;
-
-        if (is_endbr(insn)) {
+        if (is_endbr((u32 *)addr)) {
                 *on_func_entry = !offset || offset == 4;
                 if (*on_func_entry)
                         offset = 4;
4 changes: 2 additions & 2 deletions arch/x86/net/bpf_jit_comp.c
@@ -641,7 +641,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
          * See emit_prologue(), for IBT builds the trampoline hook is preceded
          * with an ENDBR instruction.
          */
-        if (is_endbr(*(u32 *)ip))
+        if (is_endbr(ip))
                 ip += ENDBR_INSN_SIZE;
 
         return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
@@ -3036,7 +3036,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                 /* skip patched call instruction and point orig_call to actual
                  * body of the kernel function.
                  */
-                if (is_endbr(*(u32 *)orig_call))
+                if (is_endbr(orig_call))
                         orig_call += ENDBR_INSN_SIZE;
                 orig_call += X86_PATCH_SIZE;
         }
21 changes: 4 additions & 17 deletions kernel/trace/bpf_trace.c
@@ -1038,27 +1038,14 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
         .arg1_type = ARG_PTR_TO_CTX,
 };
 
-#ifdef CONFIG_X86_KERNEL_IBT
-static unsigned long get_entry_ip(unsigned long fentry_ip)
+static inline unsigned long get_entry_ip(unsigned long fentry_ip)
 {
-        u32 instr;
-
-        /* We want to be extra safe in case entry ip is on the page edge,
-         * but otherwise we need to avoid get_kernel_nofault()'s overhead.
-         */
-        if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
-                if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
-                        return fentry_ip;
-        } else {
-                instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
-        }
-        if (is_endbr(instr))
+#ifdef CONFIG_X86_KERNEL_IBT
+        if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))
                 fentry_ip -= ENDBR_INSN_SIZE;
+#endif
         return fentry_ip;
 }
-#else
-#define get_entry_ip(fentry_ip) fentry_ip
-#endif
 
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
