x86/bpf: Emit call depth accounting if required
Ensure that calls in BPF jitted programs emit call depth accounting when it is
enabled, so that calls and returns stay balanced. The return thunk jump is
already injected due to the earlier retbleed mitigations.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111148.615413406@infradead.org
Thomas Gleixner authored and Peter Zijlstra committed Oct 17, 2022
1 parent 396e0b8 commit b2e9dfe
Showing 3 changed files with 48 additions and 9 deletions.
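
In essence, the change makes the JIT copy the call depth accounting thunk template in front of every CALL it emits and fold the emitted byte count into the rel32 displacement. A minimal sketch of that pattern, not the kernel code (the template contents, sizes, and helper names below are illustrative):

#include <stdint.h>
#include <string.h>

#define TMPL_SIZE 16                              /* stand-in for SKL_TMPL_SIZE    */
static const uint8_t accounting_tmpl[TMPL_SIZE];  /* stand-in thunk template bytes */

/* Copy the accounting template into the JIT buffer, report bytes emitted. */
static int emit_call_depth_accounting(uint8_t **pprog)
{
	memcpy(*pprog, accounting_tmpl, TMPL_SIZE);
	*pprog += TMPL_SIZE;
	return TMPL_SIZE;
}

/* Emit "call rel32"; ip is the runtime address the CALL itself occupies. */
static void emit_call_rel32(uint8_t **pprog, void *func, void *ip)
{
	int32_t rel = (int32_t)((uint8_t *)func - ((uint8_t *)ip + 5));

	*(*pprog)++ = 0xE8;
	memcpy(*pprog, &rel, sizeof(rel));
	*pprog += sizeof(rel);
}

/* Accounting-aware call: the CALL lands TMPL_SIZE bytes after "ip". */
static void emit_accounted_call(uint8_t **pprog, void *func, void *ip)
{
	int offs = emit_call_depth_accounting(pprog);

	emit_call_rel32(pprog, func, (uint8_t *)ip + offs);
}

/* Example: emit accounting plus a call into a scratch buffer. */
static void dummy_target(void) { }

int main(void)
{
	uint8_t image[64], *prog = image;

	emit_accounted_call(&prog, (void *)dummy_target, image);
	return (int)(prog - image);   /* bytes emitted: TMPL_SIZE + 5 */
}
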
6 changes: 6 additions & 0 deletions arch/x86/include/asm/alternative.h
@@ -93,6 +93,7 @@ extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
extern bool is_callthunk(void *addr);
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -106,6 +107,11 @@ static __always_inline bool is_callthunk(void *addr)
{
return false;
}
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
void *func)
{
return 0;
}
#endif

#ifdef CONFIG_SMP
19 changes: 19 additions & 0 deletions arch/x86/kernel/callthunks.c
@@ -306,6 +306,25 @@ bool is_callthunk(void *addr)
return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

#ifdef CONFIG_BPF_JIT
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;
void *tmpl = skl_call_thunk_template;

if (!thunks_initialized)
return 0;

/* Is function call target a thunk? */
if (is_callthunk(func))
return 0;

memcpy(*pprog, tmpl, tmpl_size);
*pprog += tmpl_size;
return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
struct module *mod)
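
The helper's return value is what keeps the JIT's relative displacements honest: the accounting bytes sit in front of the CALL, so the address used to compute the rel32 has to move forward by the same amount (plus the 7-byte mov in the tail-call-reachable path of the bpf_jit_comp.c change below). A rough, self-contained illustration with made-up sizes rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define CALL_INSN_SIZE   5    /* E8 + rel32                             */
#define ACCOUNTING_BYTES 10   /* stand-in for the accounting thunk size */

/* rel32 for "call func" when the CALL instruction starts at ip. */
static int32_t call_rel32(uint64_t ip, uint64_t func)
{
	return (int32_t)(func - (ip + CALL_INSN_SIZE));
}

int main(void)
{
	uint64_t insn_start = 0x1000;   /* image + addrs[i - 1] in the JIT */
	uint64_t func       = 0x9000;   /* call target                     */

	/* Without accounting the CALL starts right at insn_start ...      */
	printf("plain rel32:     %d\n", call_rel32(insn_start, func));
	/* ... with accounting it is pushed ACCOUNTING_BYTES further on,
	 * which is why the emitter adds the returned size ("offs").       */
	printf("accounted rel32: %d\n", call_rel32(insn_start + ACCOUNTING_BYTES, func));
	return 0;
}
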
32 changes: 23 additions & 9 deletions arch/x86/net/bpf_jit_comp.c
@@ -340,6 +340,13 @@ static int emit_call(u8 **pprog, void *func, void *ip)
return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
OPTIMIZER_HIDE_VAR(func);
x86_call_depth_emit_accounting(pprog, func);
return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE9);
@@ -1436,19 +1443,26 @@ st: if (is_imm8(insn->off))
break;

/* call */
case BPF_JMP | BPF_CALL:
case BPF_JMP | BPF_CALL: {
int offs;

func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
if (!imm32)
return -EINVAL;
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
} else {
if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
if (!imm32)
return -EINVAL;
offs = x86_call_depth_emit_accounting(&prog, func);
}
if (emit_call(&prog, func, image + addrs[i - 1] + offs))
return -EINVAL;
break;
}

case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
Expand Down Expand Up @@ -1854,7 +1868,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

if (emit_call(&prog, enter, prog))
if (emit_rsb_call(&prog, enter, prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1875,7 +1889,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);
/* call JITed bpf program or interpreter */
if (emit_call(&prog, p->bpf_func, prog))
if (emit_rsb_call(&prog, p->bpf_func, prog))
return -EINVAL;

/*
@@ -1899,7 +1913,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
if (emit_call(&prog, exit, prog))
if (emit_rsb_call(&prog, exit, prog))
return -EINVAL;

*pprog = prog;
@@ -2147,7 +2161,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_call(&prog, __bpf_tramp_enter, prog)) {
if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2179,7 +2193,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT2(0xff, 0xd0); /* call *rax */
} else {
/* call original function */
if (emit_call(&prog, orig_call, prog)) {
if (emit_rsb_call(&prog, orig_call, prog)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2223,7 +2237,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
im->ip_epilogue = prog;
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_call(&prog, __bpf_tramp_exit, prog)) {
if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
ret = -EINVAL;
goto cleanup;
}