Merge branch 'bpf-fix-array-bounds-error-with-may_goto-and-add-selftest'
Jiayuan Chen says:

====================
bpf: Fix array bounds error with may_goto and add selftest

Syzbot caught an array out-of-bounds bug [1]. It turns out that when a
BPF program goes through do_misc_fixups(), the fixup can add an extra 8
bytes to its stack, which eventually pushes stack_depth past 512.
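
For illustration (editor's sketch, not part of the original series), this is
roughly how the interpreter entry was picked before the fix: interpreters[]
in kernel/bpf/core.c has one entry per 32-byte stack bucket up to
MAX_BPF_STACK (512 bytes, i.e. 16 entries), so the extra 8 bytes push the
computed index one past the end of the array.

  /* Sketch of the pre-fix logic in bpf_prog_select_func(); see the removed
   * line in the core.c diff below.  interpreters[] covers stack sizes
   * 32..512 in 32-byte steps, i.e. 16 entries.
   */
  u32 stack_depth = 512 + 8;                      /* 8 extra bytes from the may_goto fixup */
  u32 idx = (round_up(stack_depth, 32) / 32) - 1; /* round_up(520, 32) / 32 - 1 == 16 */

  fp->bpf_func = interpreters[idx];               /* out of bounds: valid indices are 0..15 */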

I was able to reproduce this issue probabilistically by enabling
CONFIG_UBSAN=y and disabling CONFIG_BPF_JIT_ALWAYS_ON with the selftest
I provide in the second patch (although it doesn't happen every time; I
didn't dig deeper into why UBSAN behaves this way).

Furthermore, if I set /proc/sys/net/core/bpf_jit_enable to 0 to disable
the JIT, a panic occurs, and the cause is the same: bpf_func is assigned
an incorrect address.
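
The splat below is that bad pointer being called.  A minimal sketch of the
failing step, simplified from the kernel's bpf_prog_run() path (assumption:
not the exact call chain, which goes through the dispatcher):

  /* With the JIT disabled nothing ever overwrites fp->bpf_func, so the
   * first bpf_prog_test_run() call jumps straight through the garbage
   * pointer read from past the end of interpreters[].
   */
  static u32 run_prog_sketch(const struct bpf_prog *prog, const void *ctx)
  {
          return prog->bpf_func(ctx, prog->insnsi);
  }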

[---[ end trace ]---
[Oops: general protection fault, probably for non-canonical address
0x100f0e0e0d090808: 0000 [#1] PREEMPT SMP NOPTI
[Tainted: [W]=WARN, [O]=OOT_MODULE
[RIP: 0010:bpf_test_run+0x1d2/0x360
[RSP: 0018:ffffafc7955178a0 EFLAGS: 00010246
[RAX: 100f0e0e0d090808 RBX: ffff8e9fdb2c4100 RCX: 0000000000000018
[RDX: 00000000002b5b18 RSI: ffffafc780497048 RDI: ffff8ea04d601700
[RBP: ffffafc780497000 R08: ffffafc795517a0c R09: 0000000000000000
[R10: 0000000000000000 R11: fefefefefefefeff R12: ffff8ea04d601700
[R13: ffffafc795517928 R14: ffffafc795517928 R15: 0000000000000000
[CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[CR2: 00007f181c064648 CR3: 00000001aa2be003 CR4: 0000000000770ef0
[DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
[PKRU: 55555554
[Call Trace:
[ <TASK>
[ ? die_addr+0x36/0x90
[ ? exc_general_protection+0x237/0x430
[ ? asm_exc_general_protection+0x26/0x30
[ ? bpf_test_run+0x1d2/0x360
[ ? bpf_test_run+0x10d/0x360
[ ? __link_object+0x12a/0x1e0
[ ? slab_build_skb+0x23/0x130
[ ? kmem_cache_alloc_noprof+0x2ea/0x3f0
[ ? sk_prot_alloc+0xc2/0x120
[ bpf_prog_test_run_skb+0x21b/0x590
[ __sys_bpf+0x340/0xa80
[ __x64_sys_bpf+0x1e/0x30
---
v2 -> v3:
Cleaned up some naming and the conditional-check logic.
https://lore.kernel.org/bpf/20250213131214.164982-1-mrpre@163.com/T/

v1 -> v2:
Directly reject loading programs with a stack size greater than 512 when
the JIT is disabled. (Suggested by Alexei Starovoitov)
https://lore.kernel.org/bpf/20250212135251.85487-1-mrpre@163.com/T/
====================

Link: https://patch.msgid.link/20250214091823.46042-1-mrpre@163.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov committed Feb 15, 2025
2 parents a458544 + 72266ee commit 772b9b1
Showing 5 changed files with 102 additions and 4 deletions.
19 changes: 15 additions & 4 deletions kernel/bpf/core.c
@@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
insn->code = BPF_JMP | BPF_CALL_ARGS;
}
#endif
#else
#endif

static unsigned int __bpf_prog_ret0_warn(const void *ctx,
const struct bpf_insn *insn)
{
/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
* is not working properly, so warn about it!
* is not working properly, or interpreter is being used when
* prog->jit_requested is not 0, so warn about it!
*/
WARN_ON_ONCE(1);
return 0;
}
#endif

bool bpf_prog_map_compatible(struct bpf_map *map,
const struct bpf_prog *fp)
@@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
u32 idx = (round_up(stack_depth, 32) / 32) - 1;

fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
* But for non-JITed programs, we don't need bpf_func, so no bounds
* check needed.
*/
if (!fp->jit_requested &&
!WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
fp->bpf_func = interpreters[idx];
} else {
fp->bpf_func = __bpf_prog_ret0_warn;
}
#else
fp->bpf_func = __bpf_prog_ret0_warn;
#endif
7 changes: 7 additions & 0 deletions kernel/bpf/verifier.c
@@ -21903,6 +21903,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
if (subprogs[cur_subprog + 1].start == i + delta + 1) {
subprogs[cur_subprog].stack_depth += stack_depth_extra;
subprogs[cur_subprog].stack_extra = stack_depth_extra;

stack_depth = subprogs[cur_subprog].stack_depth;
if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) {
verbose(env, "stack size %d(extra %d) is too large\n",
stack_depth, stack_depth_extra);
return -EINVAL;
}
cur_subprog++;
stack_depth = subprogs[cur_subprog].stack_depth;
stack_depth_extra = 0;
2 changes: 2 additions & 0 deletions tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -135,6 +135,8 @@
#define __arch_arm64 __arch("ARM64")
#define __arch_riscv64 __arch("RISCV64")
#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps))))
#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited")))
#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited")))

/* Define common capabilities tested using __caps_unpriv */
#define CAP_NET_ADMIN 12
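
Not part of the patch -- a hypothetical example of how the two new tags are
meant to be used (the real users are the may_goto tests in
verifier_stack_ptr.c below); test_loader skips the program unless the
runtime JIT state matches the tag:

  /* Hypothetical usage sketch; assumes the usual selftest boilerplate
   * (#include "bpf_misc.h" etc.).  With __load_if_JITed(), test_loader
   * only loads and runs this program when the BPF JIT is enabled.
   */
  SEC("socket")
  __description("example: run only when the JIT is enabled")
  __load_if_JITed()
  __success __retval(0)
  __naked void example_requires_jit(void)
  {
          asm volatile ("r0 = 0; exit;" ::: __clobber_all);
  }
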
52 changes: 52 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_stack_ptr.c
@@ -481,4 +481,56 @@ l1_%=: r0 = 42; \
: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK stack size > 512")
__failure __msg("invalid write to stack R1 off=-520 size=8")
__naked void stack_check_size_gt_512(void)
{
asm volatile (" \
r1 = r10; \
r1 += -520; \
r0 = 42; \
*(u64*)(r1 + 0) = r0; \
exit; \
" ::: __clobber_all);
}

#ifdef __BPF_FEATURE_MAY_GOTO
SEC("socket")
__description("PTR_TO_STACK stack size 512 with may_goto with jit")
__load_if_JITed()
__success __retval(42)
__naked void stack_check_size_512_with_may_goto_jit(void)
{
asm volatile (" \
r1 = r10; \
r1 += -512; \
r0 = 42; \
*(u32*)(r1 + 0) = r0; \
may_goto l0_%=; \
r2 = 100; \
l0_%=: \
exit; \
" ::: __clobber_all);
}

SEC("socket")
__description("PTR_TO_STACK stack size 512 with may_goto without jit")
__load_if_no_JITed()
__failure __msg("stack size 520(extra 8) is too large")
__naked void stack_check_size_512_with_may_goto(void)
{
asm volatile (" \
r1 = r10; \
r1 += -512; \
r0 = 42; \
*(u32*)(r1 + 0) = r0; \
may_goto l0_%=; \
r2 = 100; \
l0_%=: \
exit; \
" ::: __clobber_all);
}
#endif

char _license[] SEC("license") = "GPL";
26 changes: 26 additions & 0 deletions tools/testing/selftests/bpf/test_loader.c
@@ -37,6 +37,7 @@
#define TEST_TAG_JITED_PFX "comment:test_jited="
#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv="
#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
@@ -55,6 +56,11 @@ enum mode {
UNPRIV = 2
};

enum load_mode {
JITED = 1 << 0,
NO_JITED = 1 << 1,
};

struct expect_msg {
const char *substr; /* substring match */
regex_t regex;
@@ -87,6 +93,7 @@ struct test_spec {
int prog_flags;
int mode_mask;
int arch_mask;
int load_mask;
bool auxiliary;
bool valid;
};
@@ -406,6 +413,7 @@ static int parse_test_spec(struct test_loader *tester,
bool collect_jit = false;
int func_id, i, err = 0;
u32 arch_mask = 0;
u32 load_mask = 0;
struct btf *btf;
enum arch arch;

@@ -580,10 +588,22 @@ if (err)
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
} else if (str_has_pfx(s, TEST_TAG_LOAD_MODE_PFX)) {
val = s + sizeof(TEST_TAG_LOAD_MODE_PFX) - 1;
if (strcmp(val, "jited") == 0) {
load_mask = JITED;
} else if (strcmp(val, "no_jited") == 0) {
load_mask = NO_JITED;
} else {
PRINT_FAIL("bad load spec: '%s'", val);
err = -EINVAL;
goto cleanup;
}
}
}

spec->arch_mask = arch_mask ?: -1;
spec->load_mask = load_mask ?: (JITED | NO_JITED);

if (spec->mode_mask == 0)
spec->mode_mask = PRIV;
@@ -928,6 +948,7 @@ void run_subtest(struct test_loader *tester,
bool unpriv)
{
struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
int current_runtime = is_jit_enabled() ? JITED : NO_JITED;
struct bpf_program *tprog = NULL, *tprog_iter;
struct bpf_link *link, *links[32] = {};
struct test_spec *spec_iter;
@@ -946,6 +967,11 @@
return;
}

if ((current_runtime & spec->load_mask) == 0) {
test__skip();
return;
}

if (unpriv) {
if (!can_execute_unpriv(tester, spec)) {
test__skip();
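
For reference (an assumption-labeled sketch, not part of the patch), this is
roughly what the is_jit_enabled() helper used above is expected to report;
the real helper lives in the selftests' testing_helpers.c, and the new
load_mode gate simply compares its result against the test's
JITED/NO_JITED mask:

  #include <stdio.h>
  #include <stdbool.h>

  /* Sketch: the runtime JIT state is taken from the bpf_jit_enable sysctl,
   * the same knob the cover letter toggles to reproduce the bug.
   */
  static bool is_jit_enabled_sketch(void)
  {
          FILE *f = fopen("/proc/sys/net/core/bpf_jit_enable", "r");
          int enabled = 0;

          if (f) {
                  if (fscanf(f, "%d", &enabled) != 1)
                          enabled = 0;
                  fclose(f);
          }
          return enabled != 0;
  }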
