From 6d26d985eeda89faedabbcf6607c37454b9691b0 Mon Sep 17 00:00:00 2001
From: Florian Westphal <fw@strlen.de>
Date: Sat, 22 Apr 2023 09:35:44 +0200
Subject: [PATCH 1/5] bpf: fix link failure with NETFILTER=y INET=n

Explicitly check if NETFILTER_BPF_LINK is enabled, else configs that
have NETFILTER=y but CONFIG_INET=n fail to link:

> kernel/bpf/syscall.o: undefined reference to `netfilter_prog_ops'
> kernel/bpf/verifier.o: undefined reference to `netfilter_verifier_ops'

Fixes: fd9c663b9ad6 ("bpf: minimal support for programs hooked into netfilter framework")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/oe-kbuild-all/202304220903.fRZTJtxe-lkp@intel.com/
Signed-off-by: Florian Westphal <fw@strlen.de>
Link: https://lore.kernel.org/r/20230422073544.17634-1-fw@strlen.de
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/bpf_types.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 39a999abb0ceb..fc0d6f32c6876 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -79,7 +79,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
 #endif
 BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
	      void *, void *)
-#ifdef CONFIG_NETFILTER
+#ifdef CONFIG_NETFILTER_BPF_LINK
 BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
	      struct bpf_nf_ctx, struct bpf_nf_ctx)
 #endif
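For context on the failure mode: include/linux/bpf_types.h is an X-macro
header that kernel/bpf/syscall.c includes to build its per-program-type
dispatch tables, so every entry whose #ifdef is satisfied must have its
<name>_prog_ops symbol linked in somewhere. A simplified sketch of that
consumption pattern follows (illustrative only; the real table in
kernel/bpf/syscall.c also defines stubs for the map and link entries):

  /* Sketch: syscall.c redefines BPF_PROG_TYPE before including the
   * header, turning each entry into a table slot that references the
   * type's ops symbol.
   */
  static const struct bpf_prog_ops * const bpf_prog_types[] = {
  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
  #include <linux/bpf_types.h>
  #undef BPF_PROG_TYPE
  };

With the entry guarded by CONFIG_NETFILTER, the table referenced
netfilter_prog_ops even when nf_bpf_link.o, which defines that symbol and
is only built for CONFIG_NETFILTER_BPF_LINK, was absent, producing exactly
the undefined references quoted above.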
From 35150203e30b52d657165e325e3abc3b29c2086d Mon Sep 17 00:00:00 2001
From: Eduard Zingerman <eddyz87@gmail.com>
Date: Fri, 21 Apr 2023 23:45:14 +0300
Subject: [PATCH 2/5] selftests/bpf: verifier/prevent_map_lookup converted to inline assembly

Test verifier/prevent_map_lookup is automatically converted to use
inline assembly. It was part of a series [1] but could not be applied
because another patch from that series had to be withheld.

[1] https://lore.kernel.org/bpf/20230421174234.2391278-1-eddyz87@gmail.com/

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421204514.2450907-1-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../selftests/bpf/prog_tests/verifier.c       |  2 +
 .../bpf/progs/verifier_prevent_map_lookup.c   | 61 +++++++++++++++++++
 .../bpf/verifier/prevent_map_lookup.c         | 29 ---------
 3 files changed, 63 insertions(+), 29 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c
 delete mode 100644 tools/testing/selftests/bpf/verifier/prevent_map_lookup.c

diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index c8bab8b1a6a45..2497716ee3790 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -42,6 +42,7 @@
 #include "verifier_meta_access.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
+#include "verifier_prevent_map_lookup.skel.h"
 #include "verifier_raw_stack.skel.h"
 #include "verifier_raw_tp_writable.skel.h"
 #include "verifier_reg_equal.skel.h"
@@ -140,6 +141,7 @@ void test_verifier_masking(void) { RUN(verifier_masking); }
 void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
 void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
 void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c
new file mode 100644
index 0000000000000..8d27c780996f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/prevent_map_lookup.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} map_stacktrace SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 8);
+	__uint(key_size, sizeof(int));
+	__array(values, void (void));
+} map_prog2_socket SEC(".maps");
+
+SEC("perf_event")
+__description("prevent map lookup in stack trace")
+__failure __msg("cannot pass map_type 7 into func bpf_map_lookup_elem")
+__naked void map_lookup_in_stack_trace(void)
+{
+	asm volatile ("					\
+	r1 = 0;						\
+	*(u64*)(r10 - 8) = r1;				\
+	r2 = r10;					\
+	r2 += -8;					\
+	r1 = %[map_stacktrace] ll;			\
+	call %[bpf_map_lookup_elem];			\
+	exit;						\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_stacktrace)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("prevent map lookup in prog array")
+__failure __msg("cannot pass map_type 3 into func bpf_map_lookup_elem")
+__failure_unpriv
+__naked void map_lookup_in_prog_array(void)
+{
+	asm volatile ("					\
+	r1 = 0;						\
+	*(u64*)(r10 - 8) = r1;				\
+	r2 = r10;					\
+	r2 += -8;					\
+	r1 = %[map_prog2_socket] ll;			\
+	call %[bpf_map_lookup_elem];			\
+	exit;						\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_prog2_socket)
+	: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
deleted file mode 100644
index fc4e301260f65..0000000000000
--- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-	"prevent map lookup in stack trace",
-	.insns = {
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-	BPF_LD_MAP_FD(BPF_REG_1, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_EXIT_INSN(),
-	},
-	.fixup_map_stacktrace = { 3 },
-	.result = REJECT,
-	.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
-	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
-},
-{
-	"prevent map lookup in prog array",
-	.insns = {
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-	BPF_LD_MAP_FD(BPF_REG_1, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_EXIT_INSN(),
-	},
-	.fixup_prog2 = { 3 },
-	.result = REJECT,
-	.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
-},
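For readers new to the converted style: expectations the macro-based tests
encoded as struct fields (.prog_type, .result, .errstr) become SEC() and
bpf_misc.h annotations (__description, __success/__failure, __msg) on a
__naked function whose body is the BPF assembly. A minimal skeleton of the
pattern is sketched below; the test itself is hypothetical, not part of
the patch:

  // SPDX-License-Identifier: GPL-2.0
  /* Hypothetical example of the converted-test pattern. */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_misc.h"

  SEC("socket")				/* was .prog_type */
  __description("example: trivially accepted program")
  __success				/* was .result = ACCEPT */
  __naked void example_accept(void)
  {
	asm volatile ("r0 = 0; exit;" ::: __clobber_all);
  }

  char _license[] SEC("license") = "GPL";

Such a program is then wired into the test runner with a RUN() wrapper in
prog_tests/verifier.c, which is what the first hunk above adds for the
real test.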
From 7deca5eae83389ca40ac1b1bde96e4af17cca84f Mon Sep 17 00:00:00 2001
From: Dave Marchevsky <davemarchevsky@fb.com>
Date: Mon, 24 Apr 2023 13:43:21 -0700
Subject: [PATCH 3/5] bpf: Disable bpf_refcount_acquire kfunc calls until race conditions are fixed

As reported by Kumar in [0], the shared ownership implementation for
BPF programs has some race conditions which need to be addressed before
it can safely be used. This patch does so in a minimal way instead of
ripping out shared ownership entirely, as proper fixes for the issues
raised will follow ASAP, at which point this patch's commit can be
reverted to re-enable shared ownership.

The patch removes the ability to call bpf_refcount_acquire_impl from
BPF programs. Programs can only bump refcount and obtain a new owning
reference using this kfunc, so removing the ability to call it
effectively disables shared ownership.

Instead of changing success / failure expectations for
bpf_refcount-related selftests, this patch just disables them from
running for now.

[0]: https://lore.kernel.org/bpf/d7hyspcow5wtjcmw4fugdgyp3fwhljwuscp3xyut5qnwivyeru@ysdq543otzv2/

Reported-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230424204321.2680232-1-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/verifier.c                                     | 5 ++++-
 tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c | 2 --
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0d73139ee4d8c..5c4aa393f65a6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10509,7 +10509,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 				verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
 				return -EINVAL;
 			}
-
+			if (rec->refcount_off >= 0) {
+				verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
+				return -EINVAL;
+			}
 			meta->arg_refcount_acquire.btf = reg->btf;
 			meta->arg_refcount_acquire.btf_id = reg->btf_id;
 			break;
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
index 2ab23832062d7..595cbf92bff5f 100644
--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -9,10 +9,8 @@

 void test_refcounted_kptr(void)
 {
-	RUN_TESTS(refcounted_kptr);
 }

 void test_refcounted_kptr_fail(void)
 {
-	RUN_TESTS(refcounted_kptr_fail);
 }

From a0c109dcafb15b8bee187c49fb746779374f60f0 Mon Sep 17 00:00:00 2001
From: Yafang Shao <laoar.shao@gmail.com>
Date: Mon, 24 Apr 2023 16:11:03 +0000
Subject: [PATCH 4/5] bpf: Add __rcu_read_{lock,unlock} into btf id deny list

The tracing recursion prevention mechanism must be protected by rcu;
that leaves __rcu_read_{lock,unlock} unprotected by this mechanism. If
we trace them, recursion will happen. Let's add them into the btf id
deny list.

When CONFIG_PREEMPT_RCU is enabled, it can be reproduced with a simple
bpf program as such:

  SEC("fentry/__rcu_read_lock")
  int fentry_run()
  {
          return 0;
  }

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230424161104.3737-2-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/verifier.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5c4aa393f65a6..fbcf5a4e2fcd4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18671,6 +18671,10 @@ BTF_ID(func, rcu_read_unlock_strict)
 BTF_ID(func, preempt_count_add)
 BTF_ID(func, preempt_count_sub)
 #endif
+#ifdef CONFIG_PREEMPT_RCU
+BTF_ID(func, __rcu_read_lock)
+BTF_ID(func, __rcu_read_unlock)
+#endif
 BTF_SET_END(btf_id_deny)

 static bool can_be_sleepable(struct bpf_prog *prog)
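The reproducer quoted in the message above, fleshed out into a complete
BPF object under the usual selftest conventions (the headers and license
line here are assumptions, not part of the patch):

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Per the commit message: with CONFIG_PREEMPT_RCU, attaching fentry
   * to __rcu_read_lock recursed before this patch, since the recursion
   * prevention mechanism itself relies on RCU. With the deny-list
   * entries, the verifier now rejects attaching to these functions.
   */
  SEC("fentry/__rcu_read_lock")
  int fentry_run()
  {
	return 0;
  }

  char _license[] SEC("license") = "GPL";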
From be7dbd275dc6b911a5b9a22c4f9cb71b2c7fd847 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Mon, 24 Apr 2023 16:51:28 -0700
Subject: [PATCH 5/5] selftests/bpf: avoid mark_all_scalars_precise() trigger in one of iter tests

The iter_pass_iter_ptr_to_subprog subtest relies on the actual array
size being passed as a subprog parameter. Combined with recent fixes to
precision tracking in conditional jumps ([0]), this now causes the
verifier to backtrack all the way to the point where the sum() and
fill() subprogs are called, at which point precision backtracking bails
out and forces all the states to have precise SCALAR registers. This in
turn causes each possible value of i within fill() and sum() to create
a different non-equivalent state, preventing the iterator code from
converging.

For now, change the test to assume a fixed size for the passed-in
array. Once the BPF verifier supports precision tracking across
subprogram calls, these changes will be reverted as unnecessary.

[0] 71b547f56124 ("bpf: Fix incorrect verifier pruning due to missing register precision taints")

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230424235128.1941726-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 tools/testing/selftests/bpf/progs/iters.c | 26 +++++++++++++----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 6b9b3c56f009a..be16143ae292f 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -651,25 +651,29 @@ int iter_stack_array_loop(const void *ctx)
 	return sum;
 }

-static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
+#define ARR_SZ 16
+
+static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul)
 {
-	int *t, i;
+	int *t;
+	__u64 i;

 	while ((t = bpf_iter_num_next(it))) {
 		i = *t;
-		if (i >= n)
+		if (i >= ARR_SZ)
 			break;
 		arr[i] = i * mul;
 	}
 }

-static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
+static __noinline int sum(struct bpf_iter_num *it, int *arr)
 {
-	int *t, i, sum = 0;;
+	int *t, sum = 0;;
+	__u64 i;

 	while ((t = bpf_iter_num_next(it))) {
 		i = *t;
-		if (i >= n)
+		if (i >= ARR_SZ)
 			break;
 		sum += arr[i];
 	}
@@ -681,7 +685,7 @@ SEC("raw_tp")
 __success
 int iter_pass_iter_ptr_to_subprog(const void *ctx)
 {
-	int arr1[16], arr2[32];
+	int arr1[ARR_SZ], arr2[ARR_SZ];
 	struct bpf_iter_num it;
 	int n, sum1, sum2;

@@ -690,25 +694,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
 	/* fill arr1 */
 	n = ARRAY_SIZE(arr1);
 	bpf_iter_num_new(&it, 0, n);
-	fill(&it, arr1, n, 2);
+	fill(&it, arr1, 2);
 	bpf_iter_num_destroy(&it);

 	/* fill arr2 */
 	n = ARRAY_SIZE(arr2);
 	bpf_iter_num_new(&it, 0, n);
-	fill(&it, arr2, n, 10);
+	fill(&it, arr2, 10);
 	bpf_iter_num_destroy(&it);

 	/* sum arr1 */
 	n = ARRAY_SIZE(arr1);
 	bpf_iter_num_new(&it, 0, n);
-	sum1 = sum(&it, arr1, n);
+	sum1 = sum(&it, arr1);
 	bpf_iter_num_destroy(&it);

 	/* sum arr2 */
 	n = ARRAY_SIZE(arr2);
 	bpf_iter_num_new(&it, 0, n);
-	sum2 = sum(&it, arr2, n);
+	sum2 = sum(&it, arr2);
 	bpf_iter_num_destroy(&it);

 	bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
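The workaround generalizes to an idiom worth noting: when a subprog
indexes an array, compare the index against a compile-time constant
inside the subprog rather than against a passed-in size, so the verifier
has a known bound locally and, until it learns to track precision across
subprog calls, need not backtrack through the call site. A standalone
sketch of the idiom under that assumption, with hypothetical names:

  #include <linux/types.h>
  #include <bpf/bpf_helpers.h>

  #define BUF_SZ 16	/* compile-time bound, mirroring ARR_SZ above */

  /* Hypothetical helper: the constant comparison gives the verifier a
   * provable bound on 'i' without precise tracking of a runtime size
   * parameter.
   */
  static __noinline void store_bounded(int *buf, __u64 i, int v)
  {
	if (i >= BUF_SZ)
		return;
	buf[i] = v;	/* i < BUF_SZ is known here */
  }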