diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index e7e1a8acc2997..929a3baca8ef3 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -291,4 +291,107 @@ enum libbpf_tristate {
 /* Helper macro to print out debug messages */
 #define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
 
+struct bpf_iter_num;
+
+extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
+extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
+extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
+
+#ifndef bpf_for_each
+/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
+ * using BPF open-coded iterators without having to write mundane explicit
+ * low-level loop logic. Instead, it provides for()-like generic construct
+ * that can be used pretty naturally. E.g., for some hypothetical cgroup
+ * iterator, you'd write:
+ *
+ * struct cgroup *cg, *parent_cg = <...>;
+ *
+ * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
+ *     bpf_printk("Child cgroup id = %d", cg->cgroup_id);
+ *     if (cg->cgroup_id == 123)
+ *         break;
+ * }
+ *
+ * I.e., it looks almost like high-level for each loop in other languages,
+ * supports continue/break, and is verifiable by BPF verifier.
+ *
+ * For iterating integers, the difference between bpf_for_each(num, i, N, M)
+ * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
+ * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
+ * *`, not just `int`. So for integers bpf_for() is more convenient.
+ *
+ * Note: this macro relies on C99 feature of allowing to declare variables
+ * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
+ * extension: __attribute__((cleanup())), supported by both GCC and
+ * Clang.
+ */
+#define bpf_for_each(type, cur, args...) for ( \
+        /* initialize and define destructor */ \
+        struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \
+                                                    cleanup(bpf_iter_##type##_destroy))), \
+        /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
+                        *___p __attribute__((unused)) = ( \
+                                bpf_iter_##type##_new(&___it, ##args), \
+        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+        /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
+                                (void)bpf_iter_##type##_destroy, (void *)0); \
+        /* iteration and termination check */ \
+        (((cur) = bpf_iter_##type##_next(&___it))); \
+)
+#endif /* bpf_for_each */
+
+#ifndef bpf_for
+/* bpf_for(i, start, end) implements a for()-like looping construct that sets
+ * provided integer variable *i* to values starting from *start* through,
+ * but not including, *end*. It also proves to BPF verifier that *i* belongs
+ * to range [start, end), so this can be used for accessing arrays without
+ * extra checks.
+ *
+ * Note: *start* and *end* are assumed to be expressions with no side effects
+ * and whose values do not change throughout bpf_for() loop execution. They do
+ * not have to be statically known or constant, though.
+ *
+ * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
+ * loop bound variables and cleanup attribute, supported by GCC and Clang.
+ */
+#define bpf_for(i, start, end) for ( \
+        /* initialize and define destructor */ \
+        struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
+                                                 cleanup(bpf_iter_num_destroy))), \
+        /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
+                        *___p __attribute__((unused)) = ( \
+                                bpf_iter_num_new(&___it, (start), (end)), \
+        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+        /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
+                                (void)bpf_iter_num_destroy, (void *)0); \
+        ({ \
+                /* iteration step */ \
+                int *___t = bpf_iter_num_next(&___it); \
+                /* termination and bounds check */ \
+                (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
+        }); \
+)
+#endif /* bpf_for */
+
+#ifndef bpf_repeat
+/* bpf_repeat(N) performs N iterations without exposing iteration number
+ *
+ * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
+ * loop bound variables and cleanup attribute, supported by GCC and Clang.
+ */
+#define bpf_repeat(N) for ( \
+        /* initialize and define destructor */ \
+        struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
+                                                 cleanup(bpf_iter_num_destroy))), \
+        /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
+                        *___p __attribute__((unused)) = ( \
+                                bpf_iter_num_new(&___it, 0, (N)), \
+        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+        /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
+                                (void)bpf_iter_num_destroy, (void *)0); \
+        bpf_iter_num_next(&___it); \
+        /* nothing here */ \
+)
+#endif /* bpf_repeat */
+
 #endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 49cd304ae3bc5..2600d83842528 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -333,6 +333,7 @@ struct reloc_desc {
 		struct {
 			int map_idx;
 			int sym_off;
+			int ext_idx;
 		};
 	};
 };
@@ -4042,7 +4043,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 		else
 			reloc_desc->type = RELO_EXTERN_LD64;
 		reloc_desc->insn_idx = insn_idx;
-		reloc_desc->sym_off = i; /* sym_off stores extern index */
+		reloc_desc->ext_idx = i;
 		return 0;
 	}
 
@@ -5811,8 +5812,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 }
 
 /* base map load ldimm64 special constant, used also for log fixup logic */
-#define MAP_LDIMM64_POISON_BASE 2001000000
-#define MAP_LDIMM64_POISON_PFX "200100"
+#define POISON_LDIMM64_MAP_BASE 2001000000
+#define POISON_LDIMM64_MAP_PFX "200100"
 
 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
 			       int insn_idx, struct bpf_insn *insn,
@@ -5834,12 +5835,36 @@ static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
 		 * invalid func unknown#2001000123
 		 * where lower 123 is map index into obj->maps[] array
 		 */
-		insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
+		insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
 		insn++;
 	}
 }
 
+/* unresolved kfunc call special constant, used also for log fixup logic */
+#define POISON_CALL_KFUNC_BASE 2002000000
+#define POISON_CALL_KFUNC_PFX "2002"
+
+static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
+			      int insn_idx, struct bpf_insn *insn,
+			      int ext_idx, const struct extern_desc *ext)
+{
+	pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
+		 prog->name, relo_idx, insn_idx, ext->name);
+
+	/* we turn kfunc call into invalid helper call with identifiable constant */
+	insn->code = BPF_JMP | BPF_CALL;
+	insn->dst_reg = 0;
+	insn->src_reg = 0;
+	insn->off = 0;
+	/* if this instruction is reachable (not a dead code),
+	 * verifier will complain with something like:
+	 * invalid func unknown#2002000123
+	 * where lower 123 is extern index into obj->externs[] array
+	 */
+	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
+}
+
 /* Relocate data references within program code:
  *  - map references;
  *  - global variable references;
@@ -5885,7 +5910,7 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 			}
 			break;
 		case RELO_EXTERN_LD64:
-			ext = &obj->externs[relo->sym_off];
+			ext = &obj->externs[relo->ext_idx];
 			if (ext->type == EXT_KCFG) {
 				if (obj->gen_loader) {
 					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
@@ -5907,14 +5932,14 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 			}
 			break;
 		case RELO_EXTERN_CALL:
-			ext = &obj->externs[relo->sym_off];
+			ext = &obj->externs[relo->ext_idx];
 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
 			if (ext->is_set) {
 				insn[0].imm = ext->ksym.kernel_btf_id;
 				insn[0].off = ext->ksym.btf_fd_idx;
-			} else { /* unresolved weak kfunc */
-				insn[0].imm = 0;
-				insn[0].off = 0;
+			} else { /* unresolved weak kfunc call */
+				poison_kfunc_call(prog, i, relo->insn_idx, insn,
+						  relo->ext_idx, ext);
 			}
 			break;
 		case RELO_SUBPROG_ADDR:
@@ -7022,13 +7047,13 @@ static void fixup_log_missing_map_load(struct bpf_program *prog,
 					char *buf, size_t buf_sz, size_t log_sz,
 					char *line1, char *line2, char *line3)
 {
-	/* Expected log for failed and not properly guarded CO-RE relocation:
+	/* Expected log for failed and not properly guarded map reference:
 	 * line1 -> 123: (85) call unknown#2001000345
 	 * line2 -> invalid func unknown#2001000345
 	 * line3 -> <anything else or end of buffer>
 	 *
 	 * "123" is the index of the instruction that was poisoned.
-	 * "345" in "2001000345" are map index in obj->maps to fetch map name.
+	 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
 	 */
 	struct bpf_object *obj = prog->obj;
 	const struct bpf_map *map;
@@ -7038,7 +7063,7 @@ static void fixup_log_missing_map_load(struct bpf_program *prog,
 	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
 		return;
 
-	map_idx -= MAP_LDIMM64_POISON_BASE;
+	map_idx -= POISON_LDIMM64_MAP_BASE;
 	if (map_idx < 0 || map_idx >= obj->nr_maps)
 		return;
 	map = &obj->maps[map_idx];
@@ -7051,6 +7076,39 @@ static void fixup_log_missing_map_load(struct bpf_program *prog,
 	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
 }
 
+static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
+					 char *buf, size_t buf_sz, size_t log_sz,
+					 char *line1, char *line2, char *line3)
+{
+	/* Expected log for failed and not properly guarded kfunc call:
+	 * line1 -> 123: (85) call unknown#2002000345
+	 * line2 -> invalid func unknown#2002000345
+	 * line3 -> <anything else or end of buffer>
+	 *
+	 * "123" is the index of the instruction that was poisoned.
+	 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
+	 */
+	struct bpf_object *obj = prog->obj;
+	const struct extern_desc *ext;
+	int insn_idx, ext_idx;
+	char patch[128];
+
+	if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
+		return;
+
+	ext_idx -= POISON_CALL_KFUNC_BASE;
+	if (ext_idx < 0 || ext_idx >= obj->nr_extern)
+		return;
+	ext = &obj->externs[ext_idx];
+
+	snprintf(patch, sizeof(patch),
+		 "%d: <invalid kfunc call>\n"
+		 "kfunc '%s' is referenced but wasn't resolved\n",
+		 insn_idx, ext->name);
+
+	patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
 {
 	/* look for familiar error patterns in last N lines of the log */
@@ -7070,23 +7128,33 @@ static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_s
 		if (!cur_line)
 			return;
 
-		/* failed CO-RE relocation case */
 		if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
 			prev_line = find_prev_line(buf, cur_line);
 			if (!prev_line)
 				continue;
 
+			/* failed CO-RE relocation case */
 			fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
 						   prev_line, cur_line, next_line);
 			return;
-		} else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) {
+		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
 			prev_line = find_prev_line(buf, cur_line);
 			if (!prev_line)
 				continue;
 
+			/* reference to uncreated BPF map */
 			fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
 						   prev_line, cur_line, next_line);
 			return;
+		} else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
+			prev_line = find_prev_line(buf, cur_line);
+			if (!prev_line)
+				continue;
+
+			/* reference to unresolved kfunc */
+			fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
+						     prev_line, cur_line, next_line);
+			return;
 		}
 	}
 }
@@ -7098,7 +7166,7 @@ static int bpf_program_record_relos(struct bpf_program *prog)
 
 	for (i = 0; i < prog->nr_reloc; i++) {
 		struct reloc_desc *relo = &prog->reloc_desc[i];
-		struct extern_desc *ext = &obj->externs[relo->sym_off];
+		struct extern_desc *ext = &obj->externs[relo->ext_idx];
 		int kind;
 
 		switch (relo->type) {
@@ -7536,8 +7604,9 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 	ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
 					kern_btf, kfunc_proto_id);
 	if (ret <= 0) {
-		pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
-			ext->name, local_func_proto_id, kfunc_proto_id);
+		pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
+			ext->name, local_func_proto_id,
+			mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
 		return -EINVAL;
 	}
 
@@ -7571,8 +7640,8 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 	 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
 	 */
 	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
-	pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
-		 ext->name, kfunc_id);
+	pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
+		 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
 	return 0;
 }
 
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
index bc27170bdeb04..dba71d98a2279 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -135,6 +135,35 @@ static void missing_map(void)
 	test_log_fixup__destroy(skel);
 }
 
+static void missing_kfunc(void)
+{
+	char log_buf[8 * 1024];
+	struct test_log_fixup* skel;
+	int err;
+
+	skel = test_log_fixup__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.use_missing_kfunc, true);
+	bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf));
+
+	err = test_log_fixup__load(skel);
+	if (!ASSERT_ERR(err, "load_fail"))
+		goto cleanup;
+
+	ASSERT_HAS_SUBSTR(log_buf,
+			  "0: <invalid kfunc call>\n"
+			  "kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n",
+			  "log_buf");
+
+	if (env.verbosity > VERBOSE_NONE)
+		printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+	test_log_fixup__destroy(skel);
+}
+
 void test_log_fixup(void)
 {
 	if (test__start_subtest("bad_core_relo_trunc_none"))
@@ -147,4 +176,6 @@ void test_log_fixup(void)
 		bad_core_relo_subprog();
 	if (test__start_subtest("missing_map"))
 		missing_map();
+	if (test__start_subtest("missing_kfunc"))
+		missing_kfunc();
 }
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 6e3b4903c5412..3b307de8dab99 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -121,107 +121,4 @@
 /* make it look to compiler like value is read and written */
 #define __sink(expr) asm volatile("" : "+g"(expr))
 
-struct bpf_iter_num;
-
-extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
-extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
-extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
-
-#ifndef bpf_for_each
-/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
- * using BPF open-coded iterators without having to write mundane explicit
- * low-level loop logic. Instead, it provides for()-like generic construct
- * that can be used pretty naturally. E.g., for some hypothetical cgroup
- * iterator, you'd write:
- *
- * struct cgroup *cg, *parent_cg = <...>;
- *
- * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
- *     bpf_printk("Child cgroup id = %d", cg->cgroup_id);
- *     if (cg->cgroup_id == 123)
- *         break;
- * }
- *
- * I.e., it looks almost like high-level for each loop in other languages,
- * supports continue/break, and is verifiable by BPF verifier.
- *
- * For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
- * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
- * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
- * *`, not just `int`. So for integers bpf_for() is more convenient.
- *
- * Note: this macro relies on C99 feature of allowing to declare variables
- * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
- * extension: __attribute__((cleanup())), supported by both GCC and
- * Clang.
- */
-#define bpf_for_each(type, cur, args...) for ( \
-        /* initialize and define destructor */ \
-        struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \
-                                                    cleanup(bpf_iter_##type##_destroy))), \
-        /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
-                        *___p __attribute__((unused)) = ( \
-                                bpf_iter_##type##_new(&___it, ##args), \
-        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-        /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
-                                (void)bpf_iter_##type##_destroy, (void *)0); \
-        /* iteration and termination check */ \
-        (((cur) = bpf_iter_##type##_next(&___it))); \
-)
-#endif /* bpf_for_each */
-
-#ifndef bpf_for
-/* bpf_for(i, start, end) implements a for()-like looping construct that sets
- * provided integer variable *i* to values starting from *start* through,
- * but not including, *end*. It also proves to BPF verifier that *i* belongs
- * to range [start, end), so this can be used for accessing arrays without
- * extra checks.
- *
- * Note: *start* and *end* are assumed to be expressions with no side effects
- * and whose values do not change throughout bpf_for() loop execution. They do
- * not have to be statically known or constant, though.
- *
- * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
- * loop bound variables and cleanup attribute, supported by GCC and Clang.
- */
-#define bpf_for(i, start, end) for ( \
-        /* initialize and define destructor */ \
-        struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
-                                                 cleanup(bpf_iter_num_destroy))), \
-        /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
-                        *___p __attribute__((unused)) = ( \
-                                bpf_iter_num_new(&___it, (start), (end)), \
-        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-        /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
-                                (void)bpf_iter_num_destroy, (void *)0); \
-        ({ \
-                /* iteration step */ \
-                int *___t = bpf_iter_num_next(&___it); \
-                /* termination and bounds check */ \
-                (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
-        }); \
-)
-#endif /* bpf_for */
-
-#ifndef bpf_repeat
-/* bpf_repeat(N) performs N iterations without exposing iteration number
- *
- * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
- * loop bound variables and cleanup attribute, supported by GCC and Clang.
- */
-#define bpf_repeat(N) for ( \
-        /* initialize and define destructor */ \
-        struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
-                                                 cleanup(bpf_iter_num_destroy))), \
-        /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
-                        *___p __attribute__((unused)) = ( \
-                                bpf_iter_num_new(&___it, 0, (N)), \
-        /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-        /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
-                                (void)bpf_iter_num_destroy, (void *)0); \
-        bpf_iter_num_next(&___it); \
-        /* nothing here */ \
-)
-#endif /* bpf_repeat */
-
 #endif
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
index 60450cb0e72ec..1bd48feaaa428 100644
--- a/tools/testing/selftests/bpf/progs/test_log_fixup.c
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -61,4 +61,14 @@ int use_missing_map(const void *ctx)
 	return value != NULL;
 }
 
+extern int bpf_nonexistent_kfunc(void) __ksym __weak;
+
+SEC("?raw_tp/sys_enter")
+int use_missing_kfunc(const void *ctx)
+{
+	bpf_nonexistent_kfunc();
+
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
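---

The open-coded iterator macros above are easiest to see in a small BPF program. The following is only a usage sketch, not taken from this series: the vals[] array, the sum counter, and the program/section names are made up, and it assumes the usual vmlinux.h include plus a kernel that provides the bpf_iter_num kfuncs.

/* illustrative sketch, not part of the patch */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

int vals[16];
int sum;

SEC("raw_tp/sys_enter")
int sum_vals(const void *ctx)
{
	int i;

	/* verifier learns that i is always in [0, 16), so vals[i] below
	 * doesn't need an extra bounds check
	 */
	bpf_for(i, 0, 16)
		sum += vals[i];

	/* fixed number of iterations, iteration counter not exposed */
	bpf_repeat(3)
		bpf_printk("tick");

	return 0;
}

char _license[] SEC("license") = "GPL";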
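For comparison, a sketch of bpf_for_each() with the generic num iterator, where the cursor is an `int *` rather than a plain int with a proven range (again illustrative only, names made up):

/* illustrative sketch, not part of the patch */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("raw_tp/sys_enter")
int sum_range(const void *ctx)
{
	int sum = 0, *v;

	bpf_for_each(num, v, 2, 7) {	/* visits 2, 3, 4, 5, 6 */
		if (*v == 5)
			continue;	/* continue/break work as usual */
		sum += *v;
	}

	bpf_printk("sum without 5 = %d", sum);
	return 0;
}

char _license[] SEC("license") = "GPL";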
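The verifier log fixup above refers to a "not properly guarded kfunc call". A guarded call looks roughly like the sketch below: the kfunc name is hypothetical, and the guard uses bpf_ksym_exists() from bpf_helpers.h (present in recent libbpf), which tests the weak symbol's address; an unresolved weak func ksym resolves to 0, so the poisoned call remains dead code and the program still verifies.

/* illustrative sketch, not part of the patch */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_some_optional_kfunc(void) __ksym __weak;

SEC("?raw_tp/sys_enter")
int use_guarded_kfunc(const void *ctx)
{
	/* if the kernel doesn't provide the kfunc, the branch is dead code
	 * and the verifier never sees the poisoned call
	 */
	if (bpf_ksym_exists(bpf_some_optional_kfunc))
		return bpf_some_optional_kfunc();

	return 0;
}

char _license[] SEC("license") = "GPL";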