From 7e6f3cd89f04a0a577002d5696288b482109d25c Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:53 +0200
Subject: [PATCH 1/8] bpf, x86: Store caller's ip in trampoline stack

Storing the caller's IP in the trampoline's stack. Trampoline programs
can reach the IP at the (ctx - 8) address, so there's no change in the
program's arguments interface.

The IP address is taken from [fp + 8], which is the return address from
the initial 'call fentry' call to the trampoline.

This IP address will be returned via the bpf_get_func_ip helper, which
is added in the following patches.

Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-2-jolsa@kernel.org
---
 arch/x86/net/bpf_jit_comp.c | 19 +++++++++++++++++++
 include/linux/bpf.h         |  5 +++++
 2 files changed, 24 insertions(+)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e835164189f16..c320b3ce7b58f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1951,6 +1951,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	if (flags & BPF_TRAMP_F_CALL_ORIG)
 		stack_size += 8; /* room for return value of orig_call */
 
+	if (flags & BPF_TRAMP_F_IP_ARG)
+		stack_size += 8; /* room for IP address argument */
+
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip patched call instruction and point orig_call to actual
 		 * body of the kernel function.
@@ -1964,6 +1967,22 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
 	EMIT1(0x53);		 /* push rbx */
 
+	if (flags & BPF_TRAMP_F_IP_ARG) {
+		/* Store IP address of the traced function:
+		 * mov rax, QWORD PTR [rbp + 8]
+		 * sub rax, X86_PATCH_SIZE
+		 * mov QWORD PTR [rbp - stack_size], rax
+		 */
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);
+
+		/* Continue with stack_size for regs storage, stack will
+		 * be correctly restored with 'leave' instruction.
+		 */
+		stack_size -= 8;
+	}
+
 	save_regs(m, &prog, nr_args, stack_size);
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a9a4a480a6d0d..94d77dc7ce352 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -579,6 +579,11 @@ struct btf_func_model {
  */
 #define BPF_TRAMP_F_SKIP_FRAME BIT(2)
 
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
  */

From 1e37392cccdea94da635e3c6d16b21865806f619 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:54 +0200
Subject: [PATCH 2/8] bpf: Enable BPF_TRAMP_F_IP_ARG for trampolines with call_get_func_ip

Enabling BPF_TRAMP_F_IP_ARG for trampolines that actually need it.

BPF_TRAMP_F_IP_ARG adds 3 extra instructions to the trampoline code and
is used only by programs with the bpf_get_func_ip helper, which is
added in the following patch and sets the call_get_func_ip bit.

This patch ensures that the BPF_TRAMP_F_IP_ARG flag is used only for
trampolines that have programs with call_get_func_ip set.
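For reference, when BPF_TRAMP_F_IP_ARG is set, the prologue from the
previous patch leaves the trampoline stack laid out roughly as follows
(a sketch for a two-argument function, not generated code):

	/*
	 * rbp + 8:  return address into the traced function,
	 *           i.e. function entry + X86_PATCH_SIZE
	 * rbp + 0:  saved rbp
	 * ...
	 * ctx - 8:  IP of the traced function, stored at
	 *           [rbp - stack_size] by the three new instructions
	 * ctx + 0:  first saved argument
	 * ctx + 8:  second saved argument
	 */

A program's ctx therefore still points at the saved arguments, and the
IP sits one slot below it.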
Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-3-jolsa@kernel.org
---
 include/linux/filter.h  |  3 ++-
 kernel/bpf/trampoline.c | 12 +++++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 472f97074da0e..ba36989f711a3 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -559,7 +559,8 @@ struct bpf_prog {
 				kprobe_override:1, /* Do we override a kprobe? */
 				has_callchain_buf:1, /* callchain buffer allocated? */
 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
-				call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
+				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+				call_get_func_ip:1; /* Do we call get_func_ip() */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
 	u32			len;		/* Number of filter blocks */
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 28a3630c48ee1..b2535acfe9db8 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -172,7 +172,7 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 }
 
 static struct bpf_tramp_progs *
-bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
+bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
 {
 	const struct bpf_prog_aux *aux;
 	struct bpf_tramp_progs *tprogs;
@@ -189,8 +189,10 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
 		*total += tr->progs_cnt[kind];
 		progs = tprogs[kind].progs;
 
-		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
+		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
+			*ip_arg |= aux->prog->call_get_func_ip;
 			*progs++ = aux->prog;
+		}
 	}
 	return tprogs;
 }
@@ -333,9 +335,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	struct bpf_tramp_image *im;
 	struct bpf_tramp_progs *tprogs;
 	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
+	bool ip_arg = false;
 	int err, total;
 
-	tprogs = bpf_trampoline_get_progs(tr, &total);
+	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
 	if (IS_ERR(tprogs))
 		return PTR_ERR(tprogs);
 
@@ -357,6 +360,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
 		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 
+	if (ip_arg)
+		flags |= BPF_TRAMP_F_IP_ARG;
+
 	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
 					  &tr->func.model, flags, tprogs,
 					  tr->func.addr);

From 9b99edcae5c80c8fb9f8e7149bae528c9e610a72 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:55 +0200
Subject: [PATCH 3/8] bpf: Add bpf_get_func_ip helper for tracing programs

Adding bpf_get_func_ip helper for BPF_PROG_TYPE_TRACING programs,
specifically for all trampoline attach types.

The trampoline's caller IP address is stored at the (ctx - 8) address,
so there's no reason to actually call the helper; instead, the verifier
fixes up the call instruction and returns the [ctx - 8] value directly.
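For example, a tracing program can now do the following (a sketch: the
attach target 'do_nanosleep' is only an illustration, bpf_get_func_ip
needs headers generated from this UAPI change, and the real selftests
come later in this series):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("fentry/do_nanosleep")
	int BPF_PROG(trace_enter)
	{
		/* After the verifier's fixup this compiles to a single
		 * load, r0 = *(u64 *)(ctx - 8), not a helper call.
		 */
		__u64 ip = bpf_get_func_ip(ctx);

		bpf_printk("entered function at %lx", ip);
		return 0;
	}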
Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-4-jolsa@kernel.org
---
 include/uapi/linux/bpf.h       |  7 ++++++
 kernel/bpf/verifier.c          | 43 ++++++++++++++++++++++++++++++++++
 kernel/trace/bpf_trace.c       | 15 ++++++++++++
 tools/include/uapi/linux/bpf.h |  7 ++++++
 4 files changed, 72 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3544ec5234f09..89688f16ad60c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4841,6 +4841,12 @@ union bpf_attr {
  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *		own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *	Description
+ *		Get address of the traced function (for tracing programs).
+ *	Return
+ *		Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5016,6 +5022,7 @@ union bpf_attr {
 	FN(timer_set_callback),		\
 	FN(timer_start),		\
 	FN(timer_cancel),		\
+	FN(get_func_ip),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 344ee67265cc7..ceef190514e41 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6161,6 +6161,27 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
 	return err;
 }
 
+static int check_get_func_ip(struct bpf_verifier_env *env)
+{
+	enum bpf_attach_type eatype = env->prog->expected_attach_type;
+	enum bpf_prog_type type = resolve_prog_type(env->prog);
+	int func_id = BPF_FUNC_get_func_ip;
+
+	if (type == BPF_PROG_TYPE_TRACING) {
+		if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
+		    eatype != BPF_MODIFY_RETURN) {
+			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
+				func_id_name(func_id), func_id);
+			return -ENOTSUPP;
+		}
+		return 0;
+	}
+
+	verbose(env, "func %s#%d not supported for program type %d\n",
+		func_id_name(func_id), func_id, type);
+	return -ENOTSUPP;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			     int *insn_idx_p)
 {
@@ -6439,6 +6460,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
 		env->prog->call_get_stack = true;
 
+	if (func_id == BPF_FUNC_get_func_ip) {
+		if (check_get_func_ip(env))
+			return -ENOTSUPP;
+		env->prog->call_get_func_ip = true;
+	}
+
 	if (changes_data)
 		clear_all_pkt_pointers(env);
 	return 0;
@@ -12632,6 +12659,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog;
 	bool expect_blinding = bpf_jit_blinding_enabled(prog);
+	enum bpf_prog_type prog_type = resolve_prog_type(prog);
 	struct bpf_insn *insn = prog->insnsi;
 	const struct bpf_func_proto *fn;
 	const int insn_cnt = prog->len;
@@ -12998,6 +13026,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			continue;
 		}
 
+		/* Implement bpf_get_func_ip inline. */
+		if (prog_type == BPF_PROG_TYPE_TRACING &&
+		    insn->imm == BPF_FUNC_get_func_ip) {
+			/* Load IP address from ctx - 8 */
+			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+			if (!new_prog)
+				return -ENOMEM;
+
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 6c77d25137e06..3e71503eeb23e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -948,6 +948,19 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
+{
+	/* This helper call is inlined by verifier. */
+	return ((u64 *)ctx)[-1];
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
+	.func		= bpf_get_func_ip_tracing,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
 const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1058,6 +1071,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_for_each_map_elem_proto;
 	case BPF_FUNC_snprintf:
 		return &bpf_snprintf_proto;
+	case BPF_FUNC_get_func_ip:
+		return &bpf_get_func_ip_proto_tracing;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3544ec5234f09..89688f16ad60c 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4841,6 +4841,12 @@ union bpf_attr {
  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *		own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *	Description
+ *		Get address of the traced function (for tracing programs).
+ *	Return
+ *		Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5016,6 +5022,7 @@ union bpf_attr {
 	FN(timer_set_callback),		\
 	FN(timer_start),		\
 	FN(timer_cancel),		\
+	FN(get_func_ip),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper

From 9ffd9f3ff7193933dae171740ab70a103d460065 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:56 +0200
Subject: [PATCH 4/8] bpf: Add bpf_get_func_ip helper for kprobe programs

Adding bpf_get_func_ip helper for BPF_PROG_TYPE_KPROBE programs, so
it's now possible to call bpf_get_func_ip from both kprobe and
kretprobe programs.

Taking the caller's address from 'struct kprobe::addr', which is
defined for both kprobe and kretprobe.

Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Reviewed-by: Masami Hiramatsu
Link: https://lore.kernel.org/bpf/20210714094400.396467-5-jolsa@kernel.org
---
 include/uapi/linux/bpf.h       |  2 +-
 kernel/bpf/verifier.c          |  2 ++
 kernel/trace/bpf_trace.c       | 16 ++++++++++++++++
 tools/include/uapi/linux/bpf.h |  2 +-
 4 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 89688f16ad60c..2db6925e04f4f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4844,7 +4844,7 @@ union bpf_attr {
  *
  * u64 bpf_get_func_ip(void *ctx)
  *	Description
-	*		Get address of the traced function (for tracing programs).
+ *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
  */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ceef190514e41..97216f799ba89 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6175,6 +6175,8 @@ static int check_get_func_ip(struct bpf_verifier_env *env)
 			return -ENOTSUPP;
 		}
 		return 0;
+	} else if (type == BPF_PROG_TYPE_KPROBE) {
+		return 0;
 	}
 
 	verbose(env, "func %s#%d not supported for program type %d\n",
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3e71503eeb23e..0b113716bc7a3 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -961,6 +961,20 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
+{
+	struct kprobe *kp = kprobe_running();
+
+	return kp ? (u64) kp->addr : 0;
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
+	.func		= bpf_get_func_ip_kprobe,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
 const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1092,6 +1106,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_override_return:
 		return &bpf_override_return_proto;
 #endif
+	case BPF_FUNC_get_func_ip:
+		return &bpf_get_func_ip_proto_kprobe;
 	default:
 		return bpf_tracing_func_proto(func_id, prog);
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 89688f16ad60c..2db6925e04f4f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4844,7 +4844,7 @@ union bpf_attr {
  *
  * u64 bpf_get_func_ip(void *ctx)
  *	Description
- *		Get address of the traced function (for tracing programs).
+ *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
  */

From 5d8b583d04aedb3bd5f6d227a334c210c7d735f9 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:57 +0200
Subject: [PATCH 5/8] selftests/bpf: Add test for bpf_get_func_ip helper

Adding test for bpf_get_func_ip helper for fentry, fexit, kprobe,
kretprobe and fmod_ret programs.
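The kretprobe case deserves a note: by the time the program runs, the
saved IP no longer points at the probed function, yet the helper still
reports the function's entry, because the previous patch takes the
address from kprobe::addr. A sketch of the pattern the test relies on
(headers as in the selftest source below; 'on_test4_exit' and
'entry_ip' are illustrative names, the actual selftest calls the
program test4):

	__u64 entry_ip = 0;

	SEC("kretprobe/bpf_fentry_test4")
	int BPF_KRETPROBE(on_test4_exit)
	{
		/* PT_REGS_IP(ctx) points into the return path here;
		 * bpf_get_func_ip() returns kp->addr, i.e. the entry of
		 * bpf_fentry_test4, for kprobes and kretprobes alike.
		 */
		entry_ip = bpf_get_func_ip(ctx);
		return 0;
	}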
Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-6-jolsa@kernel.org
---
 .../bpf/prog_tests/get_func_ip_test.c         | 39 ++++++++++++
 .../selftests/bpf/progs/get_func_ip_test.c    | 62 +++++++++++++++++++
 2 files changed, 101 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
 create mode 100644 tools/testing/selftests/bpf/progs/get_func_ip_test.c

diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
new file mode 100644
index 0000000000000..8bb18a8d31a06
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "get_func_ip_test.skel.h"
+
+void test_get_func_ip_test(void)
+{
+	struct get_func_ip_test *skel = NULL;
+	__u32 duration = 0, retval;
+	int err, prog_fd;
+
+	skel = get_func_ip_test__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open_and_load"))
+		return;
+
+	err = get_func_ip_test__attach(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__attach"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(retval, 0, "test_run");
+
+	prog_fd = bpf_program__fd(skel->progs.test5);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+
+	ASSERT_OK(err, "test_run");
+
+	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
+	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
+	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
+	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+
+cleanup:
+	get_func_ip_test__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
new file mode 100644
index 0000000000000..ba3e107b52dd4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_modify_return_test __ksym;
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test1_result = (const void *) addr == &bpf_fentry_test1;
+	return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2, int a)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test2_result = (const void *) addr == &bpf_fentry_test2;
+	return 0;
+}
+
+__u64 test3_result = 0;
+SEC("kprobe/bpf_fentry_test3")
+int test3(struct pt_regs *ctx)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test3_result = (const void *) addr == &bpf_fentry_test3;
+	return 0;
+}
+
+__u64 test4_result = 0;
+SEC("kretprobe/bpf_fentry_test4")
+int BPF_KRETPROBE(test4)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test4_result = (const void *) addr == &bpf_fentry_test4;
+	return 0;
+}
+
+__u64 test5_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(test5, int a, int *b, int ret)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test5_result = (const void *) addr == &bpf_modify_return_test;
+	return ret;
+}

From ac0ed488297a9850b0c285646b7854228368ba6b Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:43:58 +0200
Subject: [PATCH 6/8] libbpf: Add bpf_program__attach_kprobe_opts function

Adding bpf_program__attach_kprobe_opts that does the same as
bpf_program__attach_kprobe, but takes an opts argument.

Currently the opts struct holds just the retprobe bool, but we will add
a new field in the following patch.

The function is not exported, so there's no need to add size to the
struct bpf_program_attach_kprobe_opts for now.

Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-7-jolsa@kernel.org
---
 tools/lib/bpf/libbpf.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 88b99401040c0..d93a6f9408d12 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10346,19 +10346,24 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
 	return pfd;
 }
 
-struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
-					    bool retprobe,
-					    const char *func_name)
+struct bpf_program_attach_kprobe_opts {
+	bool retprobe;
+};
+
+static struct bpf_link*
+bpf_program__attach_kprobe_opts(struct bpf_program *prog,
+				const char *func_name,
+				struct bpf_program_attach_kprobe_opts *opts)
 {
 	char errmsg[STRERR_BUFSIZE];
 	struct bpf_link *link;
 	int pfd, err;
 
-	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
+	pfd = perf_event_open_probe(false /* uprobe */, opts->retprobe, func_name,
 				    0 /* offset */, -1 /* pid */);
 	if (pfd < 0) {
 		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+			prog->name, opts->retprobe ? "kretprobe" : "kprobe", func_name,
 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
 		return libbpf_err_ptr(pfd);
 	}
@@ -10367,23 +10372,34 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
 	if (err) {
 		close(pfd);
 		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+			prog->name, opts->retprobe ? "kretprobe" : "kprobe", func_name,
 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
 		return libbpf_err_ptr(err);
 	}
 	return link;
 }
 
+struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
+					    bool retprobe,
+					    const char *func_name)
+{
+	struct bpf_program_attach_kprobe_opts opts = {
+		.retprobe = retprobe,
+	};
+
+	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
+}
+
 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
 				      struct bpf_program *prog)
 {
+	struct bpf_program_attach_kprobe_opts opts;
 	const char *func_name;
-	bool retprobe;
 
 	func_name = prog->sec_name + sec->len;
-	retprobe = strcmp(sec->sec, "kretprobe/") == 0;
+	opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0;
 
-	return bpf_program__attach_kprobe(prog, retprobe, func_name);
+	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
 }
 
 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,

From a2488b5f483f18e6e34be2a15eb4a79f4a0d8728 Mon Sep 17 00:00:00 2001
From: Alan Maguire
Date: Wed, 14 Jul 2021 11:43:59 +0200
Subject: [PATCH 7/8] libbpf: Allow specification of "kprobe/function+offset"

kprobes can be placed on most instructions in a function, not just
entry, and ftrace and bpftrace support the function+offset notation for
probe placement.
Adding parsing of func_name into func+offset to
bpf_program__attach_kprobe() allows the user to specify

  SEC("kprobe/bpf_fentry_test5+0x6")

...for example, and the offset can be passed to perf_event_open_probe()
to support kprobe attachment.

Signed-off-by: Alan Maguire
Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-8-jolsa@kernel.org
---
 tools/lib/bpf/libbpf.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index d93a6f9408d12..abe6d4842bb0d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10348,6 +10348,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
 
 struct bpf_program_attach_kprobe_opts {
 	bool retprobe;
+	unsigned long offset;
 };
 
 static struct bpf_link*
@@ -10360,7 +10361,7 @@ bpf_program__attach_kprobe_opts(struct bpf_program *prog,
 	int pfd, err;
 
 	pfd = perf_event_open_probe(false /* uprobe */, opts->retprobe, func_name,
-				    0 /* offset */, -1 /* pid */);
+				    opts->offset, -1 /* pid */);
 	if (pfd < 0) {
 		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
 			prog->name, opts->retprobe ? "kretprobe" : "kprobe", func_name,
@@ -10394,12 +10395,31 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
 				      struct bpf_program *prog)
 {
 	struct bpf_program_attach_kprobe_opts opts;
+	unsigned long offset = 0;
+	struct bpf_link *link;
 	const char *func_name;
+	char *func;
+	int n, err;
 
 	func_name = prog->sec_name + sec->len;
 	opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0;
 
-	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
+	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%lx", &func, &offset);
+	if (n < 1) {
+		err = -EINVAL;
+		pr_warn("kprobe name is invalid: %s\n", func_name);
+		return libbpf_err_ptr(err);
+	}
+	if (opts.retprobe && offset != 0) {
+		err = -EINVAL;
+		pr_warn("kretprobes do not support offset specification\n");
+		return libbpf_err_ptr(err);
+	}
+
+	opts.offset = offset;
+	link = bpf_program__attach_kprobe_opts(prog, func, &opts);
+	free(func);
+	return link;
 }
 
 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,

From 8237e75420897a4bf9b38b67cd243331bbd96a01 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 14 Jul 2021 11:44:00 +0200
Subject: [PATCH 8/8] selftests/bpf: Add test for bpf_get_func_ip in kprobe+offset probe

Adding a test for bpf_get_func_ip in a kprobe+offset probe. Because the
offset value is arch-specific, the new test is enabled only for the
x86_64 architecture.
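The expectation encoded by the test is worth spelling out (a sketch:
0x5 matches X86_PATCH_SIZE, the 5-byte call patched in at function
entry on x86_64, so entry + 0x5 is the first instruction past the
patch site; 'test_offset' and 'probed_addr' are illustrative names,
the actual selftest below uses test6/test6_result):

	__u64 probed_addr = 0;

	SEC("kprobe/bpf_fentry_test6+0x5")
	int test_offset(struct pt_regs *ctx)
	{
		/* With an offset kprobe, kprobe::addr is the probed
		 * address, so the helper returns function entry + 0x5,
		 * not the bare entry.
		 */
		probed_addr = bpf_get_func_ip(ctx);
		return 0;
	}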
Signed-off-by: Jiri Olsa
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20210714094400.396467-9-jolsa@kernel.org
---
 .../bpf/prog_tests/get_func_ip_test.c         | 18 ++++++++++++++++--
 .../selftests/bpf/progs/get_func_ip_test.c    | 11 +++++++++++
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
index 8bb18a8d31a06..088b3653610d2 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -8,10 +8,21 @@ void test_get_func_ip_test(void)
 	__u32 duration = 0, retval;
 	int err, prog_fd;
 
-	skel = get_func_ip_test__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open_and_load"))
+	skel = get_func_ip_test__open();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
 		return;
 
+	/* test6 is x86_64 specific because of the instruction
+	 * offset, disabling it for all other archs
+	 */
+#ifndef __x86_64__
+	bpf_program__set_autoload(skel->progs.test6, false);
+#endif
+
+	err = get_func_ip_test__load(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__load"))
+		goto cleanup;
+
 	err = get_func_ip_test__attach(skel);
 	if (!ASSERT_OK(err, "get_func_ip_test__attach"))
 		goto cleanup;
@@ -33,6 +44,9 @@ void test_get_func_ip_test(void)
 	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
 	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
 	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+#ifdef __x86_64__
+	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
+#endif
 
 cleanup:
 	get_func_ip_test__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index ba3e107b52dd4..acd587b6e859d 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -10,6 +10,7 @@ extern const void bpf_fentry_test2 __ksym;
 extern const void bpf_fentry_test3 __ksym;
 extern const void bpf_fentry_test4 __ksym;
 extern const void bpf_modify_return_test __ksym;
+extern const void bpf_fentry_test6 __ksym;
 
 __u64 test1_result = 0;
 SEC("fentry/bpf_fentry_test1")
@@ -60,3 +61,13 @@ int BPF_PROG(test5, int a, int *b, int ret)
 	test5_result = (const void *) addr == &bpf_modify_return_test;
 	return ret;
 }
+
+__u64 test6_result = 0;
+SEC("kprobe/bpf_fentry_test6+0x5")
+int test6(struct pt_regs *ctx)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test6_result = (const void *) addr == &bpf_fentry_test6 + 5;
+	return 0;
+}