perf/bpf: Always use perf callchains if exist
If the perf_event has PERF_SAMPLE_CALLCHAIN, BPF can use it for the stack trace.
The problematic cases like PEBS and IBS are already handled in the PMU driver,
which fills in the callchain info in the sample data.  For the others, we can
call perf_callchain() before invoking the BPF handler.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220908214104.3851807-2-namhyung@kernel.org
Namhyung Kim authored and Peter Zijlstra committed Sep 13, 2022
1 parent 3749d33 commit 16817ad
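
For context, a minimal sketch of the kind of BPF consumer the commit message describes: a SEC("perf_event") program that records stack traces with bpf_get_stackid(). This is illustrative only and not part of the commit; the map layout, program name, and section name are assumptions.

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative sketch, not from this commit: a perf_event BPF program
 * that records stack traces via bpf_get_stackid(). */
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stacks SEC(".maps");

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* With PERF_SAMPLE_CALLCHAIN in attr.sample_type, bpf_get_stackid_pe()
	 * serves this from the sample's callchain; after this commit the kernel
	 * fills that callchain via perf_callchain() when the PMU driver did not
	 * already provide it. */
	long id = bpf_get_stackid(ctx, &stacks, 0);

	if (id < 0)
		return 0;
	/* ... aggregate by stack id in another map ... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
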
Showing 2 changed files with 12 additions and 4 deletions.
4 changes: 2 additions & 2 deletions kernel/bpf/stackmap.c
@@ -338,7 +338,7 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
 	int ret;
 
 	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
-	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
 		return bpf_get_stackid((unsigned long)(ctx->regs),
 				       (unsigned long) map, flags, 0, 0);
 
@@ -506,7 +506,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
 	int err = -EINVAL;
 	__u64 nr_kernel;
 
-	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
 		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
12 changes: 10 additions & 2 deletions kernel/events/core.c
@@ -10000,8 +10000,16 @@ static void bpf_overflow_handler(struct perf_event *event,
 		goto out;
 	rcu_read_lock();
 	prog = READ_ONCE(event->prog);
-	if (prog)
+	if (prog) {
+		if (prog->call_get_stack &&
+		    (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+		    !(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) {
+			data->callchain = perf_callchain(event, regs);
+			data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+		}
+
 		ret = bpf_prog_run(prog, &ctx);
+	}
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
@@ -10027,7 +10035,7 @@ static int perf_event_set_bpf_handler(struct perf_event *event,
 
 	if (event->attr.precise_ip &&
 	    prog->call_get_stack &&
-	    (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) ||
+	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
 	     event->attr.exclude_callchain_kernel ||
 	     event->attr.exclude_callchain_user)) {
 		/*
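
The relaxed check in perf_event_set_bpf_handler() above means a precise (PEBS/IBS) event now only needs PERF_SAMPLE_CALLCHAIN in attr.sample_type, with no exclude_callchain_* bits, for a stack-walking BPF program to be accepted. A user-space sketch under those assumptions; the event configuration and helper name are illustrative, not from this commit:

/* Illustrative sketch: open a precise cycles event with PERF_SAMPLE_CALLCHAIN
 * so a stack-walking BPF program may be attached to it. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_precise_cycles(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.freq = 1;
	attr.sample_freq = 99;
	attr.precise_ip = 2;				/* PEBS/IBS-style precise sampling */
	attr.sample_type = PERF_SAMPLE_CALLCHAIN;	/* required by the check above */

	return syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
		       -1 /* group_fd */, 0 /* flags */);
}

/* After loading the BPF object, attach and enable (prog_fd from libbpf):
 *	ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */
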
