linux/tools/perf/util/hist.c @ 2226d5b · 1190 lines (984 loc) · 27.3 KB
#include "util.h" #include "build-id.h" #include "hist.h" #include "session.h" #include "sort.h" #include <math.h> enum hist_filter { HIST_FILTER__DSO, HIST_FILTER__THREAD, HIST_FILTER__PARENT, }; struct callchain_param callchain_param = { .mode = CHAIN_GRAPH_REL, .min_percent = 0.5 }; u16 hists__col_len(struct hists *self, enum hist_column col) { return self->col_len[col]; } void hists__set_col_len(struct hists *self, enum hist_column col, u16 len) { self->col_len[col] = len; } bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len) { if (len > hists__col_len(self, col)) { hists__set_col_len(self, col, len); return true; } return false; } static void hists__reset_col_len(struct hists *self) { enum hist_column col; for (col = 0; col < HISTC_NR_COLS; ++col) hists__set_col_len(self, col, 0); } static void hists__calc_col_len(struct hists *self, struct hist_entry *h) { u16 len; if (h->ms.sym) hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen); len = thread__comm_len(h->thread); if (hists__new_col_len(self, HISTC_COMM, len)) hists__set_col_len(self, HISTC_THREAD, len + 6); if (h->ms.map) { len = dso__name_len(h->ms.map->dso); hists__new_col_len(self, HISTC_DSO, len); } } static void hist_entry__add_cpumode_period(struct hist_entry *self, unsigned int cpumode, u64 period) { switch (cpumode) { case PERF_RECORD_MISC_KERNEL: self->period_sys += period; break; case PERF_RECORD_MISC_USER: self->period_us += period; break; case PERF_RECORD_MISC_GUEST_KERNEL: self->period_guest_sys += period; break; case PERF_RECORD_MISC_GUEST_USER: self->period_guest_us += period; break; default: break; } } /* * histogram, sorted on item, collects periods */ static struct hist_entry *hist_entry__new(struct hist_entry *template) { size_t callchain_size = symbol_conf.use_callchain ? 
sizeof(struct callchain_root) : 0; struct hist_entry *self = malloc(sizeof(*self) + callchain_size); if (self != NULL) { *self = *template; self->nr_events = 1; if (self->ms.map) self->ms.map->referenced = true; if (symbol_conf.use_callchain) callchain_init(self->callchain); } return self; } static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h) { if (!h->filtered) { hists__calc_col_len(self, h); ++self->nr_entries; } } static u8 symbol__parent_filter(const struct symbol *parent) { if (symbol_conf.exclude_other && parent == NULL) return 1 << HIST_FILTER__PARENT; return 0; } struct hist_entry *__hists__add_entry(struct hists *self, struct addr_location *al, struct symbol *sym_parent, u64 period) { struct rb_node **p = &self->entries.rb_node; struct rb_node *parent = NULL; struct hist_entry *he; struct hist_entry entry = { .thread = al->thread, .ms = { .map = al->map, .sym = al->sym, }, .cpu = al->cpu, .ip = al->addr, .level = al->level, .period = period, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent), }; int cmp; while (*p != NULL) { parent = *p; he = rb_entry(parent, struct hist_entry, rb_node); cmp = hist_entry__cmp(&entry, he); if (!cmp) { he->period += period; ++he->nr_events; goto out; } if (cmp < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } he = hist_entry__new(&entry); if (!he) return NULL; rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, &self->entries); hists__inc_nr_entries(self, he); out: hist_entry__add_cpumode_period(he, al->cpumode, period); return he; } int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { struct sort_entry *se; int64_t cmp = 0; list_for_each_entry(se, &hist_entry__sort_list, list) { cmp = se->se_cmp(left, right); if (cmp) break; } return cmp; } int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) { struct sort_entry *se; int64_t cmp = 0; list_for_each_entry(se, &hist_entry__sort_list, list) { int64_t (*f)(struct hist_entry *, struct hist_entry *); f = se->se_collapse ?: se->se_cmp; cmp = f(left, right); if (cmp) break; } return cmp; } void hist_entry__free(struct hist_entry *he) { free(he); } /* * collapse the histogram */ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; int64_t cmp; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); cmp = hist_entry__collapse(iter, he); if (!cmp) { iter->period += he->period; if (symbol_conf.use_callchain) callchain_merge(iter->callchain, he->callchain); hist_entry__free(he); return false; } if (cmp < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, root); return true; } void hists__collapse_resort(struct hists *self) { struct rb_root tmp; struct rb_node *next; struct hist_entry *n; if (!sort__need_collapse) return; tmp = RB_ROOT; next = rb_first(&self->entries); self->nr_entries = 0; hists__reset_col_len(self); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); rb_erase(&n->rb_node, &self->entries); if (collapse__insert_entry(&tmp, n)) hists__inc_nr_entries(self, n); } self->entries = tmp; } /* * reverse the map, sort on period. 
*/ static void __hists__insert_output_entry(struct rb_root *entries, struct hist_entry *he, u64 min_callchain_hits) { struct rb_node **p = &entries->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; if (symbol_conf.use_callchain) callchain_param.sort(&he->sorted_chain, he->callchain, min_callchain_hits, &callchain_param); while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); if (he->period > iter->period) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, entries); } void hists__output_resort(struct hists *self) { struct rb_root tmp; struct rb_node *next; struct hist_entry *n; u64 min_callchain_hits; min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100); tmp = RB_ROOT; next = rb_first(&self->entries); self->nr_entries = 0; hists__reset_col_len(self); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); rb_erase(&n->rb_node, &self->entries); __hists__insert_output_entry(&tmp, n, min_callchain_hits); hists__inc_nr_entries(self, n); } self->entries = tmp; } static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) { int i; int ret = fprintf(fp, " "); for (i = 0; i < left_margin; i++) ret += fprintf(fp, " "); return ret; } static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, int left_margin) { int i; size_t ret = callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) if (depth_mask & (1 << i)) ret += fprintf(fp, "| "); else ret += fprintf(fp, " "); ret += fprintf(fp, "\n"); return ret; } static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, int depth_mask, int period, u64 total_samples, u64 hits, int left_margin) { int i; size_t ret = 0; ret += callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) { if (depth_mask & (1 << i)) ret += fprintf(fp, "|"); else ret += fprintf(fp, " "); if (!period && i == depth - 1) { double percent; percent = hits * 100.0 / total_samples; ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); } else ret += fprintf(fp, "%s", " "); } if (chain->ms.sym) ret += fprintf(fp, "%s\n", chain->ms.sym->name); else ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); return ret; } static struct symbol *rem_sq_bracket; static struct callchain_list rem_hits; static void init_rem_hits(void) { rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); if (!rem_sq_bracket) { fprintf(stderr, "Not enough memory to display remaining hits\n"); return; } strcpy(rem_sq_bracket->name, "[...]"); rem_hits.ms.sym = rem_sq_bracket; } static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, u64 total_samples, int depth, int depth_mask, int left_margin) { struct rb_node *node, *next; struct callchain_node *child; struct callchain_list *chain; int new_depth_mask = depth_mask; u64 new_total; u64 remaining; size_t ret = 0; int i; uint entries_printed = 0; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = self->children_hit; else new_total = total_samples; remaining = new_total; node = rb_first(&self->rb_root); while (node) { u64 cumul; child = rb_entry(node, struct callchain_node, rb_node); cumul = cumul_hits(child); remaining -= cumul; /* * The depth mask manages the output of pipes that show * the depth. We don't want to keep the pipes of the current * level for the last child of this depth. * Except if we have remaining filtered hits. 
They will * supersede the last child */ next = rb_next(node); if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) new_depth_mask &= ~(1 << (depth - 1)); /* * But we keep the older depth mask for the line separator * to keep the level link until we reach the last child */ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, left_margin); i = 0; list_for_each_entry(chain, &child->val, list) { ret += ipchain__fprintf_graph(fp, chain, depth, new_depth_mask, i++, new_total, cumul, left_margin); } ret += __callchain__fprintf_graph(fp, child, new_total, depth + 1, new_depth_mask | (1 << depth), left_margin); node = next; if (++entries_printed == callchain_param.print_limit) break; } if (callchain_param.mode == CHAIN_GRAPH_REL && remaining && remaining != new_total) { if (!rem_sq_bracket) return ret; new_depth_mask &= ~(1 << (depth - 1)); ret += ipchain__fprintf_graph(fp, &rem_hits, depth, new_depth_mask, 0, new_total, remaining, left_margin); } return ret; } static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, u64 total_samples, int left_margin) { struct callchain_list *chain; bool printed = false; int i = 0; int ret = 0; u32 entries_printed = 0; list_for_each_entry(chain, &self->val, list) { if (!i++ && sort__first_dimension == SORT_SYM) continue; if (!printed) { ret += callchain__fprintf_left_margin(fp, left_margin); ret += fprintf(fp, "|\n"); ret += callchain__fprintf_left_margin(fp, left_margin); ret += fprintf(fp, "---"); left_margin += 3; printed = true; } else ret += callchain__fprintf_left_margin(fp, left_margin); if (chain->ms.sym) ret += fprintf(fp, " %s\n", chain->ms.sym->name); else ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); if (++entries_printed == callchain_param.print_limit) break; } ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); return ret; } static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, u64 total_samples) { struct callchain_list *chain; size_t ret = 0; if (!self) return 0; ret += callchain__fprintf_flat(fp, self->parent, total_samples); list_for_each_entry(chain, &self->val, list) { if (chain->ip >= PERF_CONTEXT_MAX) continue; if (chain->ms.sym) ret += fprintf(fp, " %s\n", chain->ms.sym->name); else ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); } return ret; } static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples, int left_margin) { struct rb_node *rb_node; struct callchain_node *chain; size_t ret = 0; u32 entries_printed = 0; rb_node = rb_first(&self->sorted_chain); while (rb_node) { double percent; chain = rb_entry(rb_node, struct callchain_node, rb_node); percent = chain->hit * 100.0 / total_samples; switch (callchain_param.mode) { case CHAIN_FLAT: ret += percent_color_fprintf(fp, " %6.2f%%\n", percent); ret += callchain__fprintf_flat(fp, chain, total_samples); break; case CHAIN_GRAPH_ABS: /* Falldown */ case CHAIN_GRAPH_REL: ret += callchain__fprintf_graph(fp, chain, total_samples, left_margin); case CHAIN_NONE: default: break; } ret += fprintf(fp, "\n"); if (++entries_printed == callchain_param.print_limit) break; rb_node = rb_next(rb_node); } return ret; } int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, struct hists *hists, struct hists *pair_hists, bool show_displacement, long displacement, bool color, u64 session_total) { struct sort_entry *se; u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; u64 nr_events; const char *sep = symbol_conf.field_sep; int 
ret; if (symbol_conf.exclude_other && !self->parent) return 0; if (pair_hists) { period = self->pair ? self->pair->period : 0; nr_events = self->pair ? self->pair->nr_events : 0; total = pair_hists->stats.total_period; period_sys = self->pair ? self->pair->period_sys : 0; period_us = self->pair ? self->pair->period_us : 0; period_guest_sys = self->pair ? self->pair->period_guest_sys : 0; period_guest_us = self->pair ? self->pair->period_guest_us : 0; } else { period = self->period; nr_events = self->nr_events; total = session_total; period_sys = self->period_sys; period_us = self->period_us; period_guest_sys = self->period_guest_sys; period_guest_us = self->period_guest_us; } if (total) { if (color) ret = percent_color_snprintf(s, size, sep ? "%.2f" : " %6.2f%%", (period * 100.0) / total); else ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%", (period * 100.0) / total); if (symbol_conf.show_cpu_utilization) { ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_sys * 100.0) / total); ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_us * 100.0) / total); if (perf_guest) { ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_guest_sys * 100.0) / total); ret += percent_color_snprintf(s + ret, size - ret, sep ? "%.2f" : " %6.2f%%", (period_guest_us * 100.0) / total); } } } else ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); if (symbol_conf.show_nr_samples) { if (sep) ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); else ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); } if (pair_hists) { char bf[32]; double old_percent = 0, new_percent = 0, diff; if (total > 0) old_percent = (period * 100.0) / total; if (session_total > 0) new_percent = (self->period * 100.0) / session_total; diff = new_percent - old_percent; if (fabs(diff) >= 0.01) snprintf(bf, sizeof(bf), "%+4.2F%%", diff); else snprintf(bf, sizeof(bf), " "); if (sep) ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf); else ret += snprintf(s + ret, size - ret, "%11.11s", bf); if (show_displacement) { if (displacement) snprintf(bf, sizeof(bf), "%+4ld", displacement); else snprintf(bf, sizeof(bf), " "); if (sep) ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf); else ret += snprintf(s + ret, size - ret, "%6.6s", bf); } } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; ret += snprintf(s + ret, size - ret, "%s", sep ?: " "); ret += se->se_snprintf(self, s + ret, size - ret, hists__col_len(hists, se->se_width_idx)); } return ret; } int hist_entry__fprintf(struct hist_entry *self, struct hists *hists, struct hists *pair_hists, bool show_displacement, long displacement, FILE *fp, u64 session_total) { char bf[512]; hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists, show_displacement, displacement, true, session_total); return fprintf(fp, "%s\n", bf); } static size_t hist_entry__fprintf_callchain(struct hist_entry *self, struct hists *hists, FILE *fp, u64 session_total) { int left_margin = 0; if (sort__first_dimension == SORT_COMM) { struct sort_entry *se = list_first_entry(&hist_entry__sort_list, typeof(*se), list); left_margin = hists__col_len(hists, se->se_width_idx); left_margin -= thread__comm_len(self->thread); } return hist_entry_callchain__fprintf(fp, self, session_total, left_margin); } size_t hists__fprintf(struct hists *self, struct hists *pair, bool show_displacement, FILE *fp) { struct sort_entry *se; struct rb_node *nd; size_t 
ret = 0; unsigned long position = 1; long displacement = 0; unsigned int width; const char *sep = symbol_conf.field_sep; const char *col_width = symbol_conf.col_width_list_str; init_rem_hits(); fprintf(fp, "# %s", pair ? "Baseline" : "Overhead"); if (symbol_conf.show_nr_samples) { if (sep) fprintf(fp, "%cSamples", *sep); else fputs(" Samples ", fp); } if (symbol_conf.show_cpu_utilization) { if (sep) { ret += fprintf(fp, "%csys", *sep); ret += fprintf(fp, "%cus", *sep); if (perf_guest) { ret += fprintf(fp, "%cguest sys", *sep); ret += fprintf(fp, "%cguest us", *sep); } } else { ret += fprintf(fp, " sys "); ret += fprintf(fp, " us "); if (perf_guest) { ret += fprintf(fp, " guest sys "); ret += fprintf(fp, " guest us "); } } } if (pair) { if (sep) ret += fprintf(fp, "%cDelta", *sep); else ret += fprintf(fp, " Delta "); if (show_displacement) { if (sep) ret += fprintf(fp, "%cDisplacement", *sep); else ret += fprintf(fp, " Displ"); } } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) continue; if (sep) { fprintf(fp, "%c%s", *sep, se->se_header); continue; } width = strlen(se->se_header); if (symbol_conf.col_width_list_str) { if (col_width) { hists__set_col_len(self, se->se_width_idx, atoi(col_width)); col_width = strchr(col_width, ','); if (col_width) ++col_width; } } if (!hists__new_col_len(self, se->se_width_idx, width)) width = hists__col_len(self, se->se_width_idx); fprintf(fp, " %*s", width, se->se_header); } fprintf(fp, "\n"); if (sep) goto print_entries; fprintf(fp, "# ........"); if (symbol_conf.show_nr_samples) fprintf(fp, " .........."); if (pair) { fprintf(fp, " .........."); if (show_displacement) fprintf(fp, " ....."); } list_for_each_entry(se, &hist_entry__sort_list, list) { unsigned int i; if (se->elide) continue; fprintf(fp, " "); width = hists__col_len(self, se->se_width_idx); if (width == 0) width = strlen(se->se_header); for (i = 0; i < width; i++) fprintf(fp, "."); } fprintf(fp, "\n#\n"); print_entries: for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (show_displacement) { if (h->pair != NULL) displacement = ((long)h->pair->position - (long)position); else displacement = 0; ++position; } ret += hist_entry__fprintf(h, self, pair, show_displacement, displacement, fp, self->stats.total_period); if (symbol_conf.use_callchain) ret += hist_entry__fprintf_callchain(h, self, fp, self->stats.total_period); if (h->ms.map == NULL && verbose > 1) { __map_groups__fprintf_maps(&h->thread->mg, MAP__FUNCTION, verbose, fp); fprintf(fp, "%.10s end\n", graph_dotted_line); } } free(rem_sq_bracket); return ret; } /* * See hists__fprintf to match the column widths */ unsigned int hists__sort_list_width(struct hists *self) { struct sort_entry *se; int ret = 9; /* total % */ if (symbol_conf.show_cpu_utilization) { ret += 7; /* count_sys % */ ret += 6; /* count_us % */ if (perf_guest) { ret += 13; /* count_guest_sys % */ ret += 12; /* count_guest_us % */ } } if (symbol_conf.show_nr_samples) ret += 11; list_for_each_entry(se, &hist_entry__sort_list, list) if (!se->elide) ret += 2 + hists__col_len(self, se->se_width_idx); if (verbose) /* Addr + origin */ ret += 3 + BITS_PER_LONG / 4; return ret; } static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h, enum hist_filter filter) { h->filtered &= ~(1 << filter); if (h->filtered) return; ++self->nr_entries; if (h->ms.unfolded) self->nr_entries += h->nr_rows; h->row_offset = 0; self->stats.total_period += h->period; 
self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events; hists__calc_col_len(self, h); } void hists__filter_by_dso(struct hists *self, const struct dso *dso) { struct rb_node *nd; self->nr_entries = self->stats.total_period = 0; self->stats.nr_events[PERF_RECORD_SAMPLE] = 0; hists__reset_col_len(self); for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (symbol_conf.exclude_other && !h->parent) continue; if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) { h->filtered |= (1 << HIST_FILTER__DSO); continue; } hists__remove_entry_filter(self, h, HIST_FILTER__DSO); } } void hists__filter_by_thread(struct hists *self, const struct thread *thread) { struct rb_node *nd; self->nr_entries = self->stats.total_period = 0; self->stats.nr_events[PERF_RECORD_SAMPLE] = 0; hists__reset_col_len(self); for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (thread != NULL && h->thread != thread) { h->filtered |= (1 << HIST_FILTER__THREAD); continue; } hists__remove_entry_filter(self, h, HIST_FILTER__THREAD); } } static int symbol__alloc_hist(struct symbol *self) { struct sym_priv *priv = symbol__priv(self); const int size = (sizeof(*priv->hist) + (self->end - self->start) * sizeof(u64)); priv->hist = zalloc(size); return priv->hist == NULL ? -1 : 0; } int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) { unsigned int sym_size, offset; struct symbol *sym = self->ms.sym; struct sym_priv *priv; struct sym_hist *h; if (!sym || !self->ms.map) return 0; priv = symbol__priv(sym); if (priv->hist == NULL && symbol__alloc_hist(sym) < 0) return -ENOMEM; sym_size = sym->end - sym->start; offset = ip - sym->start; pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); if (offset >= sym_size) return 0; h = priv->hist; h->sum++; h->ip[offset]++; pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64 "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]); return 0; } static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) { struct objdump_line *self = malloc(sizeof(*self) + privsize); if (self != NULL) { self->offset = offset; self->line = line; } return self; } void objdump_line__free(struct objdump_line *self) { free(self->line); free(self); } static void objdump__add_line(struct list_head *head, struct objdump_line *line) { list_add_tail(&line->node, head); } struct objdump_line *objdump__get_next_ip_line(struct list_head *head, struct objdump_line *pos) { list_for_each_entry_continue(pos, head, node) if (pos->offset >= 0) return pos; return NULL; } static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file, struct list_head *head, size_t privsize) { struct symbol *sym = self->ms.sym; struct objdump_line *objdump_line; char *line = NULL, *tmp, *tmp2, *c; size_t line_len; s64 line_ip, offset = -1; if (getline(&line, &line_len, file) < 0) return -1; if (!line) return -1; while (line_len != 0 && isspace(line[line_len - 1])) line[--line_len] = '\0'; c = strchr(line, '\n'); if (c) *c = 0; line_ip = -1; /* * Strip leading spaces: */ tmp = line; while (*tmp) { if (*tmp != ' ') break; tmp++; } if (*tmp) { /* * Parse hexa addresses followed by ':' */ line_ip = strtoull(tmp, &tmp2, 16); if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') line_ip = -1; } if (line_ip != -1) { u64 start = 
map__rip_2objdump(self->ms.map, sym->start), end = map__rip_2objdump(self->ms.map, sym->end); offset = line_ip - start; if (offset < 0 || (u64)line_ip > end) offset = -1; } objdump_line = objdump_line__new(offset, line, privsize); if (objdump_line == NULL) { free(line); return -1; } objdump__add_line(head, objdump_line); return 0; } int hist_entry__annotate(struct hist_entry *self, struct list_head *head, size_t privsize) { struct symbol *sym = self->ms.sym; struct map *map = self->ms.map; struct dso *dso = map->dso; char *filename = dso__build_id_filename(dso, NULL, 0); bool free_filename = true; char command[PATH_MAX * 2]; FILE *file; int err = 0; char symfs_filename[PATH_MAX]; if (filename) { snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", symbol_conf.symfs, filename); } if (filename == NULL) { if (dso->has_build_id) { pr_err("Can't annotate %s: not enough memory\n", sym->name); return -ENOMEM; } goto fallback; } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || strstr(command, "[kernel.kallsyms]") || access(symfs_filename, R_OK)) { free(filename); fallback: /* * If we don't have build-ids or the build-id file isn't in the * cache, or is just a kallsyms file, well, lets hope that this * DSO is the same as when 'perf record' ran. */ filename = dso->long_name; snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", symbol_conf.symfs, filename); free_filename = false; } if (dso->origin == DSO__ORIG_KERNEL) { if (dso->annotate_warned) goto out_free_filename; err = -ENOENT; dso->annotate_warned = 1; pr_err("Can't annotate %s: No vmlinux file was found in the " "path\n", sym->name); goto out_free_filename; } pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, filename, sym->name, map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end)); pr_debug("annotating [%p] %30s : [%p] %30s\n", dso, dso->long_name, sym, sym->name); snprintf(command, sizeof(command), "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end), symfs_filename, filename); pr_debug("Executing: %s\n", command); file = popen(command, "r"); if (!file) goto out_free_filename; while (!feof(file)) if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0) break; pclose(file); out_free_filename: if (free_filename) free(filename); return err; } void hists__inc_nr_events(struct hists *self, u32 type) { ++self->stats.nr_events[0]; ++self->stats.nr_events[type]; } size_t hists__fprintf_nr_events(struct hists *self, FILE *fp) { int i; size_t ret = 0; for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { const char *name = event__get_event_name(i); if (!strcmp(name, "UNKNOWN")) continue; ret += fprintf(fp, "%16s events: %10d\n", name, self->stats.nr_events[i]); } return ret; }