static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
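
/*
 * Grow-only update: widen the column if @len exceeds its current width,
 * and report whether anything changed.
 */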
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
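
/*
 * Update the maximum width of every column touched by this entry, so
 * the browsers can size their columns to fit the widest value seen.
 */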
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}
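
/*
 * Recompute the column widths from scratch, but only over the first
 * @max_rows output entries, i.e. roughly what one screenful of the
 * browser is about to display.
 */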
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
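
/*
 * he_stat__add_period() folds a single new sample into an entry's
 * totals; he_stat__add_stat() merges two entries wholesale, as done
 * when collapsing the histogram.
 */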
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
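
/*
 * Each decay pass keeps 7/8 of an entry's period, so an entry that
 * stops getting samples fades out geometrically: after n passes it
 * retains (7/8)^n of its period, e.g. about 34% after 8 passes, until
 * it hits zero and can be removed by hists__decay_entries().
 */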
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually
		 * free it when the user stops browsing and it again
		 * gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch_info is (part of) an allocation made by
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
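
/*
 * Account a freshly inserted entry in the hists totals and column
 * widths, unless it is hidden by an active filter.
 */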
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
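
/*
 * Insert @entry into the input rbtree or, if an equal entry already
 * exists there, just fold the new period/weight into it.
 */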
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period, u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem_info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}
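
/*
 * The three __hists__add_*_entry() wrappers below all follow the same
 * pattern: build a template hist_entry on the stack and let
 * add_hist_entry() either merge it into an existing entry or clone it
 * into the tree via hist_entry__new().
 */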
struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		/* ... ms, stat, ip and level initializers ... */
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.mem_info = mi,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		/* ... ms, stat, ip and level initializers ... */
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		/* ... ms, stat, ip and level initializers ... */
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
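
/*
 * Like hist_entry__cmp(), but use each sort key's se_collapse()
 * callback when one is provided, falling back to se_cmp() otherwise.
 */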
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
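
/*
 * hists->entries_in_array holds two input trees: hand out the current
 * one and flip to the other under the lock, so that new entries can
 * keep arriving while the tree just handed out is being collapsed.
 */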
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	/* sizeof(*periods_a): allocate element-sized slots, not pointer-sized */
	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	/* index 0 is the group leader, already compared above */
	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}
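
/*
 * Insert @he into the output tree in descending order of period (and,
 * for event groups, of the member periods), resolving its callchain
 * first when callchains are in use.
 */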
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
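
/*
 * Typical life cycle of a hists tree in a report-style tool (a sketch,
 * not a fixed API contract):
 *
 *	__hists__add_entry(hists, &al, parent, period, weight);
 *	...					(once per sample)
 *	hists__collapse_resort(hists);		(merge equal entries)
 *	hists__output_resort(hists);		(sort by period for display)
 */
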
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
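
/*
 * Each hists__filter_entry_by_*() helper below sets its filter bit and
 * reports whether the entry is now filtered out; the corresponding
 * hists__filter_by_*() walks every output entry, re-evaluates the
 * predicate and rebuilds the totals from the entries left visible.
 */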
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];		/* slot 0 is the total across all types */
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
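
/*
 * Insert a zero-stat clone of @pair into @hists' input tree; used by
 * hists__link() to give unmatched entries a placeholder in the leader.
 */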
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
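
/*
 * A diff-style tool would typically run, for each non-leader hists
 * (a sketch of intended usage, not prescribed by this file):
 *
 *	hists__match(leader, other);	(pair up entries present in both)
 *	hists__link(leader, other);	(add dummies for the leftovers)
 */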