#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
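/*
 * Why BITS_PER_LONG / 4: an unresolved entry is printed as a raw hex
 * address, and each hex digit encodes 4 bits, so a 64-bit address needs
 * 64 / 4 = 16 columns. Sketch of the arithmetic (illustrative only, not
 * part of the original file):
 *
 *	unresolved_col_width = BITS_PER_LONG / 4;	// 16 on 64-bit
 *	printf("%*" PRIx64, unresolved_col_width, ip);	// e.g. ffffffff81000000
 */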
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}
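/*
 * Worked example of the branch-column width above (a sketch, not part of
 * the original file): with an unresolved 64-bit "from" address the column
 * must fit "0x" plus 16 hex digits plus the 4-column "[x] " priv prefix:
 *
 *	symlen = unresolved_col_width + 4 + 2;	// 16 + 4 + 2 = 22 columns
 *
 * whereas a resolved symbol only needs its name length plus that same
 * 4-column prefix.
 */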
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}
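/*
 * The 7/8 factor above turns the period into an exponentially decaying
 * sum: after N decay rounds with no new samples an entry keeps roughly
 * (7/8)^N of its period, so stale entries fade out rather than vanish
 * abruptly. Rough illustration (integer division, so it does reach 0):
 *
 *	period: 1024 -> 896 -> 784 -> 686 -> ... -> 0 after ~50 rounds
 */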
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}
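/*
 * Usage sketch (hypothetical caller, modelled on how a live top-style
 * display refreshes; the names outside this file are assumptions):
 *
 *	hists__decay_entries(&evsel->hists, hide_user, hide_kernel);
 *	hists__collapse_resort(&evsel->hists);
 *	hists__output_resort(&evsel->hists);
 *
 * i.e. age the accumulated periods first, then rebuild the collapsed and
 * output trees so the next redraw reflects the decayed values.
 */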
/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
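/*
 * Note on the allocation above: the callchain_root is not a named member
 * of struct hist_entry; it lives immediately past the end of the entry
 * and he->callchain points at that tail, so entries only pay for
 * callchain storage when callchains are enabled. Layout (illustrative):
 *
 *	+---------------------+------------------------+
 *	| struct hist_entry   | struct callchain_root  |
 *	+---------------------+------------------------+
 *	^ he                  ^ he->callchain
 */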
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period);

			/*
			 * If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}
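/*
 * Usage sketch (hypothetical sample-processing path; the surrounding
 * names are assumptions, not part of this file): once a sample has been
 * resolved into an addr_location, accounting it is a single call:
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(&evsel->hists, &al, parent_sym, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * Repeated samples that hit the same sort keys land in the same entry
 * and only bump its period and event count.
 */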
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		/* GNU "?:" shorthand: fall back to se_cmp when there is no
		 * dedicated collapse comparator. */
		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);
		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
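/*
 * Rotation sketch: hists->entries_in always points into the two-element
 * entries_in_array, so the rotate hands the current tree to the collapser
 * and leaves an empty one for concurrent inserters; a threaded consumer
 * never walks a tree that producers are still inserting into:
 *
 *	entries_in_array[0]  <- new entries accumulate here after rotate
 *	entries_in_array[1]  <- returned root, drained by collapse_resort
 */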
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}
/*
 * reverse the map, sort on period.
 */
static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	/*
	 * sizeof(*periods_a), not sizeof(periods_a): we need room for
	 * nr_members u64 values, not nr_members pointers.
	 */
	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}
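/*
 * Tie-break example for the group path above (illustrative numbers only):
 * with event-group reporting on {cycles,instructions}, two entries whose
 * leader (cycles) periods are equal get ordered by the member periods
 * gathered from their pair lists, starting at index 1:
 *
 *	entry A: periods = { 1000, 400 }
 *	entry B: periods = { 1000, 700 }	-> B sorts above A
 *
 * Index 0 is skipped because it is the leader period already compared.
 */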
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* bigger periods go left, so rb_first() is the hottest entry */
		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
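/*
 * Filter usage sketch (hypothetical UI caller, in the spirit of the hist
 * browser): toggling a DSO filter is just setting the filter and
 * re-walking the output tree:
 *
 *	hists->dso_filter = dso;	// or NULL to clear the filter
 *	hists__filter_by_dso(hists);
 *
 * Entries failing the predicate keep their HIST_FILTER__DSO bit and are
 * skipped by the display code; clearing the filter drops the bit and
 * hists__remove_entry_filter() restores them to the totals.
 */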
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	/* nr_events[0] doubles as the total across all record types */
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
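/*
 * Usage sketch for pairing two histograms (modelled loosely on comparing
 * a baseline with a second data file, as perf diff does; hedged, not a
 * verbatim caller):
 *
 *	hists__match(leader, other);		// pair entries present in both
 *	if (hists__link(leader, other) < 0)	// dummies for other-only entries
 *		return -1;
 *
 * Afterwards every entry in `leader` either has a real pair from `other`
 * or a zeroed dummy, so per-entry deltas can be computed uniformly.
 */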