#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}
void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}
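/*
 * Note: BITS_PER_LONG / 4 is the number of hex digits needed to print a
 * raw address on this arch (16 on 64-bit, 8 on 32-bit), which is why it
 * is used as the column width for unresolved (address-only) entries.
 */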
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline)
                hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
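/*
 * Each decay pass multiplies the period by 7/8, i.e. an exponential decay
 * with factor 0.875 per pass: after n passes an idle entry retains
 * (7/8)^n of its period (about 26% after 10 passes), and integer division
 * eventually drives it to zero so hists__decay_entry() can reap it.
 */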
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        rb_erase(&he->rb_node, &hists->entries);

        if (sort__need_collapse)
                rb_erase(&he->rb_node_in, &hists->entries_collapsed);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}
void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}
/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                map__get(he->ms.map);

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
                         * sample__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        map__get(he->branch_info->from.map);
                        map__get(he->branch_info->to.map);
                }

                if (he->mem_info) {
                        map__get(he->mem_info->iaddr.map);
                        map__get(he->mem_info->daddr.map);
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);
        }

        return he;
}
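/*
 * Note the ownership rules above: the new entry starts as a shallow copy
 * of the template, then takes its own references on the map and thread
 * and deep-copies branch_info, since the template's buffer (from
 * sample__resolve_bstack()) is freed once the sample has been added.
 * hist_entry__delete() below is the matching teardown.
 */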
static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in a same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self)
                                he_stat__add_period(&he->stat, period, weight);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}
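/*
 * hists->entries_in is the first-level tree, keyed by hist_entry__cmp()
 * over the configured sort keys: samples that compare equal are merged
 * into one entry (periods accumulated above) rather than inserted twice.
 */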
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = iter->sample->weight;
        if (!cost)
                cost = 1;

        /*
         * must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort() which is solely
         * based on periods. We want sorting be done on nr_events * weight
         * and this is indirectly achieved by passing period=weight here
         * and the he_stat__add_period() function.
         */
        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                cost, cost, 0, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                1, bi->flags.cycles ? bi->flags.cycles : 1,
                                0, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursions so that they're
         * cumulated only one time to prevent entries more than 100%
         * overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there's duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}
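/*
 * Example of the he_cache check above: for a recursive chain like
 * main -> foo -> foo -> foo, the callers' entries for "foo" compare
 * equal, so "foo" is accumulated only once and the cumulative (children)
 * overhead cannot exceed 100%.
 */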
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};
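/*
 * These ops tables are driven by hist_entry_iter__add() below. Roughly
 * how a tool wires one up (sketch based on builtin-report.c; field names
 * from struct hist_entry_iter in hist.h, error handling omitted):
 *
 *      struct hist_entry_iter iter = {
 *              .evsel  = evsel,
 *              .sample = sample,
 *              .ops    = &hist_iter_normal,  // or _branch/_mem/_cumulative
 *      };
 *
 *      err = hist_entry_iter__add(&iter, &al, max_stack, rep);
 */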
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(iter->sample, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__delete(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}
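/*
 * Return value convention: true means the entry was linked into the
 * collapsed tree as a new node; false means it compared equal to an
 * existing node, was merged into it and freed, so the caller must not
 * touch it again.
 */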
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
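/*
 * The two rbtrees in entries_in_array act as a double buffer: under the
 * lock we hand the current tree to the collapse pass and flip new
 * insertions over to the other one, so e.g. perf top's display thread
 * can drain entries while the processing thread keeps adding samples.
 */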
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}
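/*
 * Note the comparison above: entries that sort higher go to the left,
 * so an in-order walk of hists->entries yields output in descending
 * order (hottest entries first).
 */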
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        bool use_callchain;

        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}
void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}
void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}
void hists__filter_by_socket(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_socket(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
        }
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
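/*
 * hists__match()/hists__link() are what e.g. perf diff uses to pair up
 * entries from two sessions: match links equal buckets, link inserts
 * empty dummies for entries only present in the non-leader hists so
 * both columns can be shown side by side.
 */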
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode)
{
        struct branch_info *bi;

        /* If we have branch cycles always annotate them. */
        if (bs && bs->nr && bs->entries[0].flags.cycles) {
                int i;

                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, still want to process the
                         * other entries.
                         *
                         * For non standard branch modes always
                         * force no IPC (prev == NULL)
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                        nonany_branch_mode ? NULL : prev,
                                        bi[i].flags.cycles);
                                prev = &bi[i].to;
                        }
                        free(bi);
                }
        }
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = 0;

        evlist__for_each(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }

        return ret;
}
u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}
int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        hists->socket_filter = -1;
        return 0;
}
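/*
 * struct hists_evsel (see hist.h) embeds a struct hists right after the
 * perf_evsel, so evsel__hists() is just pointer arithmetic on the evsel;
 * the perf_evsel__object_config() call below makes every allocated evsel
 * big enough and runs this init on it.
 */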
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init, NULL);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}