#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

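/*
 * Illustrative sketch of how the three helpers above compose:
 * hists__new_col_len() only ever widens a column, so after
 *
 *	hists__reset_col_len(hists);
 *	hists__new_col_len(hists, HISTC_COMM, 7);
 *	hists__new_col_len(hists, HISTC_COMM, 4);
 *
 * hists__col_len(hists, HISTC_COMM) is 7: the second call returns false
 * and changes nothing, because 4 does not exceed the current width.
 */
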
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

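/*
 * BITS_PER_LONG / 4 is the number of hex digits needed to print an
 * unresolved address, e.g. 64 / 4 = 16 columns on a 64-bit build; the
 * "+ 4 + 2" used below adds room for the '[x] ' privilege level marker
 * and the 0x prefix.
 */
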
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

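/*
 * Worked example of the decay above: each call keeps 7/8 of the period,
 * so an entry at 1024 decays 1024 -> 896 -> 784 -> 686 -> ... and, with
 * integer division flooring every step, hits 0 after a few dozen calls,
 * at which point hists__decay_entry() below reports it as fully decayed.
 */
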
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

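/*
 * Note on the allocation above: he->callchain is declared as a
 * zero-length array at the end of struct hist_entry, so a single
 * zalloc() covers the entry plus its callchain_root, and the tail
 * bytes are only paid for when callchains are enabled.
 */
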
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period, u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

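/*
 * Rough usage sketch (illustrative, not taken from this file): a tool's
 * sample handler resolves an addr_location and then calls one of the
 * wrappers below, e.g.
 *
 *	he = __hists__add_entry(&evsel->hists, &al, parent, period, weight);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * Samples that compare equal under the sort keys are merged into one
 * entry; new ones are inserted into the entries_in tree.
 */
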
struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = { .map = al->map, .sym = al->sym, },
		.stat = { .period = period, .weight = weight, .nr_events = 1, },
		.cpu = al->cpu, .ip = al->addr, .level = al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self, .mem_info = mi, .branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period, u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = { .map = bi->to.map, .sym = bi->to.sym, },
		.cpu = al->cpu, .ip = bi->to.addr, .level = al->level,
		.stat = { .period = period, .nr_events = 1, .weight = weight, },
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi, .hists = self, .mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = { .map = al->map, .sym = al->sym, },
		.cpu = al->cpu, .ip = al->addr, .level = al->level,
		.stat = { .period = period, .nr_events = 1, .weight = weight, },
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self, .branch_info = NULL, .mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

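/*
 * Note: "se->se_collapse ?: se->se_cmp" is the GNU C conditional with
 * omitted middle operand, equivalent to
 * "se->se_collapse ? se->se_collapse : se->se_cmp": a sort key without
 * a dedicated collapse function falls back to its compare function.
 */
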
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

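/*
 * entries_in_array[] acts as a two-slot double buffer: under the lock
 * the current input tree is handed to the caller for collapsing while
 * entries_in flips to the other slot, so a threaded collector can keep
 * inserting fresh entries without racing the resort.
 */
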
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

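/*
 * The tie-break loop above starts at i = 1 because index 0 is the group
 * leader, whose period was already compared by the period_cmp() call at
 * the top; only the remaining group members need to be examined.
 */
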
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

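/*
 * Note the inverted test above: an entry that sorts higher goes to the
 * left, so an in-order walk of the output tree visits entries in
 * descending period order, which is what the report views expect.
 */
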
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

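/*
 * Sketch of the filtering protocol implemented above (illustrative): a
 * browser sets the criterion and then replays it over the whole tree,
 * e.g.
 *
 *	hists->thread_filter = thread;
 *	hists__filter_by_thread(hists);
 *
 * Each pass zeroes the running totals, re-marks the filtered bits of
 * every entry and re-accumulates stats and column widths for the
 * entries that survive.
 */
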
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

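/*
 * nr_events[0] doubles as the running total: PERF_RECORD_* type values
 * start at 1, so slot 0 is otherwise unused and every
 * events_stats__inc() call bumps both the total and the per-type
 * counter.
 */
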
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
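
/*
 * Usage sketch (illustrative): a consumer such as perf diff pairs two
 * sessions roughly like
 *
 *	hists__match(leader_hists, other_hists);
 *	if (hists__link(leader_hists, other_hists) < 0)
 *		return -1;
 *
 * after which every leader entry can reach its counterpart (possibly a
 * zeroed dummy) through the pairs list.
 */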