[firefly-linux-kernel-4.4.55.git] / tools/perf/util/hist.c (blob 0cad9e07c5b4fe9800922fe19e4dd4148ba0cf01)
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline)
                hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

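/*
 * Worked example of the accounting above, assuming a 64-bit build:
 * an unresolved address prints as "0x" plus BITS_PER_LONG / 4 == 16
 * hex digits, so with the '[x] ' prefix the unresolved symbol column
 * needs 16 + 4 + 2 == 22 characters; a resolved symbol under -v gains
 * the same 16 + 2 plus 3 for the ' y ' symtab origin marker.
 */
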
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

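/*
 * Decay keeps 7/8 of the previous value, so with no fresh samples a
 * period of 800 shrinks to 700, then 612, 535, ... until integer
 * division reaches 0 and hists__decay_entry() below reports the entry
 * as ready for removal.
 */
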
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        rb_erase(&he->rb_node, &hists->entries);

        if (sort__need_collapse)
                rb_erase(&he->rb_node_in, &hists->entries_collapsed);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                map__get(he->ms.map);

                if (he->branch_info) {
                        /*
                         * This branch info is (part of) an array allocated by
                         * sample__resolve_bstack() that will be freed once the
                         * new entries have been added, so we need our own copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        map__get(he->branch_info->from.map);
                        map__get(he->branch_info->to.map);
                }

                if (he->mem_info) {
                        map__get(he->mem_info->iaddr.map);
                        map__get(he->mem_info->daddr.map);
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure it receives arguments in the same order as
                 * hist_entry__collapse(), so that an entry can be found with
                 * either function regardless of which sort keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self)
                                he_stat__add_period(&he->stat, period, weight);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

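/*
 * Note: hists__findnew_entry() is the usual rbtree find-or-insert
 * pattern: the **p/parent pair tracked during the walk is handed
 * straight to rb_link_node()/rb_insert_color() when no node compares
 * equal, and a comparing-equal node just absorbs the new period.
 */
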
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

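/*
 * Typical call for a plain (non-branch, non-mem) sample, as done by
 * iter_add_single_normal_entry() below -- a sketch, with error
 * handling elided:
 *
 *      he = __hists__add_entry(evsel__hists(evsel), al, parent,
 *                              NULL, NULL,
 *                              sample->period, sample->weight,
 *                              sample->transaction, true);
 */
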
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = iter->sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is based solely
         * on periods.  We want the sort to be done on nr_events * weight,
         * which is achieved indirectly by passing period=weight here
         * and letting he_stat__add_period() accumulate it.
         */
        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                cost, cost, 0, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                1, bi->flags.cycles ? bi->flags.cycles : 1,
                                0, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that each one is
         * accumulated only once, preventing entries from exceeding 100%
         * overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

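/*
 * All four ops tables above implement the same protocol driven by
 * hist_entry_iter__add() below: prepare_entry, add_single_entry, a
 * next_entry/add_next_entry loop, then finish_entry.  The mem and
 * normal iterators have nothing to loop over, so they plug in the
 * nop next/add_next callbacks.
 */
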
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(iter->sample, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}

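/*
 * Sketch of how a consumer drives this, modelled on builtin-report.c
 * (local names here are illustrative):
 *
 *      struct hist_entry_iter iter = {
 *              .evsel  = evsel,
 *              .sample = sample,
 *              .ops    = &hist_iter_normal,
 *      };
 *
 *      if (hist_entry_iter__add(&iter, &al, max_stack, NULL) < 0)
 *              pr_err("problem adding hist entry\n");
 */
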
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

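/*
 * Three comparator stages walk the same configured sort list:
 * fmt->cmp() (above) decides identity when inserting into entries_in,
 * fmt->collapse() decides identity when merging into the collapsed
 * tree, and fmt->sort() (hist_entry__sort() below) orders the final
 * output tree.
 */
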
void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__delete(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

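/*
 * hists->entries_in double-buffers between entries_in_array[0] and
 * [1]: new entries keep landing in one tree while the resort path
 * drains the one returned here, and hists->lock serializes only the
 * pointer flip itself.
 */
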
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        bool use_callchain;

        if (evsel && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}

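/*
 * Example of the min_callchain_hits math above: with the default
 * callchain_param.min_percent of 0.5 and a total period of 200000,
 * branches of a callchain whose accumulated hits fall below
 * 200000 * 0.5 / 100 == 1000 get folded away by callchain_param.sort()
 * in __hists__insert_output_entry().
 */
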
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

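/*
 * h->filtered is a bitmask with one bit per enum hist_filter value;
 * hists__filter_entry_by_dso() and friends below set their bit with
 * |= (1 << HIST_FILTER__DSO), and the &= ~(1 << filter) above clears
 * it, so an entry is only shown once every active filter bit is gone.
 */
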
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}

void hists__filter_by_socket(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_socket(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
        }
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

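/*
 * nr_events[0] doubles as a grand total: the PERF_RECORD_* type
 * values start at 1, so slot 0 is free to count every event that
 * passes through here.
 */
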
void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

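/*
 * hists__match() and hists__link() together set up side-by-side
 * comparison (as used by perf diff): match() pairs the entries both
 * hists share, link() backfills zero-period dummy entries for the
 * ones only the non-leader has, so the rows of both columns line up.
 */
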
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode)
{
        struct branch_info *bi;

        /* If we have branch cycles always annotate them. */
        if (bs && bs->nr && bs->entries[0].flags.cycles) {
                int i;

                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, we still want to process the
                         * other entries.
                         *
                         * For non-standard branch modes always
                         * force no IPC (prev == NULL).
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                        nonany_branch_mode ? NULL : prev,
                                        bi[i].flags.cycles);
                                prev = &bi[i].to;
                        }
                        free(bi);
                }
        }
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = 0;

        evlist__for_each(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

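/*
 * Example: with a total period of 1000 of which 250 survives
 * filtering, an entry with period 50 reads as 50/250 == 20% in
 * relative mode but 50/1000 == 5% in absolute mode;
 * parse_filter_percentage() below switches between the two.
 */
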
int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        hists->socket_filter = -1;
        return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init, NULL);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}