perf hists: Move locking to its call-sites
[firefly-linux-kernel-4.4.55.git] tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

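/*
 * Histogram bookkeeping for the perf tools: samples are accumulated into
 * per-evsel hists, optionally collapsed on the sort keys, and resorted on
 * period for output by front ends such as report, top and diff.
 */
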
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

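/*
 * Default callchain rendering: graph mode with percentages relative to the
 * parent branch, entries below 0.5% pruned, in callee order.
 */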
struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

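/*
 * Exponential decay used by the live modes (e.g. perf top): each pass keeps
 * 7/8 of an entry's period so that stale entries fade out and can be freed
 * once they reach zero.
 */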
static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here
                 * in case it gets new samples; we'll eventually free it
                 * when the user stops browsing and it again gets fully
                 * decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

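/*
 * Note: the callchain_root lives in flexible storage at the tail of the
 * hist_entry (hence the combined allocation below), so it is freed together
 * with the entry itself.
 */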
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ?
                                sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * The branch info was allocated as part of
                         * machine__resolve_bstack() and will be freed
                         * after adding new entries, so we need to save
                         * a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

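/*
 * Insert-or-accumulate: walk the current input tree looking for an entry
 * with the same sort keys; on a match just add the period/weight to it,
 * otherwise link a freshly allocated entry into the rbtree.
 */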
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period,
                                         u64 weight)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching for an entry regardless of which
                 * sort keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from
                         * machine__resolve_mem() and will not be used
                         * anymore.
                         */
                        free(entry->mem_info);

                        /*
                         * If the map of an existing hist_entry has become
                         * out-of-date due to an exec() or similar, update
                         * it.  Otherwise we will mis-adjust symbol
                         * addresses when computing the history counter to
                         * increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
        return he;
}

403
404 struct hist_entry *__hists__add_mem_entry(struct hists *self,
405                                           struct addr_location *al,
406                                           struct symbol *sym_parent,
407                                           struct mem_info *mi,
408                                           u64 period,
409                                           u64 weight)
410 {
411         struct hist_entry entry = {
412                 .thread = al->thread,
413                 .ms = {
414                         .map    = al->map,
415                         .sym    = al->sym,
416                 },
417                 .stat = {
418                         .period = period,
419                         .weight = weight,
420                         .nr_events = 1,
421                 },
422                 .cpu    = al->cpu,
423                 .ip     = al->addr,
424                 .level  = al->level,
425                 .parent = sym_parent,
426                 .filtered = symbol__parent_filter(sym_parent),
427                 .hists = self,
428                 .mem_info = mi,
429                 .branch_info = NULL,
430         };
431         return add_hist_entry(self, &entry, al, period, weight);
432 }
433
434 struct hist_entry *__hists__add_branch_entry(struct hists *self,
435                                              struct addr_location *al,
436                                              struct symbol *sym_parent,
437                                              struct branch_info *bi,
438                                              u64 period,
439                                              u64 weight)
440 {
441         struct hist_entry entry = {
442                 .thread = al->thread,
443                 .ms = {
444                         .map    = bi->to.map,
445                         .sym    = bi->to.sym,
446                 },
447                 .cpu    = al->cpu,
448                 .ip     = bi->to.addr,
449                 .level  = al->level,
450                 .stat = {
451                         .period = period,
452                         .nr_events = 1,
453                         .weight = weight,
454                 },
455                 .parent = sym_parent,
456                 .filtered = symbol__parent_filter(sym_parent),
457                 .branch_info = bi,
458                 .hists  = self,
459                 .mem_info = NULL,
460         };
461
462         return add_hist_entry(self, &entry, al, period, weight);
463 }
464
465 struct hist_entry *__hists__add_entry(struct hists *self,
466                                       struct addr_location *al,
467                                       struct symbol *sym_parent, u64 period,
468                                       u64 weight)
469 {
470         struct hist_entry entry = {
471                 .thread = al->thread,
472                 .ms = {
473                         .map    = al->map,
474                         .sym    = al->sym,
475                 },
476                 .cpu    = al->cpu,
477                 .ip     = al->addr,
478                 .level  = al->level,
479                 .stat = {
480                         .period = period,
481                         .nr_events = 1,
482                         .weight = weight,
483                 },
484                 .parent = sym_parent,
485                 .filtered = symbol__parent_filter(sym_parent),
486                 .hists  = self,
487                 .branch_info = NULL,
488                 .mem_info = NULL,
489         };
490
491         return add_hist_entry(self, &entry, al, period, weight);
492 }
493
494 int64_t
495 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
496 {
497         struct sort_entry *se;
498         int64_t cmp = 0;
499
500         list_for_each_entry(se, &hist_entry__sort_list, list) {
501                 cmp = se->se_cmp(left, right);
502                 if (cmp)
503                         break;
504         }
505
506         return cmp;
507 }
508
509 int64_t
510 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
511 {
512         struct sort_entry *se;
513         int64_t cmp = 0;
514
515         list_for_each_entry(se, &hist_entry__sort_list, list) {
516                 int64_t (*f)(struct hist_entry *, struct hist_entry *);
517
518                 f = se->se_collapse ?: se->se_cmp;
519
520                 cmp = f(left, right);
521                 if (cmp)
522                         break;
523         }
524
525         return cmp;
526 }
527
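/*
 * branch_info and mem_info are owned by the entry (copied or handed over in
 * hist_entry__new()/add_hist_entry()), so they are released here with it.
 */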
void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he->mem_info);
        free(he);
}

/*
 * collapse the histogram: merge entries that compare equal under the
 * collapsed sort keys into a single bucket
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

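/*
 * Two input trees are kept in entries_in_array so that new samples can keep
 * flowing into one while the other is being collapsed; rotating them under
 * hists->lock is where the collector and the resort code meet.
 */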
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries
                         * already collapsed, we need to apply the filters
                         * that may have been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

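/*
 * For the event group view the leader's period decides first; on a tie,
 * gather the member periods from each entry's pair list and compare them
 * index by index.
 */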
static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period *
                             (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n,
                                             min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

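/*
 * Called when the given filter no longer applies to the entry: clear its
 * bit and, if no other filter still hides it, add it back into the stats
 * and column-width bookkeeping.
 */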
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

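/* nr_events[0] doubles as the running total across all event types. */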
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

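/*
 * Used by hists__link() below: add a zero-stat placeholder to the leader so
 * that entries present only in the other hists still get a row (the diff
 * and group views rely on this pairing).
 */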
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find any, add a dummy entry on the leader hists, with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}