/* Default regex used to pick a sample's "parent" (caller) symbol. */
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
/* Default --sort key lists for the various perf report/top/diff modes. */
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
/* User-supplied --sort / --fields strings; NULL when not given. */
const char *sort_order;
const char *field_order;
/* Compiled --ignore-callees regex; only valid when have_ignore_callees != 0. */
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
/* Set when any active sort key needs a collapse pass (se_collapse). */
int sort__need_collapse = 0;
/* Flags recording which sort keys were selected during setup. */
int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
/*
 * snprintf() wrapper used by all column formatters: when a --field-separator
 * is configured it scrubs occurrences of that separator character from the
 * formatted output so the columns stay machine-parseable.
 * NOTE(review): several interior lines (va_list setup, separator-replace
 * loop) are elided from this excerpt.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
n = vsnprintf(bf, size, fmt, ap);
/* Only scrub when a separator is configured and something was printed. */
if (symbol_conf.field_sep && n > 0) {
sep = strchr(sep, *symbol_conf.field_sep);
/*
 * Three-way ordering helper for when at least one of two keys is NULL.
 * NOTE(review): body elided from this excerpt — presumably orders NULL
 * against non-NULL deterministically; confirm against the full file.
 */
static int64_t cmp_null(const void *l, const void *r)
/* --sort pid: order entries by thread id (return-type line elided above). */
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
return right->thread->tid - left->thread->tid;
/* Format "tid:comm" into bf; empty string when the comm is unknown. */
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
const char *comm = thread__comm_str(he->thread);
/* Reserve 6 columns for the "%5d:" tid prefix; never underflow width. */
width = max(7U, width) - 6;
return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
width, width, comm ?: "");
/* Sort-key descriptor for the thread (pid) column. */
struct sort_entry sort_thread = {
.se_header = " Pid:Command",
.se_cmp = sort__thread_cmp,
.se_snprintf = hist_entry__thread_snprintf,
.se_width_idx = HISTC_THREAD,
/* --sort comm: compare by the interned comm string's address, which is
 * unique per comm, so pointer difference is a valid equality-grouping key. */
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
/* Compare the addr that should be unique among comm */
return comm__str(right->comm) - comm__str(left->comm);
/* Collapse pass uses the same address-identity comparison. */
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
/* Compare the addr that should be unique among comm */
return comm__str(right->comm) - comm__str(left->comm);
/* Final display sort is lexicographic so output is human-ordered. */
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
return strcmp(comm__str(right->comm), comm__str(left->comm));
/* Left-justified comm name, clipped/padded to the column width. */
static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
/* Sort-key descriptor for the comm column. */
struct sort_entry sort_comm = {
.se_header = "Command",
.se_cmp = sort__comm_cmp,
.se_collapse = sort__comm_collapse,
.se_sort = sort__comm_sort,
.se_snprintf = hist_entry__comm_snprintf,
.se_width_idx = HISTC_COMM,
/*
 * Shared DSO comparator for the dso/dso_from/dso_to/dso_daddr keys.
 * NULL maps/dsos are ordered via cmp_null(); otherwise compares names.
 * NOTE(review): the conditional choosing long vs short name (verbose
 * check) is elided from this excerpt.
 */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
struct dso *dso_l = map_l ? map_l->dso : NULL;
struct dso *dso_r = map_r ? map_r->dso : NULL;
const char *dso_name_l, *dso_name_r;
if (!dso_l || !dso_r)
return cmp_null(dso_r, dso_l);
dso_name_l = dso_l->long_name;
dso_name_r = dso_r->long_name;
dso_name_l = dso_l->short_name;
dso_name_r = dso_r->short_name;
return strcmp(dso_name_l, dso_name_r);
/* --sort dso: note the argument order is (right, left) to match the
 * descending convention used by the other comparators here. */
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__dso_cmp(right->ms.map, left->ms.map);
/* Print the DSO name (short unless verbose), or "[unknown]". */
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
size_t size, unsigned int width)
if (map && map->dso) {
const char *dso_name = !verbose ? map->dso->short_name :
return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
/* Sort-key descriptor for the dso column. */
struct sort_entry sort_dso = {
.se_header = "Shared Object",
.se_cmp = sort__dso_cmp,
.se_snprintf = hist_entry__dso_snprintf,
.se_width_idx = HISTC_DSO,
/* Raw address comparator; relies on unsigned wraparound then signed cast
 * to produce a three-way result. */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
return (int64_t)(right_ip - left_ip);
/* Symbol comparator; NULL symbols ordered via cmp_null().
 * NOTE(review): the lines deriving ip_l/ip_r from the symbols are elided. */
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
return (int64_t)(ip_r - ip_l);
/* --sort symbol: fall back to raw ip when neither entry has a symbol. */
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->ms.sym && !right->ms.sym)
return _sort__addr_cmp(left->ip, right->ip);
/*
 * comparing symbol address alone is not enough since it's a
 * relative address within a dso.
 */
if (!sort__has_dso) {
ret = sort__dso_cmp(left, right);
return _sort__sym_cmp(left->ms.sym, right->ms.sym);
/* Display ordering: lexicographic by symbol name, NULLs via cmp_null(). */
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
if (!left->ms.sym || !right->ms.sym)
return cmp_null(left->ms.sym, right->ms.sym);
return strcmp(right->ms.sym->name, left->ms.sym->name);
/* Shared symbol formatter used by sym/sym_from/sym_to/daddr columns.
 * In verbose mode prefixes the address and symtab-origin character. */
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
u64 ip, char level, char *bf, size_t size,
char o = map ? dso__symtab_origin(map->dso) : '!';
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
BITS_PER_LONG / 4 + 2, ip, o);
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
/* Data (variable) maps also print the offset from the symbol start. */
if (map->type == MAP__VARIABLE) {
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
ip - map->unmap_ip(map, sym->start));
ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
/* No symbol: print the raw address, one hex digit per nibble. */
size_t len = BITS_PER_LONG / 4;
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
he->level, bf, size, width);
/* Sort-key descriptor for the symbol column. */
struct sort_entry sort_sym = {
.se_header = "Symbol",
.se_cmp = sort__sym_cmp,
.se_sort = sort__sym_sort,
.se_snprintf = hist_entry__sym_snprintf,
.se_width_idx = HISTC_SYMBOL,
/* --sort srcline: lazily resolve and cache each entry's source file:line
 * (SRCLINE_UNKNOWN when unresolvable), then compare the strings.
 * NOTE(review): the conditions selecting the SRCLINE_UNKNOWN branch
 * (presumably a NULL ms.map check) are elided from this excerpt. */
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->srcline) {
left->srcline = SRCLINE_UNKNOWN;
struct map *map = left->ms.map;
left->srcline = get_srcline(map->dso,
map__rip_2objdump(map, left->ip));
if (!right->srcline) {
right->srcline = SRCLINE_UNKNOWN;
struct map *map = right->ms.map;
right->srcline = get_srcline(map->dso,
map__rip_2objdump(map, right->ip));
return strcmp(right->srcline, left->srcline);
309 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
310 size_t size, unsigned int width)
312 return repsep_snprintf(bf, size, "%*.*-s", width, width, he->srcline);
/* Sort-key descriptor for the srcline (source file:line) column. */
struct sort_entry sort_srcline = {
.se_header = "Source:Line",
.se_cmp = sort__srcline_cmp,
.se_snprintf = hist_entry__srcline_snprintf,
.se_width_idx = HISTC_SRCLINE,
/* --sort parent: compare by the resolved parent (caller) symbol name. */
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
struct symbol *sym_l = left->parent;
struct symbol *sym_r = right->parent;
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
return strcmp(sym_r->name, sym_l->name);
/* Print the parent symbol name, or "[other]" when none was matched. */
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return repsep_snprintf(bf, size, "%-*.*s", width, width,
he->parent ? he->parent->name : "[other]");
/* Sort-key descriptor for the parent-symbol column. */
struct sort_entry sort_parent = {
.se_header = "Parent symbol",
.se_cmp = sort__parent_cmp,
.se_snprintf = hist_entry__parent_snprintf,
.se_width_idx = HISTC_PARENT,
/* --sort cpu: order by the cpu the sample was taken on. */
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
return right->cpu - left->cpu;
/* Right-justified cpu number, clipped to the column width. */
static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
/* Sort-key descriptor for the cpu column (header line elided here). */
struct sort_entry sort_cpu = {
.se_cmp = sort__cpu_cmp,
.se_snprintf = hist_entry__cpu_snprintf,
.se_width_idx = HISTC_CPU,
/* sort keys for branch stacks */
/* --sort dso_from: compare the DSO the branch originated from.
 * NOTE(review): unlike the sym_from/sym_to comparators below, there is
 * no visible NULL check on branch_info here — confirm callers guarantee
 * it is set in branch mode. */
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__dso_cmp(left->branch_info->from.map,
right->branch_info->from.map);
static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return _hist_entry__dso_snprintf(he->branch_info->from.map,
/* --sort dso_to: compare the DSO the branch landed in. */
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
return _sort__dso_cmp(left->branch_info->to.map,
right->branch_info->to.map);
static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return _hist_entry__dso_snprintf(he->branch_info->to.map,
402 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
404 struct addr_map_symbol *from_l = &left->branch_info->from;
405 struct addr_map_symbol *from_r = &right->branch_info->from;
407 if (!left->branch_info || !right->branch_info)
408 return cmp_null(left->branch_info, right->branch_info);
410 from_l = &left->branch_info->from;
411 from_r = &right->branch_info->from;
413 if (!from_l->sym && !from_r->sym)
414 return _sort__addr_cmp(from_l->addr, from_r->addr);
416 return _sort__sym_cmp(from_l->sym, from_r->sym);
420 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
422 struct addr_map_symbol *to_l, *to_r;
424 if (!left->branch_info || !right->branch_info)
425 return cmp_null(left->branch_info, right->branch_info);
427 to_l = &left->branch_info->to;
428 to_r = &right->branch_info->to;
430 if (!to_l->sym && !to_r->sym)
431 return _sort__addr_cmp(to_l->addr, to_r->addr);
433 return _sort__sym_cmp(to_l->sym, to_r->sym);
/* Print the branch source symbol, or "N/A" when there is no branch info. */
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
he->level, bf, size, width);
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
/* Print the branch target symbol, or "N/A" when there is no branch info. */
static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
he->level, bf, size, width);
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
/* Sort-key descriptors for the four branch-stack columns. */
struct sort_entry sort_dso_from = {
.se_header = "Source Shared Object",
.se_cmp = sort__dso_from_cmp,
.se_snprintf = hist_entry__dso_from_snprintf,
.se_width_idx = HISTC_DSO_FROM,
struct sort_entry sort_dso_to = {
.se_header = "Target Shared Object",
.se_cmp = sort__dso_to_cmp,
.se_snprintf = hist_entry__dso_to_snprintf,
.se_width_idx = HISTC_DSO_TO,
struct sort_entry sort_sym_from = {
.se_header = "Source Symbol",
.se_cmp = sort__sym_from_cmp,
.se_snprintf = hist_entry__sym_from_snprintf,
.se_width_idx = HISTC_SYMBOL_FROM,
struct sort_entry sort_sym_to = {
.se_header = "Target Symbol",
.se_cmp = sort__sym_to_cmp,
.se_snprintf = hist_entry__sym_to_snprintf,
.se_width_idx = HISTC_SYMBOL_TO,
/* --sort mispredict: group by whether the mispred/predicted flags differ.
 * NOTE(review): the final return combining mp and p is elided here. */
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
/* Print the prediction outcome; "N/A" without branch info.  The lines
 * assigning the predicted/mispredicted strings are elided here. */
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width){
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.predicted)
else if (he->branch_info->flags.mispred)
return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
/* --sort daddr_sym */
/* Compare the sampled data addresses; entries without mem_info compare
 * as address 0 (the guarding if-lines are elided here). */
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
l = left->mem_info->daddr.addr;
r = right->mem_info->daddr.addr;
return (int64_t)(r - l);
/* Print the data-address symbol using the shared symbol formatter. */
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
struct map *map = NULL;
struct symbol *sym = NULL;
addr = he->mem_info->daddr.addr;
map = he->mem_info->daddr.map;
sym = he->mem_info->daddr.sym;
return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
/* --sort dso_daddr: compare the DSO containing the data address. */
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *map_l = NULL;
struct map *map_r = NULL;
map_l = left->mem_info->daddr.map;
map_r = right->mem_info->daddr.map;
return _sort__dso_cmp(map_l, map_r);
/* Print the DSO containing the data address. */
static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
struct map *map = NULL;
map = he->mem_info->daddr.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
/* --sort locked: compare the mem_lock field of the data source; entries
 * without mem_info are treated as PERF_MEM_LOCK_NA (guard lines elided). */
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
data_src_l = left->mem_info->data_src;
data_src_l.mem_lock = PERF_MEM_LOCK_NA;
data_src_r = right->mem_info->data_src;
data_src_r.mem_lock = PERF_MEM_LOCK_NA;
return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
/* Print the lock state; the lines assigning the output strings for the
 * NA/Locked cases are elided from this excerpt. */
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
u64 mask = PERF_MEM_LOCK_NA;
mask = he->mem_info->data_src.mem_lock;
if (mask & PERF_MEM_LOCK_NA)
else if (mask & PERF_MEM_LOCK_LOCKED)
return repsep_snprintf(bf, size, "%-*s", width, out);
/* --sort tlb: compare the TLB-access bits of the data source; missing
 * mem_info is treated as PERF_MEM_TLB_NA (guard lines elided). */
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
data_src_l = left->mem_info->data_src;
data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
data_src_r = right->mem_info->data_src;
data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
/* Human-readable names for each PERF_MEM_TLB_* bit (entries elided). */
static const char * const tlb_access[] = {
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
/* Render the TLB bits as a space-separated list plus hit/miss suffix. */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
size_t sz = sizeof(out) - 1; /* -1 for null termination */
u64 m = PERF_MEM_TLB_NA;
m = he->mem_info->data_src.mem_dtlb;
hit = m & PERF_MEM_TLB_HIT;
miss = m & PERF_MEM_TLB_MISS;
/* already taken care of */
m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
/* Walk the remaining bits, appending the matching name for each. */
for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
strncat(out, tlb_access[i], sz - l);
l += strlen(tlb_access[i]);
strncat(out, " hit", sz - l);
strncat(out, " miss", sz - l);
return repsep_snprintf(bf, size, "%-*s", width, out);
/* --sort mem: compare the memory-level bits of the data source; missing
 * mem_info is treated as PERF_MEM_LVL_NA (guard lines elided). */
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
data_src_l = left->mem_info->data_src;
data_src_l.mem_lvl = PERF_MEM_LVL_NA;
data_src_r = right->mem_info->data_src;
data_src_r.mem_lvl = PERF_MEM_LVL_NA;
return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
/* Human-readable names for each PERF_MEM_LVL_* bit (some entries elided). */
static const char * const mem_lvl[] = {
"Remote RAM (1 hop)",
"Remote RAM (2 hops)",
"Remote Cache (1 hop)",
"Remote Cache (2 hops)",
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
/* Render the memory-level bits as names plus a hit/miss suffix. */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
size_t sz = sizeof(out) - 1; /* -1 for null termination */
u64 m = PERF_MEM_LVL_NA;
m = he->mem_info->data_src.mem_lvl;
hit = m & PERF_MEM_LVL_HIT;
miss = m & PERF_MEM_LVL_MISS;
/* already taken care of */
m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
/* Walk the remaining bits, appending the matching name for each. */
for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
strncat(out, mem_lvl[i], sz - l);
l += strlen(mem_lvl[i]);
strncat(out, " hit", sz - l);
strncat(out, " miss", sz - l);
return repsep_snprintf(bf, size, "%-*s", width, out);
/* --sort snoop: compare the snoop bits of the data source; missing
 * mem_info is treated as PERF_MEM_SNOOP_NA (guard lines elided). */
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_l;
union perf_mem_data_src data_src_r;
data_src_l = left->mem_info->data_src;
data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
data_src_r = right->mem_info->data_src;
data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
/* Human-readable names for each PERF_MEM_SNOOP_* bit (entries elided). */
static const char * const snoop_access[] = {
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
/* Render the snoop bits as a space-separated list of names. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
size_t sz = sizeof(out) - 1; /* -1 for null termination */
u64 m = PERF_MEM_SNOOP_NA;
m = he->mem_info->data_src.mem_snoop;
for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
strncat(out, snoop_access[i], sz - l);
l += strlen(snoop_access[i]);
return repsep_snprintf(bf, size, "%-*s", width, out);
815 static inline u64 cl_address(u64 address)
817 /* return the cacheline of the address */
818 return (address & ~(cacheline_size - 1));
/* --sort dcacheline: group samples touching the same data cacheline.
 * Orders by cpumode, then by the backing mapping's identity (maj/min/
 * ino/generation), then by pid for anonymous userspace maps, and finally
 * by cacheline address.  Several lines are elided from this excerpt. */
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *l_map, *r_map;
if (!left->mem_info) return -1;
if (!right->mem_info) return 1;
/* group event types together */
if (left->cpumode > right->cpumode) return -1;
if (left->cpumode < right->cpumode) return 1;
l_map = left->mem_info->daddr.map;
r_map = right->mem_info->daddr.map;
/* if both are NULL, jump to sort on al_addr instead */
if (!l_map && !r_map)
if (!l_map) return -1;
if (!r_map) return 1;
/* Identify the backing file: device, inode, and generation. */
if (l_map->maj > r_map->maj) return -1;
if (l_map->maj < r_map->maj) return 1;
if (l_map->min > r_map->min) return -1;
if (l_map->min < r_map->min) return 1;
if (l_map->ino > r_map->ino) return -1;
if (l_map->ino < r_map->ino) return 1;
if (l_map->ino_generation > r_map->ino_generation) return -1;
if (l_map->ino_generation < r_map->ino_generation) return 1;
/*
 * Addresses with no major/minor numbers are assumed to be
 * anonymous in userspace.  Sort those on pid then address.
 *
 * The kernel and non-zero major/minor mapped areas are
 * assumed to be unity mapped.  Sort those on address.
 */
if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
(!(l_map->flags & MAP_SHARED)) &&
!l_map->maj && !l_map->min && !l_map->ino &&
!l_map->ino_generation) {
/* userspace anonymous */
if (left->thread->pid_ > right->thread->pid_) return -1;
if (left->thread->pid_ < right->thread->pid_) return 1;
/* al_addr does all the right addr - start + offset calculations */
l = cl_address(left->mem_info->daddr.al_addr);
r = cl_address(right->mem_info->daddr.al_addr);
if (l > r) return -1;
/* Print the cacheline address/symbol; the branch marking shared data
 * mmaps (the "[s]" level override) has its assignment elided here. */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
struct map *map = NULL;
struct symbol *sym = NULL;
char level = he->level;
addr = cl_address(he->mem_info->daddr.al_addr);
map = he->mem_info->daddr.map;
sym = he->mem_info->daddr.sym;
/* print [s] for shared data mmaps */
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
map && (map->type == MAP__VARIABLE) &&
(map->flags & MAP_SHARED) &&
(map->maj || map->min || map->ino ||
map->ino_generation))
return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
/* Sort-key descriptor for the branch mispredict column. */
struct sort_entry sort_mispredict = {
.se_header = "Branch Mispredicted",
.se_cmp = sort__mispredict_cmp,
.se_snprintf = hist_entry__mispredict_snprintf,
.se_width_idx = HISTC_MISPREDICT,
920 static u64 he_weight(struct hist_entry *he)
922 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
/* --sort local_weight: order by per-entry average weight. */
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
return he_weight(left) - he_weight(right);
static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
/* Sort-key descriptor for the local (average) weight column. */
struct sort_entry sort_local_weight = {
.se_header = "Local Weight",
.se_cmp = sort__local_weight_cmp,
.se_snprintf = hist_entry__local_weight_snprintf,
.se_width_idx = HISTC_LOCAL_WEIGHT,
/* --sort weight: order by total accumulated weight. */
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
return left->stat.weight - right->stat.weight;
static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
/* Sort-key descriptor for the global weight column. */
struct sort_entry sort_global_weight = {
.se_header = "Weight",
.se_cmp = sort__global_weight_cmp,
.se_snprintf = hist_entry__global_weight_snprintf,
.se_width_idx = HISTC_GLOBAL_WEIGHT,
/* Sort-key descriptors for the memory-mode columns. */
struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
.se_snprintf = hist_entry__daddr_snprintf,
.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
struct sort_entry sort_mem_daddr_dso = {
.se_header = "Data Object",
.se_cmp = sort__dso_daddr_cmp,
.se_snprintf = hist_entry__dso_daddr_snprintf,
/* NOTE(review): reuses the data-symbol width index rather than a
 * dedicated DSO one — confirm no HISTC_MEM_DADDR_DSO exists yet. */
.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
struct sort_entry sort_mem_locked = {
.se_header = "Locked",
.se_cmp = sort__locked_cmp,
.se_snprintf = hist_entry__locked_snprintf,
.se_width_idx = HISTC_MEM_LOCKED,
struct sort_entry sort_mem_tlb = {
.se_header = "TLB access",
.se_cmp = sort__tlb_cmp,
.se_snprintf = hist_entry__tlb_snprintf,
.se_width_idx = HISTC_MEM_TLB,
struct sort_entry sort_mem_lvl = {
.se_header = "Memory access",
.se_cmp = sort__lvl_cmp,
.se_snprintf = hist_entry__lvl_snprintf,
.se_width_idx = HISTC_MEM_LVL,
struct sort_entry sort_mem_snoop = {
.se_header = "Snoop",
.se_cmp = sort__snoop_cmp,
.se_snprintf = hist_entry__snoop_snprintf,
.se_width_idx = HISTC_MEM_SNOOP,
struct sort_entry sort_mem_dcacheline = {
.se_header = "Data Cacheline",
.se_cmp = sort__dcacheline_cmp,
.se_snprintf = hist_entry__dcacheline_snprintf,
.se_width_idx = HISTC_MEM_DCACHELINE,
/* --sort abort: group by the transaction-abort flag of the branch. */
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.abort !=
right->branch_info->flags.abort;
/* Print the abort flag; "N/A" without branch info (the lines assigning
 * the abort/no-abort strings are elided here). */
static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.abort)
return repsep_snprintf(bf, size, "%-*s", width, out);
/* Sort-key descriptor for the transaction-abort column. */
struct sort_entry sort_abort = {
.se_header = "Transaction abort",
.se_cmp = sort__abort_cmp,
.se_snprintf = hist_entry__abort_snprintf,
.se_width_idx = HISTC_ABORT,
/* --sort in_tx: group by the in-transaction flag of the branch. */
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.in_tx !=
right->branch_info->flags.in_tx;
/* Print the in-transaction flag; "N/A" without branch info. */
static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.in_tx)
return repsep_snprintf(bf, size, "%-*s", width, out);
/* Sort-key descriptor for the in-transaction column. */
struct sort_entry sort_in_tx = {
.se_header = "Branch in transaction",
.se_cmp = sort__in_tx_cmp,
.se_snprintf = hist_entry__in_tx_snprintf,
.se_width_idx = HISTC_IN_TX,
/* --sort transaction: order by the raw transaction flags word. */
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
return left->transaction - right->transaction;
/* Append @str at @p and return the new end pointer (the copy line is
 * elided from this excerpt). */
static inline char *add_str(char *p, const char *str)
return p + strlen(str);
/* Table mapping PERF_TXN_* flag bits to display names; skip_for_len
 * marks entries excluded from the column-width estimate. */
static struct txbit {
{ PERF_TXN_ELISION, "EL ", 0 },
{ PERF_TXN_TRANSACTION, "TX ", 1 },
{ PERF_TXN_SYNC, "SYNC ", 1 },
{ PERF_TXN_ASYNC, "ASYNC ", 0 },
{ PERF_TXN_RETRY, "RETRY ", 0 },
{ PERF_TXN_CONFLICT, "CON ", 0 },
{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
/* Worst-case display width of the transaction column. */
int hist_entry__transaction_len(void)
for (i = 0; txbits[i].name; i++) {
if (!txbits[i].skip_for_len)
len += strlen(txbits[i].name);
len += 4; /* :XX<space> */
/* Render the transaction flags: one name per set bit, "NEITHER " when
 * neither sync nor async is set, and a ":<code>" abort-reason suffix. */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
u64 t = he->transaction;
for (i = 0; txbits[i].name; i++)
if (txbits[i].flag & t)
p = add_str(p, txbits[i].name);
if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
p = add_str(p, "NEITHER ");
if (t & PERF_TXN_ABORT_MASK) {
sprintf(p, ":%" PRIx64,
(t & PERF_TXN_ABORT_MASK) >>
PERF_TXN_ABORT_SHIFT);
return repsep_snprintf(bf, size, "%-*s", width, buf);
/* Sort-key descriptor for the transaction column. */
struct sort_entry sort_transaction = {
.se_header = "Transaction ",
.se_cmp = sort__transaction_cmp,
.se_snprintf = hist_entry__transaction_snprintf,
.se_width_idx = HISTC_TRANSACTION,
/* Maps a --sort token name to its sort_entry implementation. */
struct sort_dimension {
struct sort_entry *entry;
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
/* Sort keys available in every mode. */
static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_SRCLINE, "srcline", sort_srcline),
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
/* Branch-stack mode keys; indices rebased from __SORT_BRANCH_STACK. */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
DIM(SORT_IN_TX, "in_tx", sort_in_tx),
DIM(SORT_ABORT, "abort", sort_abort),
/* Memory-mode keys; indices rebased from __SORT_MEMORY_MODE. */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
static struct sort_dimension memory_sort_dimensions[] = {
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
/* Maps an output-field token name to its perf_hpp format descriptor. */
struct hpp_dimension {
struct perf_hpp_fmt *fmt;
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
/* Overhead/period style output fields usable as sort keys. */
static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__OVERHEAD, "overhead"),
DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
DIM(PERF_HPP__SAMPLES, "sample"),
DIM(PERF_HPP__PERIOD, "period"),
/* Adapter wrapping a sort_entry in the perf_hpp_fmt interface. */
struct hpp_sort_entry {
struct perf_hpp_fmt hpp;
struct sort_entry *se;
/* Two hpp formats are the same sort key iff they wrap the same entry. */
bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
struct hpp_sort_entry *hse_a;
struct hpp_sort_entry *hse_b;
if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
hse_a = container_of(a, struct hpp_sort_entry, hpp);
hse_b = container_of(b, struct hpp_sort_entry, hpp);
return hse_a->se == hse_b->se;
/* Reset a sort column's width to at least its header-name length. */
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
struct hpp_sort_entry *hse;
if (!perf_hpp__is_sort_entry(fmt))
hse = container_of(fmt, struct hpp_sort_entry, hpp);
hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
/* hpp callback: print the column header, honoring a user-set width. */
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel)
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
/* hpp callback: report the column's display width.
 * NOTE(review): the final return of len is elided from this excerpt. */
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct perf_evsel *evsel)
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
/* hpp callback: format one hist entry via the wrapped se_snprintf. */
static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
len = hists__col_len(he->hists, hse->se->se_width_idx);
return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
/* Allocate an hpp adapter for a sort dimension, wiring the hpp callbacks
 * to the entry's cmp/collapse/sort/snprintf hooks. */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
struct hpp_sort_entry *hse;
hse = malloc(sizeof(*hse));
pr_err("Memory allocation failed\n");
hse->se = sd->entry;
hse->hpp.name = sd->entry->se_header;
hse->hpp.header = __sort__hpp_header;
hse->hpp.width = __sort__hpp_width;
hse->hpp.entry = __sort__hpp_entry;
hse->hpp.color = NULL;
/* Fall back: collapse defaults to cmp, sort defaults to collapse. */
hse->hpp.cmp = sd->entry->se_cmp;
hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);
hse->hpp.elide = false;
hse->hpp.user_len = 0;
/* A format is a wrapped sort entry iff it uses our header callback. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
return format->header == __sort__hpp_header;
/* Allocate and register a sort-dimension hpp as a sort field. */
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
perf_hpp__register_sort_field(&hse->hpp);
/* Allocate and register a sort-dimension hpp as an output column. */
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
perf_hpp__column_register(&hse->hpp);
/* Register a sort dimension; flags the collapse pass when needed. */
static int __sort_dimension__add(struct sort_dimension *sd)
if (__sort_dimension__add_hpp_sort(sd) < 0)
if (sd->entry->se_collapse)
sort__need_collapse = 1;
/* Register an hpp (overhead/period style) dimension as a sort field. */
static int __hpp_dimension__add(struct hpp_dimension *hd)
perf_hpp__register_sort_field(hd->fmt);
/* Register a sort dimension as an output column. */
static int __sort_dimension__add_output(struct sort_dimension *sd)
if (__sort_dimension__add_hpp_output(sd) < 0)
/* Register an hpp dimension as an output column. */
static int __hpp_dimension__add_output(struct hpp_dimension *hd)
perf_hpp__column_register(hd->fmt);
/*
 * Resolve one --sort token against the four dimension tables (common,
 * hpp, branch-stack, memory) and register the match.  Matching is by
 * case-insensitive prefix.  Also compiles the parent regex and sets the
 * sort__has_* flags as side effects.
 */
int sort_dimension__add(const char *tok)
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
if (strncasecmp(tok, sd->name, strlen(tok)))
/* "parent" needs the parent_pattern regex compiled up front. */
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
regerror(ret, &parent_regex, err, sizeof(err));
pr_err("Invalid regex: %s\n%s", parent_pattern, err);
sort__has_parent = 1;
} else if (sd->entry == &sort_sym) {
} else if (sd->entry == &sort_dso) {
return __sort_dimension__add(sd);
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
struct hpp_dimension *hd = &hpp_sort_dimensions[i];
if (strncasecmp(tok, hd->name, strlen(tok)))
return __hpp_dimension__add(hd);
/* Branch-stack keys are only valid in branch mode. */
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
struct sort_dimension *sd = &bstack_sort_dimensions[i];
if (strncasecmp(tok, sd->name, strlen(tok)))
if (sort__mode != SORT_MODE__BRANCH)
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
__sort_dimension__add(sd);
/* Memory keys are only valid in memory mode. */
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
struct sort_dimension *sd = &memory_sort_dimensions[i];
if (strncasecmp(tok, sd->name, strlen(tok)))
if (sort__mode != SORT_MODE__MEMORY)
if (sd->entry == &sort_mem_daddr_sym)
__sort_dimension__add(sd);
/*
 * Return the built-in default sort-key string for the current
 * sort__mode.  The table is indexed by enum sort_mode, so its entry
 * order must match the enum; BUG_ON guards against an out-of-range
 * mode.  (The NORMAL entry, default_sort_order, is on an elided line.)
 */
1472 static const char *get_default_sort_order(void)
1474 const char *default_sort_orders[] = {
1476 default_branch_sort_order,
1477 default_mem_sort_order,
1478 default_top_sort_order,
1479 default_diff_sort_order,
1482 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
1484 return default_sort_orders[sort__mode];
/*
 * Handle a '+'-prefixed --sort value: replace sort_order with
 * "<default keys>,<user keys>".  A strict (no '+') or absent order is
 * left untouched.  A bare "+" is rejected.  The asprintf'd string is
 * intentionally never freed -- sort_order is compared against the
 * default strings throughout the rest of the code.
 */
1487 static int setup_sort_order(void)
1489 char *new_sort_order;
1492 * Append '+'-prefixed sort order to the default sort
1495 if (!sort_order || is_strict_order(sort_order))
1498 if (sort_order[1] == '\0') {
1499 error("Invalid --sort key: `+'");
1504 * We allocate new sort_order string, but we never free it,
1505 * because it's checked over the rest of the code.
1507 if (asprintf(&new_sort_order, "%s,%s",
1508 get_default_sort_order(), sort_order + 1) < 0) {
1509 error("Not enough memory to set up --sort");
1513 sort_order = new_sort_order;
/*
 * Core of --sort processing: resolve the effective key string (user
 * order, possibly '+'-merged; else the mode default -- unless the user
 * gave a strict --fields order, in which case no default sort keys are
 * added), then tokenize it on ", " and add each key via
 * sort_dimension__add().  -EINVAL means a key that exists but is
 * invalid in this mode; -ESRCH means an unknown key.
 * NOTE(review): early returns, the free(str) cleanup and closing
 * braces are elided in this extract.
 */
1517 static int __setup_sorting(void)
1519 char *tmp, *tok, *str;
1520 const char *sort_keys;
1523 ret = setup_sort_order();
1527 sort_keys = sort_order;
1528 if (sort_keys == NULL) {
1529 if (is_strict_order(field_order)) {
1531 * If user specified field order but no sort order,
1532 * we'll honor it and not add default sort orders.
1537 sort_keys = get_default_sort_order();
/* Work on a writable copy: strtok_r mutates the string in place. */
1540 str = strdup(sort_keys);
1542 error("Not enough memory to setup sort keys");
1546 for (tok = strtok_r(str, ", ", &tmp);
1547 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1548 ret = sort_dimension__add(tok);
1549 if (ret == -EINVAL) {
1550 error("Invalid --sort key: `%s'", tok);
1552 } else if (ret == -ESRCH) {
1553 error("Unknown --sort key: `%s'", tok);
/*
 * Set the elide flag on every registered sort-entry format whose width
 * index matches @idx.  Non-sort-entry formats are skipped.
 * NOTE(review): the fmt->elide assignment inside the matching branch
 * is on an elided line in this extract.
 */
1562 void perf_hpp__set_elide(int idx, bool elide)
1564 struct perf_hpp_fmt *fmt;
1565 struct hpp_sort_entry *hse;
1567 perf_hpp__for_each_format(fmt) {
1568 if (!perf_hpp__is_sort_entry(fmt))
1571 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1572 if (hse->se->se_width_idx == idx) {
/*
 * A column can be elided when its filter list contains exactly one
 * entry (every row would show the same value).  In that case print a
 * "# <name>: <value>" header to @fp and return true; otherwise false
 * (the returns are on elided lines in this extract).
 */
1579 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
1581 if (list && strlist__nr_entries(list) == 1) {
1583 fprintf(fp, "# %s: %s\n", list_name,
1584 strlist__entry(list, 0)->s);
/*
 * Decide whether the column identified by width index @idx should be
 * elided, based on the single-entry filter lists in symbol_conf.
 * Plain sym/dso/comm columns are checked first (the switch cases for
 * them sit on elided lines); the branch-specific from/to columns are
 * only considered in SORT_MODE__BRANCH.
 */
1590 static bool get_elide(int idx, FILE *output)
1594 return __get_elide(symbol_conf.sym_list, "symbol", output);
1596 return __get_elide(symbol_conf.dso_list, "dso", output);
1598 return __get_elide(symbol_conf.comm_list, "comm", output);
1603 if (sort__mode != SORT_MODE__BRANCH)
1607 case HISTC_SYMBOL_FROM:
1608 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
1609 case HISTC_SYMBOL_TO:
1610 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
1611 case HISTC_DSO_FROM:
1612 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
1614 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
/*
 * Compute the elide flag for every registered sort-entry column, then
 * -- if that left ALL sort entries elided -- walk the formats again to
 * un-elide them so something is still displayed (the check and the
 * resetting assignments are on elided lines in this extract).
 */
1622 void sort__setup_elide(FILE *output)
1624 struct perf_hpp_fmt *fmt;
1625 struct hpp_sort_entry *hse;
1627 perf_hpp__for_each_format(fmt) {
1628 if (!perf_hpp__is_sort_entry(fmt))
1631 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1632 fmt->elide = get_elide(hse->se->se_width_idx, output);
1636 * It makes no sense to elide all of sort entries.
1637 * Just revert them to show up again.
1639 perf_hpp__for_each_format(fmt) {
1640 if (!perf_hpp__is_sort_entry(fmt))
1647 perf_hpp__for_each_format(fmt) {
1648 if (!perf_hpp__is_sort_entry(fmt))
/*
 * Parse one --fields token and register the matching dimension as an
 * output column.  Mirrors sort_dimension__add(): case-insensitive
 * prefix match against the common, hpp, branch-stack and memory
 * tables, in that order.  Unlike the sort path, no mode checks or
 * side-effect flags are visible here.
 * NOTE(review): `continue` lines, closing braces and the final -ESRCH
 * return are elided in this extract.
 */
1655 static int output_field_add(char *tok)
1659 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1660 struct sort_dimension *sd = &common_sort_dimensions[i];
1662 if (strncasecmp(tok, sd->name, strlen(tok)))
1665 return __sort_dimension__add_output(sd);
1668 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1669 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1671 if (strncasecmp(tok, hd->name, strlen(tok)))
1674 return __hpp_dimension__add_output(hd);
1677 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1678 struct sort_dimension *sd = &bstack_sort_dimensions[i];
1680 if (strncasecmp(tok, sd->name, strlen(tok)))
1683 return __sort_dimension__add_output(sd);
1686 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1687 struct sort_dimension *sd = &memory_sort_dimensions[i];
1689 if (strncasecmp(tok, sd->name, strlen(tok)))
1692 return __sort_dimension__add_output(sd);
/*
 * Clear the 'taken' flag on every dimension in all four tables so the
 * sort/output setup can be re-run from scratch (e.g. after
 * reset_output_field()).
 */
1698 static void reset_dimensions(void)
1702 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
1703 common_sort_dimensions[i].taken = 0;
1705 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
1706 hpp_sort_dimensions[i].taken = 0;
1708 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
1709 bstack_sort_dimensions[i].taken = 0;
1711 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
1712 memory_sort_dimensions[i].taken = 0;
/*
 * An order string is "strict" when it is non-NULL and does not start
 * with '+' (a leading '+' means "append to the defaults").
 */
1715 bool is_strict_order(const char *order)
1717 return order && (*order != '+');
/*
 * Core of --fields processing: duplicate field_order, skip the leading
 * '+' for non-strict orders (rejecting a bare "+"), then tokenize on
 * ", " and add each token as an output column via output_field_add().
 * -EINVAL / -ESRCH are reported like in __setup_sorting.
 * NOTE(review): the strp++ skip of '+', early returns, free(str)
 * cleanup and closing braces are on elided lines in this extract.
 */
1720 static int __setup_output_field(void)
1722 char *tmp, *tok, *str, *strp;
1725 if (field_order == NULL)
/* strp may advance past a leading '+'; str keeps the base for free(). */
1730 strp = str = strdup(field_order);
1732 error("Not enough memory to setup output fields");
1736 if (!is_strict_order(field_order))
1739 if (!strlen(strp)) {
1740 error("Invalid --fields key: `+'");
1744 for (tok = strtok_r(strp, ", ", &tmp);
1745 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1746 ret = output_field_add(tok);
1747 if (ret == -EINVAL) {
1748 error("Invalid --fields key: `%s'", tok);
1750 } else if (ret == -ESRCH) {
1751 error("Unknown --fields key: `%s'", tok);
/*
 * Public entry point: set up sort keys, then output fields, then cross
 * -copy between them.  If the user overrode the parent pattern, force
 * the "parent" sort key in.  perf diff skips the default hpp output
 * fields (the call made for other modes sits on an elided line).
 * Finally sort keys are copied to output fields and vice versa so both
 * lists are complete.
 */
1761 int setup_sorting(void)
1765 err = __setup_sorting();
1769 if (parent_pattern != default_parent_pattern) {
1770 err = sort_dimension__add("parent");
1778 * perf diff doesn't use default hpp output fields.
1780 if (sort__mode != SORT_MODE__DIFF)
1783 err = __setup_output_field();
1787 /* copy sort keys to output fields */
1788 perf_hpp__setup_output_field();
1789 /* and then copy output fields to sort keys */
1790 perf_hpp__append_sort_keys();
/*
 * Undo setup_sorting(): clear the global sort flags and reset the hpp
 * output-field list so sorting can be configured again.
 * NOTE(review): additional flag resets and the reset_dimensions() call
 * likely sit on the elided lines (1799-1805) -- confirm upstream.
 */
1795 void reset_output_field(void)
1797 sort__need_collapse = 0;
1798 sort__has_parent = 0;
1806 perf_hpp__reset_output_field();