/* Default regex for --sort parent: syscall entries and page-fault handlers. */
9 const char default_parent_pattern[] = "^sys_|^do_page_fault";
10 const char *parent_pattern = default_parent_pattern;
/* Per-mode default sort key lists (comma-separated dimension names). */
11 const char default_sort_order[] = "comm,dso,symbol";
12 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
13 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
14 const char default_top_sort_order[] = "dso,symbol";
15 const char default_diff_sort_order[] = "dso,symbol";
/* User-requested orderings; NULL means "use the mode's default order". */
16 const char *sort_order;
17 const char *field_order;
/* Compiled --ignore-callees regex and a flag saying whether it is active. */
18 regex_t ignore_callees_regex;
19 int have_ignore_callees = 0;
/* Flags recorded while parsing sort keys; consulted by other perf code. */
20 int sort__need_collapse = 0;
21 int sort__has_parent = 0;
22 int sort__has_sym = 0;
23 int sort__has_dso = 0;
24 enum sort_mode sort__mode = SORT_MODE__NORMAL;
/*
 * snprintf() wrapper: after formatting, if symbol_conf.field_sep is set,
 * the formatted output is scanned for the separator character (strchr
 * below).  NOTE(review): interior lines of this function are missing from
 * this listing (va_start/va_end, the separator-replacement loop, return);
 * behavior beyond what is visible cannot be confirmed here.
 */
27 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
33 n = vsnprintf(bf, size, fmt, ap);
34 if (symbol_conf.field_sep && n > 0) {
38 sep = strchr(sep, *symbol_conf.field_sep);
/* Three-way compare for possibly-NULL pointers (body not shown in this view). */
51 static int64_t cmp_null(const void *l, const void *r)
/* --sort pid: order hist entries by thread tid. */
64 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
66 return right->thread->tid - left->thread->tid;
/* Render "  tid:comm" into bf, left-justifying comm in the remaining width. */
69 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
70 size_t size, unsigned int width)
72 const char *comm = thread__comm_str(he->thread);
/* Clamp to at least 7 so the subtraction of the "%5d:" prefix cannot underflow. */
74 width = max(7U, width) - 6;
75 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
76 width, width, comm ?: "");
/* Descriptor for the "pid" sort dimension. */
79 struct sort_entry sort_thread = {
80 .se_header = " Pid:Command",
81 .se_cmp = sort__thread_cmp,
82 .se_snprintf = hist_entry__thread_snprintf,
83 .se_width_idx = HISTC_THREAD,
/* --sort comm: fast compare by interned-comm pointer identity. */
89 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
91 /* Compare the addr that should be unique among comm */
92 return comm__str(right->comm) - comm__str(left->comm);
/* Collapse phase uses the same pointer compare as se_cmp. */
96 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
98 /* Compare the addr that should be unique among comm */
99 return comm__str(right->comm) - comm__str(left->comm);
/* Final output sort compares the comm strings lexically. */
103 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
105 return strcmp(comm__str(right->comm), comm__str(left->comm));
/* Render the comm name left-justified in the column width. */
108 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
109 size_t size, unsigned int width)
111 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
/* Descriptor for the "comm" sort dimension. */
114 struct sort_entry sort_comm = {
115 .se_header = "Command",
116 .se_cmp = sort__comm_cmp,
117 .se_collapse = sort__comm_collapse,
118 .se_sort = sort__comm_sort,
119 .se_snprintf = hist_entry__comm_snprintf,
120 .se_width_idx = HISTC_COMM,
/*
 * Compare two maps by their DSO name.  NULL DSOs sort via cmp_null();
 * note the argument order is swapped there to preserve the overall
 * right-vs-left convention used by the callers.
 */
125 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
127 struct dso *dso_l = map_l ? map_l->dso : NULL;
128 struct dso *dso_r = map_r ? map_r->dso : NULL;
129 const char *dso_name_l, *dso_name_r;
131 if (!dso_l || !dso_r)
132 return cmp_null(dso_r, dso_l);
/* NOTE(review): the branch condition choosing long vs short names (verbose
 * mode) falls in lines missing from this listing. */
135 dso_name_l = dso_l->long_name;
136 dso_name_r = dso_r->long_name;
138 dso_name_l = dso_l->short_name;
139 dso_name_r = dso_r->short_name;
142 return strcmp(dso_name_l, dso_name_r);
/* --sort dso: compare hist entries by their mapped DSO. */
146 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
148 return _sort__dso_cmp(right->ms.map, left->ms.map);
/* Print the DSO name (short unless verbose), or "[unknown]" when unmapped. */
151 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
152 size_t size, unsigned int width)
154 if (map && map->dso) {
155 const char *dso_name = !verbose ? map->dso->short_name :
157 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
160 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
163 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
164 size_t size, unsigned int width)
166 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
/* Descriptor for the "dso" sort dimension. */
169 struct sort_entry sort_dso = {
170 .se_header = "Shared Object",
171 .se_cmp = sort__dso_cmp,
172 .se_snprintf = hist_entry__dso_snprintf,
173 .se_width_idx = HISTC_DSO,
/* Raw address compare; the unsigned subtraction wraps, cast gives a sign. */
178 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
180 return (int64_t)(right_ip - left_ip);
/* Compare two symbols; NULLs ordered via cmp_null().  NOTE(review): the
 * lines computing ip_l/ip_r are missing from this listing. */
183 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
187 if (!sym_l || !sym_r)
188 return cmp_null(sym_l, sym_r);
196 return (int64_t)(ip_r - ip_l);
/* --sort symbol: fall back to raw ip when neither entry has a symbol. */
200 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
204 if (!left->ms.sym && !right->ms.sym)
205 return _sort__addr_cmp(left->ip, right->ip);
208 * comparing symbol address alone is not enough since it's a
209 * relative address within a dso.
/* If "dso" is not already a sort key, tie-break by DSO first. */
211 if (!sort__has_dso) {
212 ret = sort__dso_cmp(left, right);
217 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
/* Final output sort: lexical by symbol name; NULLs via cmp_null(). */
221 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
223 if (!left->ms.sym || !right->ms.sym)
224 return cmp_null(left->ms.sym, right->ms.sym);
226 return strcmp(right->ms.sym->name, left->ms.sym->name);
/*
 * Shared renderer for symbol columns: in verbose mode prints the address,
 * symtab-origin char and privilege level, then the symbol name (with a
 * +0x offset for variable maps) or the raw address when unresolved.
 */
229 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
230 u64 ip, char level, char *bf, size_t size,
236 char o = map ? dso__symtab_origin(map->dso) : '!';
237 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
238 BITS_PER_LONG / 4 + 2, ip, o);
241 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
243 if (map->type == MAP__VARIABLE) {
244 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
245 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
246 ip - map->unmap_ip(map, sym->start));
247 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
250 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
255 size_t len = BITS_PER_LONG / 4;
256 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
258 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
268 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
269 size_t size, unsigned int width)
271 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
272 he->level, bf, size, width);
/* Descriptor for the "symbol" sort dimension. */
275 struct sort_entry sort_sym = {
276 .se_header = "Symbol",
277 .se_cmp = sort__sym_cmp,
278 .se_sort = sort__sym_sort,
279 .se_snprintf = hist_entry__sym_snprintf,
280 .se_width_idx = HISTC_SYMBOL,
/*
 * --sort srcline: lazily resolve and cache each entry's "file:line" string,
 * then compare lexically.  NOTE(review): the branch selecting between
 * SRCLINE_UNKNOWN and get_srcline() (presumably a NULL-map check) falls in
 * lines missing from this listing — confirm the map is checked before the
 * map->dso dereference.
 */
286 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
288 if (!left->srcline) {
290 left->srcline = SRCLINE_UNKNOWN;
292 struct map *map = left->ms.map;
293 left->srcline = get_srcline(map->dso,
294 map__rip_2objdump(map, left->ip));
297 if (!right->srcline) {
299 right->srcline = SRCLINE_UNKNOWN;
301 struct map *map = right->ms.map;
302 right->srcline = get_srcline(map->dso,
303 map__rip_2objdump(map, right->ip));
306 return strcmp(right->srcline, left->srcline);
309 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
310 size_t size, unsigned int width)
312 return repsep_snprintf(bf, size, "%*.*-s", width, width, he->srcline);
315 struct sort_entry sort_srcline = {
316 .se_header = "Source:Line",
317 .se_cmp = sort__srcline_cmp,
318 .se_snprintf = hist_entry__srcline_snprintf,
319 .se_width_idx = HISTC_SRCLINE,
/* --sort parent: compare by the resolved parent symbol name; NULLs first. */
325 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
327 struct symbol *sym_l = left->parent;
328 struct symbol *sym_r = right->parent;
330 if (!sym_l || !sym_r)
331 return cmp_null(sym_l, sym_r);
333 return strcmp(sym_r->name, sym_l->name);
/* Print the parent symbol name, or "[other]" when none was matched. */
336 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
337 size_t size, unsigned int width)
339 return repsep_snprintf(bf, size, "%-*.*s", width, width,
340 he->parent ? he->parent->name : "[other]");
/* Descriptor for the "parent" sort dimension. */
343 struct sort_entry sort_parent = {
344 .se_header = "Parent symbol",
345 .se_cmp = sort__parent_cmp,
346 .se_snprintf = hist_entry__parent_snprintf,
347 .se_width_idx = HISTC_PARENT,
/* --sort cpu: numeric compare on the sampled CPU number. */
353 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
355 return right->cpu - left->cpu;
358 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
359 size_t size, unsigned int width)
361 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
/* Descriptor for the "cpu" sort dimension (header line not in this view). */
364 struct sort_entry sort_cpu = {
366 .se_cmp = sort__cpu_cmp,
367 .se_snprintf = hist_entry__cpu_snprintf,
368 .se_width_idx = HISTC_CPU,
371 /* sort keys for branch stacks */
/* Branch source DSO: reuse the map/DSO comparator on the "from" side. */
374 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
376 return _sort__dso_cmp(left->branch_info->from.map,
377 right->branch_info->from.map);
380 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
381 size_t size, unsigned int width)
383 return _hist_entry__dso_snprintf(he->branch_info->from.map,
/* Branch target DSO: same comparator on the "to" side. */
388 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
390 return _sort__dso_cmp(left->branch_info->to.map,
391 right->branch_info->to.map);
394 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
395 size_t size, unsigned int width)
397 return _hist_entry__dso_snprintf(he->branch_info->to.map,
/* Branch source symbol; falls back to raw addresses when unresolved. */
402 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
404 struct addr_map_symbol *from_l = &left->branch_info->from;
405 struct addr_map_symbol *from_r = &right->branch_info->from;
407 if (!from_l->sym && !from_r->sym)
408 return _sort__addr_cmp(from_l->addr, from_r->addr);
410 return _sort__sym_cmp(from_l->sym, from_r->sym);
/* Branch target symbol; same fallback scheme. */
414 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
416 struct addr_map_symbol *to_l = &left->branch_info->to;
417 struct addr_map_symbol *to_r = &right->branch_info->to;
419 if (!to_l->sym && !to_r->sym)
420 return _sort__addr_cmp(to_l->addr, to_r->addr);
422 return _sort__sym_cmp(to_l->sym, to_r->sym);
/* Render the branch source/target via the shared symbol renderer. */
425 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
426 size_t size, unsigned int width)
428 struct addr_map_symbol *from = &he->branch_info->from;
429 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
430 he->level, bf, size, width);
434 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
435 size_t size, unsigned int width)
437 struct addr_map_symbol *to = &he->branch_info->to;
438 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
439 he->level, bf, size, width);
/* Descriptors for the branch-stack dso/symbol dimensions. */
443 struct sort_entry sort_dso_from = {
444 .se_header = "Source Shared Object",
445 .se_cmp = sort__dso_from_cmp,
446 .se_snprintf = hist_entry__dso_from_snprintf,
447 .se_width_idx = HISTC_DSO_FROM,
450 struct sort_entry sort_dso_to = {
451 .se_header = "Target Shared Object",
452 .se_cmp = sort__dso_to_cmp,
453 .se_snprintf = hist_entry__dso_to_snprintf,
454 .se_width_idx = HISTC_DSO_TO,
457 struct sort_entry sort_sym_from = {
458 .se_header = "Source Symbol",
459 .se_cmp = sort__sym_from_cmp,
460 .se_snprintf = hist_entry__sym_from_snprintf,
461 .se_width_idx = HISTC_SYMBOL_FROM,
464 struct sort_entry sort_sym_to = {
465 .se_header = "Target Symbol",
466 .se_cmp = sort__sym_to_cmp,
467 .se_snprintf = hist_entry__sym_to_snprintf,
468 .se_width_idx = HISTC_SYMBOL_TO,
/* Mispredict ordering from the differing mispred/predicted flag bits;
 * the combining return statement falls in lines missing from this view. */
472 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
474 const unsigned char mp = left->branch_info->flags.mispred !=
475 right->branch_info->flags.mispred;
476 const unsigned char p = left->branch_info->flags.predicted !=
477 right->branch_info->flags.predicted;
/* Print "N/A" unless one of the predicted/mispred flags selects another
 * label (the assignments are in lines missing from this view). */
482 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
483 size_t size, unsigned int width){
484 static const char *out = "N/A";
486 if (he->branch_info->flags.predicted)
488 else if (he->branch_info->flags.mispred)
491 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
494 /* --sort daddr_sym */
/* Compare data-access addresses; entries without mem_info compare as 0. */
496 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
498 uint64_t l = 0, r = 0;
501 l = left->mem_info->daddr.addr;
503 r = right->mem_info->daddr.addr;
505 return (int64_t)(r - l);
/* Render the data symbol via the shared symbol renderer. */
508 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
509 size_t size, unsigned int width)
512 struct map *map = NULL;
513 struct symbol *sym = NULL;
516 addr = he->mem_info->daddr.addr;
517 map = he->mem_info->daddr.map;
518 sym = he->mem_info->daddr.sym;
520 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
/* Compare the DSOs backing the data-access maps. */
525 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
527 struct map *map_l = NULL;
528 struct map *map_r = NULL;
531 map_l = left->mem_info->daddr.map;
533 map_r = right->mem_info->daddr.map;
535 return _sort__dso_cmp(map_l, map_r);
538 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
539 size_t size, unsigned int width)
541 struct map *map = NULL;
544 map = he->mem_info->daddr.map;
546 return _hist_entry__dso_snprintf(map, bf, size, width);
/* Compare lock attribute of the memory access; missing info maps to _NA. */
550 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
552 union perf_mem_data_src data_src_l;
553 union perf_mem_data_src data_src_r;
556 data_src_l = left->mem_info->data_src;
558 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
561 data_src_r = right->mem_info->data_src;
563 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
565 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
/* Print the lock state; label assignments fall in lines missing here. */
568 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
569 size_t size, unsigned int width)
572 u64 mask = PERF_MEM_LOCK_NA;
575 mask = he->mem_info->data_src.mem_lock;
577 if (mask & PERF_MEM_LOCK_NA)
579 else if (mask & PERF_MEM_LOCK_LOCKED)
584 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Compare the dTLB attribute bits; missing info maps to _NA. */
588 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
590 union perf_mem_data_src data_src_l;
591 union perf_mem_data_src data_src_r;
594 data_src_l = left->mem_info->data_src;
596 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
599 data_src_r = right->mem_info->data_src;
601 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
603 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
/* Names for each dTLB attribute bit, indexed by bit position. */
606 static const char * const tlb_access[] = {
615 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
/* Build a space-separated list of set TLB bits, then append hit/miss. */
617 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
618 size_t size, unsigned int width)
621 size_t sz = sizeof(out) - 1; /* -1 for null termination */
623 u64 m = PERF_MEM_TLB_NA;
629 m = he->mem_info->data_src.mem_dtlb;
631 hit = m & PERF_MEM_TLB_HIT;
632 miss = m & PERF_MEM_TLB_MISS;
634 /* already taken care of */
635 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
/* Walk the remaining bits low-to-high, appending each set bit's name. */
637 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
644 strncat(out, tlb_access[i], sz - l);
645 l += strlen(tlb_access[i]);
650 strncat(out, " hit", sz - l);
652 strncat(out, " miss", sz - l);
654 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Compare the memory-hierarchy level bits; missing info maps to _NA. */
658 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
660 union perf_mem_data_src data_src_l;
661 union perf_mem_data_src data_src_r;
664 data_src_l = left->mem_info->data_src;
666 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
669 data_src_r = right->mem_info->data_src;
671 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
673 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
/* Names for each memory-level bit, indexed by bit position (list is
 * partially elided in this view). */
676 static const char * const mem_lvl[] = {
685 "Remote RAM (1 hop)",
686 "Remote RAM (2 hops)",
687 "Remote Cache (1 hop)",
688 "Remote Cache (2 hops)",
692 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
/* Same accumulation scheme as the TLB renderer: names, then hit/miss. */
694 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
695 size_t size, unsigned int width)
698 size_t sz = sizeof(out) - 1; /* -1 for null termination */
700 u64 m = PERF_MEM_LVL_NA;
704 m = he->mem_info->data_src.mem_lvl;
708 hit = m & PERF_MEM_LVL_HIT;
709 miss = m & PERF_MEM_LVL_MISS;
711 /* already taken care of */
712 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
714 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
721 strncat(out, mem_lvl[i], sz - l);
722 l += strlen(mem_lvl[i]);
727 strncat(out, " hit", sz - l);
729 strncat(out, " miss", sz - l);
731 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Compare the snoop attribute bits; missing info maps to _NA. */
735 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
737 union perf_mem_data_src data_src_l;
738 union perf_mem_data_src data_src_r;
741 data_src_l = left->mem_info->data_src;
743 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
746 data_src_r = right->mem_info->data_src;
748 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
750 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
/* Names for each snoop bit, indexed by bit position. */
753 static const char * const snoop_access[] = {
760 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
/* Append the name of every set snoop bit, low-to-high. */
762 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
763 size_t size, unsigned int width)
766 size_t sz = sizeof(out) - 1; /* -1 for null termination */
768 u64 m = PERF_MEM_SNOOP_NA;
773 m = he->mem_info->data_src.mem_snoop;
775 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
782 strncat(out, snoop_access[i], sz - l);
783 l += strlen(snoop_access[i]);
789 return repsep_snprintf(bf, size, "%-*s", width, out);
/* Round an address down to its cacheline (cacheline_size is a global;
 * assumed to be a power of two — the mask below requires it). */
792 static inline u64 cl_address(u64 address)
794 /* return the cacheline of the address */
795 return (address & ~(cacheline_size - 1));
/*
 * --sort dcacheline: group samples hitting the same cacheline.  Orders by
 * cpumode, then by the identity of the backing mapping (maj/min/ino/
 * generation), then pid for anonymous user mappings, finally by cacheline.
 */
799 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
802 struct map *l_map, *r_map;
804 if (!left->mem_info) return -1;
805 if (!right->mem_info) return 1;
807 /* group event types together */
808 if (left->cpumode > right->cpumode) return -1;
809 if (left->cpumode < right->cpumode) return 1;
811 l_map = left->mem_info->daddr.map;
812 r_map = right->mem_info->daddr.map;
814 /* if both are NULL, jump to sort on al_addr instead */
815 if (!l_map && !r_map)
818 if (!l_map) return -1;
819 if (!r_map) return 1;
821 if (l_map->maj > r_map->maj) return -1;
822 if (l_map->maj < r_map->maj) return 1;
824 if (l_map->min > r_map->min) return -1;
825 if (l_map->min < r_map->min) return 1;
827 if (l_map->ino > r_map->ino) return -1;
828 if (l_map->ino < r_map->ino) return 1;
830 if (l_map->ino_generation > r_map->ino_generation) return -1;
831 if (l_map->ino_generation < r_map->ino_generation) return 1;
834 * Addresses with no major/minor numbers are assumed to be
835 * anonymous in userspace. Sort those on pid then address.
837 * The kernel and non-zero major/minor mapped areas are
838 * assumed to be unity mapped. Sort those on address.
841 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
842 (!(l_map->flags & MAP_SHARED)) &&
843 !l_map->maj && !l_map->min && !l_map->ino &&
844 !l_map->ino_generation) {
845 /* userspace anonymous */
847 if (left->thread->pid_ > right->thread->pid_) return -1;
848 if (left->thread->pid_ < right->thread->pid_) return 1;
852 /* al_addr does all the right addr - start + offset calculations */
853 l = cl_address(left->mem_info->daddr.al_addr);
854 r = cl_address(right->mem_info->daddr.al_addr);
856 if (l > r) return -1;
/* Render the cacheline address; variable shared mappings get a marker
 * (the marker assignment falls in lines missing from this view). */
862 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
863 size_t size, unsigned int width)
867 struct map *map = NULL;
868 struct symbol *sym = NULL;
869 char level = he->level;
872 addr = cl_address(he->mem_info->daddr.al_addr);
873 map = he->mem_info->daddr.map;
874 sym = he->mem_info->daddr.sym;
876 /* print [s] for shared data mmaps */
877 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
878 map && (map->type == MAP__VARIABLE) &&
879 (map->flags & MAP_SHARED) &&
880 (map->maj || map->min || map->ino ||
881 map->ino_generation))
886 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
/* Descriptor tables tying comparators/renderers to sort dimensions. */
890 struct sort_entry sort_mispredict = {
891 .se_header = "Branch Mispredicted",
892 .se_cmp = sort__mispredict_cmp,
893 .se_snprintf = hist_entry__mispredict_snprintf,
894 .se_width_idx = HISTC_MISPREDICT,
/* Per-event average weight (guards against divide-by-zero). */
897 static u64 he_weight(struct hist_entry *he)
899 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
903 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
905 return he_weight(left) - he_weight(right);
908 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
909 size_t size, unsigned int width)
911 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
914 struct sort_entry sort_local_weight = {
915 .se_header = "Local Weight",
916 .se_cmp = sort__local_weight_cmp,
917 .se_snprintf = hist_entry__local_weight_snprintf,
918 .se_width_idx = HISTC_LOCAL_WEIGHT,
/* Total (non-averaged) weight. */
922 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
924 return left->stat.weight - right->stat.weight;
927 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
928 size_t size, unsigned int width)
930 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
933 struct sort_entry sort_global_weight = {
934 .se_header = "Weight",
935 .se_cmp = sort__global_weight_cmp,
936 .se_snprintf = hist_entry__global_weight_snprintf,
937 .se_width_idx = HISTC_GLOBAL_WEIGHT,
940 struct sort_entry sort_mem_daddr_sym = {
941 .se_header = "Data Symbol",
942 .se_cmp = sort__daddr_cmp,
943 .se_snprintf = hist_entry__daddr_snprintf,
944 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
947 struct sort_entry sort_mem_daddr_dso = {
948 .se_header = "Data Object",
949 .se_cmp = sort__dso_daddr_cmp,
950 .se_snprintf = hist_entry__dso_daddr_snprintf,
/* NOTE(review): this DSO column reuses the symbol column's width index
 * (HISTC_MEM_DADDR_SYMBOL); verify a dedicated index isn't intended. */
951 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
954 struct sort_entry sort_mem_locked = {
955 .se_header = "Locked",
956 .se_cmp = sort__locked_cmp,
957 .se_snprintf = hist_entry__locked_snprintf,
958 .se_width_idx = HISTC_MEM_LOCKED,
961 struct sort_entry sort_mem_tlb = {
962 .se_header = "TLB access",
963 .se_cmp = sort__tlb_cmp,
964 .se_snprintf = hist_entry__tlb_snprintf,
965 .se_width_idx = HISTC_MEM_TLB,
968 struct sort_entry sort_mem_lvl = {
969 .se_header = "Memory access",
970 .se_cmp = sort__lvl_cmp,
971 .se_snprintf = hist_entry__lvl_snprintf,
972 .se_width_idx = HISTC_MEM_LVL,
975 struct sort_entry sort_mem_snoop = {
976 .se_header = "Snoop",
977 .se_cmp = sort__snoop_cmp,
978 .se_snprintf = hist_entry__snoop_snprintf,
979 .se_width_idx = HISTC_MEM_SNOOP,
982 struct sort_entry sort_mem_dcacheline = {
983 .se_header = "Data Cacheline",
984 .se_cmp = sort__dcacheline_cmp,
985 .se_snprintf = hist_entry__dcacheline_snprintf,
986 .se_width_idx = HISTC_MEM_DCACHELINE,
/* Boolean compare: groups entries by whether the branch aborted a tx. */
990 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
992 return left->branch_info->flags.abort !=
993 right->branch_info->flags.abort;
/* "." unless the abort flag is set (flag label set in an elided line). */
996 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
997 size_t size, unsigned int width)
999 static const char *out = ".";
1001 if (he->branch_info->flags.abort)
1003 return repsep_snprintf(bf, size, "%-*s", width, out);
1006 struct sort_entry sort_abort = {
1007 .se_header = "Transaction abort",
1008 .se_cmp = sort__abort_cmp,
1009 .se_snprintf = hist_entry__abort_snprintf,
1010 .se_width_idx = HISTC_ABORT,
/* Boolean compare on the in-transaction flag. */
1014 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1016 return left->branch_info->flags.in_tx !=
1017 right->branch_info->flags.in_tx;
1020 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1021 size_t size, unsigned int width)
1023 static const char *out = ".";
1025 if (he->branch_info->flags.in_tx)
1028 return repsep_snprintf(bf, size, "%-*s", width, out);
1031 struct sort_entry sort_in_tx = {
1032 .se_header = "Branch in transaction",
1033 .se_cmp = sort__in_tx_cmp,
1034 .se_snprintf = hist_entry__in_tx_snprintf,
1035 .se_width_idx = HISTC_IN_TX,
/* Numeric compare on the raw transaction flags word. */
1039 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1041 return left->transaction - right->transaction;
/* Copy str at p and return the new end pointer (copy line is elided). */
1044 static inline char *add_str(char *p, const char *str)
1047 return p + strlen(str);
/* Flag-bit -> display name table; skip_for_len excludes mutually-exclusive
 * entries from the worst-case column width computed below. */
1050 static struct txbit {
1055 { PERF_TXN_ELISION, "EL ", 0 },
1056 { PERF_TXN_TRANSACTION, "TX ", 1 },
1057 { PERF_TXN_SYNC, "SYNC ", 1 },
1058 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1059 { PERF_TXN_RETRY, "RETRY ", 0 },
1060 { PERF_TXN_CONFLICT, "CON ", 0 },
1061 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1062 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
/* Worst-case width of the transaction column. */
1066 int hist_entry__transaction_len(void)
1071 for (i = 0; txbits[i].name; i++) {
1072 if (!txbits[i].skip_for_len)
1073 len += strlen(txbits[i].name);
1075 len += 4; /* :XX<space> */
/* Build the flag-name string, then append the abort code if present. */
1079 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1080 size_t size, unsigned int width)
1082 u64 t = he->transaction;
1088 for (i = 0; txbits[i].name; i++)
1089 if (txbits[i].flag & t)
1090 p = add_str(p, txbits[i].name);
1091 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1092 p = add_str(p, "NEITHER ");
1093 if (t & PERF_TXN_ABORT_MASK) {
1094 sprintf(p, ":%" PRIx64,
1095 (t & PERF_TXN_ABORT_MASK) >>
1096 PERF_TXN_ABORT_SHIFT);
1100 return repsep_snprintf(bf, size, "%-*s", width, buf);
1103 struct sort_entry sort_transaction = {
1104 .se_header = "Transaction ",
1105 .se_cmp = sort__transaction_cmp,
1106 .se_snprintf = hist_entry__transaction_snprintf,
1107 .se_width_idx = HISTC_TRANSACTION,
/* Maps a --sort key name to its sort_entry descriptor. */
1110 struct sort_dimension {
1112 struct sort_entry *entry;
1116 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
/* Dimensions available in every sort mode. */
1118 static struct sort_dimension common_sort_dimensions[] = {
1119 DIM(SORT_PID, "pid", sort_thread),
1120 DIM(SORT_COMM, "comm", sort_comm),
1121 DIM(SORT_DSO, "dso", sort_dso),
1122 DIM(SORT_SYM, "symbol", sort_sym),
1123 DIM(SORT_PARENT, "parent", sort_parent),
1124 DIM(SORT_CPU, "cpu", sort_cpu),
1125 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1126 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1127 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1128 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
/* Branch-stack dimensions; indices offset by __SORT_BRANCH_STACK. */
1133 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1135 static struct sort_dimension bstack_sort_dimensions[] = {
1136 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1137 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1138 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1139 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1140 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1141 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1142 DIM(SORT_ABORT, "abort", sort_abort),
/* Memory-mode dimensions; indices offset by __SORT_MEMORY_MODE. */
1147 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1149 static struct sort_dimension memory_sort_dimensions[] = {
1150 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1151 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1152 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1153 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1154 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1155 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1156 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
/* Output-field (hpp) dimensions: name -> perf_hpp format. */
1161 struct hpp_dimension {
1163 struct perf_hpp_fmt *fmt;
1167 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1169 static struct hpp_dimension hpp_sort_dimensions[] = {
1170 DIM(PERF_HPP__OVERHEAD, "overhead"),
1171 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1172 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1173 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1174 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1175 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1176 DIM(PERF_HPP__SAMPLES, "sample"),
1177 DIM(PERF_HPP__PERIOD, "period"),
/* Adapter embedding a perf_hpp_fmt so a sort_entry can act as an hpp column. */
1182 struct hpp_sort_entry {
1183 struct perf_hpp_fmt hpp;
1184 struct sort_entry *se;
/* Two formats are "the same" iff both wrap the identical sort_entry. */
1187 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1189 struct hpp_sort_entry *hse_a;
1190 struct hpp_sort_entry *hse_b;
1192 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1195 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1196 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1198 return hse_a->se == hse_b->se;
/* Reset a sort column's width to at least its header length. */
1201 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1203 struct hpp_sort_entry *hse;
1205 if (!perf_hpp__is_sort_entry(fmt))
1208 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1209 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
/* hpp callback: print the column header, honoring user width overrides. */
1212 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1213 struct perf_evsel *evsel)
1215 struct hpp_sort_entry *hse;
1216 size_t len = fmt->user_len;
1218 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1221 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1223 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
/* hpp callback: report the column width. */
1226 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1227 struct perf_hpp *hpp __maybe_unused,
1228 struct perf_evsel *evsel)
1230 struct hpp_sort_entry *hse;
1231 size_t len = fmt->user_len;
1233 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1236 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
/* hpp callback: delegate entry rendering to the wrapped se_snprintf. */
1241 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1242 struct hist_entry *he)
1244 struct hpp_sort_entry *hse;
1245 size_t len = fmt->user_len;
1247 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1250 len = hists__col_len(he->hists, hse->se->se_width_idx);
1252 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
/* Allocate and wire an hpp adapter around a sort dimension. */
1255 static struct hpp_sort_entry *
1256 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1258 struct hpp_sort_entry *hse;
1260 hse = malloc(sizeof(*hse));
1262 pr_err("Memory allocation failed\n");
1266 hse->se = sd->entry;
1267 hse->hpp.name = sd->entry->se_header;
1268 hse->hpp.header = __sort__hpp_header;
1269 hse->hpp.width = __sort__hpp_width;
1270 hse->hpp.entry = __sort__hpp_entry;
1271 hse->hpp.color = NULL;
/* Fall back: collapse defaults to cmp, sort defaults to collapse. */
1273 hse->hpp.cmp = sd->entry->se_cmp;
1274 hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
1275 hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
1277 INIT_LIST_HEAD(&hse->hpp.list);
1278 INIT_LIST_HEAD(&hse->hpp.sort_list);
1279 hse->hpp.elide = false;
1281 hse->hpp.user_len = 0;
/* A format is a sort-entry adapter iff it uses our header callback. */
1286 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1288 return format->header == __sort__hpp_header;
/* Register an adapter as a sort field. */
1291 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1293 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1298 perf_hpp__register_sort_field(&hse->hpp);
/* Register an adapter as an output column. */
1302 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1304 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1309 perf_hpp__column_register(&hse->hpp);
/* Add a sort dimension; note that collapse support flips a global flag. */
1313 static int __sort_dimension__add(struct sort_dimension *sd)
1318 if (__sort_dimension__add_hpp_sort(sd) < 0)
1321 if (sd->entry->se_collapse)
1322 sort__need_collapse = 1;
1329 static int __hpp_dimension__add(struct hpp_dimension *hd)
1334 perf_hpp__register_sort_field(hd->fmt);
1339 static int __sort_dimension__add_output(struct sort_dimension *sd)
1344 if (__sort_dimension__add_hpp_output(sd) < 0)
1351 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
1356 perf_hpp__column_register(hd->fmt);
/*
 * Resolve one --sort token against the common, hpp, branch-stack and
 * memory dimension tables (in that order) and register it.  Matching is
 * prefix-based and case-insensitive (strncasecmp with the token length).
 */
1361 int sort_dimension__add(const char *tok)
1365 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1366 struct sort_dimension *sd = &common_sort_dimensions[i];
1368 if (strncasecmp(tok, sd->name, strlen(tok)))
/* "parent" requires compiling the parent-pattern regex up front. */
1371 if (sd->entry == &sort_parent) {
1372 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
1376 regerror(ret, &parent_regex, err, sizeof(err));
1377 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
1380 sort__has_parent = 1;
1381 } else if (sd->entry == &sort_sym) {
1383 } else if (sd->entry == &sort_dso) {
1387 return __sort_dimension__add(sd);
1390 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1391 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1393 if (strncasecmp(tok, hd->name, strlen(tok)))
1396 return __hpp_dimension__add(hd);
/* Branch-stack keys are valid only in branch sort mode. */
1399 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1400 struct sort_dimension *sd = &bstack_sort_dimensions[i];
1402 if (strncasecmp(tok, sd->name, strlen(tok)))
1405 if (sort__mode != SORT_MODE__BRANCH)
1408 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
1411 __sort_dimension__add(sd);
/* Memory keys are valid only in memory sort mode. */
1415 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1416 struct sort_dimension *sd = &memory_sort_dimensions[i];
1418 if (strncasecmp(tok, sd->name, strlen(tok)))
1421 if (sort__mode != SORT_MODE__MEMORY)
1424 if (sd->entry == &sort_mem_daddr_sym)
1427 __sort_dimension__add(sd);
1434 static const char *get_default_sort_order(void)
1436 const char *default_sort_orders[] = {
1438 default_branch_sort_order,
1439 default_mem_sort_order,
1440 default_top_sort_order,
1441 default_diff_sort_order,
1444 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
1446 return default_sort_orders[sort__mode];
/*
 * Handle a '+'-prefixed --sort value: "+key" means "default keys plus
 * key", so build "default,key" and make that the effective sort_order.
 * A strict (no leading '+') or absent sort_order is left untouched.
 */
1449 static int setup_sort_order(void)
1451 	char *new_sort_order;
1454 	 * Append '+'-prefixed sort order to the default sort
1457 	if (!sort_order || is_strict_order(sort_order))
	/* a bare "+" carries no keys to append — reject it */
1460 	if (sort_order[1] == '\0') {
1461 		error("Invalid --sort key: `+'");
1466 	 * We allocate new sort_order string, but we never free it,
1467 	 * because it's checked over the rest of the code.
1469 	if (asprintf(&new_sort_order, "%s,%s",
1470 		     get_default_sort_order(), sort_order + 1) < 0) {
1471 		error("Not enough memory to set up --sort");
1475 	sort_order = new_sort_order;
/*
 * Tokenize the effective sort order (comma/space separated) and register
 * each key via sort_dimension__add().  Falls back to the mode's default
 * keys when no --sort was given, unless a strict --fields order was
 * specified, in which case no default sort keys are added.
 */
1479 static int __setup_sorting(void)
1481 	char *tmp, *tok, *str;
1482 	const char *sort_keys;
	/* expand a '+'-prefixed sort_order first */
1485 	ret = setup_sort_order();
1489 	sort_keys = sort_order;
1490 	if (sort_keys == NULL) {
1491 		if (is_strict_order(field_order)) {
1493 			 * If user specified field order but no sort order,
1494 			 * we'll honor it and not add default sort orders.
1499 		sort_keys = get_default_sort_order();
	/* strtok_r mutates its input, so work on a private copy */
1502 	str = strdup(sort_keys);
1504 		error("Not enough memory to setup sort keys");
1508 	for (tok = strtok_r(str, ", ", &tmp);
1509 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
1510 		ret = sort_dimension__add(tok);
		/* -EINVAL: key known but unusable here; -ESRCH: no such key */
1511 		if (ret == -EINVAL) {
1512 			error("Invalid --sort key: `%s'", tok);
1514 		} else if (ret == -ESRCH) {
1515 			error("Unknown --sort key: `%s'", tok);
/*
 * Set the elide flag on the output format whose sort entry has width
 * index @idx.  Elided columns are registered but not displayed.
 */
1524 void perf_hpp__set_elide(int idx, bool elide)
1526 	struct perf_hpp_fmt *fmt;
1527 	struct hpp_sort_entry *hse;
1529 	perf_hpp__for_each_format(fmt) {
		/* only sort-entry backed formats carry a se_width_idx */
1530 		if (!perf_hpp__is_sort_entry(fmt))
1533 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
1534 		if (hse->se->se_width_idx == idx) {
/*
 * A column can be elided when the user filtered on exactly one value
 * for it (the list has a single entry): every row would show the same
 * string.  Print that value once as a "# name: value" header instead.
 * Returns true when the column should be elided.
 */
1541 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
1543 	if (list && strlist__nr_entries(list) == 1) {
1545 		fprintf(fp, "# %s: %s\n", list_name,
1546 			strlist__entry(list, 0)->s);
/*
 * Decide whether the column identified by width index @idx should be
 * elided, based on the corresponding filter list in symbol_conf.
 * NOTE(review): the case labels for the first three returns are elided
 * in this excerpt — presumably HISTC_SYMBOL / HISTC_DSO / HISTC_COMM,
 * matching the lists queried; confirm against the full file.
 */
1552 static bool get_elide(int idx, FILE *output)
1556 		return __get_elide(symbol_conf.sym_list, "symbol", output);
1558 		return __get_elide(symbol_conf.dso_list, "dso", output);
1560 		return __get_elide(symbol_conf.comm_list, "comm", output);
	/* from/to variants only exist for branch-stack sorting */
1565 	if (sort__mode != SORT_MODE__BRANCH)
1569 	case HISTC_SYMBOL_FROM:
1570 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
1571 	case HISTC_SYMBOL_TO:
1572 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
1573 	case HISTC_DSO_FROM:
1574 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
1576 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
/*
 * Compute the elide flag for every sort-entry output column, printing a
 * "# name: value" header to @output for each elided one.  If that would
 * elide *all* sort columns, un-elide them again so something is shown.
 */
1584 void sort__setup_elide(FILE *output)
1586 	struct perf_hpp_fmt *fmt;
1587 	struct hpp_sort_entry *hse;
	/* first pass: mark each sort column per its filter list */
1589 	perf_hpp__for_each_format(fmt) {
1590 		if (!perf_hpp__is_sort_entry(fmt))
1593 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
1594 		fmt->elide = get_elide(hse->se->se_width_idx, output);
1598 	 * It makes no sense to elide all of sort entries.
1599 	 * Just revert them to show up again.
	/* second pass: check whether any sort column survived */
1601 	perf_hpp__for_each_format(fmt) {
1602 		if (!perf_hpp__is_sort_entry(fmt))
	/* third pass: all were elided — revert every sort column */
1609 	perf_hpp__for_each_format(fmt) {
1610 		if (!perf_hpp__is_sort_entry(fmt))
/*
 * Parse one --fields token and register the matching dimension as an
 * *output* column (not a sort key).  Same table search order and
 * case-insensitive prefix matching as sort_dimension__add().
 * NOTE(review): interior lines (braces, continue paths, mode checks)
 * are elided in this excerpt.
 */
1617 static int output_field_add(char *tok)
1621 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
1622 		struct sort_dimension *sd = &common_sort_dimensions[i];
1624 		if (strncasecmp(tok, sd->name, strlen(tok)))
1627 		return __sort_dimension__add_output(sd);
1630 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
1631 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
1633 		if (strncasecmp(tok, hd->name, strlen(tok)))
1636 		return __hpp_dimension__add_output(hd);
1639 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
1640 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
1642 		if (strncasecmp(tok, sd->name, strlen(tok)))
1645 		return __sort_dimension__add_output(sd);
1648 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
1649 		struct sort_dimension *sd = &memory_sort_dimensions[i];
1651 		if (strncasecmp(tok, sd->name, strlen(tok)))
1654 		return __sort_dimension__add_output(sd);
/*
 * Clear the "taken" flag on every dimension in all four tables so the
 * sort/output setup can be re-run from scratch.
 */
1660 static void reset_dimensions(void)
1664 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
1665 		common_sort_dimensions[i].taken = 0;
1667 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
1668 		hpp_sort_dimensions[i].taken = 0;
1670 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
1671 		bstack_sort_dimensions[i].taken = 0;
1673 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
1674 		memory_sort_dimensions[i].taken = 0;
/*
 * An order string is "strict" when it replaces the defaults outright,
 * i.e. it is non-NULL and does not start with the '+' append marker.
 */
1677 bool is_strict_order(const char *order)
1679 	return order && (*order != '+');
/*
 * Tokenize --fields (comma/space separated) and register each token as
 * an output column via output_field_add().  A leading '+' means "append
 * to defaults"; @strp is advanced past it so a bare "+" is rejected.
 */
1682 static int __setup_output_field(void)
1684 	char *tmp, *tok, *str, *strp;
	/* no --fields given: nothing to do */
1687 	if (field_order == NULL)
	/* work on a mutable copy; str keeps the base for freeing */
1692 	strp = str = strdup(field_order);
1694 		error("Not enough memory to setup output fields");
	/* skip the '+' append marker, if any */
1698 	if (!is_strict_order(field_order))
1701 	if (!strlen(strp)) {
1702 		error("Invalid --fields key: `+'");
1706 	for (tok = strtok_r(strp, ", ", &tmp);
1707 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
1708 		ret = output_field_add(tok);
1709 		if (ret == -EINVAL) {
1710 			error("Invalid --fields key: `%s'", tok);
1712 		} else if (ret == -ESRCH) {
1713 			error("Unknown --fields key: `%s'", tok);
/*
 * Top-level entry point: set up sort keys, then output fields, then
 * reconcile the two lists.  Returns 0 on success, negative on error.
 */
1723 int setup_sorting(void)
1727 	err = __setup_sorting();
	/* a non-default --parent pattern implies sorting by parent */
1731 	if (parent_pattern != default_parent_pattern) {
1732 		err = sort_dimension__add("parent");
1740 	 * perf diff doesn't use default hpp output fields.
1742 	if (sort__mode != SORT_MODE__DIFF)
1745 	err = __setup_output_field();
1749 	/* copy sort keys to output fields */
1750 	perf_hpp__setup_output_field();
1751 	/* and then copy output fields to sort keys */
1752 	perf_hpp__append_sort_keys();
/*
 * Undo setup_sorting(): clear the global sort state flags and drop the
 * registered output fields so sorting can be configured again.
 * NOTE(review): the tail of this function lies past this excerpt.
 */
1757 void reset_output_field(void)
1759 	sort__need_collapse = 0;
1760 	sort__has_parent = 0;
1768 	perf_hpp__reset_output_field();