4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
10 #include "util/util.h"
12 #include "util/color.h"
13 #include "util/list.h"
14 #include "util/cache.h"
15 #include "util/rbtree.h"
16 #include "util/symbol.h"
17 #include "util/string.h"
20 #include "util/header.h"
22 #include "util/parse-options.h"
23 #include "util/parse-events.h"
/* Default input file; matches what 'perf record' writes. */
29 static char const *input_name = "perf.data";
/* Optional vmlinux image for kernel symbol resolution (-k/--vmlinux). */
30 static char *vmlinux = NULL;
/* Default histogram sort keys; overridden by -s/--sort. */
32 static char default_sort_order[] = "comm,dso";
33 static char *sort_order = default_sort_order;
/* Which privilege domains (kernel/user/hypervisor) are displayed. */
36 static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
/* Debug printers: active only with -D/--dump-raw-trace (dprintf/cdprintf)
 * or -v/--verbose (eprintf).  'color' must be in scope for cdprintf. */
38 static int dump_trace = 0;
39 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
40 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
43 #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
/* -P/--full-paths: don't shorten mmap pathnames relative to the cwd. */
45 static int full_paths;
/* mmap the input file in page_size * mmap_window sized windows. */
47 static unsigned long page_size;
48 static unsigned long mmap_window = 32;
/* Regex selecting which callchain entries count as a "parent" symbol. */
50 static char default_parent_pattern[] = "^sys_|^do_page_fault";
51 static char *parent_pattern = default_parent_pattern;
52 static regex_t parent_regex;
/* When sorting by parent, hide entries with no parent match by default. */
54 static int exclude_other = 1;
/*
 * On-disk event records (fragmentary view): every record type begins
 * with a common perf_event_header, followed by type-specific payload.
 * NOTE(review): intervening struct members are not visible in this
 * chunk — the full layouts live in the original file.
 */
57 struct perf_event_header header;
60 unsigned char __more_data[];
69 struct perf_event_header header;
74 char filename[PATH_MAX];
78 struct perf_event_header header;
84 struct perf_event_header header;
89 struct perf_event_header header;
96 struct perf_event_header header;
/* One union so a raw buffer can be viewed as any record type after
 * inspecting the common header. */
101 typedef union event_union {
102 struct perf_event_header header;
104 struct mmap_event mmap;
105 struct comm_event comm;
106 struct fork_event fork;
107 struct period_event period;
108 struct lost_event lost;
/* Global list of all DSOs seen, plus the two special ones. */
111 static LIST_HEAD(dsos);
112 static struct dso *kernel_dso;
113 static struct dso *vdso;
/* Append a DSO to the global 'dsos' list. */
115 static void dsos__add(struct dso *dso)
117 list_add_tail(&dso->node, &dsos);
/* Linear search of the dsos list by exact name match. */
120 static struct dso *dsos__find(const char *name)
124 list_for_each_entry(pos, &dsos, node)
125 if (strcmp(pos->name, name) == 0)
/* Find an existing DSO by name, or create one and load its symbols.
 * Failures to open / find symbols are reported via eprintf (verbose
 * only); error paths are not fully visible in this chunk. */
130 static struct dso *dsos__findnew(const char *name)
132 struct dso *dso = dsos__find(name);
138 dso = dso__new(name, 0);
142 nr = dso__load(dso, NULL, verbose);
144 eprintf("Failed to open: %s\n", name);
148 eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
/* Dump every known DSO (and, via dso__fprintf, its symbols). */
159 static void dsos__fprintf(FILE *fp)
163 list_for_each_entry(pos, &dsos, node)
164 dso__fprintf(pos, fp);
/* The [vdso] DSO has no symbols of its own: vsyscall addresses are
 * resolved against the kernel image instead (the 'dso' arg is unused). */
167 static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
169 return dso__find_symbol(kernel_dso, ip);
/* Create the [kernel] and [vdso] pseudo-DSOs and load kernel symbols
 * from 'vmlinux' (may be NULL, falling back to other sources). */
172 static int load_kernel(void)
176 kernel_dso = dso__new("[kernel]", 0);
180 err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
182 dso__delete(kernel_dso);
185 dsos__add(kernel_dso);
187 vdso = dso__new("[vdso]", 0);
191 vdso->find_symbol = vdso__find_symbol;
/* Cached current working directory, filled in by __cmd_report(); used
 * by strcommon() to shorten mmap pathnames unless --full-paths. */
198 static char __cwd[PATH_MAX];
199 static char *cwd = __cwd;
202 static int strcommon(const char *pathname)
206 while (pathname[n] == cwd[n] && n < cwdlen)
/* struct map (fragmentary view): one mmap'ed region of a thread's
 * address space, linked on thread->maps, with a per-map ip translator. */
213 struct list_head node;
217 u64 (*map_ip)(struct map *, u64);
/* Translate a sampled virtual address into a DSO-relative offset. */
221 static u64 map__map_ip(struct map *map, u64 ip)
223 return ip - map->start + map->pgoff;
/* vdso/anon mappings use the ip unchanged; body not visible here. */
226 static u64 vdso__map_ip(struct map *map, u64 ip)
/*
 * An mmap record whose filename is the literal "//anon" denotes an
 * anonymous mapping with no backing file.
 */
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon");
}
/* Build a struct map from a PERF_EVENT_MMAP record: shorten the path
 * relative to cwd (unless --full-paths), special-case anonymous and
 * JIT (/tmp/perf-PID.map) mappings, and attach the backing DSO. */
236 static struct map *map__new(struct mmap_event *event)
238 struct map *self = malloc(sizeof(*self));
241 const char *filename = event->filename;
242 char newfilename[PATH_MAX];
/* Drop the cwd prefix and rewrite as "./relative/path". */
246 int n = strcommon(filename);
249 snprintf(newfilename, sizeof(newfilename),
250 ".%s", filename + n);
251 filename = newfilename;
255 anon = is_anon_memory(filename);
/* Anonymous maps may be JIT output: look for /tmp/perf-<pid>.map. */
258 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
259 filename = newfilename;
262 self->start = event->start;
263 self->end = event->start + event->len;
264 self->pgoff = event->pgoff;
266 self->dso = dsos__findnew(filename);
267 if (self->dso == NULL)
/* vdso/anon addresses are used as-is; file maps are offset-adjusted. */
270 if (self->dso == vdso || anon)
271 self->map_ip = vdso__map_ip;
273 self->map_ip = map__map_ip;
/* Shallow copy of a map, used when inheriting a parent's maps on fork. */
281 static struct map *map__clone(struct map *self)
283 struct map *map = malloc(sizeof(*self));
288 memcpy(map, self, sizeof(*self));
/* Overlap test between two maps; body partially visible (l/r are
 * normalized so l starts first before the range check). */
293 static int map__overlap(struct map *l, struct map *r)
295 if (l->start > r->start) {
301 if (l->end > r->start)
/* One-line dump of a map's range, pgoff and DSO name. */
307 static size_t map__fprintf(struct map *self, FILE *fp)
309 return fprintf(fp, " %Lx-%Lx %Lx %s\n",
310 self->start, self->end, self->pgoff, self->dso->name);
/* struct thread (fragmentary view): rb-tree node keyed by pid, plus
 * the list of this thread's maps. */
315 struct rb_node rb_node;
316 struct list_head maps;
/* Allocate a thread with a placeholder ":<pid>" comm (replaced later
 * by thread__set_comm when a COMM record arrives). */
321 static struct thread *thread__new(pid_t pid)
323 struct thread *self = malloc(sizeof(*self));
327 self->comm = malloc(32);
329 snprintf(self->comm, 32, ":%d", self->pid);
330 INIT_LIST_HEAD(&self->maps);
336 static int thread__set_comm(struct thread *self, const char *comm)
340 self->comm = strdup(comm);
341 return self->comm ? 0 : -ENOMEM;
/* Dump one thread and all of its maps. */
344 static size_t thread__fprintf(struct thread *self, FILE *fp)
347 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
349 list_for_each_entry(pos, &self->maps, node)
350 ret += map__fprintf(pos, fp);
/* All threads, in an rb-tree keyed by pid, plus a 1-entry lookup cache. */
356 static struct rb_root threads;
357 static struct thread *last_match;
/* Find the thread for @pid, creating and inserting it if missing. */
359 static struct thread *threads__findnew(pid_t pid)
361 struct rb_node **p = &threads.rb_node;
362 struct rb_node *parent = NULL;
366 * Front-end cache - PID lookups come in blocks,
367 * so most of the time we don't have to look up
370 if (last_match && last_match->pid == pid)
375 th = rb_entry(parent, struct thread, rb_node);
377 if (th->pid == pid) {
388 th = thread__new(pid);
390 rb_link_node(&th->rb_node, parent, p);
391 rb_insert_color(&th->rb_node, &threads);
/* Insert a map, trimming or dropping existing maps it overlaps so the
 * per-thread map list stays disjoint. */
398 static void thread__insert_map(struct thread *self, struct map *map)
400 struct map *pos, *tmp;
402 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
403 if (map__overlap(pos, map)) {
405 printf("overlapping maps:\n");
406 map__fprintf(map, stdout);
407 map__fprintf(pos, stdout);
/* Shrink the old map from whichever side the new map covers. */
410 if (map->start <= pos->start && map->end > pos->start)
411 pos->start = map->end;
413 if (map->end >= pos->end && map->start < pos->end)
414 pos->end = map->start;
417 printf("after collision:\n");
418 map__fprintf(pos, stdout);
/* Fully covered: the old map is now empty, remove it. */
421 if (pos->start >= pos->end) {
422 list_del_init(&pos->node);
428 list_add_tail(&map->node, &self->maps);
/* On fork: child inherits the parent's comm and a clone of each map. */
431 static int thread__fork(struct thread *self, struct thread *parent)
437 self->comm = strdup(parent->comm);
441 list_for_each_entry(map, &parent->maps, node) {
442 struct map *new = map__clone(map);
445 thread__insert_map(self, new);
/* Linear scan for the map containing @ip (inclusive bounds). */
451 static struct map *thread__find_map(struct thread *self, u64 ip)
458 list_for_each_entry(pos, &self->maps, node)
459 if (ip >= pos->start && ip <= pos->end)
/* Dump every known thread in pid order. */
465 static size_t threads__fprintf(FILE *fp)
470 for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
471 struct thread *pos = rb_entry(nd, struct thread, rb_node);
473 ret += thread__fprintf(pos, fp);
480 * histogram, sorted on item, collects counts
483 static struct rb_root hist;
/* struct hist_entry (fragmentary view): one histogram bucket. */
486 struct rb_node rb_node;
488 struct thread *thread;
492 struct symbol *parent;
500 * configurable sorting bits
/* A sort dimension: compare for tree order, optionally a separate
 * collapse comparator, and a column printer. */
504 struct list_head list;
508 int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
509 int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
510 size_t (*print)(FILE *fp, struct hist_entry *);
/* Order entries where one side's key is NULL; body not visible here. */
513 static int64_t cmp_null(void *l, void *r)
/* --sort pid */
526 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
528 return right->thread->pid - left->thread->pid;
532 sort__thread_print(FILE *fp, struct hist_entry *self)
534 return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
537 static struct sort_entry sort_thread = {
538 .header = " Command: Pid",
539 .cmp = sort__thread_cmp,
540 .print = sort__thread_print,
/* --sort comm: cmp uses the cheap pid compare for tree insertion; the
 * collapse pass then merges buckets whose comm strings match. */
546 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
548 return right->thread->pid - left->thread->pid;
552 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
554 char *comm_l = left->thread->comm;
555 char *comm_r = right->thread->comm;
557 if (!comm_l || !comm_r)
558 return cmp_null(comm_l, comm_r);
560 return strcmp(comm_l, comm_r);
564 sort__comm_print(FILE *fp, struct hist_entry *self)
566 return fprintf(fp, "%16s", self->thread->comm);
569 static struct sort_entry sort_comm = {
570 .header = " Command",
571 .cmp = sort__comm_cmp,
572 .collapse = sort__comm_collapse,
573 .print = sort__comm_print,
/* --sort dso: compare/print by DSO name, falling back to the raw ip
 * when the DSO is unknown. */
579 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
581 struct dso *dso_l = left->dso;
582 struct dso *dso_r = right->dso;
584 if (!dso_l || !dso_r)
585 return cmp_null(dso_l, dso_r);
587 return strcmp(dso_l->name, dso_r->name);
591 sort__dso_print(FILE *fp, struct hist_entry *self)
594 return fprintf(fp, "%-25s", self->dso->name);
596 return fprintf(fp, "%016llx ", (u64)self->ip);
599 static struct sort_entry sort_dso = {
600 .header = "Shared Object ",
601 .cmp = sort__dso_cmp,
602 .print = sort__dso_print,
/* --sort symbol: compare by symbol start address (or raw ip). */
608 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
612 if (left->sym == right->sym)
615 ip_l = left->sym ? left->sym->start : left->ip;
616 ip_r = right->sym ? right->sym->start : right->ip;
618 return (int64_t)(ip_r - ip_l);
622 sort__sym_print(FILE *fp, struct hist_entry *self)
627 ret += fprintf(fp, "%#018llx ", (u64)self->ip);
/* '[k]' marks kernel symbols, '[.]' userspace. */
630 ret += fprintf(fp, "[%c] %s",
631 self->dso == kernel_dso ? 'k' : '.', self->sym->name);
633 ret += fprintf(fp, "%#016llx", (u64)self->ip);
639 static struct sort_entry sort_sym = {
641 .cmp = sort__sym_cmp,
642 .print = sort__sym_print,
/* --sort parent: compare by the matched parent symbol's name. */
648 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
650 struct symbol *sym_l = left->parent;
651 struct symbol *sym_r = right->parent;
653 if (!sym_l || !sym_r)
654 return cmp_null(sym_l, sym_r);
656 return strcmp(sym_l->name, sym_r->name);
660 sort__parent_print(FILE *fp, struct hist_entry *self)
664 ret += fprintf(fp, "%-20s", self->parent ? self->parent->name : "[other]");
669 static struct sort_entry sort_parent = {
670 .header = "Parent symbol ",
671 .cmp = sort__parent_cmp,
672 .print = sort__parent_print,
/* Set by sort_dimension__add when a chosen key needs the collapse
 * pass, or when "parent" was selected (enables callchain walking). */
675 static int sort__need_collapse = 0;
676 static int sort__has_parent = 0;
/* Name -> sort_entry table driving --sort parsing. */
678 struct sort_dimension {
680 struct sort_entry *entry;
684 static struct sort_dimension sort_dimensions[] = {
685 { .name = "pid", .entry = &sort_thread, },
686 { .name = "comm", .entry = &sort_comm, },
687 { .name = "dso", .entry = &sort_dso, },
688 { .name = "symbol", .entry = &sort_sym, },
689 { .name = "parent", .entry = &sort_parent, },
/* Active sort dimensions, in the order given on the command line. */
692 static LIST_HEAD(hist_entry__sort_list);
/* Register one --sort key: prefix match against the dimension table,
 * set the collapse/parent flags, compile the parent regex when the
 * "parent" key is chosen, and append to the active list. */
694 static int sort_dimension__add(char *tok)
698 for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
699 struct sort_dimension *sd = &sort_dimensions[i];
/* Prefix match: "sym" selects "symbol", etc. */
704 if (strncasecmp(tok, sd->name, strlen(tok)))
707 if (sd->entry->collapse)
708 sort__need_collapse = 1;
710 if (sd->entry == &sort_parent) {
711 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
715 regerror(ret, &parent_regex, err, sizeof(err));
716 fprintf(stderr, "Invalid regex: %s\n%s",
717 parent_pattern, err);
720 sort__has_parent = 1;
723 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
/* Compare two entries across every active sort dimension in order;
 * first non-zero comparison wins. */
733 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
735 struct sort_entry *se;
738 list_for_each_entry(se, &hist_entry__sort_list, list) {
739 cmp = se->cmp(left, right);
/* Like hist_entry__cmp but preferring each dimension's collapse
 * comparator (e.g. comm string compare) when one exists. */
748 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
750 struct sort_entry *se;
753 list_for_each_entry(se, &hist_entry__sort_list, list) {
754 int64_t (*f)(struct hist_entry *, struct hist_entry *);
756 f = se->collapse ?: se->cmp;
758 cmp = f(left, right);
/* Print one histogram row: colored overhead percentage (or raw count
 * with -v), then each active dimension's column. */
767 hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
769 struct sort_entry *se;
/* With --exclude-other, rows lacking a parent match are suppressed. */
772 if (exclude_other && !self->parent)
776 double percent = self->count * 100.0 / total_samples;
777 char *color = PERF_COLOR_NORMAL;
780 * We color high-overhead entries in red, mid-overhead
781 * entries in green - and keep the low overhead places
784 if (percent >= 5.0) {
785 color = PERF_COLOR_RED;
788 color = PERF_COLOR_GREEN;
791 ret = color_fprintf(fp, color, " %6.2f%%",
792 (self->count * 100.0) / total_samples);
794 ret = fprintf(fp, "%12Ld ", self->count);
796 list_for_each_entry(se, &hist_entry__sort_list, list) {
/* The parent column is redundant when filtering on it. */
797 if (exclude_other && (se == &sort_parent))
801 ret += se->print(fp, self);
804 ret += fprintf(fp, "\n");
/* Map a raw sampled ip to a symbol: find the containing map, translate
 * the address (writing the translated ip and resolved map/dso back
 * through the optional out-parameters), then look the symbol up via
 * the DSO's find_symbol hook. */
813 static struct symbol *
814 resolve_symbol(struct thread *thread, struct map **mapp,
815 struct dso **dsop, u64 *ipp)
817 struct dso *dso = dsop ? *dsop : NULL;
818 struct map *map = mapp ? *mapp : NULL;
830 map = thread__find_map(thread, ip);
835 ip = map->map_ip(map, ip);
840 * If this is outside of all known maps,
841 * and is a negative address, try to look it
842 * up in the kernel dso, as it might be a
843 * vsyscall (which executes in user-mode):
845 if ((long long)ip < 0)
848 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
849 dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
858 return dso->find_symbol(dso, ip);
/* Does this symbol's name match the --parent regex? */
861 static int call__match(struct symbol *sym)
863 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
870 * collect histogram counts
/* Add @count to the bucket matching (thread,map,dso,sym,...) in the
 * hist rb-tree, creating the bucket if needed.  When sorting by
 * parent, walk the callchain to find the first regex-matching caller. */
874 hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
875 struct symbol *sym, u64 ip, struct ip_callchain *chain,
876 char level, u64 count)
878 struct rb_node **p = &hist.rb_node;
879 struct rb_node *parent = NULL;
880 struct hist_entry *he;
881 struct hist_entry entry = {
893 if (sort__has_parent && chain) {
/* Callchain entries >= PERF_CONTEXT_MAX are context markers, not ips. */
894 u64 context = PERF_CONTEXT_MAX;
897 for (i = 0; i < chain->nr; i++) {
898 u64 ip = chain->ips[i];
899 struct dso *dso = NULL;
902 if (ip >= PERF_CONTEXT_MAX) {
908 case PERF_CONTEXT_KERNEL:
915 sym = resolve_symbol(thread, NULL, &dso, &ip);
917 if (sym && call__match(sym)) {
926 he = rb_entry(parent, struct hist_entry, rb_node);
928 cmp = hist_entry__cmp(&entry, he);
941 he = malloc(sizeof(*he));
945 rb_link_node(&he->rb_node, parent, p);
946 rb_insert_color(&he->rb_node, &hist);
/* Release one histogram bucket; body not visible in this chunk. */
951 static void hist_entry__free(struct hist_entry *he)
957 * collapse the histogram
/* Second tree used to merge buckets that compare equal under the
 * collapse comparators (e.g. same comm, different pid). */
960 static struct rb_root collapse_hists;
/* Insert into the collapse tree; on an exact collapse-match, fold the
 * counts together and free the duplicate entry instead. */
962 static void collapse__insert_entry(struct hist_entry *he)
964 struct rb_node **p = &collapse_hists.rb_node;
965 struct rb_node *parent = NULL;
966 struct hist_entry *iter;
971 iter = rb_entry(parent, struct hist_entry, rb_node);
973 cmp = hist_entry__collapse(iter, he);
976 iter->count += he->count;
977 hist_entry__free(he);
987 rb_link_node(&he->rb_node, parent, p);
988 rb_insert_color(&he->rb_node, &collapse_hists);
/* Move every entry from 'hist' into the collapse tree (no-op unless a
 * chosen sort key actually needs collapsing). */
991 static void collapse__resort(void)
993 struct rb_node *next;
994 struct hist_entry *n;
996 if (!sort__need_collapse)
999 next = rb_first(&hist);
1001 n = rb_entry(next, struct hist_entry, rb_node);
1002 next = rb_next(&n->rb_node);
1004 rb_erase(&n->rb_node, &hist);
1005 collapse__insert_entry(n);
1010 * reverse the map, sort on count.
/* Final output tree, ordered by descending sample count. */
1013 static struct rb_root output_hists;
1015 static void output__insert_entry(struct hist_entry *he)
1017 struct rb_node **p = &output_hists.rb_node;
1018 struct rb_node *parent = NULL;
1019 struct hist_entry *iter;
1021 while (*p != NULL) {
1023 iter = rb_entry(parent, struct hist_entry, rb_node);
/* Higher counts sort to the left, i.e. print first. */
1025 if (he->count > iter->count)
1028 p = &(*p)->rb_right;
1031 rb_link_node(&he->rb_node, parent, p);
1032 rb_insert_color(&he->rb_node, &output_hists);
/* Drain whichever tree holds the entries (collapsed or not) into the
 * count-ordered output tree. */
1035 static void output__resort(void)
1037 struct rb_node *next;
1038 struct hist_entry *n;
1039 struct rb_root *tree = &hist;
1041 if (sort__need_collapse)
1042 tree = &collapse_hists;
1044 next = rb_first(tree);
1047 n = rb_entry(next, struct hist_entry, rb_node);
1048 next = rb_next(&n->rb_node);
1050 rb_erase(&n->rb_node, tree);
1051 output__insert_entry(n);
/* Print the final report: sample count, per-dimension column headers
 * with dotted underlines, then every entry in count order. */
1055 static size_t output__fprintf(FILE *fp, u64 total_samples)
1057 struct hist_entry *pos;
1058 struct sort_entry *se;
1064 fprintf(fp, "# (%Ld samples)\n", (u64)total_samples);
1067 fprintf(fp, "# Overhead");
1068 list_for_each_entry(se, &hist_entry__sort_list, list) {
1069 if (exclude_other && (se == &sort_parent))
1071 fprintf(fp, "  %s", se->header);
/* Underline row sized to match each header. */
1075 fprintf(fp, "# ........");
1076 list_for_each_entry(se, &hist_entry__sort_list, list) {
1079 if (exclude_other && (se == &sort_parent))
1083 for (i = 0; i < strlen(se->header); i++)
1090 for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
1091 pos = rb_entry(nd, struct hist_entry, rb_node);
1092 ret += hist_entry__fprintf(fp, pos, total_samples);
/* Hint at richer sorting when everything was left at defaults. */
1095 if (sort_order == default_sort_order &&
1096 parent_pattern == default_parent_pattern) {
1098 fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
/* Pre-register pid 0 so swapper samples get a readable name. */
1106 static void register_idle_thread(void)
1108 struct thread *thread = threads__findnew(0);
1110 if (thread == NULL ||
1111 thread__set_comm(thread, "[idle]")) {
1112 fprintf(stderr, "problem inserting idle task.\n");
/* Event-type counters accumulated while scanning the data file. */
1117 static unsigned long total = 0,
/* Sanity-check that a callchain fits inside its containing record:
 * the chain payload cannot exceed the bytes left after the fixed part. */
1124 static int validate_chain(struct ip_callchain *chain, event_t *event)
1126 unsigned int chain_size;
1128 chain_size = event->header.size;
1129 chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
1131 if (chain->nr*sizeof(u64) > chain_size)
/* Handle a sample (counter overflow) record: decode the optional
 * period and callchain, classify kernel/user/hv by the header misc
 * bits, resolve the symbol and feed the histogram. */
1138 process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
1142 struct dso *dso = NULL;
1143 struct thread *thread = threads__findnew(event->ip.pid);
1144 u64 ip = event->ip.ip;
1146 struct map *map = NULL;
1147 void *more_data = event->ip.__more_data;
1148 struct ip_callchain *chain = NULL;
/* Variable part of the record: period first (if sampled)... */
1150 if (event->header.type & PERF_SAMPLE_PERIOD) {
1151 period = *(u64 *)more_data;
1152 more_data += sizeof(u64);
1155 dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
1156 (void *)(offset + head),
1157 (void *)(long)(event->header.size),
/* ...then the callchain, which must be bounds-checked. */
1163 if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
1166 chain = (void *)more_data;
1168 dprintf("... chain: nr:%Lu\n", chain->nr);
1170 if (validate_chain(chain, event) < 0) {
1171 eprintf("call-chain problem with event, skipping it.\n");
1176 for (i = 0; i < chain->nr; i++)
1177 dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
1181 dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1183 if (thread == NULL) {
1184 eprintf("problem processing %d event, skipping it.\n",
1185 event->header.type);
/* Classify the sample domain from the header misc field. */
1189 if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
1195 dprintf(" ...... dso: %s\n", dso->name);
1197 } else if (event->header.misc & PERF_EVENT_MISC_USER) {
1205 dprintf(" ...... dso: [hypervisor]\n");
/* Only count domains the user asked to see (show_mask). */
1208 if (show & show_mask) {
1209 struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
1211 if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
1212 eprintf("problem incrementing symbol count, skipping event\n");
/* PERF_EVENT_MMAP: record a new mapping in the owning thread. */
1222 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1224 struct thread *thread = threads__findnew(event->mmap.pid);
1225 struct map *map = map__new(&event->mmap);
1227 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1228 (void *)(offset + head),
1229 (void *)(long)(event->header.size),
1231 (void *)(long)event->mmap.start,
1232 (void *)(long)event->mmap.len,
1233 (void *)(long)event->mmap.pgoff,
1234 event->mmap.filename);
1236 if (thread == NULL || map == NULL) {
1237 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1241 thread__insert_map(thread, map);
/* PERF_EVENT_COMM: a task set its command name. */
1248 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1250 struct thread *thread = threads__findnew(event->comm.pid);
1252 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1253 (void *)(offset + head),
1254 (void *)(long)(event->header.size),
1255 event->comm.comm, event->comm.pid);
1257 if (thread == NULL ||
1258 thread__set_comm(thread, event->comm.comm)) {
1259 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
/* PERF_EVENT_FORK: child inherits the parent's comm and maps. */
1268 process_fork_event(event_t *event, unsigned long offset, unsigned long head)
1270 struct thread *thread = threads__findnew(event->fork.pid);
1271 struct thread *parent = threads__findnew(event->fork.ppid);
1273 dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
1274 (void *)(offset + head),
1275 (void *)(long)(event->header.size),
1276 event->fork.pid, event->fork.ppid);
1278 if (!thread || !parent || thread__fork(thread, parent)) {
1279 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
/* PERF_EVENT_PERIOD: period change; only logged, not acted upon. */
1288 process_period_event(event_t *event, unsigned long offset, unsigned long head)
1290 dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
1291 (void *)(offset + head),
1292 (void *)(long)(event->header.size),
1295 event->period.sample_period);
/* PERF_EVENT_LOST: the kernel dropped samples; tally them. */
1301 process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1303 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1304 (void *)(offset + head),
1305 (void *)(long)(event->header.size),
1309 total_lost += event->lost.lost;
/* Hexdump a raw record (16 bytes per row with printable-char gutter);
 * only emits anything under --dump-raw-trace via cdprintf. */
1314 static void trace_event(event_t *event)
1316 unsigned char *raw_event = (void *)event;
1317 char *color = PERF_COLOR_BLUE;
1324 cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
1326 for (i = 0; i < event->header.size; i++) {
1327 if ((i & 15) == 0) {
1329 cdprintf("  %04x: ", i);
1332 cdprintf(" %02x", raw_event[i]);
1334 if (((i & 15) == 15) || i == event->header.size-1) {
1336 for (j = 0; j < 15-(i & 15); j++)
1338 for (j = 0; j < (i & 15); j++) {
1339 if (isprint(raw_event[i-15+j]))
1340 cdprintf("%c", raw_event[i-15+j]);
/* Central dispatcher: overflow records are flagged via header.misc,
 * everything else switches on header.type. */
1351 process_event(event_t *event, unsigned long offset, unsigned long head)
1355 if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
1356 return process_overflow_event(event, offset, head);
1358 switch (event->header.type) {
1359 case PERF_EVENT_MMAP:
1360 return process_mmap_event(event, offset, head);
1362 case PERF_EVENT_COMM:
1363 return process_comm_event(event, offset, head);
1365 case PERF_EVENT_FORK:
1366 return process_fork_event(event, offset, head);
1368 case PERF_EVENT_PERIOD:
1369 return process_period_event(event, offset, head);
1371 case PERF_EVENT_LOST:
1372 return process_lost_event(event, offset, head);
1375 * We dont process them right now but they are fine:
1378 case PERF_EVENT_THROTTLE:
1379 case PERF_EVENT_UNTHROTTLE:
/* Parsed perf.data file header, filled by perf_header__read(). */
1389 static struct perf_header *header;
/* True only if *every* recorded event attr sampled the given bits
 * (any attr missing the bits fails the check). */
1391 static int perf_header__has_sample(u64 sample_mask)
1395 for (i = 0; i < header->attrs; i++) {
1396 struct perf_header_attr *attr = header->attr[i];
1398 if (!(attr->attr.sample_type & sample_mask))
/* Main work loop: open and mmap perf.data in sliding windows, walk
 * every record through process_event(), then collapse/resort and print
 * the histogram.  Returns an exit status. */
1405 static int __cmd_report(void)
1407 int ret, rc = EXIT_FAILURE;
1408 unsigned long offset = 0;
1409 unsigned long head, shift;
1415 register_idle_thread();
1417 input = open(input_name, O_RDONLY);
1419 fprintf(stderr, " failed to open file: %s", input_name);
1420 if (!strcmp(input_name, "perf.data"))
1421 fprintf(stderr, "  (try 'perf record' first)");
1422 fprintf(stderr, "\n");
1426 ret = fstat(input, &stat);
1428 perror("failed to stat file");
1432 if (!stat.st_size) {
1433 fprintf(stderr, "zero-sized file, nothing to do!\n");
1437 header = perf_header__read(input);
1438 head = header->data_offset;
/* --sort parent is useless without recorded callchains. */
1440 if (sort__has_parent &&
1441 !perf_header__has_sample(PERF_SAMPLE_CALLCHAIN)) {
1442 fprintf(stderr, "selected --sort parent, but no callchain data\n");
1446 if (load_kernel() < 0) {
1447 perror("failed to load kernel symbols");
1448 return EXIT_FAILURE;
1452 if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
1453 perror("failed to get the current directory");
1454 return EXIT_FAILURE;
1456 cwdlen = strlen(cwd);
/* Align the mmap offset down to a page boundary. */
1462 shift = page_size * (head / page_size);
1467 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
1468 MAP_SHARED, input, offset);
1469 if (buf == MAP_FAILED) {
1470 perror("failed to mmap file");
1475 event = (event_t *)(buf + head);
1477 size = event->header.size;
/* Record would cross the window end: remap further into the file. */
1481 if (head + event->header.size >= page_size * mmap_window) {
1484 shift = page_size * (head / page_size);
1486 ret = munmap(buf, page_size * mmap_window);
1494 size = event->header.size;
1496 dprintf("\n%p [%p]: event: %d\n",
1497 (void *)(offset + head),
1498 (void *)(long)event->header.size,
1499 event->header.type);
1501 if (!size || process_event(event, offset, head) < 0) {
1503 dprintf("%p [%p]: skipping unknown header type: %d\n",
1504 (void *)(offset + head),
1505 (void *)(long)(event->header.size),
1506 event->header.type);
1511 * assume we lost track of the stream, check alignment, and
1512 * increment a single u64 in the hope to catch on again 'soon'.
1515 if (unlikely(head & 7))
1523 if (offset + head >= header->data_offset + header->data_size)
1526 if (offset + head < stat.st_size)
1533 dprintf("      IP events: %10ld\n", total);
1534 dprintf("    mmap events: %10ld\n", total_mmap);
1535 dprintf("    comm events: %10ld\n", total_comm);
1536 dprintf("    fork events: %10ld\n", total_fork);
1537 dprintf("    lost events: %10ld\n", total_lost);
1538 dprintf(" unknown events: %10ld\n", total_unknown);
1544 threads__fprintf(stdout);
1547 dsos__fprintf(stdout);
1551 output__fprintf(stdout, total);
/* Usage string and option table for 'perf report'. */
1556 static const char * const report_usage[] = {
1557 "perf report [<options>] <command>",
1561 static const struct option options[] = {
1562 OPT_STRING('i', "input", &input_name, "file",
1564 OPT_BOOLEAN('v', "verbose", &verbose,
1565 "be more verbose (show symbol address, etc)"),
1566 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1567 "dump raw trace in ASCII"),
1568 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
1569 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1570 "sort by key(s): pid, comm, dso, symbol, parent"),
1571 OPT_BOOLEAN('P', "full-paths", &full_paths,
1572 "Don't shorten the pathnames taking into account the cwd"),
1573 OPT_STRING('p', "parent", &parent_pattern, "regex",
1574 "regex filter to identify parent, see: '--sort parent'"),
1575 OPT_BOOLEAN('x', "exclude-other", &exclude_other,
1576 "Only display entries with parent-match"),
1580 static void setup_sorting(void)
1582 char *tmp, *tok, *str = strdup(sort_order);
1584 for (tok = strtok_r(str, ", ", &tmp);
1585 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1586 if (sort_dimension__add(tok) < 0) {
1587 error("Unknown --sort key: `%s'", tok);
1588 usage_with_options(report_usage, options);
/* Entry point for 'perf report': parse options, configure sorting
 * (implicitly adding "parent" when -p was given), then run the report. */
1595 int cmd_report(int argc, const char **argv, const char *prefix)
1599 page_size = getpagesize();
1601 argc = parse_options(argc, argv, options, report_usage, 0);
/* A custom -p pattern implies sorting by parent. */
1605 if (parent_pattern != default_parent_pattern)
1606 sort_dimension__add("parent");
1611 * Any (unrecognized) arguments left?
1614 usage_with_options(report_usage, options);
1618 return __cmd_report();