/* tools/perf/util/machine.c */
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include "unwind.h"

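/*
 * Set up an empty machine: map groups, DSO lists and the thread tree.
 * Guest machines (pid != HOST_KERNEL_ID) also get a thread whose comm
 * is set to "[guest/<pid>]".
 */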
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
        map_groups__init(&machine->kmaps);
        RB_CLEAR_NODE(&machine->rb_node);
        INIT_LIST_HEAD(&machine->user_dsos);
        INIT_LIST_HEAD(&machine->kernel_dsos);

        machine->threads = RB_ROOT;
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;

        machine->kmaps.machine = machine;
        machine->pid = pid;

        machine->root_dir = strdup(root_dir);
        if (machine->root_dir == NULL)
                return -ENOMEM;

        if (pid != HOST_KERNEL_ID) {
                struct thread *thread = machine__findnew_thread(machine, pid);
                char comm[64];

                if (thread == NULL)
                        return -ENOMEM;

                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm);
        }

        return 0;
}

static void dsos__delete(struct list_head *dsos)
{
        struct dso *pos, *n;

        list_for_each_entry_safe(pos, n, dsos, node) {
                list_del(&pos->node);
                dso__delete(pos);
        }
}

void machine__delete_dead_threads(struct machine *machine)
{
        struct thread *n, *t;

        list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
                list_del(&t->node);
                thread__delete(t);
        }
}

void machine__delete_threads(struct machine *machine)
{
        struct rb_node *nd = rb_first(&machine->threads);

        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                rb_erase(&t->rb_node, &machine->threads);
                nd = rb_next(nd);
                thread__delete(t);
        }
}

void machine__exit(struct machine *machine)
{
        map_groups__exit(&machine->kmaps);
        dsos__delete(&machine->user_dsos);
        dsos__delete(&machine->kernel_dsos);
        free(machine->root_dir);
        machine->root_dir = NULL;
}

void machine__delete(struct machine *machine)
{
        machine__exit(machine);
        free(machine);
}

void machines__init(struct machines *machines)
{
        machine__init(&machines->host, "", HOST_KERNEL_ID);
        machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
        machine__exit(&machines->host);
        /* XXX exit guest */
}

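/*
 * Allocate and initialize a guest machine and link it into the
 * machines->guests rbtree, keyed by pid.
 */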
struct machine *machines__add(struct machines *machines, pid_t pid,
                              const char *root_dir)
{
        struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (machine == NULL)
                return NULL;

        if (machine__init(machine, root_dir, pid) != 0) {
                free(machine);
                return NULL;
        }

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct machine, rb_node);
                if (pid < pos->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, &machines->guests);

        return machine;
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
        struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

        if (pid == HOST_KERNEL_ID)
                return &machines->host;

        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)
                        p = &(*p)->rb_left;
                else if (pid > machine->pid)
                        p = &(*p)->rb_right;
                else
                        return machine;
                if (!machine->pid)
                        default_machine = machine;
        }

        return default_machine;
}

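/*
 * Look up a machine by pid, creating it on demand.  For guests, a
 * root_dir under symbol_conf.guestmount is used when it is accessible;
 * an inaccessible guest mount is reported once and yields NULL.
 */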
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
        char path[PATH_MAX];
        const char *root_dir = "";
        struct machine *machine = machines__find(machines, pid);

        if (machine && (machine->pid == pid))
                goto out;

        if ((pid != HOST_KERNEL_ID) &&
            (pid != DEFAULT_GUEST_KERNEL_ID) &&
            (symbol_conf.guestmount)) {
                sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                if (access(path, R_OK)) {
                        static struct strlist *seen;

                        if (!seen)
                                seen = strlist__new(true, NULL);

                        if (!strlist__has_entry(seen, path)) {
                                pr_err("Can't access file %s\n", path);
                                strlist__add(seen, path);
                        }
                        machine = NULL;
                        goto out;
                }
                root_dir = path;
        }

        machine = machines__add(machines, pid, root_dir);
out:
        return machine;
}

void machines__process_guests(struct machines *machines,
                              machine__process_t process, void *data)
{
        struct rb_node *nd;

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
}

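/*
 * Format the synthetic mmap filename used for kernel maps:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest and "[guest.kernel.kallsyms.<pid>]" for other guests.
 */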
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
        if (machine__is_host(machine))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(machine))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
        else {
                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
                         machine->pid);
        }

        return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
        struct rb_node *node;
        struct machine *machine;

        machines->host.id_hdr_size = id_hdr_size;

        for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
                machine = rb_entry(node, struct machine, rb_node);
                machine->id_hdr_size = id_hdr_size;
        }
}

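/*
 * Look up a thread by pid in the machine's rbtree, optionally creating
 * it when not found.  A one-entry cache (last_match) short-circuits the
 * common case of consecutive lookups for the same pid.
 */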
static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
                                                bool create)
{
        struct rb_node **p = &machine->threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (machine->last_match && machine->last_match->pid == pid)
                return machine->last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        machine->last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        if (!create)
                return NULL;

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &machine->threads);
                machine->last_match = th;
        }

        return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
{
        return __machine__findnew_thread(machine, pid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid)
{
        return __machine__findnew_thread(machine, pid, false);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

        if (dump_trace)
                perf_event__fprintf_comm(event, stdout);

        if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }

        return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
                                union perf_event *event)
{
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
                    event->lost.id, event->lost.lost);
        return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
                                const char *filename)
{
        struct map *map;
        struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

        if (dso == NULL)
                return NULL;

        map = map__new2(start, dso, MAP__FUNCTION);
        if (map == NULL)
                return NULL;

        if (machine__is_host(machine))
                dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
        else
                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
        map_groups__insert(&machine->kmaps, map);
        return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
        struct rb_node *nd;
        size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
                     __dsos__fprintf(&machines->host.user_dsos, fp);

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret += __dsos__fprintf(&pos->kernel_dsos, fp);
                ret += __dsos__fprintf(&pos->user_dsos, fp);
        }

        return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
                                     bool (skip)(struct dso *dso, int parm), int parm)
{
        return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
               __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
                                      bool (skip)(struct dso *dso, int parm), int parm)
{
        struct rb_node *nd;
        size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
        }
        return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
        int i;
        size_t printed = 0;
        struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

        if (kdso->has_build_id) {
                char filename[PATH_MAX];
                if (dso__build_id_filename(kdso, filename, sizeof(filename)))
                        printed += fprintf(fp, "[0] %s\n", filename);
        }

        for (i = 0; i < vmlinux_path__nr_entries; ++i)
                printed += fprintf(fp, "[%d] %s\n",
                                   i + kdso->has_build_id, vmlinux_path[i]);

        return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
        const char *vmlinux_name = NULL;
        struct dso *kernel;

        if (machine__is_host(machine)) {
                vmlinux_name = symbol_conf.vmlinux_name;
                if (!vmlinux_name)
                        vmlinux_name = "[kernel.kallsyms]";

                kernel = dso__kernel_findnew(machine, vmlinux_name,
                                             "[kernel]",
                                             DSO_TYPE_KERNEL);
        } else {
                char bf[PATH_MAX];

                if (machine__is_default_guest(machine))
                        vmlinux_name = symbol_conf.default_guest_vmlinux_name;
                if (!vmlinux_name)
                        vmlinux_name = machine__mmap_name(machine, bf,
                                                          sizeof(bf));

                kernel = dso__kernel_findnew(machine, vmlinux_name,
                                             "[guest.kernel]",
                                             DSO_TYPE_GUEST_KERNEL);
        }

        if (kernel != NULL && (!kernel->has_build_id))
                dso__read_running_kernel_build_id(kernel, machine);

        return kernel;
}

struct process_args {
        u64 start;
};

static int symbol__in_kernel(void *arg, const char *name,
                             char type __maybe_unused, u64 start)
{
        struct process_args *args = arg;

        if (strchr(name, '['))
                return 0;

        args->start = start;
        return 1;
}

/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
        const char *filename;
        char path[PATH_MAX];
        struct process_args args;

        if (machine__is_host(machine)) {
                filename = "/proc/kallsyms";
        } else {
                if (machine__is_default_guest(machine))
                        filename = (char *)symbol_conf.default_guest_kallsyms;
                else {
                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
                        filename = path;
                }
        }

        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return 0;

        if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
                return 0;

        return args.start;
}

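/*
 * Create the vmlinux maps (one per map type) for this kernel DSO,
 * starting at the address found in kallsyms, and hook them into the
 * machine's kernel map groups.
 */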
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
        enum map_type type;
        u64 start = machine__get_kernel_start_addr(machine);

        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;

                machine->vmlinux_maps[type] = map__new2(start, kernel, type);
                if (machine->vmlinux_maps[type] == NULL)
                        return -1;

                machine->vmlinux_maps[type]->map_ip =
                        machine->vmlinux_maps[type]->unmap_ip =
                                identity__map_ip;
                kmap = map__kmap(machine->vmlinux_maps[type]);
                kmap->kmaps = &machine->kmaps;
                map_groups__insert(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
        }

        return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
        enum map_type type;

        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;

                if (machine->vmlinux_maps[type] == NULL)
                        continue;

                kmap = map__kmap(machine->vmlinux_maps[type]);
                map_groups__remove(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
                if (kmap->ref_reloc_sym) {
                        /*
                         * ref_reloc_sym is shared among all maps, so free just
                         * on one of them.
                         */
                        if (type == MAP__FUNCTION) {
                                free((char *)kmap->ref_reloc_sym->name);
                                kmap->ref_reloc_sym->name = NULL;
                                free(kmap->ref_reloc_sym);
                        }
                        kmap->ref_reloc_sym = NULL;
                }

                map__delete(machine->vmlinux_maps[type]);
                machine->vmlinux_maps[type] = NULL;
        }
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
        int ret = 0;
        struct dirent **namelist = NULL;
        int i, items = 0;
        char path[PATH_MAX];
        pid_t pid;
        char *endp;

        if (symbol_conf.default_guest_vmlinux_name ||
            symbol_conf.default_guest_modules ||
            symbol_conf.default_guest_kallsyms) {
                machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
        }

        if (symbol_conf.guestmount) {
                items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
                if (items <= 0)
                        return -ENOENT;
                for (i = 0; i < items; i++) {
                        if (!isdigit(namelist[i]->d_name[0])) {
                                /* Filter out . and .. */
                                continue;
                        }
                        pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
                        if ((*endp != '\0') ||
                            (endp == namelist[i]->d_name) ||
                            (errno == ERANGE)) {
                                pr_debug("invalid directory (%s). Skipping.\n",
                                         namelist[i]->d_name);
                                continue;
                        }
                        sprintf(path, "%s/%s/proc/kallsyms",
                                symbol_conf.guestmount,
                                namelist[i]->d_name);
                        ret = access(path, R_OK);
                        if (ret) {
                                pr_debug("Can't access file %s\n", path);
                                goto failure;
                        }
                        machines__create_kernel_maps(machines, pid);
                }
failure:
                free(namelist);
        }

        return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
        struct rb_node *next = rb_first(&machines->guests);

        machine__destroy_kernel_maps(&machines->host);

        while (next) {
                struct machine *pos = rb_entry(next, struct machine, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase(&pos->rb_node, &machines->guests);
                machine__delete(pos);
        }
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
        struct machine *machine = machines__findnew(machines, pid);

        if (machine == NULL)
                return -1;

        return machine__create_kernel_maps(machine);
}

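/*
 * Load kernel symbols for one map type from a kallsyms file.  Because
 * kallsyms interleaves kernel and module symbols, the end addresses of
 * all maps are fixed up after a successful load.
 */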
int machine__load_kallsyms(struct machine *machine, const char *filename,
                           enum map_type type, symbol_filter_t filter)
{
        struct map *map = machine->vmlinux_maps[type];
        int ret = dso__load_kallsyms(map->dso, filename, map, filter);

        if (ret > 0) {
                dso__set_loaded(map->dso, type);
                /*
                 * Since /proc/kallsyms will have multiple sections for the
                 * kernel, with modules between them, fix up the end of all
                 * sections.
                 */
                __map_groups__fixup_end(&machine->kmaps, type);
        }

        return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
                               symbol_filter_t filter)
{
        struct map *map = machine->vmlinux_maps[type];
        int ret = dso__load_vmlinux_path(map->dso, map, filter);

        if (ret > 0) {
                dso__set_loaded(map->dso, type);
                map__reloc_vmlinux(map);
        }

        return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                __map_groups__fixup_end(mg, i);
}

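/*
 * Read "<root_dir>/proc/version" and return a strdup'ed copy of the
 * kernel release (the token following "Linux version "), or NULL.
 */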
static char *get_kernel_version(const char *root_dir)
{
        char version[PATH_MAX];
        FILE *file;
        char *name, *tmp;
        const char *prefix = "Linux version ";

        sprintf(version, "%s/proc/version", root_dir);
        file = fopen(version, "r");
        if (!file)
                return NULL;

        version[0] = '\0';
        tmp = fgets(version, sizeof(version), file);
        fclose(file);

        name = strstr(version, prefix);
        if (!name)
                return NULL;
        name += strlen(prefix);
        tmp = strchr(name, ' ');
        if (tmp)
                *tmp = '\0';

        return strdup(name);
}

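/*
 * Walk a modules directory recursively and, for every "*.ko" file that
 * matches a module map already created from /proc/modules, record the
 * full path as the DSO's long name.
 */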
static int map_groups__set_modules_path_dir(struct map_groups *mg,
                                const char *dir_name)
{
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
        int ret = 0;

        if (!dir) {
                pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
                return -1;
        }

        while ((dent = readdir(dir)) != NULL) {
                char path[PATH_MAX];
                struct stat st;

                /* sshfs might return bad dent->d_type, so we have to stat */
                snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
                if (stat(path, &st))
                        continue;

                if (S_ISDIR(st.st_mode)) {
                        if (!strcmp(dent->d_name, ".") ||
                            !strcmp(dent->d_name, ".."))
                                continue;

                        ret = map_groups__set_modules_path_dir(mg, path);
                        if (ret < 0)
                                goto out;
                } else {
                        char *dot = strrchr(dent->d_name, '.'),
                             dso_name[PATH_MAX];
                        struct map *map;
                        char *long_name;

                        if (dot == NULL || strcmp(dot, ".ko"))
                                continue;
                        snprintf(dso_name, sizeof(dso_name), "[%.*s]",
                                 (int)(dot - dent->d_name), dent->d_name);

                        strxfrchar(dso_name, '-', '_');
                        map = map_groups__find_by_name(mg, MAP__FUNCTION,
                                                       dso_name);
                        if (map == NULL)
                                continue;

                        long_name = strdup(path);
                        if (long_name == NULL) {
                                ret = -1;
                                goto out;
                        }
                        dso__set_long_name(map->dso, long_name);
                        map->dso->lname_alloc = 1;
                        dso__kernel_module_get_build_id(map->dso, "");
                }
        }

out:
        closedir(dir);
        return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
        char *version;
        char modules_path[PATH_MAX];

        version = get_kernel_version(machine->root_dir);
        if (!version)
                return -1;

        snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
                 machine->root_dir, version);
        free(version);

        return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}

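/*
 * Parse /proc/modules (or the guest equivalent) to create one map per
 * loaded module, then resolve each module's path on disk.
 */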
static int machine__create_modules(struct machine *machine)
{
        char *line = NULL;
        size_t n;
        FILE *file;
        struct map *map;
        const char *modules;
        char path[PATH_MAX];

        if (machine__is_default_guest(machine))
                modules = symbol_conf.default_guest_modules;
        else {
                sprintf(path, "%s/proc/modules", machine->root_dir);
                modules = path;
        }

        if (symbol__restricted_filename(modules, "/proc/modules"))
                return -1;

        file = fopen(modules, "r");
        if (file == NULL)
                return -1;

        while (!feof(file)) {
                char name[PATH_MAX];
                u64 start;
                char *sep;
                int line_len;

                line_len = getline(&line, &n, file);
                if (line_len < 0)
                        break;

                if (!line)
                        goto out_failure;

                line[--line_len] = '\0'; /* \n */

                sep = strrchr(line, 'x');
                if (sep == NULL)
                        continue;

                hex2u64(sep + 1, &start);

                sep = strchr(line, ' ');
                if (sep == NULL)
                        continue;

                *sep = '\0';

                snprintf(name, sizeof(name), "[%s]", line);
                map = machine__new_module(machine, start, name);
                if (map == NULL)
                        goto out_delete_line;
                dso__kernel_module_get_build_id(map->dso, machine->root_dir);
        }

        free(line);
        fclose(file);

        return machine__set_modules_path(machine);

out_delete_line:
        free(line);
out_failure:
        return -1;
}

int machine__create_kernel_maps(struct machine *machine)
{
        struct dso *kernel = machine__get_kernel(machine);

        if (kernel == NULL ||
            __machine__create_kernel_maps(machine, kernel) < 0)
                return -1;

        if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
                if (machine__is_host(machine))
                        pr_debug("Problems creating module maps, "
                                 "continuing anyway...\n");
                else
                        pr_debug("Problems creating module maps for guest %d, "
                                 "continuing anyway...\n", machine->pid);
        }

        /*
         * Now that we have all the maps created, just set the ->end of them:
         */
        map_groups__fixup_end(&machine->kmaps);
        return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
                                         union perf_event *event)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; i++) {
                machine->vmlinux_maps[i]->start = event->mmap.start;
                machine->vmlinux_maps[i]->end   = (event->mmap.start +
                                                   event->mmap.len);
                /*
                 * Be a bit paranoid here, some perf.data files come with
                 * a zero-sized synthesized MMAP event for the kernel.
                 */
                if (machine->vmlinux_maps[i]->end == 0)
                        machine->vmlinux_maps[i]->end = ~0ULL;
        }
}

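/*
 * Handle an MMAP event describing the kernel or a module: module mmaps
 * create a module map with a short "[name]" DSO name, kernel mmaps
 * (re)create the vmlinux maps, set their length and, when the event
 * carries one, the ref reloc symbol used for relocation.
 */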
static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

                char short_module_name[1024];
                char *name, *dot;

                if (event->mmap.filename[0] == '/') {
                        name = strrchr(event->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;

                        ++name; /* skip / */
                        dot = strrchr(name, '.');
                        if (dot == NULL)
                                goto out_problem;
                        snprintf(short_module_name, sizeof(short_module_name),
                                        "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
                        strcpy(short_module_name, event->mmap.filename);

                map = machine__new_module(machine, event->mmap.start,
                                          event->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                name = strdup(short_module_name);
                if (name == NULL)
                        goto out_problem;

                map->dso->short_name = name;
                map->dso->sname_alloc = 1;
                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
                                strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
                                                     kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                machine__set_kernel_mmap_len(machine, event);

                /*
                 * Avoid using a zero address (kptr_restrict) for the ref reloc
                 * symbol. Effectively having zero here means that at record
                 * time /proc/sys/kernel/kptr_restrict was non-zero.
                 */
                if (event->mmap.pgoff != 0) {
                        maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         event->mmap.pgoff);
                }

                if (machine__is_default_guest(machine)) {
                        /*
                         * Preload the DSO of the guest kernel and modules.
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

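/*
 * Handle PERF_RECORD_MMAP: kernel-space mmaps are routed to the kernel
 * handler above; user-space mmaps create a new map on the owning thread.
 */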
int machine__process_mmap_event(struct machine *machine, union perf_event *event)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread;
        struct map *map;
        int ret = 0;

        if (dump_trace)
                perf_event__fprintf_mmap(event, stdout);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = machine__process_kernel_mmap_event(machine, event);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        thread = machine__findnew_thread(machine, event->mmap.pid);
        if (thread == NULL)
                goto out_problem;
        map = map__new(&machine->user_dsos, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
                        event->mmap.pid, event->mmap.filename,
                        MAP__FUNCTION);
        if (map == NULL)
                goto out_problem;

        thread__insert_map(thread, map);
        return 0;

out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

int machine__process_fork_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
        struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                return -1;
        }

        return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__find_thread(machine, event->fork.tid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread != NULL)
                machine__remove_thread(machine, thread);

        return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event)
{
        int ret;

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret = machine__process_comm_event(machine, event); break;
        case PERF_RECORD_MMAP:
                ret = machine__process_mmap_event(machine, event); break;
        case PERF_RECORD_FORK:
                ret = machine__process_fork_event(machine, event); break;
        case PERF_RECORD_EXIT:
                ret = machine__process_exit_event(machine, event); break;
        case PERF_RECORD_LOST:
                ret = machine__process_lost_event(machine, event); break;
        default:
                ret = -1;
                break;
        }

        return ret;
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
        machine->last_match = NULL;
        rb_erase(&th->rb_node, &machine->threads);
        /*
         * We may have references to this thread, for instance in some hist_entry
         * instances, so just move them to a separate list.
         */
        list_add_tail(&th->node, &machine->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return true;

        return false;
}

static const u8 cpumodes[] = {
        PERF_RECORD_MISC_USER,
        PERF_RECORD_MISC_KERNEL,
        PERF_RECORD_MISC_GUEST_USER,
        PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
                            struct addr_map_symbol *ams,
                            u64 ip)
{
        struct addr_location al;
        size_t i;
        u8 m;

        memset(&al, 0, sizeof(al));

        for (i = 0; i < NCPUMODES; i++) {
                m = cpumodes[i];
                /*
                 * We cannot use the header.misc hint to determine whether a
                 * branch stack address is user, kernel, guest, hypervisor.
                 * Branches may straddle the kernel/user/hypervisor boundaries.
                 * Thus, we have to try consecutively until we find a match
                 * or else the symbol is unknown.
                 */
                thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
                                ip, &al, NULL);
                if (al.sym)
                        goto found;
        }
found:
        ams->addr = ip;
        ams->al_addr = al.addr;
        ams->sym = al.sym;
        ams->map = al.map;
}

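/*
 * Resolve every branch stack entry to maps/symbols, returning an array
 * of branch_info that the caller owns (and must free).
 */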
struct branch_info *machine__resolve_bstack(struct machine *machine,
                                            struct thread *thr,
                                            struct branch_stack *bs)
{
        struct branch_info *bi;
        unsigned int i;

        bi = calloc(bs->nr, sizeof(struct branch_info));
        if (!bi)
                return NULL;

        for (i = 0; i < bs->nr; i++) {
                ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
                ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
                bi[i].flags = bs->entries[i].flags;
        }
        return bi;
}

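/*
 * Walk a sampled ip_callchain and append each resolved entry to the
 * global callchain_cursor, switching cpumode on PERF_CONTEXT_* markers
 * and honouring the configured callchain order.
 */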
static int machine__resolve_callchain_sample(struct machine *machine,
                                             struct thread *thread,
                                             struct ip_callchain *chain,
                                             struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        int err;

        callchain_cursor_reset(&callchain_cursor);

        if (chain->nr > PERF_MAX_STACK_DEPTH) {
                pr_warning("corrupted callchain. skipping...\n");
                return 0;
        }

        for (i = 0; i < chain->nr; i++) {
                u64 ip;
                struct addr_location al;

                if (callchain_param.order == ORDER_CALLEE)
                        ip = chain->ips[i];
                else
                        ip = chain->ips[chain->nr - i - 1];

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR;
                                break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL;
                                break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;
                                break;
                        default:
                                pr_debug("invalid callchain context: "
                                         "%"PRId64"\n", (s64) ip);
                                /*
                                 * It seems the callchain is corrupted.
                                 * Discard all.
                                 */
                                callchain_cursor_reset(&callchain_cursor);
                                return 0;
                        }
                        continue;
                }

                al.filtered = false;
                thread__find_addr_location(thread, machine, cpumode,
                                           MAP__FUNCTION, ip, &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                }

                err = callchain_cursor_append(&callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
        }

        return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
        struct callchain_cursor *cursor = arg;
        return callchain_cursor_append(cursor, entry->ip,
                                       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
                               struct perf_evsel *evsel,
                               struct thread *thread,
                               struct perf_sample *sample,
                               struct symbol **parent)
{
        int ret;

        callchain_cursor_reset(&callchain_cursor);

        ret = machine__resolve_callchain_sample(machine, thread,
                                                sample->callchain, parent);
        if (ret)
                return ret;

        /* Can we do dwarf post unwind? */
        if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
              (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
                return 0;

        /* Bail out if nothing was captured. */
        if ((!sample->user_regs.regs) ||
            (!sample->user_stack.size))
                return 0;

        return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
                                   thread, evsel->attr.sample_regs_user,
                                   sample);
}