/* tools/perf/util/session.c */

#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"

static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        INIT_LIST_HEAD(&session->ordered_events.events);
        INIT_LIST_HEAD(&session->ordered_events.cache);
        INIT_LIST_HEAD(&session->ordered_events.to_free);
        machines__init(&session->machines);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        perf_session__set_id_hdr_size(session);
                }
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        goto out_delete;
        }

        if (tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
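
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file; it assumes the perf_data_file layout from this tree's
 * tools/perf/util/data.h):
 *
 *      struct perf_data_file file = {
 *              .path = "perf.data",
 *              .mode = PERF_DATA_MODE_READ,
 *      };
 *      struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *      if (session) {
 *              perf_session__process_events(session, &tool);
 *              perf_session__delete(session);
 *      }
 */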

static void perf_session__delete_dead_threads(struct perf_session *session)
{
        machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
        zfree(&env->arch);
        zfree(&env->cpu_desc);
        zfree(&env->cpuid);

        zfree(&env->cmdline);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->numa_nodes);
        zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_dead_threads(session);
        perf_session__delete_threads(session);
        perf_session_env__delete(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct perf_session *perf_session
                                       __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct perf_session *session);

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_finished_round_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}
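
/*
 * Added commentary: revbyte() mirrors the bit order within one byte by
 * swapping first the nibbles, then bit pairs, then adjacent bits, e.g.
 *
 *      revbyte(0x01) == 0x80
 *      revbyte(0xA0) == 0x05
 *      revbyte(0xFF) == 0xFF
 *
 * swap_bitfield() applies it per byte, which is why it takes a length
 * instead of assuming a word size.
 */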

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);
        attr->config            = bswap_64(attr->config);
        attr->sample_period     = bswap_64(attr->sample_period);
        attr->sample_type       = bswap_64(attr->sample_type);
        attr->read_format       = bswap_64(attr->read_format);
        attr->wakeup_events     = bswap_32(attr->wakeup_events);
        attr->bp_type           = bswap_32(attr->bp_type);
        attr->bp_addr           = bswap_64(attr->bp_addr);
        attr->bp_len            = bswap_64(attr->bp_len);
        attr->branch_sample_type = bswap_64(attr->branch_sample_type);
        attr->sample_regs_user   = bswap_64(attr->sample_regs_user);
        attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

        swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

struct ordered_event {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_events *oe = &session->ordered_events;

        while (!list_empty(&oe->to_free)) {
                struct ordered_event *event;

                event = list_entry(oe->to_free.next, struct ordered_event, list);
                list_del(&event->list);
                free(event);
        }
}

/* The queue is ordered by time */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new;

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = oe->buffer + oe->buffer_idx;
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else {
                oe->buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!oe->buffer)
                        return NULL;
                list_add(&oe->buffer->list, &oe->to_free);

                /* First entry is abused to maintain the to_free list. */
                oe->buffer_idx = 2;
                new = oe->buffer + 1;
        }

        return new;
}
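
/*
 * Added commentary: alloc_event() draws from three sources, in order:
 * the free list of recycled entries (oe->cache, refilled by
 * ordered_events__delete() below), the current 64KB slab (oe->buffer),
 * and, once the slab is exhausted, a freshly malloc'ed slab whose first
 * entry is used only to link the slab into oe->to_free so that
 * perf_session_free_sample_buffers() can release it.
 */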

static struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp)
{
        struct ordered_event *new;

        new = alloc_event(oe);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

static void
ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_del(&event->list);
        list_add(&event->list, &oe->cache);
        oe->nr_events--;
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset);

static int ordered_events__flush(struct perf_session *s,
                                 struct perf_tool *tool)
{
        struct ordered_events *oe = &s->ordered_events;
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        struct perf_sample sample;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!tool->ordered_events || !limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;

                ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
                if (ret)
                        pr_err("Can't parse sample, err = %d\n", ret);
                else {
                        ret = perf_session_deliver_event(s, iter->event, &sample, tool,
                                                         iter->file_offset);
                        if (ret)
                                return ret;
                }

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        return 0;
}

/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session)
{
        int ret = ordered_events__flush(session, tool);
        if (!ret)
                session->ordered_events.next_flush = session->ordered_events.max_timestamp;

        return ret;
}
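
/*
 * Illustrative sketch (added, not part of the original file): a tool
 * that wants the round-based flushing described above only needs to
 * opt in; perf_tool__fill_defaults() wires up the rest.
 */
static struct perf_tool ordered_tool_example __maybe_unused = {
        .sample         = process_event_sample_stub, /* stand-in handler */
        .ordered_events = true,
        /*
         * .finished_round is deliberately left NULL:
         * perf_tool__fill_defaults() sets it to process_finished_round()
         * because ordered_events is true.
         */
};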

int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_events *oe = &s->ordered_events;
        u64 timestamp = sample->time;
        struct ordered_event *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_events.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        new = ordered_events__new(oe, timestamp);
        if (!new)
                return -ENOMEM;

        new->file_offset = file_offset;
        new->event = event;
        return 0;
}
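
/*
 * Added commentary: the -ETIME above is not fatal; the caller,
 * perf_session__process_event(), treats it as "cannot be ordered" and
 * delivers such timestamp-less events immediately instead of queueing.
 */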

static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++)
                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
                        i, sample->branch_stack->entries[i].from,
                        sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs) {
                u64 mask = user_regs->mask;
                printf("... user regs: mask 0x%" PRIx64 "\n", mask);
                regs_dump__printf(mask, user_regs->regs);
        }
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(session->evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);

        if (sample_type & PERF_SAMPLE_BRANCH_STACK)
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
        perf_session__find_machine_for_cpumode(struct perf_session *session,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct machine *machine;

        if (perf_guest &&
            ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = perf_session__find_machine(session, pid);
                if (!machine)
                        machine = perf_session__findnew_machine(session,
                                                DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &session->machines.host;
}

static int deliver_sample_value(struct perf_session *session,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid;

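        /*
         * Added commentary: PERF_SAMPLE_READ carries raw counter values,
         * so the period of this sample is the delta against the last
         * value seen for this id; the stored value is then updated.
         */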
        sid = perf_evlist__id2sid(session->evlist, v->id);
        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++session->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
                                struct perf_tool *tool,
                                union  perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(session, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
                             struct perf_tool *tool,
                             union  perf_event *event,
                             struct perf_sample *sample,
                             struct perf_evsel *evsel,
                             struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(session, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(session, tool, event, sample,
                                            &sample->read.one, machine);
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(session, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(session->evlist, sample->id);
        if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
                /*
                 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
                 * because the tools right now may apply filters, discarding
                 * some of the samples. For consistency, in the future we
                 * should have something like nr_filtered_samples and remove
                 * the sample->period from total_sample_period, etc, KISS for
                 * now tho.
                 *
                 * Also testing against NULL allows us to handle files without
                 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
                 * future probably it'll be a good idea to restrict event
                 * processing via perf_session to files with both set.
                 */
                hists__inc_nr_events(&evsel->hists, event->header.type);
        }

        machine = perf_session__find_machine_for_cpumode(session, event,
                                                         sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(evsel, event, sample);
                if (evsel == NULL) {
                        ++session->stats.nr_unknown_id;
                        return 0;
                }
                if (machine == NULL) {
                        ++session->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_session__deliver_sample(session, tool, event,
                                                    sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        session->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        default:
                ++session->stats.nr_unknown_events;
                return -1;
        }
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            struct perf_tool *tool,
                                            u64 file_offset)
{
        int fd = perf_data_file__fd(session->file);
        int err;

        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0)
                        perf_session__set_id_hdr_size(session);
                return err;
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
                return 0;
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset, SEEK_SET);
                return tool->tracing_data(tool, event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(tool, event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, session);
        default:
                return -EINVAL;
        }
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}

static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(session->evlist));

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        events_stats__inc(&session->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, tool, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        ret = perf_evlist__parse_sample(session->evlist, event, &sample);
        if (ret)
                return ret;

        if (tool->ordered_events) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, tool,
                                          file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
        hdr->type = bswap_32(hdr->type);
        hdr->misc = bswap_16(hdr->misc);
        hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
        struct thread *thread;

        thread = machine__findnew_thread(&session->machines.host, 0, 0);
        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_tool *tool)
{
        if (tool->lost == perf_event__process_lost &&
            session->stats.nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
                            session->stats.nr_events[0],
                            session->stats.nr_events[PERF_RECORD_LOST]);
        }

        if (session->stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->stats.nr_unknown_events);
        }

        if (session->stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            session->stats.nr_unknown_id);
        }

        if (session->stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->stats.nr_invalid_chains,
                            session->stats.nr_events[PERF_RECORD_SAMPLE]);
        }

        if (session->stats.nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            session->stats.nr_unprocessable_samples);
        }
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session,
                                               struct perf_tool *tool)
{
        int fd = perf_data_file__fd(session->file);
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
        s64 skip = 0;
        u64 head;
        ssize_t err;
        void *p;

        perf_tool__fill_defaults(tool);

        head = 0;
        cur_size = sizeof(union perf_event);

        buf = malloc(cur_size);
        if (!buf)
                return -errno;
more:
        event = buf;
        err = readn(fd, event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        size = event->header.size;
        if (size < sizeof(struct perf_event_header)) {
                pr_err("bad event header size\n");
                goto out_err;
        }

        if (size > cur_size) {
                void *new = realloc(buf, size);
                if (!new) {
                        pr_err("failed to allocate memory to read event\n");
                        goto out_err;
                }
                buf = new;
                cur_size = size;
                event = buf;
        }
        p = event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       head, event->header.size, event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        /* do the final flush for ordered samples */
        session->ordered_events.next_flush = ULLONG_MAX;
        err = ordered_events__flush(session, tool);
out_err:
        free(buf);
        perf_session__warn_about_errors(session, tool);
        perf_session_free_sample_buffers(session);
        return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
                   u64 head, size_t mmap_size, char *buf)
{
        union perf_event *event;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size) {
                /* We're not fetching the event so swap back again */
                if (session->header.needs_swap)
                        perf_event_header__bswap(&event->header);
                return NULL;
        }

        return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
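
/*
 * Added commentary: when fetch_mmaped_event() returns NULL because an
 * event straddles the end of the current slice, the remap path below
 * rounds the read position down to a page boundary, e.g. with a 4KB
 * page size:
 *
 *      head = 0x2000123  ->  page_offset = 0x2000000
 *
 * file_offset advances by page_offset, the remainder stays in head, and
 * a fresh slice is mapped so the straddling event lies fully inside it.
 */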
1298
1299 int __perf_session__process_events(struct perf_session *session,
1300                                    u64 data_offset, u64 data_size,
1301                                    u64 file_size, struct perf_tool *tool)
1302 {
1303         int fd = perf_data_file__fd(session->file);
1304         u64 head, page_offset, file_offset, file_pos, size;
1305         int err, mmap_prot, mmap_flags, map_idx = 0;
1306         size_t  mmap_size;
1307         char *buf, *mmaps[NUM_MMAPS];
1308         union perf_event *event;
1309         struct ui_progress prog;
1310         s64 skip;
1311
1312         perf_tool__fill_defaults(tool);
1313
1314         page_offset = page_size * (data_offset / page_size);
1315         file_offset = page_offset;
1316         head = data_offset - page_offset;
1317
1318         if (data_size && (data_offset + data_size < file_size))
1319                 file_size = data_offset + data_size;
1320
1321         ui_progress__init(&prog, file_size, "Processing events...");
1322
1323         mmap_size = MMAP_SIZE;
1324         if (mmap_size > file_size) {
1325                 mmap_size = file_size;
1326                 session->one_mmap = true;
1327         }
1328
1329         memset(mmaps, 0, sizeof(mmaps));
1330
1331         mmap_prot  = PROT_READ;
1332         mmap_flags = MAP_SHARED;
1333
1334         if (session->header.needs_swap) {
1335                 mmap_prot  |= PROT_WRITE;
1336                 mmap_flags = MAP_PRIVATE;
1337         }
1338 remap:
1339         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1340                    file_offset);
1341         if (buf == MAP_FAILED) {
1342                 pr_err("failed to mmap file\n");
1343                 err = -errno;
1344                 goto out_err;
1345         }
1346         mmaps[map_idx] = buf;
1347         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1348         file_pos = file_offset + head;
1349         if (session->one_mmap) {
1350                 session->one_mmap_addr = buf;
1351                 session->one_mmap_offset = file_offset;
1352         }
1353
1354 more:
1355         event = fetch_mmaped_event(session, head, mmap_size, buf);
1356         if (!event) {
1357                 if (mmaps[map_idx]) {
1358                         munmap(mmaps[map_idx], mmap_size);
1359                         mmaps[map_idx] = NULL;
1360                 }
1361
1362                 page_offset = page_size * (head / page_size);
1363                 file_offset += page_offset;
1364                 head -= page_offset;
1365                 goto remap;
1366         }
1367
1368         size = event->header.size;
1369
1370         if (size < sizeof(struct perf_event_header) ||
1371             (skip = perf_session__process_event(session, event, tool, file_pos))
1372                                                                         < 0) {
1373                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1374                        file_offset + head, event->header.size,
1375                        event->header.type);
1376                 err = -EINVAL;
1377                 goto out_err;
1378         }
1379
1380         if (skip)
1381                 size += skip;
1382
1383         head += size;
1384         file_pos += size;
1385
1386         ui_progress__update(&prog, size);
1387
1388         if (session_done())
1389                 goto out;
1390
1391         if (file_pos < file_size)
1392                 goto more;
1393
1394 out:
1395         /* do the final flush for ordered samples */
1396         session->ordered_events.next_flush = ULLONG_MAX;
1397         err = ordered_events__flush(session, tool);
1398 out_err:
1399         ui_progress__finish();
1400         perf_session__warn_about_errors(session, tool);
1401         perf_session_free_sample_buffers(session);
1402         session->one_mmap = false;
1403         return err;
1404 }
1405
1406 int perf_session__process_events(struct perf_session *session,
1407                                  struct perf_tool *tool)
1408 {
1409         u64 size = perf_data_file__size(session->file);
1410         int err;
1411
1412         if (perf_session__register_idle_thread(session) == NULL)
1413                 return -ENOMEM;
1414
1415         if (!perf_data_file__is_pipe(session->file))
1416                 err = __perf_session__process_events(session,
1417                                                      session->header.data_offset,
1418                                                      session->header.data_size,
1419                                                      size, tool);
1420         else
1421                 err = __perf_session__process_pipe_events(session, tool);
1422
1423         return err;
1424 }
1425
1426 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1427 {
1428         struct perf_evsel *evsel;
1429
1430         evlist__for_each(session->evlist, evsel) {
1431                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1432                         return true;
1433         }
1434
1435         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1436         return false;
1437 }
1438
1439 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1440                                      const char *symbol_name, u64 addr)
1441 {
1442         char *bracket;
1443         enum map_type i;
1444         struct ref_reloc_sym *ref;
1445
1446         ref = zalloc(sizeof(struct ref_reloc_sym));
1447         if (ref == NULL)
1448                 return -ENOMEM;
1449
1450         ref->name = strdup(symbol_name);
1451         if (ref->name == NULL) {
1452                 free(ref);
1453                 return -ENOMEM;
1454         }
1455
1456         bracket = strchr(ref->name, ']');
1457         if (bracket)
1458                 *bracket = '\0';
1459
1460         ref->addr = addr;
1461
1462         for (i = 0; i < MAP__NR_TYPES; ++i) {
1463                 struct kmap *kmap = map__kmap(maps[i]);
1464                 kmap->ref_reloc_sym = ref;
1465         }
1466
1467         return 0;
1468 }
1469
1470 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1471 {
1472         return machines__fprintf_dsos(&session->machines, fp);
1473 }
1474
1475 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1476                                           bool (*skip)(struct dso *dso, int parm), int parm)
1477 {
1478         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1479 }
1480
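/*
 * Print the session wide aggregated event statistics followed by the
 * per event (evsel) statistics.
 */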
1481 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1482 {
1483         struct perf_evsel *pos;
1484         size_t ret = fprintf(fp, "Aggregated stats:\n");
1485
1486         ret += events_stats__fprintf(&session->stats, fp);
1487
1488         evlist__for_each(session->evlist, pos) {
1489                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1490                 ret += events_stats__fprintf(&pos->hists.stats, fp);
1491         }
1492
1493         return ret;
1494 }
1495
1496 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1497 {
1498         /*
1499          * FIXME: Here we have to actually print all the machines in this
1500          * session, not just the host...
1501          */
1502         return machine__fprintf(&session->machines.host, fp);
1503 }
1504
1505 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1506                                               unsigned int type)
1507 {
1508         struct perf_evsel *pos;
1509
1510         evlist__for_each(session->evlist, pos) {
1511                 if (pos->attr.type == type)
1512                         return pos;
1513         }
1514         return NULL;
1515 }
1516
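/*
 * Print the resolved location(s) for a sample to stdout: print_opts is a
 * bitmask of PRINT_IP_OPT_* flags selecting which fields (ip, symbol,
 * symbol+offset, DSO, source line) are emitted, and stack_depth caps the
 * number of callchain entries printed when callchains are used.
 */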
1517 void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
1518                           struct addr_location *al,
1519                           unsigned int print_opts, unsigned int stack_depth)
1520 {
1521         struct callchain_cursor_node *node;
1522         int print_ip = print_opts & PRINT_IP_OPT_IP;
1523         int print_sym = print_opts & PRINT_IP_OPT_SYM;
1524         int print_dso = print_opts & PRINT_IP_OPT_DSO;
1525         int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
1526         int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
1527         int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
1528         char s = print_oneline ? ' ' : '\t';
1529
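        /*
         * With callchains enabled and present in the sample, resolve and
         * walk the whole chain, up to stack_depth entries; otherwise print
         * just the single resolved sample location.
         */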
1530         if (symbol_conf.use_callchain && sample->callchain) {
1531                 struct addr_location node_al;
1532
1533                 if (machine__resolve_callchain(al->machine, evsel, al->thread,
1534                                                sample, NULL, NULL,
1535                                                PERF_MAX_STACK_DEPTH) != 0) {
1536                         if (verbose)
1537                                 error("Failed to resolve callchain. Skipping\n");
1538                         return;
1539                 }
1540                 callchain_cursor_commit(&callchain_cursor);
1541
1542                 if (print_symoffset)
1543                         node_al = *al;
1544
1545                 while (stack_depth) {
1546                         u64 addr = 0;
1547
1548                         node = callchain_cursor_current(&callchain_cursor);
1549                         if (!node)
1550                                 break;
1551
1552                         if (node->sym && node->sym->ignore)
1553                                 goto next;
1554
1555                         if (print_ip)
1556                                 printf("%c%16" PRIx64, s, node->ip);
1557
1558                         if (node->map)
1559                                 addr = node->map->map_ip(node->map, node->ip);
1560
1561                         if (print_sym) {
1562                                 printf(" ");
1563                                 if (print_symoffset) {
1564                                         node_al.addr = addr;
1565                                         node_al.map  = node->map;
1566                                         symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
1567                                 } else
1568                                         symbol__fprintf_symname(node->sym, stdout);
1569                         }
1570
1571                         if (print_dso) {
1572                                 printf(" (");
1573                                 map__fprintf_dsoname(node->map, stdout);
1574                                 printf(")");
1575                         }
1576
1577                         if (print_srcline)
1578                                 map__fprintf_srcline(node->map, addr, "\n  ",
1579                                                      stdout);
1580
1581                         if (!print_oneline)
1582                                 printf("\n");
1583
1584                         stack_depth--;
1585 next:
1586                         callchain_cursor_advance(&callchain_cursor);
1587                 }
1588
1589         } else {
1590                 if (al->sym && al->sym->ignore)
1591                         return;
1592
1593                 if (print_ip)
1594                         printf("%16" PRIx64, sample->ip);
1595
1596                 if (print_sym) {
1597                         printf(" ");
1598                         if (print_symoffset)
1599                                 symbol__fprintf_symname_offs(al->sym, al,
1600                                                              stdout);
1601                         else
1602                                 symbol__fprintf_symname(al->sym, stdout);
1603                 }
1604
1605                 if (print_dso) {
1606                         printf(" (");
1607                         map__fprintf_dsoname(al->map, stdout);
1608                         printf(")");
1609                 }
1610
1611                 if (print_srcline)
1612                         map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
1613         }
1614 }
1615
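/*
 * Parse a user supplied cpu list (e.g. "0-3,6") into cpu_bitmap, after
 * checking that the session's events recorded the CPU (PERF_SAMPLE_CPU),
 * without which filtering samples by CPU cannot work.
 */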
1616 int perf_session__cpu_bitmap(struct perf_session *session,
1617                              const char *cpu_list, unsigned long *cpu_bitmap)
1618 {
1619         int i, err = -1;
1620         struct cpu_map *map;
1621
1622         for (i = 0; i < PERF_TYPE_MAX; ++i) {
1623                 struct perf_evsel *evsel;
1624
1625                 evsel = perf_session__find_first_evtype(session, i);
1626                 if (!evsel)
1627                         continue;
1628
1629                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1630                         pr_err("File does not contain CPU events. "
1631                                "Remove -C option to proceed.\n");
1632                         return -1;
1633                 }
1634         }
1635
1636         map = cpu_map__new(cpu_list);
1637         if (map == NULL) {
1638                 pr_err("Invalid cpu_list\n");
1639                 return -1;
1640         }
1641
1642         for (i = 0; i < map->nr; i++) {
1643                 int cpu = map->map[i];
1644
1645                 if (cpu >= MAX_NR_CPUS) {
1646                         pr_err("Requested CPU %d too large. "
1647                                "Consider raising MAX_NR_CPUS\n", cpu);
1648                         goto out_delete_map;
1649                 }
1650
1651                 set_bit(cpu, cpu_bitmap);
1652         }
1653
1654         err = 0;
1655
1656 out_delete_map:
1657         cpu_map__delete(map);
1658         return err;
1659 }
1660
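/*
 * Print the perf.data header information, prefixed by the file's change
 * time (st_ctime) as an approximation of when the data was captured;
 * note that ctime() already appends the newline.
 */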
1661 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1662                                 bool full)
1663 {
1664         struct stat st;
1665         int fd, ret;
1666
1667         if (session == NULL || fp == NULL)
1668                 return;
1669
1670         fd = perf_data_file__fd(session->file);
1671
1672         ret = fstat(fd, &st);
1673         if (ret == -1)
1674                 return;
1675
1676         fprintf(fp, "# ========\n");
1677         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1678         perf_header__fprintf_info(session, fp, full);
1679         fprintf(fp, "# ========\n#\n");
1680 }
1681
1682
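/*
 * Associate handlers with this session's tracepoint events by name.
 * Names in assocs[] that match no event in the session are silently
 * ignored; an event that already has a handler makes this fail with
 * -EEXIST.
 */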
1683 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1684                                              const struct perf_evsel_str_handler *assocs,
1685                                              size_t nr_assocs)
1686 {
1687         struct perf_evsel *evsel;
1688         size_t i;
1689         int err;
1690
1691         for (i = 0; i < nr_assocs; i++) {
1692                 /*
1693                  * Adding a handler for an event that is not in the
1694                  * session is not an error, just ignore it.
1695                  */
1696                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
1697                 if (evsel == NULL)
1698                         continue;
1699
1700                 err = -EEXIST;
1701                 if (evsel->handler != NULL)
1702                         goto out;
1703                 evsel->handler = assocs[i].handler;
1704         }
1705
1706         err = 0;
1707 out:
1708         return err;
1709 }