cs-etm: remove unnecessary structure field
[firefly-linux-kernel-4.4.55.git] / tools / perf / util / cs-etm.c
1 /*
2  * Copyright(C) 2016 Linaro Limited. All rights reserved.
3  * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/types.h>
20 #include <linux/bitops.h>
21 #include <linux/log2.h>
22
23 #include "perf.h"
24 #include "thread_map.h"
25 #include "thread.h"
26 #include "thread-stack.h"
27 #include "callchain.h"
28 #include "auxtrace.h"
29 #include "evlist.h"
30 #include "machine.h"
31 #include "util.h"
32 #include "color.h"
33 #include "cs-etm.h"
34 #include "cs-etm-decoder/cs-etm-decoder.h"
35 #include "debug.h"
36
37 #include <stdlib.h>
38
39 #define KiB(x) ((x) * 1024)
40 #define MiB(x) ((x) * 1024 * 1024)
41 #define MAX_TIMESTAMP (~0ULL)
42
43 struct cs_etm_auxtrace {
44         struct auxtrace         auxtrace;
45         struct auxtrace_queues  queues;
46         struct auxtrace_heap    heap;
47         u64                    **metadata;
48         u32                     auxtrace_type;
49         struct perf_session    *session;
50         struct machine         *machine;
51         struct perf_evsel      *switch_evsel;
52         struct thread          *unknown_thread;
53         uint32_t                num_cpu;
54         bool                    timeless_decoding;
55         bool                    sampling_mode;
56         bool                    snapshot_mode;
57         bool                    data_queued;
58         bool                    sync_switch;
59         bool                    synth_needs_swap;
60         int                     have_sched_switch;
61
62         bool                    sample_instructions;
63         u64                     instructions_sample_type;
64         u64                     instructions_sample_period;
65         u64                     instructions_id;
66         struct itrace_synth_opts synth_opts;
67         unsigned                pmu_type;
68 };
69
70 struct cs_etm_queue {
71         struct cs_etm_auxtrace *etm;
72         unsigned                queue_nr;
73         struct auxtrace_buffer *buffer;
74         const struct cs_etm_state *state;
75         struct ip_callchain    *chain;
76         union perf_event       *event_buf;
77         bool                    on_heap;
78         bool                    step_through_buffers;
79         bool                    use_buffer_pid_tid;
80         pid_t                   pid, tid;
81         int                     cpu;
82         struct thread          *thread;
83         u64                     time;
84         u64                     timestamp;
85         bool                    stop;
86         struct cs_etm_decoder  *decoder;
87         u64                     offset;
88         bool                    eot;
89         bool                    kernel_mapped;
90 };
91
92 static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq);
93 static int cs_etm__update_queues(struct cs_etm_auxtrace *);
94 static int cs_etm__process_queues(struct cs_etm_auxtrace *, u64);
95 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *, pid_t, u64);
96 static uint32_t cs_etm__mem_access(struct cs_etm_queue *, uint64_t, size_t, uint8_t *);
97
98 static void cs_etm__packet_dump(const char *pkt_string)
99 {
100         const char *color = PERF_COLOR_BLUE;
101
102         color_fprintf(stdout,color, "  %s\n", pkt_string);
103         fflush(stdout);
104 }
105
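/*
 * Decode and print the packets contained in an auxtrace buffer, using a
 * throw-away decoder configured from the per-CPU ETMv4 metadata.  Only
 * used when dumping the trace (perf report -D).
 */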
106 static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
107                               struct auxtrace_buffer *buffer)
108 {
109         const char *color = PERF_COLOR_BLUE;
110         struct cs_etm_decoder_params d_params;
111         struct cs_etm_trace_params *t_params;
112         struct cs_etm_decoder *decoder;
113         size_t buffer_used = 0;
114         size_t i;
115
116         fprintf(stdout,"\n");
117         color_fprintf(stdout, color,
118                      ". ... CoreSight ETM Trace data: size %zu bytes\n",
119                      buffer->size);
120
121         t_params = zalloc(sizeof(struct cs_etm_trace_params) * etm->num_cpu);
122         for (i = 0; i < etm->num_cpu; ++i) {
123                 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
124                 t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
125                 t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
126                 t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
127                 t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
128                 t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR];
129                 t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
130   //[CS_ETMV4_TRCAUTHSTATUS] = "   TRCAUTHSTATUS                  %"PRIx64"\n",
131         }
132         d_params.packet_printer = cs_etm__packet_dump;
133         d_params.operation = CS_ETM_OPERATION_PRINT;
134         d_params.formatted = true;
135         d_params.fsyncs = false;
136         d_params.hsyncs = false;
137         d_params.frame_aligned = true;
138
139         decoder = cs_etm_decoder__new(etm->num_cpu,&d_params, t_params);
140
141         zfree(&t_params);
142
143         if (decoder == NULL)
144                 return;
145
146         do {
147                 size_t consumed;
148                 cs_etm_decoder__process_data_block(decoder, buffer->offset, &((uint8_t *)buffer->data)[buffer_used], buffer->size - buffer_used, &consumed);
149                 buffer_used += consumed;
150         } while (buffer_used < buffer->size);
151         cs_etm_decoder__free(decoder);
152 }
153
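/*
 * Called once the session has delivered all its events: make sure the
 * queues are set up and drain any trace data that is still pending.
 */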
154 static int cs_etm__flush_events(struct perf_session *session, struct perf_tool *tool){
155         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
156                                                    struct cs_etm_auxtrace,
157                                                    auxtrace);
158
159         int ret;
160
161         if (dump_trace)
162                 return 0;
163
164         if (!tool->ordered_events)
165                 return -EINVAL;
166
167         ret = cs_etm__update_queues(etm);
168
169         if (ret < 0) 
170                 return ret;
171
172         if (etm->timeless_decoding)
173                 return cs_etm__process_timeless_queues(etm,-1,MAX_TIMESTAMP - 1);
174
175         return cs_etm__process_queues(etm, MAX_TIMESTAMP);
176 }
177
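/*
 * Refresh the pid/tid/cpu associated with a queue from the machine's
 * record of what is currently running on that CPU.
 */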
178 static void  cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
179                                     struct auxtrace_queue *queue)
180 {
181         struct cs_etm_queue *etmq = queue->priv;
182
183         if ((queue->tid == -1) || (etm->have_sched_switch)) {
184                 etmq->tid = machine__get_current_tid(etm->machine, etmq->cpu);
185                 thread__zput(etmq->thread);
186         }
187
188         if ((!etmq->thread) && (etmq->tid != -1)) {
189                 etmq->thread = machine__find_thread(etm->machine,-1,etmq->tid);
190         }
191
192         if (etmq->thread) {
193                 etmq->pid = etmq->thread->pid_;
194                 if (queue->cpu == -1) {
195                         etmq->cpu = etmq->thread->cpu;
196                 }
197         }
198 }
199
200 static void cs_etm__free_queue(void *priv)
201 {
202         struct cs_etm_queue *etmq = priv;
203
204         if (!etmq)
205                 return;
206
207         thread__zput(etmq->thread);
208         cs_etm_decoder__free(etmq->decoder);
209         zfree(&etmq->event_buf);
210         zfree(&etmq->chain);
211         free(etmq);
212 }
213
214 static void cs_etm__free_events(struct perf_session *session)
215 {
216         struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
217                                                    struct cs_etm_auxtrace,
218                                                    auxtrace);
219
220         struct auxtrace_queues *queues = &(aux->queues);
221
222         unsigned i;
223
224         for (i = 0; i < queues->nr_queues; ++i) {
225                 cs_etm__free_queue(queues->queue_array[i].priv);
226                 queues->queue_array[i].priv = 0;
227         }
228
229         auxtrace_queues__free(queues);
230
231 }
232
233 static void cs_etm__free(struct perf_session *session)
234 {
235
236         size_t i;
237         struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
238                                                    struct cs_etm_auxtrace,
239                                                    auxtrace);
240         auxtrace_heap__free(&aux->heap);
241         cs_etm__free_events(session);
242         session->auxtrace = NULL;
243         //thread__delete(aux->unknown_thread);
244         for (i = 0; i < aux->num_cpu; ++i) {
245                 zfree(&aux->metadata[i]);
246         }
247         zfree(&aux->metadata);
248         free(aux);
249 }
250
251 static void cs_etm__use_buffer_pid_tid(struct cs_etm_queue *etmq,
252                                       struct auxtrace_queue *queue,
253                                       struct auxtrace_buffer *buffer)
254 {
255         if ((queue->cpu == -1) && (buffer->cpu != -1)) {
256                 etmq->cpu = buffer->cpu;
257         }
258
259         etmq->pid = buffer->pid;
260         etmq->tid = buffer->tid;
261
262         thread__zput(etmq->thread);
263
264         if (etmq->tid != -1) {
265                 if (etmq->pid != -1) {
266                         etmq->thread = machine__findnew_thread(etmq->etm->machine,
267                                                                etmq->pid,
268                                                                etmq->tid);
269                 } else {
270                         etmq->thread = machine__findnew_thread(etmq->etm->machine,
271                                                                -1,
272                                                                etmq->tid);
273                 }
274         }
275 }
276
277
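/*
 * Hand the next auxtrace buffer of this queue to the decoder: expose its
 * data, offset and reference timestamp through 'buff' and return the
 * number of bytes made available, or 0 when the queue is exhausted (or
 * stopped after one buffer in step-through mode).
 */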
278 static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
279 {
280         struct auxtrace_buffer *aux_buffer = etmq->buffer;
281         struct auxtrace_buffer *old_buffer = aux_buffer;
282         struct auxtrace_queue *queue;
283
284         if (etmq->stop) {
285                 buff->len = 0;
286                 return 0;
287         }
288
289         queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
290
291         aux_buffer = auxtrace_buffer__next(queue,aux_buffer);
292
293         if (!aux_buffer) {
294                 if (old_buffer) {
295                         auxtrace_buffer__drop_data(old_buffer);
296                 }
297                 buff->len = 0;
298                 return 0;
299         }
300
301         etmq->buffer = aux_buffer;
302
303         if (!aux_buffer->data) {
304                 int fd = perf_data_file__fd(etmq->etm->session->file);
305
306                 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
307                 if (!aux_buffer->data)
308                         return -ENOMEM;
309         }
310
311         if (old_buffer)
312                 auxtrace_buffer__drop_data(old_buffer);
313
314         if (aux_buffer->use_data) {
315                 buff->offset = aux_buffer->offset;
316                 buff->len = aux_buffer->use_size;
317                 buff->buf = aux_buffer->use_data;
318         } else {
319                 buff->offset = aux_buffer->offset;
320                 buff->len = aux_buffer->size;
321                 buff->buf = aux_buffer->data;
322         }
323         /*
324         buff->offset = 0;
325         buff->len = sizeof(cstrace);
326         buff->buf = cstrace;
327         */
328
329         buff->ref_timestamp = aux_buffer->reference;
330
331         if (etmq->use_buffer_pid_tid && 
332             ((etmq->pid != aux_buffer->pid) || 
333              (etmq->tid != aux_buffer->tid))) {
334                 cs_etm__use_buffer_pid_tid(etmq,queue,aux_buffer);
335         }
336
337         if (etmq->step_through_buffers)
338                 etmq->stop = true;
339
340         return buff->len;
341 }
342
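/*
 * Allocate the per-queue decode state and instantiate a full decoder
 * configured from the per-CPU ETMv4 metadata found in the perf.data file.
 */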
343 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
344                                                unsigned int queue_nr)
345 {
346         struct cs_etm_decoder_params d_params;
347         struct cs_etm_trace_params   *t_params;
348         struct cs_etm_queue *etmq;
349         size_t i;
350
351         etmq = zalloc(sizeof(struct cs_etm_queue));
352         if (!etmq)
353                 return NULL;
354
355         if (etm->synth_opts.callchain) {
356                 size_t sz = sizeof(struct ip_callchain);
357
358                 sz += etm->synth_opts.callchain_sz * sizeof(u64);
359                 etmq->chain = zalloc(sz);
360                 if (!etmq->chain)
361                         goto out_free;
362         } else {
363                 etmq->chain = NULL;
364         }
365
366         etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
367         if (!etmq->event_buf)
368                 goto out_free;
369
370         etmq->etm = etm;
371         etmq->queue_nr = queue_nr;
372         etmq->pid = -1;
373         etmq->tid = -1;
374         etmq->cpu = -1;
375         etmq->stop = false;
376         etmq->kernel_mapped = false;
377
378         t_params = zalloc(sizeof(struct cs_etm_trace_params)*etm->num_cpu);
379
380         for (i = 0; i < etm->num_cpu; ++i) {
381                 t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
382                 t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
383                 t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
384                 t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
385                 t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR];
386                 t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
387                 t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
388         }
389         d_params.packet_printer = cs_etm__packet_dump;
390         d_params.operation = CS_ETM_OPERATION_DECODE;    
391         d_params.formatted = true;
392         d_params.fsyncs = false;
393         d_params.hsyncs = false;
394         d_params.frame_aligned = true;
395         d_params.data = etmq;
396
397         etmq->decoder = cs_etm_decoder__new(etm->num_cpu,&d_params,t_params);
398
399
400         zfree(&t_params);
401
402         if (!etmq->decoder)
403                 goto out_free;
404
405         etmq->offset = 0;
406         etmq->eot = false;
407
408         return etmq;
409
410 out_free:
411         zfree(&etmq->event_buf);
412         zfree(&etmq->chain);
413         free(etmq);
414         return NULL;
415 }
416
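/*
 * Lazily allocate the decode state the first time a queue with data is
 * seen and inherit the cpu/tid information recorded with the queue.
 */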
417 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, 
418                               struct auxtrace_queue *queue,
419                               unsigned int queue_nr)
420 {
421         struct cs_etm_queue *etmq = queue->priv;
422
423         if (list_empty(&(queue->head))) 
424                 return 0;
425
426         if (etmq == NULL) {
427                 etmq = cs_etm__alloc_queue(etm,queue_nr);
428
429                 if (etmq == NULL) {
430                         return -ENOMEM;
431                 }
432
433                 queue->priv = etmq;
434
435                 if (queue->cpu != -1) {
436                         etmq->cpu = queue->cpu;
437                 }
438
439                 etmq->tid = queue->tid;
440
441                 if (etm->sampling_mode) {
442                         if (etm->timeless_decoding)
443                                 etmq->step_through_buffers = true;
444                         if (etm->timeless_decoding || !etm->have_sched_switch)
445                                 etmq->use_buffer_pid_tid = true;
446                 }
447         }
448         
449         if (!etmq->on_heap && 
450             (!etm->sync_switch)) {
451                 const struct cs_etm_state *state;
452                 int ret = 0;
453
454                 if (etm->timeless_decoding)
455                         return ret;
456
457                 //cs_etm__log("queue %u getting timestamp\n",queue_nr);
458                 //cs_etm__log("queue %u decoding cpu %d pid %d tid %d\n",
459                            //queue_nr, etmq->cpu, etmq->pid, etmq->tid);
460                 (void) state;
461                 return ret;
462                 /*
463                 while (1) {
464                         state = cs_etm_decoder__decode(etmq->decoder);
465                         if (state->err) {
466                                 if (state->err == CS_ETM_ERR_NODATA) {
467                                         //cs_etm__log("queue %u has no timestamp\n",
468                                                    //queue_nr);
469                                         return 0;
470                                 }
471                                 continue;
472                         }
473                         if (state->timestamp)
474                                 break;
475                 }
476
477                 etmq->timestamp = state->timestamp;
478                 //cs_etm__log("queue %u timestamp 0x%"PRIx64 "\n",
479                            //queue_nr, etmq->timestamp);
480                 etmq->state = state;
481                 etmq->have_sample = true;
482                 //cs_etm__sample_flags(etmq);
483                 ret = auxtrace_heap__add(&etm->heap, queue_nr, etmq->timestamp);
484                 if (ret)
485                         return ret;
486                 etmq->on_heap = true;
487                 */
488         }
489         
490         return 0;
491 }
492
493
494 static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
495 {
496         unsigned int i;
497         int ret;
498
499         for (i = 0; i < etm->queues.nr_queues; i++) {
500                 ret = cs_etm__setup_queue(etm, &(etm->queues.queue_array[i]),i);
501                 if (ret)
502                         return ret;
503         }
504         return 0;
505 }
506
507 #if 0
508 struct cs_etm_cache_entry {
509         struct auxtrace_cache_entry     entry;
510         uint64_t                        icount;
511         uint64_t                        bcount;
512 };
513
514 static size_t cs_etm__cache_divisor(void)
515 {
516         static size_t d = 64;
517
518         return d;
519 }
520
521 static size_t cs_etm__cache_size(struct dso *dso,
522                                 struct machine *machine)
523 {
524         off_t size;
525
526         size = dso__data_size(dso,machine);
527         size /= cs_etm__cache_divisor();
528
529         if (size < 1000) 
530                 return 10;
531
532         if (size > (1 << 21)) 
533                 return 21;
534
535         return 32 - __builtin_clz(size);
536 }
537
538 static struct auxtrace_cache *cs_etm__cache(struct dso *dso,
539                                            struct machine *machine)
540 {
541         struct auxtrace_cache *c;
542         size_t bits;
543
544         if (dso->auxtrace_cache)
545                 return dso->auxtrace_cache;
546
547         bits = cs_etm__cache_size(dso,machine);
548
549         c = auxtrace_cache__new(bits, sizeof(struct cs_etm_cache_entry), 200);
550
551         dso->auxtrace_cache = c;
552
553         return c;
554 }
555
556 static int cs_etm__cache_add(struct dso *dso, struct machine *machine,
557                             uint64_t offset, uint64_t icount, uint64_t bcount)
558 {
559         struct auxtrace_cache *c = cs_etm__cache(dso, machine);
560         struct cs_etm_cache_entry *e;
561         int err;
562
563         if (!c)
564                 return -ENOMEM;
565
566         e = auxtrace_cache__alloc_entry(c);
567         if (!e)
568                 return -ENOMEM;
569
570         e->icount = icount;
571         e->bcount = bcount;
572
573         err = auxtrace_cache__add(c, offset, &e->entry);
574
575         if (err)
576                 auxtrace_cache__free_entry(c, e);
577
578         return err;
579 }
580
581 static struct cs_etm_cache_entry *cs_etm__cache_lookup(struct dso *dso,
582                                                       struct machine *machine,
583                                                       uint64_t offset)
584 {
585         struct auxtrace_cache *c = cs_etm__cache(dso, machine);
586
587         if (!c)
588                 return NULL;
589
590         return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
591 }
592 #endif
593
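/*
 * Turn a decoded address range packet into a synthesized
 * PERF_RECORD_SAMPLE and deliver it to the session.
 */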
594 static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
595                                            struct cs_etm_packet *packet)
596 {
597         int ret = 0;
598         struct cs_etm_auxtrace *etm = etmq->etm;
599         union perf_event *event = etmq->event_buf;
600         struct perf_sample sample = {.ip = 0,};
601         uint64_t start_addr = packet->start_addr;
602         uint64_t end_addr = packet->end_addr;
603
604         event->sample.header.type = PERF_RECORD_SAMPLE;
605         event->sample.header.misc = PERF_RECORD_MISC_USER;
606         event->sample.header.size = sizeof(struct perf_event_header);
607
608
609         sample.ip = start_addr;
610         sample.pid = etmq->pid;
611         sample.tid = etmq->tid;
612         sample.addr = end_addr;
613         sample.id = etmq->etm->instructions_id;
614         sample.stream_id = etmq->etm->instructions_id;
615         sample.period = (end_addr - start_addr) >> 2; 
616         sample.cpu = etmq->cpu;
617         sample.flags = 0; // etmq->flags;
618         sample.insn_len = 1; // etmq->insn_len;
619
620         //etmq->last_insn_cnt = etmq->state->tot_insn_cnt;
621
622 #if 0
623         {
624                 struct   addr_location al;
625                 uint64_t offset;
626                 struct   thread *thread;
627                 struct   machine *machine = etmq->etm->machine;
628                 uint8_t  cpumode;
629                 struct   cs_etm_cache_entry *e;
630                 uint8_t  buf[256];
631                 size_t   bufsz;
632
633                 thread = etmq->thread;
634
635                 if (!thread) {
636                         thread = etmq->etm->unknown_thread;
637                 }
638
639                 if (start_addr > 0xffffffc000000000UL) {
640                         cpumode = PERF_RECORD_MISC_KERNEL;
641                 } else {
642                         cpumode = PERF_RECORD_MISC_USER;
643                 }
644
645                 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, start_addr,&al);
646                 if (!al.map || !al.map->dso) {
647                         goto endTest;
648                 }
649                 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
650                     dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) {
651                         goto endTest;
652                 }
653
654                 offset = al.map->map_ip(al.map,start_addr);
655
656
657                 e = cs_etm__cache_lookup(al.map->dso, machine, offset);
658
659                 if (e) {
660                   (void) e;
661                 } else {
662                         int len;
663                         map__load(al.map, machine->symbol_filter);
664
665                         bufsz = sizeof(buf);
666                         len = dso__data_read_offset(al.map->dso, machine,
667                                                     offset, buf, bufsz);
668
669                         if (len <= 0) {
670                                 goto endTest;
671                         }
672
673                         cs_etm__cache_add(al.map->dso, machine, offset, (end_addr - start_addr) >> 2, end_addr - start_addr);
674
675                 }
676 endTest:
677                 (void) offset;
678         }
679 #endif
680
681         ret = perf_session__deliver_synth_event(etm->session,event, &sample);
682
683         if (ret) {
684                 pr_err("CS ETM Trace: failed to deliver instruction event, error %d\n", ret);
685
686         }
687         return ret;
688 }
689
690 struct cs_etm_synth {
691         struct perf_tool dummy_tool;
692         struct perf_session *session;
693 };
694
695
696 static int cs_etm__event_synth(struct perf_tool *tool,
697                               union perf_event *event,
698                               struct perf_sample *sample,
699                               struct machine *machine)
700 {
701         struct cs_etm_synth *cs_etm_synth =
702                       container_of(tool, struct cs_etm_synth, dummy_tool);
703
704         (void) sample;
705         (void) machine;
706
707         return perf_session__deliver_synth_event(cs_etm_synth->session, event, NULL);
708
709 }
710
711
712 static int cs_etm__synth_event(struct perf_session *session,
713                               struct perf_event_attr *attr, u64 id)
714 {
715         struct cs_etm_synth cs_etm_synth;
716
717         memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
718         cs_etm_synth.session = session;
719
720         return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
721                                            &id, cs_etm__event_synth);
722 }
723
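/*
 * Build the perf_event_attr used for the samples synthesized while
 * decoding, modelled on the event that carried the CoreSight trace, and
 * register it with the session.
 */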
724 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, 
725                                struct perf_session *session)
726 {
727         struct perf_evlist *evlist = session->evlist;
728         struct perf_evsel *evsel;
729         struct perf_event_attr attr;
730         bool found = false;
731         u64 id;
732         int err;
733
734         evlist__for_each(evlist, evsel) {
735
736                 if (evsel->attr.type == etm->pmu_type) {
737                         found = true;
738                         break;
739                 }
740         }
741
742         if (!found) {
743                 pr_debug("There are no selected events with CoreSight trace data\n");
744                 return 0;
745         }
746
747         memset(&attr, 0, sizeof(struct perf_event_attr));
748         attr.size = sizeof(struct perf_event_attr);
749         attr.type = PERF_TYPE_HARDWARE;
750         attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
751         attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
752                             PERF_SAMPLE_PERIOD;
753         if (etm->timeless_decoding) 
754                 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
755         else
756                 attr.sample_type |= PERF_SAMPLE_TIME;
757
758         attr.exclude_user = evsel->attr.exclude_user;
759         attr.exclude_kernel = evsel->attr.exclude_kernel;
760         attr.exclude_hv = evsel->attr.exclude_hv;
761         attr.exclude_host = evsel->attr.exclude_host;
762         attr.exclude_guest = evsel->attr.exclude_guest;
763         attr.sample_id_all = evsel->attr.sample_id_all;
764         attr.read_format = evsel->attr.read_format;
765
766         id = evsel->id[0] + 1000000000;
767
768         if (!id)
769                 id = 1;
770
771         if (etm->synth_opts.instructions) {
772                 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
773                 attr.sample_period = etm->synth_opts.period;
774                 etm->instructions_sample_period = attr.sample_period;
775                 err = cs_etm__synth_event(session, &attr, id);
776
777                 if (err) {
778                         pr_err("%s: failed to synthesize 'instructions' event type\n",
779                                __func__);
780                         return err;
781                 }
782                 etm->sample_instructions = true;
783                 etm->instructions_sample_type = attr.sample_type;
784                 etm->instructions_id = id;
785                 id += 1;
786         }
787
788         etm->synth_needs_swap = evsel->needs_swap;
789         return 0;
790 }
791
792 static int cs_etm__sample(struct cs_etm_queue *etmq)
793 {
794         //const struct cs_etm_state *state = etmq->state;
795         struct cs_etm_packet packet;
796         //struct cs_etm_auxtrace *etm = etmq->etm;
797         int err;
798
799         err = cs_etm_decoder__get_packet(etmq->decoder, &packet);
800         /* A return value of -1 means there is no packet pending, not a real error */
801
802         if (!err && packet.sample_type & CS_ETM_RANGE) {
803                 err = cs_etm__synth_instruction_sample(etmq,&packet);
804                 if (err)
805                         return err;
806         }
807         return 0;
808 }
809
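/*
 * Feed the trace data of a queue to the decoder buffer by buffer,
 * synthesizing a sample for every range packet that is produced.
 */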
810 static int cs_etm__run_decoder(struct cs_etm_queue *etmq, u64 *timestamp)
811 {
812         struct cs_etm_buffer buffer;
813         size_t buffer_used;
814         int err = 0;
815
816         /* Go through each buffer in the queue and decode them one by one */
817 more:
818         buffer_used = 0;
819         memset(&buffer, 0, sizeof(buffer));
820         err = cs_etm__get_trace(&buffer,etmq);
821         if (err <= 0)
822                 return err;
823
824         do {
825                 size_t processed = 0;
826                 etmq->state = cs_etm_decoder__process_data_block(etmq->decoder,
827                                                                  etmq->offset,
828                                                                  &buffer.buf[buffer_used],
829                                                                  buffer.len - buffer_used,
830                                                                  &processed);
831                 err = etmq->state->err;
832                 etmq->offset += processed;
833                 buffer_used += processed;
834                 if (!err)
835                         cs_etm__sample(etmq);
836         } while (!etmq->eot && (buffer.len > buffer_used));
837         goto more;
838
839         (void) timestamp;
840
841         return err;
842 }
843
844 static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
845 {
846         if (etm->queues.new_data) {
847                 etm->queues.new_data = false;
848                 return cs_etm__setup_queues(etm);
849         }
850         return 0;
851 }
852
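/*
 * Process queued trace data in timestamp order up to 'timestamp', using
 * the auxtrace heap so that the oldest queue is always decoded first.
 */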
853 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm, u64 timestamp)
854 {
855         unsigned int queue_nr;
856         u64 ts;
857         int ret;
858
859         while (1) {
860                 struct auxtrace_queue *queue;
861                 struct cs_etm_queue *etmq;
862         
863                 if (!etm->heap.heap_cnt)
864                         return 0;
865         
866                 if (etm->heap.heap_array[0].ordinal >= timestamp)
867                         return 0;
868         
869                 queue_nr = etm->heap.heap_array[0].queue_nr;
870                 queue = &etm->queues.queue_array[queue_nr];
871                 etmq = queue->priv;
872         
873                 //cs_etm__log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
874                            //queue_nr, etm->heap.heap_array[0].ordinal,
875                            //timestamp);
876
877                 auxtrace_heap__pop(&etm->heap);
878
879                 if (etm->heap.heap_cnt) {
880                         ts = etm->heap.heap_array[0].ordinal + 1;
881                         if (ts > timestamp)
882                                 ts = timestamp;
883                 } else {
884                         ts = timestamp;
885                 }
886
887                 cs_etm__set_pid_tid_cpu(etm, queue);
888
889                 ret = cs_etm__run_decoder(etmq, &ts);
890
891                 if (ret < 0) {
892                         auxtrace_heap__add(&etm->heap, queue_nr, ts);
893                         return ret;
894                 }
895
896                 if (!ret) {
897                         ret = auxtrace_heap__add(&etm->heap, queue_nr, ts);
898                         if (ret < 0)
899                                 return ret;
900                 } else {
901                         etmq->on_heap = false;
902                 }
903         }
904         return 0;
905 }
906
907 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
908                                           pid_t tid,
909                                           u64 time_)
910 {
911         struct auxtrace_queues *queues = &etm->queues;
912         unsigned int i;
913         u64 ts = 0;
914         
915         for (i = 0; i < queues->nr_queues; ++i) {
916                 struct auxtrace_queue *queue = &(etm->queues.queue_array[i]);
917                 struct cs_etm_queue *etmq = queue->priv;
918
919                 if (etmq && ((tid == -1) || (etmq->tid == tid))) {
920                         etmq->time = time_;
921                         cs_etm__set_pid_tid_cpu(etm, queue);
922                         cs_etm__run_decoder(etmq,&ts);
923
924                 }
925         }
926         return 0;
927 }
928
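/*
 * Map a CPU number to its decode queue: try the most likely slot first
 * and fall back to scanning the remaining queues.
 */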
929 static struct cs_etm_queue *cs_etm__cpu_to_etmq(struct cs_etm_auxtrace *etm, 
930                                                int cpu)
931 {
932         unsigned q,j;
933
934         if (etm->queues.nr_queues == 0)
935                 return NULL;
936
937         if (cpu < 0)
938                 q = 0;
939         else if ((unsigned) cpu >= etm->queues.nr_queues)
940                 q = etm->queues.nr_queues - 1;
941         else 
942                 q = cpu;
943
944         if (etm->queues.queue_array[q].cpu == cpu)
945                 return etm->queues.queue_array[q].priv;
946
947         for (j = 0; q > 0; j++) {
948                 if (etm->queues.queue_array[--q].cpu == cpu)
949                         return etm->queues.queue_array[q].priv;
950         }
951
952         for (; j < etm->queues.nr_queues; j++) {
953                 if (etm->queues.queue_array[j].cpu == cpu)
954                         return etm->queues.queue_array[j].priv;
955
956         }
957
958         return NULL;
959 }
960
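/*
 * Memory access callback for the decoder: read up to 'size' bytes of
 * instruction memory at 'address' from the DSO mapped there.  Returns
 * the number of bytes copied into 'buffer', or 0 if the address cannot
 * be resolved.
 */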
961 static uint32_t cs_etm__mem_access(struct cs_etm_queue *etmq, uint64_t address, size_t size, uint8_t *buffer)
962 {
963         struct   addr_location al;
964         uint64_t offset;
965         struct   thread *thread;
966         struct   machine *machine;
967         uint8_t  cpumode;
968         int len;
969
970         if (etmq == NULL)
971                 return -1;
972
973         machine = etmq->etm->machine;
974         thread = etmq->thread;
975         if (address > 0xffffffc000000000UL) {
976                 cpumode = PERF_RECORD_MISC_KERNEL;
977         } else {
978                 cpumode = PERF_RECORD_MISC_USER;
979         }
980
981         thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address,&al);
982
983         if (!al.map || !al.map->dso) {
984                 return 0;
985         }
986
987         if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
988             dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) {
989                 return 0;
990         }
991
992         offset = al.map->map_ip(al.map,address);
993
994         map__load(al.map, machine->symbol_filter);
995
996         len = dso__data_read_offset(al.map->dso, machine,
997                                     offset, buffer, size);
998
999         if (len <= 0) {
1000                 return 0;
1001         }
1002
1003         return len;
1004 }
1005
1006 static bool check_need_swap(int file_endian)
1007 {
1008         const int data = 1;
1009         u8 *check = (u8 *)&data;
1010         int host_endian;
1011
1012         if (check[0] == 1)
1013                 host_endian = ELFDATA2LSB;
1014         else
1015                 host_endian = ELFDATA2MSB;
1016
1017         return host_endian != file_endian;
1018 }
1019
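/*
 * Parse the first PT_LOAD program header of an ELF file (e.g. vmlinux)
 * to find its file offset, load address and size, coping with both
 * 32/64-bit classes and foreign endianness.
 */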
1020 static int cs_etm__read_elf_info(const char *fname, uint64_t *foffset, uint64_t *fstart, uint64_t *fsize)
1021 {
1022         FILE *fp;
1023         u8 e_ident[EI_NIDENT];
1024         int ret = -1;
1025         bool need_swap = false;
1026         size_t buf_size;
1027         void *buf;
1028         int i;
1029
1030         fp = fopen(fname, "r");
1031         if (fp == NULL)
1032                 return -1;
1033
1034         if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
1035                 goto out;
1036
1037         if (memcmp(e_ident, ELFMAG, SELFMAG) ||
1038             e_ident[EI_VERSION] != EV_CURRENT)
1039                 goto out;
1040
1041         need_swap = check_need_swap(e_ident[EI_DATA]);
1042
1043         /* for simplicity */
1044         fseek(fp, 0, SEEK_SET);
1045
1046         if (e_ident[EI_CLASS] == ELFCLASS32) {
1047                 Elf32_Ehdr ehdr;
1048                 Elf32_Phdr *phdr;
1049
1050                 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
1051                         goto out;
1052
1053                 if (need_swap) {
1054                         ehdr.e_phoff = bswap_32(ehdr.e_phoff);
1055                         ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
1056                         ehdr.e_phnum = bswap_16(ehdr.e_phnum);
1057                 }
1058
1059                 buf_size = ehdr.e_phentsize * ehdr.e_phnum;
1060                 buf = malloc(buf_size);
1061                 if (buf == NULL)
1062                         goto out;
1063
1064                 fseek(fp, ehdr.e_phoff, SEEK_SET);
1065                 if (fread(buf, buf_size, 1, fp) != 1)
1066                         goto out_free;
1067
1068                 for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
1069
1070                         if (need_swap) {
1071                                 phdr->p_type = bswap_32(phdr->p_type);
1072                                 phdr->p_offset = bswap_32(phdr->p_offset);
1073                                 phdr->p_filesz = bswap_32(phdr->p_filesz);
1074                         }
1075
1076                         if (phdr->p_type != PT_LOAD)
1077                                 continue;
1078
1079                         *foffset = phdr->p_offset;
1080                         *foffset = phdr->p_offset;
1081                         *fstart = need_swap ? bswap_32(phdr->p_vaddr) : phdr->p_vaddr;
1081                         *fsize = phdr->p_filesz;
1082                         ret = 0;
1083                         break;
1084                 }
1085         } else {
1086                 Elf64_Ehdr ehdr;
1087                 Elf64_Phdr *phdr;
1088
1089                 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
1090                         goto out;
1091
1092                 if (need_swap) {
1093                         ehdr.e_phoff = bswap_64(ehdr.e_phoff);
1094                         ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
1095                         ehdr.e_phnum = bswap_16(ehdr.e_phnum);
1096                 }
1097
1098                 buf_size = ehdr.e_phentsize * ehdr.e_phnum;
1099                 buf = malloc(buf_size);
1100                 if (buf == NULL)
1101                         goto out;
1102
1103                 fseek(fp, ehdr.e_phoff, SEEK_SET);
1104                 if (fread(buf, buf_size, 1, fp) != 1)
1105                         goto out_free;
1106
1107                 for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
1108
1109                         if (need_swap) {
1110                                 phdr->p_type = bswap_32(phdr->p_type);
1111                                 phdr->p_offset = bswap_64(phdr->p_offset);
1112                                 phdr->p_filesz = bswap_64(phdr->p_filesz);
1113                         }
1114
1115                         if (phdr->p_type != PT_LOAD)
1116                                 continue;
1117
1118                         *foffset = phdr->p_offset;
1119                         *fstart = need_swap ? bswap_64(phdr->p_vaddr) : phdr->p_vaddr;
1120                         *fsize = phdr->p_filesz;
1121                         ret = 0;
1122                         break;
1123                 }
1124         }
1125 out_free:
1126         free(buf);
1127 out:
1128         fclose(fp);
1129         return ret;
1130 }
1131
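/*
 * Main event callback: keep the queues up to date, register memory
 * ranges from MMAP2 events with the decoder, and trigger decoding when
 * a timestamp is reached or, in timeless mode, when a thread exits.
 */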
1132 static int cs_etm__process_event(struct perf_session *session,
1133                                 union perf_event *event,
1134                                 struct perf_sample *sample,
1135                                 struct perf_tool *tool)
1136 {
1137         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1138                                                    struct cs_etm_auxtrace,
1139                                                    auxtrace);
1140
1141         u64 timestamp;
1142         int err = 0;
1143
1144         if (dump_trace) 
1145                 return 0;
1146
1147         if (!tool->ordered_events) {
1148                 pr_err("CoreSight ETM Trace requires ordered events\n");
1149                 return -EINVAL;
1150         }
1151
1152         if (sample->time && (sample->time != (u64)-1))
1153                 timestamp = sample->time;
1154         else
1155                 timestamp = 0;
1156
1157         if (timestamp || etm->timeless_decoding) {
1158                 err = cs_etm__update_queues(etm);
1159                 if (err)
1160                         return err;
1161
1162         }
1163
1164         if (event->header.type == PERF_RECORD_MMAP2) {
1165                 struct dso *dso;
1166                 int cpu;
1167                 struct cs_etm_queue *etmq;
1168
1169                 cpu = sample->cpu;
1170
1171                 etmq = cs_etm__cpu_to_etmq(etm,cpu);
1172
1173                 if (!etmq) {
1174                         return -1;
1175                 }
1176
1177                 dso = dsos__find(&(etm->machine->dsos), event->mmap2.filename, false);
1178                 if (dso != NULL) {
1179                         err = cs_etm_decoder__add_mem_access_cb(
1180                             etmq->decoder,
1181                             event->mmap2.start, 
1182                             event->mmap2.len, 
1183                             cs_etm__mem_access);
1184                 }
1185
1186                 if ((symbol_conf.vmlinux_name != NULL) && (!etmq->kernel_mapped)) {
1187                         uint64_t foffset;
1188                         uint64_t fstart;
1189                         uint64_t fsize;
1190
1191                         err = cs_etm__read_elf_info(symbol_conf.vmlinux_name,
1192                                                       &foffset,&fstart,&fsize);
1193
1194                         if (!err) {
1195                                 cs_etm_decoder__add_bin_file(
1196                                         etmq->decoder,
1197                                         foffset,
1198                                         fstart,
1199                                         fsize & ~0x1ULL,
1200                                         symbol_conf.vmlinux_name);
1201
1202                                 etmq->kernel_mapped = true;
1203                         }
1204                 }
1205
1206         }
1207
1208         if (etm->timeless_decoding) {
1209                 if (event->header.type == PERF_RECORD_EXIT) {
1210                         err = cs_etm__process_timeless_queues(etm,
1211                                                              event->fork.tid,
1212                                                              sample->time);
1213                 }
1214         } else if (timestamp) {
1215                 err = cs_etm__process_queues(etm, timestamp);
1216         }
1217
1218         //cs_etm__log("event %s (%u): cpu %d time%"PRIu64" tsc %#"PRIx64"\n",
1219                    //perf_event__name(event->header.type), event->header.type,
1220                    //sample->cpu, sample->time, timestamp);
1221         return err;
1222 }
1223
1224 static int cs_etm__process_auxtrace_event(struct perf_session *session,
1225                                   union perf_event *event,
1226                                   struct perf_tool *tool)
1227 {
1228         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1229                                                    struct cs_etm_auxtrace,
1230                                                    auxtrace);
1231
1232         (void) tool;
1233
1234         if (!etm->data_queued) {
1235                 struct auxtrace_buffer *buffer;
1236                 off_t  data_offset;
1237                 int fd = perf_data_file__fd(session->file);
1238                 bool is_pipe = perf_data_file__is_pipe(session->file);
1239                 int err;
1240
1241                 if (is_pipe) {
1242                         data_offset = 0;
1243                 } else {
1244                         data_offset = lseek(fd, 0, SEEK_CUR);
1245                         if (data_offset == -1) {
1246                                 return -errno;
1247                         }
1248                 }
1249
1250                 err = auxtrace_queues__add_event(&etm->queues,
1251                                                  session,
1252                                                  event,
1253                                                  data_offset,
1254                                                  &buffer);
1255                 if (err)
1256                         return err;
1257
1258                 if (dump_trace)
1259                 {
1260                         if (auxtrace_buffer__get_data(buffer,fd)) {
1261                                 cs_etm__dump_event(etm,buffer);
1262                                 auxtrace_buffer__put_data(buffer);
1263                         }
1264                 }
1265         } 
1266
1267         return 0;
1268
1269 }
1270
1271 static const char * const cs_etm_global_header_fmts[] = {
1272   [CS_HEADER_VERSION_0]    = "   Header version                 %"PRIx64"\n",
1273   [CS_PMU_TYPE_CPUS]       = "   PMU type/num cpus              %"PRIx64"\n",
1274   [CS_ETM_SNAPSHOT]        = "   Snapshot                       %"PRIx64"\n",
1275 };
1276
1277 static const char * const cs_etm_priv_fmts[] = {
1278   [CS_ETM_MAGIC]           = "   Magic number                   %"PRIx64"\n",
1279   [CS_ETM_CPU]             = "   CPU                            %"PRIx64"\n",
1280   [CS_ETM_ETMCR]           = "   ETMCR                          %"PRIx64"\n",
1281   [CS_ETM_ETMTRACEIDR]     = "   ETMTRACEIDR                    %"PRIx64"\n",
1282   [CS_ETM_ETMCCER]         = "   ETMCCER                        %"PRIx64"\n",
1283   [CS_ETM_ETMIDR]          = "   ETMIDR                         %"PRIx64"\n",
1284 };
1285
1286 static const char * const cs_etmv4_priv_fmts[] = {
1287   [CS_ETM_MAGIC]           = "   Magic number                   %"PRIx64"\n",
1288   [CS_ETM_CPU]             = "   CPU                            %"PRIx64"\n",
1289   [CS_ETMV4_TRCCONFIGR]    = "   TRCCONFIGR                     %"PRIx64"\n",
1290   [CS_ETMV4_TRCTRACEIDR]   = "   TRCTRACEIDR                    %"PRIx64"\n",
1291   [CS_ETMV4_TRCIDR0]       = "   TRCIDR0                        %"PRIx64"\n",
1292   [CS_ETMV4_TRCIDR1]       = "   TRCIDR1                        %"PRIx64"\n",
1293   [CS_ETMV4_TRCIDR2]       = "   TRCIDR2                        %"PRIx64"\n",
1294   [CS_ETMV4_TRCIDR8]       = "   TRCIDR8                        %"PRIx64"\n",
1295   [CS_ETMV4_TRCAUTHSTATUS] = "   TRCAUTHSTATUS                  %"PRIx64"\n",
1296 };
1297
1298 static void cs_etm__print_auxtrace_info(u64 *val, size_t num)
1299 {
1300         unsigned i,j,cpu;
1301
1302         for (i = 0, cpu = 0; cpu < num; ++cpu) {
1303
1304                 if (val[i] == __perf_cs_etmv3_magic) {
1305                         for (j = 0; j < CS_ETM_PRIV_MAX; ++j, ++i) {
1306                                 fprintf(stdout,cs_etm_priv_fmts[j],val[i]);
1307                         }
1308                 } else if (val[i] == __perf_cs_etmv4_magic) {
1309                         for (j = 0; j < CS_ETMV4_PRIV_MAX; ++j, ++i) {
1310                                 fprintf(stdout,cs_etmv4_priv_fmts[j],val[i]);
1311                         }
1312                 } else {
1313                         // failure.. return
1314                         return;
1315                 }
1316         }
1317 }
1318
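/*
 * Parse the PERF_RECORD_AUXTRACE_INFO event written at record time: read
 * the global header and the per-CPU ETM/ETMv4 metadata, then register
 * the cs_etm_auxtrace instance with the session.
 */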
1319 int cs_etm__process_auxtrace_info(union perf_event *event,
1320                                  struct perf_session *session)
1321 {
1322         struct auxtrace_info_event *auxtrace_info = &(event->auxtrace_info);
1323         size_t event_header_size = sizeof(struct perf_event_header);
1324         size_t info_header_size = 8;
1325         size_t total_size = auxtrace_info->header.size;
1326         size_t priv_size = 0;
1327         size_t num_cpu;
1328         struct cs_etm_auxtrace *etm = NULL;
1329         int err = 0;
1330         u64 *ptr;
1331         u64 *hdr = NULL;
1332         u64 **metadata = NULL;
1333         size_t i,j,k;
1334         unsigned pmu_type;
1335
1336         if (total_size < (event_header_size + info_header_size))
1337                 return -EINVAL;
1338
1339         priv_size = total_size - event_header_size - info_header_size;
1340
1341         // First the global part
1342
1343         ptr = (u64 *) auxtrace_info->priv;
1344         if (ptr[0] == 0) {
1345                 hdr = zalloc(sizeof(u64) * CS_HEADER_VERSION_0_MAX);
1346                 if (hdr == NULL) {
1347                         return -ENOMEM;
1348                 }
1349                 for (i = 0; i < CS_HEADER_VERSION_0_MAX; ++i) {
1350                         hdr[i] = ptr[i];
1351                 }
1352                 num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
1353                 pmu_type = (unsigned) ((hdr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
1354         } else {
1355                 return -EINVAL;
1356         }
1357
1358         metadata = zalloc(sizeof(u64 *) * num_cpu);
1359
1360         if (metadata == NULL) {
1361                 return -ENOMEM;
1362         }
1363
1364         for (j = 0; j < num_cpu; ++j) {
1365                 if (ptr[i] == __perf_cs_etmv3_magic) {
1366                         metadata[j] = zalloc(sizeof(u64) * CS_ETM_PRIV_MAX);
1367                         if (metadata[j] == NULL)
1368                                 return -ENOMEM;
1369                         for (k = 0; k < CS_ETM_PRIV_MAX; k++) {
1370                                 metadata[j][k] = ptr[i+k];
1371                         }
1372                         i += CS_ETM_PRIV_MAX;
1373                 } else if (ptr[i] == __perf_cs_etmv4_magic) {
1374                         metadata[j] = zalloc(sizeof(u64) * CS_ETMV4_PRIV_MAX);
1375                         if (metadata[j] == NULL)
1376                                 return -ENOMEM;
1377                         for (k = 0; k < CS_ETMV4_PRIV_MAX; k++) {
1378                                 metadata[j][k] = ptr[i+k];
1379                         }
1380                         i += CS_ETMV4_PRIV_MAX;
1381                 }
1382         }
1383
1384         if (i*8 != priv_size)
1385                 return -EINVAL;
1386
1387         if (dump_trace)
1388                 cs_etm__print_auxtrace_info(auxtrace_info->priv,num_cpu);
1389
1390         etm = zalloc(sizeof(struct cs_etm_auxtrace));
1391         if (!etm)
1392                 return -ENOMEM;
1393
1394         etm->num_cpu = num_cpu;
1395         etm->pmu_type = pmu_type;
1396         etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
1397
1398
1399
1400         etm->machine = &session->machines.host;
1401
1402         err = auxtrace_queues__init(&etm->queues);
1403         if (err)
1404                 goto err_free;
1405
1406         etm->unknown_thread = thread__new(999999999, 999999999);
1407         if (etm->unknown_thread == NULL) {
1408                 err = -ENOMEM;
1409                 goto err_free_queues;
1410         }
1411         err = thread__set_comm(etm->unknown_thread, "unknown", 0);
1412         if (err)
1413                 goto err_delete_thread;
1414
1415         if (thread__init_map_groups(etm->unknown_thread,
1416                                     etm->machine)) {
1417                 err = -ENOMEM;
1418                 goto err_delete_thread;
1419         }
1420
1421         etm->timeless_decoding = true;
1422         etm->sampling_mode = false;
1423         etm->metadata = metadata;
1424         etm->session = session;
1425         etm->auxtrace_type = auxtrace_info->type;
1426
1427         etm->auxtrace.process_event = cs_etm__process_event;
1428         etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
1429         etm->auxtrace.flush_events = cs_etm__flush_events;
1430         etm->auxtrace.free_events  = cs_etm__free_events;
1431         etm->auxtrace.free         = cs_etm__free;
1432         session->auxtrace = &(etm->auxtrace);
1433
1434         if (dump_trace)
1435                 return 0;
1436
1437         if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
1438                 etm->synth_opts = *session->itrace_synth_opts;
1439         } else {
1440                 itrace_synth_opts__set_default(&etm->synth_opts);
1441         }
1442         etm->synth_opts.branches = false;
1443         etm->synth_opts.callchain = false;
1444         etm->synth_opts.calls = false;
1445         etm->synth_opts.returns = false;
1446
1447         err = cs_etm__synth_events(etm, session);
1448         if (err)
1449                 goto err_delete_thread;
1450
1451         err = auxtrace_queues__process_index(&etm->queues, session);
1452         if (err)
1453                 goto err_delete_thread;
1454
1455         etm->data_queued = etm->queues.populated;
1456
1457         return 0;
1458
1459 err_delete_thread:
1460         thread__delete(etm->unknown_thread);
1461 err_free_queues:
1462         auxtrace_queues__free(&etm->queues);
1463         session->auxtrace = NULL;
1464 err_free:
1465         free(etm);
1466         return err;
1467 }