/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"

#include <unistd.h>
#include <poll.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
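
/*
 * Note: FD() and SID() are typed accessors into the evsel's per-cpu x
 * per-thread xyarray tables: FD(evsel, cpu, thread) is the event file
 * descriptor returned by perf_event_open() for that cpu/thread pair,
 * SID() the matching struct perf_sample_id slot.
 */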

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
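
/*
 * Example (illustrative only): after parsing the group syntax
 * "-e '{cycles,instructions}'" the list holds two adjacent evsels;
 * __perf_evlist__set_leader() points both ->leader fields at the
 * cycles evsel and sets its nr_members to 2.
 */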

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
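
/*
 * Example (sketch, error handling elided): hooking a scheduler
 * tracepoint up to a handler. process_switch() is a hypothetical
 * callback, not part of this file.
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_switch) < 0)
 *		return -1;
 */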

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
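
/*
 * Layout note: with read_format ==
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID, the kernel's read()
 * buffer is { value, time_enabled, id }, so id_idx above ends up as 2;
 * with PERF_FORMAT_ID alone it stays at 1.
 */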

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	/* remember where we stopped for the next call */
	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
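
/*
 * Worked example for the straddle check above (illustrative): with
 * mask == 0xffff (a 64 KiB ring), old == 0xfffc and an 8-byte event,
 * (old & mask) + size == 0x10004 while ((old + size) & mask) == 0x0004,
 * so the event wraps; the copy loop moves 4 bytes from the end of the
 * ring and 4 from the start into md->event_copy.
 */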

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
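
/*
 * Example (sketch, error handling elided): the typical consumer loop,
 * pairing perf_evlist__mmap() with perf_evlist__mmap_read(). The
 * process_event() callback and the done flag are hypothetical.
 *
 *	union perf_event *event;
 *	int i;
 *
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (i = 0; i < evlist->nr_mmaps; i++)
 *			while ((event = perf_evlist__mmap_read(evlist, i)))
 *				process_event(event);
 *	}
 */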

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
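
/*
 * Example: with sample_type == TID | TIME | ID | CPU and sample_id_all
 * set, the trailing sample_id block is
 * 8 (pid+tid) + 8 (time) + 8 (id) + 8 (cpu+res) = 32 bytes.
 */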

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
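
/*
 * Example (sketch): the fork+cork workload dance from the caller's
 * side. The child blocks on the "go" pipe until the counters are set
 * up, so no samples are lost around exec. rec_argv is a hypothetical
 * argv for the traced command.
 *
 *	perf_evlist__prepare_workload(evlist, opts, rec_argv);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__start_workload(evlist);	// uncorks the child's exec
 */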

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);

	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}