perf test: Validate PERF_RECORD_ events and perf_sample fields
tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"

static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
        bool *visited = symbol__priv(sym);
        *visited = true;
        return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
        int err = -1;
        struct rb_node *nd;
        struct symbol *sym;
        struct map *kallsyms_map, *vmlinux_map;
        struct machine kallsyms, vmlinux;
        enum map_type type = MAP__FUNCTION;
        long page_size = sysconf(_SC_PAGE_SIZE);
        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

        /*
         * Step 1:
         *
         * Init the machines that will hold kernel, modules obtained from
         * both vmlinux + .ko files and from /proc/kallsyms split by modules.
         */
        machine__init(&kallsyms, "", HOST_KERNEL_ID);
        machine__init(&vmlinux, "", HOST_KERNEL_ID);

        /*
         * Step 2:
         *
         * Create the kernel maps for kallsyms and the DSO where we will then
         * load /proc/kallsyms. Also create the modules maps from /proc/modules
         * and find the .ko files that match them in /lib/modules/`uname -r`/.
         */
        if (machine__create_kernel_maps(&kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps ");
                return -1;
        }

        /*
         * Step 3:
         *
         * Load and split /proc/kallsyms into multiple maps, one per module.
         */
        if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
                pr_debug("dso__load_kallsyms ");
                goto out;
        }

        /*
         * Step 4:
         *
         * kallsyms will be internally sorted by name, on demand, so that we
         * can find the reference relocation symbol, i.e. the symbol we will
         * use to see if the running kernel was relocated by checking if it
         * has the same value in the vmlinux file we load.
         */
        kallsyms_map = machine__kernel_map(&kallsyms, type);

        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
        if (sym == NULL) {
                pr_debug("dso__find_symbol_by_name ");
                goto out;
        }

        ref_reloc_sym.addr = sym->start;

        /*
         * Step 5:
         *
         * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
         */
        if (machine__create_kernel_maps(&vmlinux) < 0) {
                pr_debug("machine__create_kernel_maps ");
                goto out;
        }

        vmlinux_map = machine__kernel_map(&vmlinux, type);
        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

        /*
         * Step 6:
         *
         * Locate a vmlinux file in the vmlinux path that has a buildid that
         * matches the one of the running kernel.
         *
         * While doing that, look for the ref reloc symbol; if we find it
         * we'll have its ref_reloc_symbol.unrelocated_addr, and then
         * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
         * to fixup the symbols.
         */
        if (machine__load_vmlinux_path(&vmlinux, type,
                                       vmlinux_matches_kallsyms_filter) <= 0) {
                pr_debug("machine__load_vmlinux_path ");
                goto out;
        }

        err = 0;
        /*
         * Step 7:
         *
         * Now look at the symbols in the vmlinux DSO and check if we find all of them
         * in the kallsyms dso. For the ones that are in both, check their names and
         * end addresses too.
         */
        for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
                struct symbol *pair, *first_pair;
                bool backwards = true;

                sym  = rb_entry(nd, struct symbol, rb_node);

                if (sym->start == sym->end)
                        continue;

                first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
                pair = first_pair;

                if (pair && pair->start == sym->start) {
next_pair:
                        if (strcmp(sym->name, pair->name) == 0) {
                                /*
                                 * kallsyms don't have the symbol end, so we
                                 * set that by using the next symbol start - 1;
                                 * in some cases we get this up to a page
                                 * wrong. trace_kmalloc, when this code was
                                 * being developed, was one such example,
                                 * 2106 bytes off the real size. More than
                                 * that and we _really_ have a problem.
                                 */
                                s64 skew = sym->end - pair->end;
                                if (llabs(skew) < page_size)
                                        continue;

                                pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
detour:
                                nnd = backwards ? rb_prev(&pair->rb_node) :
                                                  rb_next(&pair->rb_node);
                                if (nnd) {
                                        struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

                                        if (next->start == sym->start) {
                                                pair = next;
                                                goto next_pair;
                                        }
                                }

                                if (backwards) {
                                        backwards = false;
                                        pair = first_pair;
                                        goto detour;
                                }

                                pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
                        pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

                err = -1;
        }

        if (!verbose)
                goto out;

        pr_info("Maps only in vmlinux:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
                /*
                 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
                 * the kernel will have the path for the vmlinux file being used,
                 * so use the short name, less descriptive but the same ("[kernel]" in
                 * both cases).
                 */
                pair = map_groups__find_by_name(&kallsyms.kmaps, type,
                                                (pos->dso->kernel ?
                                                        pos->dso->short_name :
                                                        pos->dso->name));
                if (pair)
                        pair->priv = 1;
                else
                        map__fprintf(pos, stderr);
        }

        pr_info("Maps in vmlinux with a different name in kallsyms:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

                pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
                if (pair == NULL || pair->priv)
                        continue;

                if (pair->start == pos->start) {
                        pair->priv = 1;
                        pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
                                pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                        pair->priv = 1;
                }
        }

        pr_info("Maps only in kallsyms:\n");

        for (nd = rb_first(&kallsyms.kmaps.maps[type]);
             nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (!pos->priv)
                        map__fprintf(pos, stderr);
        }
out:
        return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

static int trace_event__id(const char *evname)
{
        char *filename;
        int err = -1, fd;

        if (asprintf(&filename,
                     "%s/syscalls/%s/id",
                     tracing_events_path, evname) < 0)
                return -1;

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                if (read(fd, id, sizeof(id)) > 0)
                        err = atoi(id);
                close(fd);
        }

        free(filename);
        return err;
}
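
/*
 * For example, with debugfs mounted on /sys/kernel/debug, asking for
 * "sys_enter_open" reads a file like
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id, whose
 * content is the numeric tracepoint id (the value itself varies from
 * kernel build to kernel build).
 */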

static int test__open_syscall_event(void)
{
        int err = -1, fd;
        struct thread_map *threads;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (i = 0; i < nr_open_calls; ++i) {
                fd = open("/etc/passwd", O_RDONLY);
                close(fd);
        }

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
                pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }

        if (evsel->counts->cpu[0].val != nr_open_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }

        err = 0;
out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

#include <sched.h>

static int test__open_syscall_event_on_all_cpus(void)
{
        int err = -1, fd, cpu;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        cpu_set_t cpu_set;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;
                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
                                 strerror(errno));
                        goto out_close_fd;
                }
                for (i = 0; i < ncalls; ++i) {
                        fd = open("/etc/passwd", O_RDONLY);
                        close(fd);
                }
                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts, because if we
         * used the auto allocation it would allocate just for 1 cpu, as we
         * start with cpu 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        err = 0;

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }

                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
                        err = -1;
                }
        }

out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
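
/*
 * How that id -> evsel mapping works: PERF_FORMAT_ID lets us read back
 * the unique id the kernel assigns to each opened event, and
 * PERF_SAMPLE_ID makes the kernel stamp that same id into every sample,
 * so perf_evlist__id2evsel() can do the reverse lookup.
 */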
static int test__basic_mmap(void)
{
        int err = -1;
        union perf_event *event;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evlist *evlist;
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_TRACEPOINT,
                .read_format    = PERF_FORMAT_ID,
                .sample_type    = PERF_SAMPLE_ID,
                .watermark      = 0,
        };
        cpu_set_t cpu_set;
        const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
                                        "getpgid", };
        pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
                                      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
        int ids[nsyscalls];
        unsigned int nr_events[nsyscalls],
                     expected_nr_events[nsyscalls], i, j;
        struct perf_evsel *evsels[nsyscalls], *evsel;
        int sample_size = __perf_evsel__sample_size(attr.sample_type);

        for (i = 0; i < nsyscalls; ++i) {
                char name[64];

                snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
                ids[i] = trace_event__id(name);
                if (ids[i] < 0) {
                        pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
                        return -1;
                }
                nr_events[i] = 0;
                expected_nr_events[i] = random() % 257;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_free_threads;
        }

        CPU_ZERO(&cpu_set);
        CPU_SET(cpus->map[0], &cpu_set);
        if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                         cpus->map[0], strerror(errno));
                goto out_free_cpus;
        }

        evlist = perf_evlist__new(cpus, threads);
        if (evlist == NULL) {
                pr_debug("perf_evlist__new\n");
                goto out_free_cpus;
        }

        /* anonymous union fields, can't be initialized above */
        attr.wakeup_events = 1;
        attr.sample_period = 1;

        for (i = 0; i < nsyscalls; ++i) {
                attr.config = ids[i];
                evsels[i] = perf_evsel__new(&attr, i);
                if (evsels[i] == NULL) {
                        pr_debug("perf_evsel__new\n");
                        goto out_free_evlist;
                }

                perf_evlist__add(evlist, evsels[i]);

                if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                                 strerror(errno));
                        goto out_close_fd;
                }
        }

        if (perf_evlist__mmap(evlist, 128, true) < 0) {
                pr_debug("failed to mmap events: %d (%s)\n", errno,
                         strerror(errno));
                goto out_close_fd;
        }

        for (i = 0; i < nsyscalls; ++i)
                for (j = 0; j < expected_nr_events[i]; ++j) {
                        int foo = syscalls[i]();
                        ++foo;
                }

        while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
                struct perf_sample sample;

                if (event->header.type != PERF_RECORD_SAMPLE) {
                        pr_debug("unexpected %s event\n",
                                 perf_event__name(event->header.type));
                        goto out_munmap;
                }

                err = perf_event__parse_sample(event, attr.sample_type, sample_size,
                                               false, &sample, false);
                if (err) {
                        pr_err("Can't parse sample, err = %d\n", err);
                        goto out_munmap;
                }

                evsel = perf_evlist__id2evsel(evlist, sample.id);
                if (evsel == NULL) {
                        pr_debug("event with id %" PRIu64
                                 " doesn't map to an evsel\n", sample.id);
                        goto out_munmap;
                }
                nr_events[evsel->idx]++;
        }

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
                                 event_name(evsel), nr_events[evsel->idx]);
                        goto out_munmap;
                }
        }

        err = 0;
out_munmap:
        perf_evlist__munmap(evlist);
out_close_fd:
        for (i = 0; i < nsyscalls; ++i)
                perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
        perf_evlist__delete(evlist);
out_free_cpus:
        cpu_map__delete(cpus);
out_free_threads:
        thread_map__delete(threads);
        return err;
#undef nsyscalls
}

#define TEST_ASSERT_VAL(text, cond) \
do { \
        if (!(cond)) { \
                pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
                return -1; \
        } \
} while (0)
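
/*
 * Typical use, as in the checks below:
 *
 *      TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
 *
 * The constant-first comparison style turns an accidental "=" instead
 * of "==" into a compile error.
 */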

static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
                (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
                evsel->attr.sample_type);
        TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
        return 0;
}

static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

        list_for_each_entry(evsel, &evlist->entries, node) {
                TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_TRACEPOINT == evsel->attr.type);
                TEST_ASSERT_VAL("wrong sample_type",
                        (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
                        == evsel->attr.sample_type);
                TEST_ASSERT_VAL("wrong sample_period",
                        1 == evsel->attr.sample_period);
        }
        return 0;
}

static int test__checkevent_raw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_numeric(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
        return 0;
}

static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",
                        PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
        return 0;
}

static int test__checkevent_genhw(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
        return 0;
}

static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
                                         evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
                                        evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_X == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_R == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
                                              struct perf_evsel, node);

        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_BREAKPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
        TEST_ASSERT_VAL("wrong bp_type",
                        HW_BREAKPOINT_W == evsel->attr.bp_type);
        TEST_ASSERT_VAL("wrong bp_len",
                        HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
        return 0;
}

static struct test__event_st {
        const char *name;
        __u32 type;
        int (*check)(struct perf_evlist *evlist);
} test__events[] = {
        {
                .name  = "syscalls:sys_enter_open",
                .check = test__checkevent_tracepoint,
        },
        {
                .name  = "syscalls:*",
                .check = test__checkevent_tracepoint_multi,
        },
        {
                .name  = "r1",
                .check = test__checkevent_raw,
        },
        {
                .name  = "1:1",
                .check = test__checkevent_numeric,
        },
        {
                .name  = "instructions",
                .check = test__checkevent_symbolic_name,
        },
        {
                .name  = "faults",
                .check = test__checkevent_symbolic_alias,
        },
        {
                .name  = "L1-dcache-load-miss",
                .check = test__checkevent_genhw,
        },
        {
                .name  = "mem:0",
                .check = test__checkevent_breakpoint,
        },
        {
                .name  = "mem:0:x",
                .check = test__checkevent_breakpoint_x,
        },
        {
                .name  = "mem:0:r",
                .check = test__checkevent_breakpoint_r,
        },
        {
                .name  = "mem:0:w",
                .check = test__checkevent_breakpoint_w,
        },
};

#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))

static int test__parse_events(void)
{
        struct perf_evlist *evlist;
        u_int i;
        int ret = 0;

        for (i = 0; i < TEST__EVENTS_CNT; i++) {
                struct test__event_st *e = &test__events[i];

                evlist = perf_evlist__new(NULL, NULL);
                if (evlist == NULL)
                        break;

                ret = parse_events(evlist, e->name, 0);
                if (ret) {
                        pr_debug("failed to parse event '%s', err %d\n",
                                 e->name, ret);
                        break;
                }

                ret = e->check(evlist);
                if (ret)
                        break;

                perf_evlist__delete(evlist);
        }

        return ret;
}

static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
                                         size_t *sizep)
{
        cpu_set_t *mask;
        size_t size;
        int i, cpu = -1, nrcpus = 1024;
realloc:
        mask = CPU_ALLOC(nrcpus);
        size = CPU_ALLOC_SIZE(nrcpus);
        CPU_ZERO_S(size, mask);

        if (sched_getaffinity(pid, size, mask) == -1) {
                CPU_FREE(mask);
                if (errno == EINVAL && nrcpus < (1024 << 8)) {
                        nrcpus = nrcpus << 2;
                        goto realloc;
                }
                perror("sched_getaffinity");
                return -1;
        }

        for (i = 0; i < nrcpus; i++) {
                if (CPU_ISSET_S(i, size, mask)) {
                        if (cpu == -1) {
                                cpu = i;
                                *maskp = mask;
                                *sizep = size;
                        } else
                                CPU_CLR_S(i, size, mask);
                }
        }

        if (cpu == -1)
                CPU_FREE(mask);

        return cpu;
}
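
/*
 * The realloc loop above is the usual glibc pattern for machines with
 * more than 1024 CPUs: allocate a dynamically sized mask with
 * CPU_ALLOC() and retry with a larger one for as long as
 * sched_getaffinity() keeps failing with EINVAL.
 */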

static int test__PERF_RECORD(void)
{
        struct perf_record_opts opts = {
                .target_pid = -1,
                .target_tid = -1,
                .no_delay   = true,
                .freq       = 10,
                .mmap_pages = 256,
                .sample_id_all_avail = true,
        };
        cpu_set_t *cpu_mask = NULL;
        size_t cpu_mask_size = 0;
        struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
        struct perf_evsel *evsel;
        struct perf_sample sample;
        const char *cmd = "sleep";
        const char *argv[] = { cmd, "1", NULL, };
        char *bname;
        u64 sample_type, prev_time = 0;
        bool found_cmd_mmap = false,
             found_libc_mmap = false,
             found_vdso_mmap = false,
             found_ld_mmap = false;
        int err = -1, i, wakeups = 0, sample_size;
        u32 cpu;
        int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

        if (evlist == NULL || argv == NULL) {
                pr_debug("Not enough memory to create evlist\n");
                goto out;
        }

        /*
         * We need at least one evsel in the evlist, use the default
         * one: "cycles".
         */
        err = perf_evlist__add_default(evlist);
        if (err < 0) {
                pr_debug("Not enough memory to create evsel\n");
                goto out_delete_evlist;
        }

        /*
         * Create maps of threads and cpus to monitor. In this case
         * we start with all threads and cpus (-1, -1) but then in
         * perf_evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
        err = perf_evlist__create_maps(evlist, opts.target_pid,
                                       opts.target_tid, opts.cpu_list);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
        }

        /*
         * Prepare the workload in argv[] to run, it'll fork it, and then wait
         * for perf_evlist__start_workload() to exec it. This is done this way
         * so that we have time to open the evlist (calling sys_perf_event_open
         * on all the fds) and then mmap them.
         */
        err = perf_evlist__prepare_workload(evlist, &opts, argv);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
                goto out_delete_evlist;
        }

        /*
         * Config the evsels, setting attr->comm on the first one, etc.
         */
        evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
        evsel->attr.sample_type |= PERF_SAMPLE_CPU;
        evsel->attr.sample_type |= PERF_SAMPLE_TID;
        evsel->attr.sample_type |= PERF_SAMPLE_TIME;
        perf_evlist__config_attrs(evlist, &opts);

        err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
                                            &cpu_mask_size);
        if (err < 0) {
                pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        cpu = err;

        /*
         * So that we can check perf_sample.cpu on all the samples.
         */
        if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
                pr_debug("sched_setaffinity: %s\n", strerror(errno));
                goto out_free_cpu_mask;
        }

        /*
         * Call sys_perf_event_open on all the fds on all the evsels,
         * grouping them if asked to.
         */
        err = perf_evlist__open(evlist, opts.group);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        /*
         * mmap the first fd on a given CPU and ask for events for the other
         * fds in the same CPU to be injected in the same mmap ring buffer
         * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
         */
        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
                goto out_delete_evlist;
        }

        /*
         * We'll need these two to parse the PERF_SAMPLE_* fields in each
         * event.
         */
        sample_type = perf_evlist__sample_type(evlist);
        sample_size = __perf_evsel__sample_size(sample_type);

        /*
         * Now that all is properly set up, enable the events, they will
         * count just on workload.pid, which will start...
         */
        perf_evlist__enable(evlist);

        /*
         * Now!
         */
        perf_evlist__start_workload(evlist);

        err = -1;

        while (1) {
                int before = total_events;

                for (i = 0; i < evlist->nr_mmaps; i++) {
                        union perf_event *event;

                        while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                                const u32 type = event->header.type;
                                const char *name = perf_event__name(type);

                                ++total_events;
                                if (type < PERF_RECORD_MAX)
                                        nr_events[type]++;

                                if (perf_event__parse_sample(event, sample_type,
                                                             sample_size, true,
                                                             &sample, false) < 0) {
                                        if (verbose)
                                                perf_event__fprintf(event, stderr);
                                        pr_debug("Couldn't parse sample\n");
                                        goto out_err;
                                }

                                if (verbose) {
                                        pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
                                        perf_event__fprintf(event, stderr);
                                }

                                if (prev_time > sample.time) {
                                        pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
                                                 name, prev_time, sample.time);
                                        goto out_err;
                                }

                                prev_time = sample.time;

                                if (sample.cpu != cpu) {
                                        pr_debug("%s with unexpected cpu, expected %d, got %d\n",
                                                 name, cpu, sample.cpu);
                                        goto out_err;
                                }

                                if ((pid_t)sample.pid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected pid, expected %d, got %d\n",
                                                 name, evlist->workload.pid, sample.pid);
                                        goto out_err;
                                }

                                if ((pid_t)sample.tid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected tid, expected %d, got %d\n",
                                                 name, evlist->workload.pid, sample.tid);
                                        goto out_err;
                                }

                                if ((type == PERF_RECORD_COMM ||
                                     type == PERF_RECORD_MMAP ||
                                     type == PERF_RECORD_FORK ||
                                     type == PERF_RECORD_EXIT) &&
                                     (pid_t)event->comm.pid != evlist->workload.pid) {
                                        pr_debug("%s with unexpected pid/tid\n", name);
                                        goto out_err;
                                }

                                if ((type == PERF_RECORD_COMM ||
                                     type == PERF_RECORD_MMAP) &&
                                     event->comm.pid != event->comm.tid) {
                                        pr_debug("%s with different pid/tid!\n", name);
                                        goto out_err;
                                }

                                switch (type) {
                                case PERF_RECORD_COMM:
                                        if (strcmp(event->comm.comm, cmd)) {
                                                pr_debug("%s with unexpected comm!\n", name);
                                                goto out_err;
                                        }
                                        break;
                                case PERF_RECORD_EXIT:
                                        goto found_exit;
                                case PERF_RECORD_MMAP:
                                        bname = strrchr(event->mmap.filename, '/');
                                        if (bname != NULL) {
                                                if (!found_cmd_mmap)
                                                        found_cmd_mmap = !strcmp(bname + 1, cmd);
                                                if (!found_libc_mmap)
                                                        found_libc_mmap = !strncmp(bname + 1, "libc", 4);
                                                if (!found_ld_mmap)
                                                        found_ld_mmap = !strncmp(bname + 1, "ld", 2);
                                        } else if (!found_vdso_mmap)
                                                found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
                                        break;

                                case PERF_RECORD_SAMPLE:
                                        /* Just ignore samples for now */
                                        break;
                                default:
                                        pr_debug("Unexpected perf_event->header.type %d!\n",
                                                 type);
                                        goto out_err;
                                }
                        }
                }

                /*
                 * We don't use poll here because, at least as of 3.1, the
                 * PERF_RECORD_{!SAMPLE} events don't honour
                 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
                 */
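                /*
                 * Hence the "&& false" below: the poll() call is kept for
                 * reference but deliberately disabled, falling through to
                 * the sleep(1) below instead.
                 */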
                if (total_events == before && false)
                        poll(evlist->pollfd, evlist->nr_fds, -1);

                sleep(1);
                if (++wakeups > 5) {
                        pr_debug("No PERF_RECORD_EXIT event!\n");
                        goto out_err;
                }
        }

found_exit:
        if (nr_events[PERF_RECORD_COMM] > 1) {
                pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
                goto out_err;
        }

        if (nr_events[PERF_RECORD_COMM] == 0) {
                pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
                goto out_err;
        }

        if (!found_cmd_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
                goto out_err;
        }

        if (!found_libc_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
                goto out_err;
        }

        if (!found_ld_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
                goto out_err;
        }

        if (!found_vdso_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
                goto out_err;
        }

        err = 0;
out_err:
        perf_evlist__munmap(evlist);
out_free_cpu_mask:
        CPU_FREE(cpu_mask);
out_delete_evlist:
        perf_evlist__delete(evlist);
out:
        return err;
}

static struct test {
        const char *desc;
        int (*func)(void);
} tests[] = {
        {
                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,
        },
        {
                .desc = "detect open syscall event",
                .func = test__open_syscall_event,
        },
        {
                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,
        },
        {
                .desc = "read samples using the mmap interface",
                .func = test__basic_mmap,
        },
        {
                .desc = "parse events tests",
                .func = test__parse_events,
        },
        {
                .desc = "Validate PERF_RECORD_* events & perf_sample fields",
                .func = test__PERF_RECORD,
        },
        {
                .func = NULL,
        },
};

static bool perf_test__matches(int curr, int argc, const char *argv[])
{
        int i;

        if (argc == 0)
                return true;

        for (i = 0; i < argc; ++i) {
                char *end;
                long nr = strtoul(argv[i], &end, 10);

                if (*end == '\0') {
                        if (nr == curr + 1)
                                return true;
                        continue;
                }

                if (strstr(tests[curr].desc, argv[i]))
                        return true;
        }

        return false;
}
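
/*
 * perf_test__matches() example: "perf test 1" selects the first test by
 * number, while "perf test kallsyms" selects every test whose
 * description contains the substring "kallsyms".
 */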

static int __cmd_test(int argc, const char *argv[])
{
        int i = 0;

        while (tests[i].func) {
                int curr = i++, err;

                if (!perf_test__matches(curr, argc, argv))
                        continue;

                pr_info("%2d: %s:", i, tests[curr].desc);
                pr_debug("\n--- start ---\n");
                err = tests[curr].func();
                pr_debug("---- end ----\n%s:", tests[curr].desc);
                pr_info(" %s\n", err ? "FAILED!" : "Ok");
        }

        return 0;
}

static int perf_test__list(int argc, const char **argv)
{
        int i = 0;

        while (tests[i].func) {
                int curr = i++;

                if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
                        continue;

                pr_info("%2d: %s\n", i, tests[curr].desc);
        }

        return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
        const char * const test_usage[] = {
        "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
        NULL,
        };
        const struct option test_options[] = {
        OPT_INTEGER('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_END()
        };

        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc >= 1 && !strcmp(argv[0], "list"))
                return perf_test__list(argc, argv);

        symbol_conf.priv_size = sizeof(int);
        symbol_conf.sort_by_name = true;
        symbol_conf.try_vmlinux_path = true;

        if (symbol__init() < 0)
                return -1;

        setup_pager();

        return __cmd_test(argc, argv);
}