4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
12 #include "util/build-id.h"
13 #include "util/util.h"
14 #include "util/parse-options.h"
15 #include "util/parse-events.h"
17 #include "util/header.h"
18 #include "util/event.h"
19 #include "util/evlist.h"
20 #include "util/evsel.h"
21 #include "util/debug.h"
22 #include "util/session.h"
23 #include "util/tool.h"
24 #include "util/symbol.h"
25 #include "util/cpumap.h"
26 #include "util/thread_map.h"
27 #include "util/data.h"
#ifndef HAVE_ON_EXIT_SUPPORT
/*
 * Minimal on_exit() emulation for libcs that lack it: registered
 * callbacks receive the process exit status plus a caller cookie.
 */
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
/* Capture the status passed to exit() so the atexit hook can forward it */
#define exit(x) (exit)(__exitcode = (x))
/*
 * Register 'function' to be invoked at exit with 'arg'.  The dispatch
 * hook is installed via atexit() lazily on the first registration; the
 * table holds at most ATEXIT_MAX entries.
 * NOTE(review): lines are elided in this view — the body of the
 * table-full branch is not visible.
 */
static int on_exit(on_exit_func_t function, void *arg)
	if (__on_exit_count == ATEXIT_MAX)
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
/*
 * atexit() hook: run the registered callbacks in registration order
 * (note: real on_exit()/atexit() handlers run in reverse order).
 */
static void __handle_on_exit_funcs(void)
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
	/* event-processing callbacks (process_synthesized_event et al.) */
	struct perf_tool tool;
	/* recording options parsed from the command line */
	struct perf_record_opts opts;
	/* perf.data output file state */
	struct perf_data_file file;
	/* events being recorded */
	struct perf_evlist *evlist;
	struct perf_session *session;
	/* skip updating the on-disk build-id cache */
	bool no_buildid_cache;
/*
 * Append 'size' bytes at 'buf' to the session's output file and account
 * them in rec->bytes_written.  On write failure an error is logged
 * (the failure-return lines are elided in this view).
 */
static ssize_t perf_record__write(struct perf_record *rec,
				  void *buf, size_t size)
	struct perf_session *session = rec->session;

	ret = perf_data_file__write(session->file, buf, size);
		pr_err("failed to write perf data, error: %m\n");

	rec->bytes_written += ret;
/*
 * perf_tool callback: forward a synthesized event verbatim to the
 * output file.  'sample' and 'machine' are unused here.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
	struct perf_record *rec = container_of(tool, struct perf_record, tool);
	return perf_record__write(rec, event, event->header.size);
/*
 * Drain one mmap'ed ring buffer: write the region between the consumer
 * position (md->prev) and the kernel's current head to the output file,
 * using two writes when the data wraps around the end of the buffer,
 * then publish the new tail so the kernel can reuse the space.
 */
static int perf_record__mmap_read(struct perf_record *rec,
				  struct perf_mmap *md)
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* skip the control page */

	if ((old & md->mask) + size != (head & md->mask)) {
		/* data wraps: first chunk runs to the end of the buffer */
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);

	if (perf_record__write(rec, buf, size) < 0) {

		/* second (or only) chunk starting at the buffer base offset */
		buf = &data[old & md->mask];

	if (perf_record__write(rec, buf, size) < 0) {

	/* let the kernel know we consumed up to 'old' */
	perf_mmap__write_tail(md, old);
/* Flags set asynchronously from signal context; polled by the main loop */
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

/* Async signal handler (SIGCHLD/SIGINT/SIGUSR1/SIGTERM); body elided here */
static void sig_handler(int sig)
/*
 * on_exit handler: terminate a still-running forked workload with
 * SIGTERM and report if it died from a signal, then re-raise the
 * original signal with default disposition so our exit status reflects
 * it.  Normal exit and SIGUSR1 are not re-raised.
 */
static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
	struct perf_record *rec = arg;

	if (rec->evlist->workload.pid > 0) {
		kill(rec->evlist->workload.pid, SIGTERM);

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);

	if (signr == -1 || signr == SIGUSR1)

	signal(signr, SIG_DFL);
/*
 * Configure and open every counter in the evlist, retrying with an
 * alternative encoding via perf_evsel__fallback() when the first open
 * fails, then apply tracepoint filters and mmap the per-cpu ring
 * buffers.  Several error-path lines are elided in this view.
 */
static int perf_record__open(struct perf_record *rec)
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct perf_record_opts *opts = &rec->opts;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				/* fallback found an alternative encoding; retry */
				ui__warning("%s\n", msg);

			/* no fallback possible: report a human-readable reason */
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);

		pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));

	session->evlist = evlist;
	/* id header size depends on the sample_type of the opened events */
	perf_session__set_id_hdr_size(session);
/*
 * Re-read the events written so far (header data_offset up to the
 * current file position) through build_id__mark_dso_hit_ops, which
 * marks the DSOs that actually got samples so only their build-ids are
 * stored in the header.
 */
static int process_buildids(struct perf_record *rec)
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);

	return __perf_session__process_events(session, start,
					      size, &build_id__mark_dso_hit_ops);
/*
 * on_exit handler: for regular-file (non-pipe) output, finalize the
 * recorded data size, collect build-ids (unless disabled) and rewrite
 * the header, then tear down the session and evlist.
 */
static void perf_record__exit(int status, void *arg)
	struct perf_record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
/*
 * Per-guest-machine callback (see machines__process_guests): synthesize
 * module and kernel mmap events for one guest so its samples can be
 * resolved at report time.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
	struct perf_tool *tool = data;

	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
/* Marker event written after each round of draining the mmap buffers */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
/*
 * Drain every mapped ring buffer once; if tracing data is being
 * recorded, append a PERF_RECORD_FINISHED_ROUND marker so later
 * processing can re-sort events across rounds.
 */
static int perf_record__mmap_read_all(struct perf_record *rec)
	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = perf_record__write(rec, &finished_round_event,
					sizeof(finished_round_event));
/*
 * Enable every header feature, then clear the ones this run cannot
 * provide: build-ids (collected separately at exit), tracing data when
 * no tracepoints are selected, and branch stacks when not requested.
 */
static void perf_record__init_features(struct perf_record *rec)
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_session *session = rec->session;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&evsel_list->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
/*
 * Main recording routine: install signal/exit handlers, create the
 * session, fork the workload (stopped), open the counters, write the
 * header, synthesize events describing the pre-existing system state
 * (attrs, tracing data, kernel/module maps, guests, threads), then loop
 * draining the mmap buffers until 'done'.
 * NOTE(review): many lines are elided in this view; error checks and
 * some statements between the visible ones are not shown.
 */
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct perf_record_opts *opts = &rec->opts;
	struct perf_evlist *evsel_list = rec->evlist;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	/* make sure the workload is reaped and signals are re-raised on exit */
	on_exit(perf_record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");

	rec->session = session;

	perf_record__init_features(rec);

	/* fork the workload now; it is only started after counters are live */
	err = perf_evlist__prepare_workload(evsel_list, &opts->target,
		pr_err("Couldn't run the workload!\n");
		goto out_delete_session;

	if (perf_record__open(rec) != 0) {
		goto out_delete_session;

	if (!evsel_list->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at perf_record__exit()
	 */
	on_exit(perf_record__exit, rec);

	err = perf_header__write_pipe(file->fd);
		goto out_delete_session;

	err = perf_session__write_header(session, evsel_list,
		goto out_delete_session;

	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		goto out_delete_session;

	machine = &session->machines.host;

	/* pipe output needs attrs and tracing data delivered inline */
	err = perf_event__synthesize_attrs(tool, session,
					   process_synthesized_event);
		pr_err("Couldn't synthesize attrs.\n");
		goto out_delete_session;

	if (have_tracepoints(&evsel_list->entries)) {
		/*
		 * FIXME err <= 0 here actually means that
		 * there were no tracepoints so its not really
		 * an error, just that we don't need to
		 * synthesize anything. We really have to
		 * return this more properly and also
		 * propagate errors that now are calling die()
		 */
		err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
							  process_synthesized_event);
			pr_err("Couldn't record tracing data.\n");
			goto out_delete_session;
		rec->bytes_written += err;

	/* describe the host kernel, its modules, guests and live threads */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	machines__process_guests(&session->machines,
				 perf_event__synthesize_guest_os, tool);

	err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
					    process_synthesized_event, opts->sample_address);
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		/*
		 * NOTE(review): "¶m" below is mojibake of "&param"
		 * (HTML entity corruption) — restore the '&' when fixing
		 * the file's encoding.
		 */
		if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
			pr_err("Could not set realtime priority.\n");
			goto out_delete_session;

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(evsel_list);

	perf_evlist__start_workload(evsel_list);

	/* main loop: drain buffers; poll when no new samples arrived */
		int hits = rec->samples;

		if (perf_record__mmap_read_all(rec) < 0) {
			goto out_delete_session;

		if (hits == rec->samples) {
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(evsel_list);

	if (quiet || signr == SIGUSR1)

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		rec->bytes_written / 24);

	perf_session__delete(session);
/* Initializers for entries/terminator of the branch_modes[] table */
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }
/* Filter names accepted by -j/--branch-filter, mapped to sample flags */
static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
/*
 * Option callback for -b/-j: parse a comma-separated list of
 * branch_modes[] names (case-insensitive) into the PERF_SAMPLE_BRANCH_*
 * mask stored at opt->value.
 */
parse_branch_stack(const struct option *opt, const char *str, int unset)
	(PERF_SAMPLE_BRANCH_USER |\
	 PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */

	/* str may be NULL in case no arg is passed to -b */
		/* because str is read-only */
		s = os = strdup(str);

		for (br = branch_modes; br->name; br++) {
			if (!strcasecmp(s, br->name))

			ui__warning("unknown branch filter %s,"
				    " check man page\n", s);

	/* default to any taken branch if only privilege bits were given */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
#ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Parse a user-supplied dwarf stack-dump size: rounded up to a multiple
 * of sizeof(u64), must be non-zero and no larger than USHRT_MAX rounded
 * down likewise.  Error handling lines are elided in this view.
 */
static int get_stack_size(char *str, unsigned long *_size)
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	size = round_up(size, sizeof(u64));
	if (!size || size > max_size)

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",

#endif /* HAVE_LIBUNWIND_SUPPORT */
/*
 * Parse the --call-graph argument — "fp" or, with libunwind support,
 * "dwarf[,dump_size]" — into opts->call_graph and
 * opts->stack_dump_size.
 */
int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
	char *tok, *name, *saveptr = NULL;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;

				/* "fp" takes no extra argument */
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf-unwinding style, optional ",dump_size" suffix */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
#endif /* HAVE_LIBUNWIND_SUPPORT */
			pr_err("callchain: Unknown --call-graph option "
712 pr_err("callchain: Unknown --call-graph option "
/* Log the selected call-graph mode (plus dump size for dwarf) */
static void callchain_debug(struct perf_record_opts *opts)
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
/*
 * Option callback for --call-graph: parse the mode argument, or disable
 * callchain recording entirely for --no-call-graph.
 */
int record_parse_callchain_opt(const struct option *opt,
	struct perf_record_opts *opts = opt->value;

	/* --no-call-graph */
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");

	ret = record_parse_callchain(arg, opts);
		callchain_debug(opts);
/*
 * Option callback for bare -g (no argument): default to frame-pointer
 * callchains unless a mode was already chosen via --call-graph.
 */
int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
	struct perf_record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
/* Usage strings shown by usage_with_options() */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
/*
 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
 * because we need to have access to it in perf_record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct perf_record record = {
	/* "max" sentinels mean "not set"; replaced by real defaults later */
	.mmap_pages = UINT_MAX,
	.user_freq = UINT_MAX,
	.user_interval = ULLONG_MAX,
	.default_per_cpu = true,
/* Help text for -g/--call-graph; dwarf mode requires libunwind */
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use perf_record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	/* bare -g takes no argument: see record_callchain_opt() */
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
/*
 * Entry point for 'perf record': parse options, validate the target
 * (pid/tid/cpu/uid/system-wide), create the cpu/thread maps, configure
 * the options, then hand off to __cmd_record().
 * NOTE(review): this function runs past the end of the visible region
 * and several lines are elided within it.
 */
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
	struct perf_evlist *evsel_list;
	struct perf_record *rec = &record;

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)

	rec->evlist = evsel_list;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a workload to fork or an explicit target */
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);

	if (symbol_conf.kptr_restrict)
		/* warning emitted through the (elided) call these strings feed */
		"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
		"check /proc/sys/kernel/kptr_restrict.\n\n"
		"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
		"file is not found in the buildid cache or in the vmlinux path.\n\n"
		"Samples in kernel modules won't be resolved at all.\n\n"
		"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
		"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	/* no events given on the command line: add the default event */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;

	/* -t without explicit -i/-no-i: don't inherit to children */
	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);

	err = target__parse_uid(&rec->opts.target);
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		goto out_symbol_exit;

	if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (perf_record_opts__config(&rec->opts)) {

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(evsel_list);
	perf_evlist__close(evsel_list);

	perf_evlist__delete_maps(evsel_list);