4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
8 #define _FILE_OFFSET_BITS 64
14 #include "util/build-id.h"
15 #include "util/util.h"
16 #include "util/parse-options.h"
17 #include "util/parse-events.h"
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/tool.h"
26 #include "util/symbol.h"
27 #include "util/cpumap.h"
28 #include "util/thread_map.h"
34 #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
36 #ifdef NO_LIBUNWIND_SUPPORT
37 static char callchain_help[] = CALLCHAIN_HELP "[fp]";
39 static unsigned long default_stack_dump_size = 8192;
40 static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
49 struct perf_tool tool;
50 struct perf_record_opts opts;
52 const char *output_name;
53 struct perf_evlist *evlist;
54 struct perf_session *session;
57 unsigned int page_size;
59 enum write_mode_t write_mode;
61 bool no_buildid_cache;
66 off_t post_processing_offset;
69 static void advance_output(struct perf_record *rec, size_t size)
71 rec->bytes_written += size;
74 static void write_output(struct perf_record *rec, void *buf, size_t size)
77 int ret = write(rec->output, buf, size);
80 die("failed to write");
85 rec->bytes_written += ret;
89 static int process_synthesized_event(struct perf_tool *tool,
90 union perf_event *event,
91 struct perf_sample *sample __used,
92 struct machine *machine __used)
94 struct perf_record *rec = container_of(tool, struct perf_record, tool);
95 write_output(rec, event, event->header.size);
99 static void perf_record__mmap_read(struct perf_record *rec,
100 struct perf_mmap *md)
102 unsigned int head = perf_mmap__read_head(md);
103 unsigned int old = md->prev;
104 unsigned char *data = md->base + rec->page_size;
115 if ((old & md->mask) + size != (head & md->mask)) {
116 buf = &data[old & md->mask];
117 size = md->mask + 1 - (old & md->mask);
120 write_output(rec, buf, size);
123 buf = &data[old & md->mask];
127 write_output(rec, buf, size);
130 perf_mmap__write_tail(md, old);
/* Signal-driven state shared with the record main loop (set in sig_handler). */
/* Non-zero once a signal asked us to stop recording. */
133 static volatile int done = 0;
/* Which signal stopped us; re-raised with default disposition at exit unless -1 or SIGUSR1. */
134 static volatile int signr = -1;
/* Presumably set on SIGCHLD when the forked workload exits -- TODO confirm against sig_handler. */
135 static volatile int child_finished = 0;
137 static void sig_handler(int sig)
146 static void perf_record__sig_exit(int exit_status __used, void *arg)
148 struct perf_record *rec = arg;
151 if (rec->evlist->workload.pid > 0) {
153 kill(rec->evlist->workload.pid, SIGTERM);
156 if (WIFSIGNALED(status))
157 psignal(WTERMSIG(status), rec->progname);
160 if (signr == -1 || signr == SIGUSR1)
163 signal(signr, SIG_DFL);
164 kill(getpid(), signr);
167 static bool perf_evlist__equal(struct perf_evlist *evlist,
168 struct perf_evlist *other)
170 struct perf_evsel *pos, *pair;
172 if (evlist->nr_entries != other->nr_entries)
175 pair = perf_evlist__first(other);
177 list_for_each_entry(pos, &evlist->entries, node) {
178 if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
180 pair = perf_evsel__next(pair);
/*
 * perf_record__open(): configure attrs for every evsel in rec->evlist,
 * open the counters (with fallbacks for older kernels / missing PMU
 * features) and mmap the ring buffers.
 * NOTE(review): this span is a partially-sampled listing -- interior
 * lines are missing; comments describe only what the visible lines show.
 */
186 static void perf_record__open(struct perf_record *rec)
188 struct perf_evsel *pos;
189 struct perf_evlist *evlist = rec->evlist;
190 struct perf_session *session = rec->session;
191 struct perf_record_opts *opts = &rec->opts;
193 perf_evlist__config_attrs(evlist, opts);
196 perf_evlist__set_leader(evlist);
198 list_for_each_entry(pos, &evlist->entries, node) {
199 struct perf_event_attr *attr = &pos->attr;
201 * Check if parse_single_tracepoint_event has already asked for
204 * XXX this is kludgy but short term fix for problems introduced by
205 * eac23d1c that broke 'perf script' by having different sample_types
206 * when using multiple tracepoint events when we use a perf binary
207 * that tries to use sample_id_all on an older kernel.
209 * We need to move counter creation to perf_session, support
210 * different sample_types, etc.
212 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
/* Retry target when a feature probe fails: strip the feature and reopen. */
214 fallback_missing_features:
215 if (opts->exclude_guest_missing)
216 attr->exclude_guest = attr->exclude_host = 0;
218 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
220 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
223 if (err == EPERM || err == EACCES) {
224 ui__error_paranoid();
226 } else if (err == ENODEV && opts->target.cpu_list) {
227 die("No such device - did you specify"
228 " an out-of-range profile CPU?\n")
229 } else if (err == EINVAL) {
/* EINVAL paths: progressively disable features older kernels lack. */
230 if (!opts->exclude_guest_missing &&
231 (attr->exclude_guest || attr->exclude_host)) {
232 pr_debug("Old kernel, cannot exclude "
233 "guest or host samples.\n");
234 opts->exclude_guest_missing = true;
235 goto fallback_missing_features;
236 } else if (!opts->sample_id_all_missing) {
238 * Old kernel, no attr->sample_id_type_all field
240 opts->sample_id_all_missing = true;
241 if (!opts->sample_time && !opts->raw_samples && !time_needed)
242 attr->sample_type &= ~PERF_SAMPLE_TIME;
244 goto retry_sample_id;
249 * If it's cycles then fall back to hrtimer
250 * based cpu-clock-tick sw counter, which
251 * is always available even if no PMU support.
253 * PPC returns ENXIO until 2.6.37 (behavior changed
254 * with commit b0a873e).
256 if ((err == ENOENT || err == ENXIO)
257 && attr->type == PERF_TYPE_HARDWARE
258 && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
261 ui__warning("The cycles event is not supported, "
262 "trying to fall back to cpu-clock-ticks\n");
263 attr->type = PERF_TYPE_SOFTWARE;
264 attr->config = PERF_COUNT_SW_CPU_CLOCK;
273 ui__error("The %s event is not supported.\n",
274 perf_evsel__name(pos));
279 error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
282 #if defined(__i386__) || defined(__x86_64__)
283 if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
284 die("No hardware sampling interrupt available."
285 " No APIC? If so then you can boot the kernel"
286 " with the \"lapic\" boot parameter to"
287 " force-enable it.\n");
290 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
294 if (perf_evlist__set_filters(evlist)) {
295 error("failed to set filter with %d (%s)\n", errno,
/* mmap the per-cpu ring buffers; distinguish EPERM / bad -m value / other. */
300 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
302 die("Permission error mapping pages.\n"
303 "Consider increasing "
304 "/proc/sys/kernel/perf_event_mlock_kb,\n"
305 "or try again with a smaller value of -m/--mmap_pages.\n"
306 "(current value: %d)\n", opts->mmap_pages);
307 else if (!is_power_of_2(opts->mmap_pages))
308 die("--mmap_pages/-m value must be a power of two.");
310 die("failed to mmap with %d (%s)\n", errno, strerror(errno));
314 session->evlist = evlist;
/* On append, the events must match what is already in the file. */
316 if (!perf_evlist__equal(session->evlist, evlist)) {
317 fprintf(stderr, "incompatible append\n");
322 perf_session__set_id_hdr_size(session);
325 static int process_buildids(struct perf_record *rec)
327 u64 size = lseek(rec->output, 0, SEEK_CUR);
332 rec->session->fd = rec->output;
333 return __perf_session__process_events(rec->session, rec->post_processing_offset,
334 size - rec->post_processing_offset,
335 size, &build_id__mark_dso_hit_ops);
338 static void perf_record__exit(int status __used, void *arg)
340 struct perf_record *rec = arg;
342 if (!rec->opts.pipe_output) {
343 rec->session->header.data_size += rec->bytes_written;
345 if (!rec->no_buildid)
346 process_buildids(rec);
347 perf_session__write_header(rec->session, rec->evlist,
349 perf_session__delete(rec->session);
350 perf_evlist__delete(rec->evlist);
/*
 * Synthesize module and kernel mmap events for one guest machine so
 * 'perf report' can resolve guest-side symbols.  Skipped for the host.
 * NOTE(review): partially-sampled listing; call arguments on lines 371,
 * 381 and 384 are truncated in this view.
 */
355 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
358 struct perf_tool *tool = data;
360 if (machine__is_host(machine))
364 *As for guest kernel when processing subcommand record&report,
365 *we arrange module mmap prior to guest kernel mmap and trigger
366 *a preload dso because default guest module symbols are loaded
367 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
368 *method is used to avoid symbol missing when the first addr is
369 *in module instead of in guest kernel.
371 err = perf_event__synthesize_modules(tool, process_synthesized_event,
374 pr_err("Couldn't record guest kernel [%d]'s reference"
375 " relocation symbol.\n", machine->pid);
378 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
379 * have no _text sometimes.
381 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
384 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
387 pr_err("Couldn't record guest kernel [%d]'s reference"
388 " relocation symbol.\n", machine->pid);
391 static struct perf_event_header finished_round_event = {
392 .size = sizeof(struct perf_event_header),
393 .type = PERF_RECORD_FINISHED_ROUND,
396 static void perf_record__mmap_read_all(struct perf_record *rec)
400 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
401 if (rec->evlist->mmap[i].base)
402 perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
405 if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
406 write_output(rec, &finished_round_event, sizeof(finished_round_event));
/*
 * __cmd_record(): the record main routine -- set up signals and the
 * output file/pipe, create the session, fork the workload, open the
 * counters, write/stream the header, synthesize machine state, then
 * loop draining the mmap buffers until the workload or a signal stops
 * us.  NOTE(review): partially-sampled listing; interior lines missing.
 */
409 static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
413 int err, output, feat;
414 unsigned long waking = 0;
415 const bool forks = argc > 0;
416 struct machine *machine;
417 struct perf_tool *tool = &rec->tool;
418 struct perf_record_opts *opts = &rec->opts;
419 struct perf_evlist *evsel_list = rec->evlist;
420 const char *output_name = rec->output_name;
421 struct perf_session *session;
423 rec->progname = argv[0];
425 rec->page_size = sysconf(_SC_PAGE_SIZE);
427 on_exit(perf_record__sig_exit, rec);
428 signal(SIGCHLD, sig_handler);
429 signal(SIGINT, sig_handler);
430 signal(SIGUSR1, sig_handler);
/* Auto-detect pipe mode when stdout is a FIFO; default file is perf.data. */
433 if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
434 opts->pipe_output = true;
436 rec->output_name = output_name = "perf.data";
439 if (!strcmp(output_name, "-"))
440 opts->pipe_output = true;
441 else if (!stat(output_name, &st) && st.st_size) {
442 if (rec->write_mode == WRITE_FORCE) {
443 char oldname[PATH_MAX];
444 snprintf(oldname, sizeof(oldname), "%s.old",
447 rename(output_name, oldname);
449 } else if (rec->write_mode == WRITE_APPEND) {
450 rec->write_mode = WRITE_FORCE;
454 flags = O_CREAT|O_RDWR;
455 if (rec->write_mode == WRITE_APPEND)
460 if (opts->pipe_output)
461 output = STDOUT_FILENO;
463 output = open(output_name, flags, S_IRUSR | S_IWUSR);
465 perror("failed to create output file");
469 rec->output = output;
471 session = perf_session__new(output_name, O_WRONLY,
472 rec->write_mode == WRITE_FORCE, false, NULL);
473 if (session == NULL) {
474 pr_err("Not enough memory for reading perf file header\n");
478 rec->session = session;
/* Start with all header features set, then clear the inapplicable ones. */
480 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
481 perf_header__set_feat(&session->header, feat);
484 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
486 if (!have_tracepoints(&evsel_list->entries))
487 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
489 if (!rec->opts.branch_stack)
490 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
492 if (!rec->file_new) {
493 err = perf_session__read_header(session, output);
495 goto out_delete_session;
499 err = perf_evlist__prepare_workload(evsel_list, opts, argv);
501 pr_err("Couldn't run the workload!\n");
502 goto out_delete_session;
506 perf_record__open(rec);
509 * perf_session__delete(session) will be called at perf_record__exit()
511 on_exit(perf_record__exit, rec);
513 if (opts->pipe_output) {
514 err = perf_header__write_pipe(output);
517 } else if (rec->file_new) {
518 err = perf_session__write_header(session, evsel_list,
525 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
526 pr_err("Couldn't generate buildids. "
527 "Use --no-buildid to profile anyway.\n");
/* Remember where post-header data starts, for the build-id re-scan pass. */
531 rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
533 machine = perf_session__find_host_machine(session);
535 pr_err("Couldn't find native kernel information.\n");
539 if (opts->pipe_output) {
540 err = perf_event__synthesize_attrs(tool, session,
541 process_synthesized_event);
543 pr_err("Couldn't synthesize attrs.\n");
547 err = perf_event__synthesize_event_types(tool, process_synthesized_event,
550 pr_err("Couldn't synthesize event_types.\n");
554 if (have_tracepoints(&evsel_list->entries)) {
556 * FIXME err <= 0 here actually means that
557 * there were no tracepoints so its not really
558 * an error, just that we don't need to
559 * synthesize anything. We really have to
560 * return this more properly and also
561 * propagate errors that now are calling die()
563 err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
564 process_synthesized_event);
566 pr_err("Couldn't record tracing data.\n");
569 advance_output(rec, err);
573 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
576 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
579 pr_err("Couldn't record kernel reference relocation symbol\n"
580 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
581 "Check /proc/kallsyms permission or run as root.\n");
583 err = perf_event__synthesize_modules(tool, process_synthesized_event,
586 pr_err("Couldn't record kernel module information.\n"
587 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
588 "Check /proc/modules permission or run as root.\n");
591 perf_session__process_machines(session, tool,
592 perf_event__synthesize_guest_os);
594 if (!opts->target.system_wide)
595 perf_event__synthesize_thread_map(tool, evsel_list->threads,
596 process_synthesized_event,
599 perf_event__synthesize_threads(tool, process_synthesized_event,
602 if (rec->realtime_prio) {
603 struct sched_param param;
605 param.sched_priority = rec->realtime_prio;
/* NOTE(review): "¶m" below is mojibake for "&param" (HTML &para; entity) -- fix when restoring. */
606 if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
607 pr_err("Could not set realtime priority.\n");
612 perf_evlist__enable(evsel_list);
618 perf_evlist__start_workload(evsel_list);
/* Main loop: drain buffers; poll only when a sweep produced no new samples. */
621 int hits = rec->samples;
623 perf_record__mmap_read_all(rec);
625 if (hits == rec->samples) {
628 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
633 perf_evlist__disable(evsel_list);
636 if (quiet || signr == SIGUSR1)
639 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
642 * Approximate RIP event size: 24 bytes.
645 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
646 (double)rec->bytes_written / 1024.0 / 1024.0,
648 rec->bytes_written / 24);
653 perf_session__delete(session);
/*
 * Table mapping -j/--branch-filter keyword names to
 * PERF_SAMPLE_BRANCH_* mode bits; scanned by parse_branch_stack().
 * NOTE(review): the BRANCH_END terminator entry is outside this
 * sampled view.
 */
657 #define BRANCH_OPT(n, m) \
658 { .name = n, .mode = (m) }
660 #define BRANCH_END { .name = NULL }
667 static const struct branch_mode branch_modes[] = {
668 BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
669 BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
670 BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
671 BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
672 BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
673 BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
674 BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
/*
 * Option callback for -b/-j: parse a comma-separated list of branch
 * filter names (see branch_modes[]) into the branch_stack mode mask.
 * If only privilege-level bits (ONLY_PLM) end up set, default to
 * PERF_SAMPLE_BRANCH_ANY.  NOTE(review): partially-sampled listing.
 */
679 parse_branch_stack(const struct option *opt, const char *str, int unset)
682 (PERF_SAMPLE_BRANCH_USER |\
683 PERF_SAMPLE_BRANCH_KERNEL |\
684 PERF_SAMPLE_BRANCH_HV)
686 uint64_t *mode = (uint64_t *)opt->value;
687 const struct branch_mode *br;
688 char *s, *os = NULL, *p;
695 * cannot set it twice, -b + --branch-filter for instance
700 /* str may be NULL in case no arg is passed to -b */
702 /* because str is read-only */
703 s = os = strdup(str);
/* Look the current token up in the keyword table; unknown names warn. */
712 for (br = branch_modes; br->name; br++) {
713 if (!strcasecmp(s, br->name))
717 ui__warning("unknown branch filter %s,"
718 " check man page\n", s);
732 /* default to any branch */
733 if ((*mode & ~ONLY_PLM) == 0) {
734 *mode = PERF_SAMPLE_BRANCH_ANY;
741 #ifndef NO_LIBUNWIND_SUPPORT
742 static int get_stack_size(char *str, unsigned long *_size)
746 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
748 size = strtoul(str, &endptr, 0);
754 size = round_up(size, sizeof(u64));
755 if (!size || size > max_size)
763 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
767 #endif /* !NO_LIBUNWIND_SUPPORT */
/*
 * Option callback for -g/--call-graph: parse "fp" or (with libunwind)
 * "dwarf[,dump_size]" into rec->opts.call_graph / stack_dump_size.
 * NOTE(review): partially-sampled listing; interior lines missing.
 */
770 parse_callchain_opt(const struct option *opt __used, const char *arg,
773 struct perf_record *rec = (struct perf_record *)opt->value;
774 char *tok, *name, *saveptr = NULL;
778 /* --no-call-graph */
782 /* We specified default option if none is provided. */
785 /* We need buffer that we know we can write to. */
786 buf = malloc(strlen(arg) + 1);
792 tok = strtok_r((char *)buf, ",", &saveptr);
793 name = tok ? : (char *)buf;
796 /* Framepointer style */
797 if (!strncmp(name, "fp", sizeof("fp"))) {
798 if (!strtok_r(NULL, ",", &saveptr)) {
799 rec->opts.call_graph = CALLCHAIN_FP;
802 pr_err("callchain: No more arguments "
803 "needed for -g fp\n");
806 #ifndef NO_LIBUNWIND_SUPPORT
/* Dwarf-style unwinding: optional second token is the stack dump size. */
808 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
810 rec->opts.call_graph = CALLCHAIN_DWARF;
811 rec->opts.stack_dump_size = default_stack_dump_size;
813 tok = strtok_r(NULL, ",", &saveptr);
815 unsigned long size = 0;
817 ret = get_stack_size(tok, &size);
818 rec->opts.stack_dump_size = size;
822 pr_debug("callchain: stack dump size %d\n",
823 rec->opts.stack_dump_size);
824 #endif /* !NO_LIBUNWIND_SUPPORT */
826 pr_err("callchain: Unknown -g option "
836 pr_debug("callchain: type %d\n", rec->opts.call_graph);
/* Usage strings shown by parse_options()/usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
848 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
849 * because we need to have access to it in perf_record__exit, that is called
850 * after cmd_record() exits, but since record_options need to be accessible to
851 * builtin-script, leave it here.
853 * At least we don't touch it in all the other functions here directly.
855 * Just say no to tons of global variables, sigh.
857 static struct perf_record record = {
859 .mmap_pages = UINT_MAX,
860 .user_freq = UINT_MAX,
861 .user_interval = ULLONG_MAX,
867 .write_mode = WRITE_FORCE,
872 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
873 * with it and switch to use the library functions in perf_evlist that came
874 * from builtin-record.c, i.e. use perf_record_opts,
875 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
/*
 * Command-line option table for 'perf record'; non-static because
 * builtin-script.c reuses it (see the comment above 'record').
 * NOTE(review): partially-sampled listing -- several option entries are
 * truncated and the OPT_END() terminator is outside this view.
 */
878 const struct option record_options[] = {
879 OPT_CALLBACK('e', "event", &record.evlist, "event",
880 "event selector. use 'perf list' to list available events",
881 parse_events_option),
882 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
883 "event filter", parse_filter),
884 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
885 "record events on existing process id"),
886 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
887 "record events on existing thread id"),
888 OPT_INTEGER('r', "realtime", &record.realtime_prio,
889 "collect data with this RT SCHED_FIFO priority"),
890 OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
891 "collect data without buffering"),
892 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
893 "collect raw sample records from all opened counters"),
894 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
895 "system-wide collection from all CPUs"),
896 OPT_BOOLEAN('A', "append", &record.append_file,
897 "append to the output file to do incremental profiling"),
898 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
899 "list of cpus to monitor"),
900 OPT_BOOLEAN('f', "force", &record.force,
901 "overwrite existing data file (deprecated)"),
902 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
903 OPT_STRING('o', "output", &record.output_name, "file",
905 OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
906 "child tasks do not inherit counters"),
907 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
908 OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
909 "number of mmap data pages"),
910 OPT_BOOLEAN(0, "group", &record.opts.group,
911 "put the counters into a counter group"),
912 OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
913 callchain_help, &parse_callchain_opt,
915 OPT_INCR('v', "verbose", &verbose,
916 "be more verbose (show counter open errors, etc)"),
917 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
918 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
919 "per thread counts"),
920 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
922 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
923 OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
924 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
926 OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
927 "do not update the buildid cache"),
928 OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
929 "do not collect buildids in perf.data"),
930 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
931 "monitor event in cgroup name only",
933 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
936 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
937 "branch any", "sample any taken branches",
940 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
941 "branch filter mask", "branch stack filter modes",
/*
 * cmd_record(): entry point for 'perf record'.  Parses options,
 * validates the target (pid/tid/cpu/uid), builds the event list and
 * maps, resolves frequency vs. period defaults, then hands off to
 * __cmd_record().  NOTE(review): partially-sampled listing; interior
 * lines (error labels, cleanup) are missing from this view.
 */
946 int cmd_record(int argc, const char **argv, const char *prefix __used)
949 struct perf_evsel *pos;
950 struct perf_evlist *evsel_list;
951 struct perf_record *rec = &record;
954 evsel_list = perf_evlist__new(NULL, NULL);
955 if (evsel_list == NULL)
958 rec->evlist = evsel_list;
960 argc = parse_options(argc, argv, record_options, record_usage,
961 PARSE_OPT_STOP_AT_NON_OPTION);
962 if (!argc && perf_target__none(&rec->opts.target))
963 usage_with_options(record_usage, record_options);
/* -f and -A are mutually exclusive; -A implies append mode. */
965 if (rec->force && rec->append_file) {
966 ui__error("Can't overwrite and append at the same time."
967 " You need to choose between -f and -A");
968 usage_with_options(record_usage, record_options);
969 } else if (rec->append_file) {
970 rec->write_mode = WRITE_APPEND;
972 rec->write_mode = WRITE_FORCE;
975 if (nr_cgroups && !rec->opts.target.system_wide) {
976 ui__error("cgroup monitoring only available in"
977 " system-wide mode\n");
978 usage_with_options(record_usage, record_options);
983 if (symbol_conf.kptr_restrict)
985 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
986 "check /proc/sys/kernel/kptr_restrict.\n\n"
987 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
988 "file is not found in the buildid cache or in the vmlinux path.\n\n"
989 "Samples in kernel modules won't be resolved at all.\n\n"
990 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
991 "even with a suitable vmlinux or kallsyms file.\n\n");
993 if (rec->no_buildid_cache || rec->no_buildid)
994 disable_buildid_cache();
/* No -e given: fall back to the default event (cycles). */
996 if (evsel_list->nr_entries == 0 &&
997 perf_evlist__add_default(evsel_list) < 0) {
998 pr_err("Not enough memory for event selector list\n");
999 goto out_symbol_exit;
1002 err = perf_target__validate(&rec->opts.target);
1004 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1005 ui__warning("%s", errbuf);
1008 err = perf_target__parse_uid(&rec->opts.target);
1010 int saved_errno = errno;
1012 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1013 ui__error("%s", errbuf);
1020 if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
1021 usage_with_options(record_usage, record_options);
1023 list_for_each_entry(pos, &evsel_list->entries, node) {
1024 if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
/* User-supplied -c/-F override the defaults. */
1028 if (rec->opts.user_interval != ULLONG_MAX)
1029 rec->opts.default_interval = rec->opts.user_interval;
1030 if (rec->opts.user_freq != UINT_MAX)
1031 rec->opts.freq = rec->opts.user_freq;
1034 * User specified count overrides default frequency.
1036 if (rec->opts.default_interval)
1038 else if (rec->opts.freq) {
1039 rec->opts.default_interval = rec->opts.freq;
1041 ui__error("frequency and count are zero, aborting\n");
1046 err = __cmd_record(&record, argc, argv);
1048 perf_evlist__delete_maps(evsel_list);