#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

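/*
 * Parse @str into an evsel, open it once unmodified on @cpu, let @fn tweak
 * its attributes and try to open it again.  Returns 0 if the kernel accepts
 * the tweaked event, -EINVAL if it rejects it, and -EAGAIN (or -ENOMEM) on
 * other failures.  When opening system-wide (pid == -1) fails with EACCES,
 * the probe falls back to the current process and remembers that choice in
 * the static pid for later calls.
 */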
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
        struct perf_evlist *evlist;
        struct perf_evsel *evsel;
        unsigned long flags = perf_event_open_cloexec_flag();
        int err = -EAGAIN, fd;
        static pid_t pid = -1;

        evlist = perf_evlist__new();
        if (!evlist)
                return -ENOMEM;

        if (parse_events(evlist, str, NULL))
                goto out_delete;

        evsel = perf_evlist__first(evlist);

        while (1) {
                fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
                if (fd < 0) {
                        if (pid == -1 && errno == EACCES) {
                                pid = 0;
                                continue;
                        }
                        goto out_delete;
                }
                break;
        }
        close(fd);

        fn(evsel);

        fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
        if (fd < 0) {
                if (errno == EINVAL)
                        err = -EINVAL;
                goto out_delete;
        }
        close(fd);
        err = 0;

out_delete:
        perf_evlist__delete(evlist);
        return err;
}

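/*
 * Run perf_do_probe_api() on the first CPU with a short list of simple
 * user-space events, moving on to the next event while the probe keeps
 * returning -EAGAIN.  Returns true as soon as one probe succeeds.
 */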
static bool perf_probe_api(setup_probe_fn_t fn)
{
        const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
        struct cpu_map *cpus;
        int cpu, ret, i = 0;

        cpus = cpu_map__new(NULL);
        if (!cpus)
                return false;
        cpu = cpus->map[0];
        cpu_map__put(cpus);

        do {
                ret = perf_do_probe_api(fn, cpu, try[i++]);
                if (!ret)
                        return true;
        } while (ret == -EAGAIN && try[i]);

        return false;
}

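/* setup_probe_fn_t callbacks: each sets the single attribute being probed. */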
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
        evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
        evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
        evsel->attr.context_switch = 1;
}

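/*
 * Feature tests: each wrapper reports whether the running kernel accepts
 * the corresponding attribute set by its probe callback.
 */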
bool perf_can_sample_identifier(void)
{
        return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
        return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
        return perf_probe_api(perf_probe_context_switch);
}

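/*
 * Apply @opts to the whole evlist: set group leaders, configure every
 * evsel, mark tracking events with comm_exec when the kernel supports it,
 * and switch all events to PERF_SAMPLE_IDENTIFIER when AUX-area tracing is
 * used or when the evsels do not share a common sample_type, so samples
 * can always be matched to their event.
 */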
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
        struct perf_evsel *evsel;
        bool use_sample_identifier = false;
        bool use_comm_exec;

        /*
         * Set the evsel leader links before we configure attributes,
         * since some might depend on this info.
         */
        if (opts->group)
                perf_evlist__set_leader(evlist);

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        use_comm_exec = perf_can_comm_exec();

        evlist__for_each(evlist, evsel) {
                perf_evsel__config(evsel, opts);
                if (evsel->tracking && use_comm_exec)
                        evsel->attr.comm_exec = 1;
        }

        if (opts->full_auxtrace) {
                /*
                 * Need to be able to synthesize and parse selected events with
                 * arbitrary sample types, which requires always being able to
                 * match the id.
                 */
                use_sample_identifier = perf_can_sample_identifier();
                evlist__for_each(evlist, evsel)
                        perf_evsel__set_sample_id(evsel, use_sample_identifier);
        } else if (evlist->nr_entries > 1) {
                struct perf_evsel *first = perf_evlist__first(evlist);

                evlist__for_each(evlist, evsel) {
                        if (evsel->attr.sample_type == first->attr.sample_type)
                                continue;
                        use_sample_identifier = perf_can_sample_identifier();
                        break;
                }
                evlist__for_each(evlist, evsel)
                        perf_evsel__set_sample_id(evsel, use_sample_identifier);
        }

        perf_evlist__set_id_pos(evlist);
}

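/* Read the current value of the kernel.perf_event_max_sample_rate sysctl. */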
static int get_max_rate(unsigned int *rate)
{
        return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

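/*
 * Resolve the sampling period/frequency from the user options and the
 * defaults, then check the result against perf_event_max_sample_rate:
 * a fixed period disables frequency mode, a user-specified frequency above
 * the limit is an error, and a too-high default frequency is clamped to
 * the limit with a warning.
 */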
static int record_opts__config_freq(struct record_opts *opts)
{
        bool user_freq = opts->user_freq != UINT_MAX;
        unsigned int max_rate;

        if (opts->user_interval != ULLONG_MAX)
                opts->default_interval = opts->user_interval;
        if (user_freq)
                opts->freq = opts->user_freq;

        /*
         * User specified count overrides default frequency.
         */
        if (opts->default_interval)
                opts->freq = 0;
        else if (opts->freq) {
                opts->default_interval = opts->freq;
        } else {
                pr_err("frequency and count are zero, aborting\n");
                return -1;
        }

        if (get_max_rate(&max_rate))
                return 0;

        /*
         * User specified frequency is over current maximum.
         */
        if (user_freq && (max_rate < opts->freq)) {
                pr_err("Maximum frequency rate (%u) reached.\n"
                   "Please use -F freq option with lower value or consider\n"
                   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
                   max_rate);
                return -1;
        }

        /*
         * Default frequency is over current maximum.
         */
        if (max_rate < opts->freq) {
                pr_warning("Lowering default frequency rate to %u.\n"
                           "Please consider tweaking "
                           "/proc/sys/kernel/perf_event_max_sample_rate.\n",
                           max_rate);
                opts->freq = max_rate;
        }

        return 0;
}

int record_opts__config(struct record_opts *opts)
{
        return record_opts__config_freq(opts);
}

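/*
 * Return true if @str parses into an event that can actually be opened on
 * one CPU taken from @evlist (or from a fresh system CPU map when @evlist
 * has no usable CPU map), falling back from system-wide to per-process
 * monitoring on EACCES.
 */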
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
        struct perf_evlist *temp_evlist;
        struct perf_evsel *evsel;
        int err, fd, cpu;
        bool ret = false;
        pid_t pid = -1;

        temp_evlist = perf_evlist__new();
        if (!temp_evlist)
                return false;

        err = parse_events(temp_evlist, str, NULL);
        if (err)
                goto out_delete;

        evsel = perf_evlist__last(temp_evlist);

        if (!evlist || cpu_map__empty(evlist->cpus)) {
                struct cpu_map *cpus = cpu_map__new(NULL);

                cpu = cpus ? cpus->map[0] : 0;
                cpu_map__put(cpus);
        } else {
                cpu = evlist->cpus->map[0];
        }

        while (1) {
                fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
                                         perf_event_open_cloexec_flag());
                if (fd < 0) {
                        if (pid == -1 && errno == EACCES) {
                                pid = 0;
                                continue;
                        }
                        goto out_delete;
                }
                break;
        }
        close(fd);
        ret = true;

out_delete:
        perf_evlist__delete(temp_evlist);
        return ret;
}