1 #ifndef __PERF_RECORD_H
2 #define __PERF_RECORD_H
10 #include "perf_regs.h"
13 struct perf_event_header header;
18 char filename[PATH_MAX];
22 struct perf_event_header header;
33 char filename[PATH_MAX];
37 struct perf_event_header header;
43 struct perf_event_header header;
50 struct perf_event_header header;
55 struct lost_samples_event {
56 struct perf_event_header header;
61 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
64 struct perf_event_header header;
72 struct throttle_event {
73 struct perf_event_header header;
/*
 * Convenience mask ORing the fixed "sample identity" fields
 * (ip/tid/time/addr/id/stream_id/cpu/period/identifier).
 * NOTE(review): presumably the set handled by the common sample-parsing
 * code — confirm against the users of this mask in the .c files.
 */
79 #define PERF_SAMPLE_MASK \
80 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
81 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
82 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
83 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
84 PERF_SAMPLE_IDENTIFIER)
86 /* perf sample has 16 bits size limit */
87 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
90 struct perf_event_header header;
99 /* Cached values/mask filled by first register access. */
100 u64 cache_regs[PERF_REGS_MAX];
110 struct sample_read_value {
121 struct sample_read_value *values;
123 struct sample_read_value one;
127 struct ip_callchain {
132 struct branch_flags {
140 struct branch_entry {
143 struct branch_flags flags;
146 struct branch_stack {
148 struct branch_entry entries[0];
152 PERF_IP_FLAG_BRANCH = 1ULL << 0,
153 PERF_IP_FLAG_CALL = 1ULL << 1,
154 PERF_IP_FLAG_RETURN = 1ULL << 2,
155 PERF_IP_FLAG_CONDITIONAL = 1ULL << 3,
156 PERF_IP_FLAG_SYSCALLRET = 1ULL << 4,
157 PERF_IP_FLAG_ASYNC = 1ULL << 5,
158 PERF_IP_FLAG_INTERRUPT = 1ULL << 6,
159 PERF_IP_FLAG_TX_ABORT = 1ULL << 7,
160 PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
161 PERF_IP_FLAG_TRACE_END = 1ULL << 9,
162 PERF_IP_FLAG_IN_TX = 1ULL << 10,
/*
 * One character per PERF_IP_FLAG_* bit, in bit order: branch, call,
 * return, conditional, syscallret, async, interrupt, tx_abort,
 * trace_begin, trace_end, in_tx.
 */
165 #define PERF_IP_FLAG_CHARS "bcrosyiABEx"
167 #define PERF_BRANCH_MASK (\
168 PERF_IP_FLAG_BRANCH |\
170 PERF_IP_FLAG_RETURN |\
171 PERF_IP_FLAG_CONDITIONAL |\
172 PERF_IP_FLAG_SYSCALLRET |\
173 PERF_IP_FLAG_ASYNC |\
174 PERF_IP_FLAG_INTERRUPT |\
175 PERF_IP_FLAG_TX_ABORT |\
176 PERF_IP_FLAG_TRACE_BEGIN |\
177 PERF_IP_FLAG_TRACE_END)
195 struct ip_callchain *callchain;
196 struct branch_stack *branch_stack;
197 struct regs_dump user_regs;
198 struct regs_dump intr_regs;
199 struct stack_dump user_stack;
200 struct sample_read read;
203 #define PERF_MEM_DATA_SRC_NONE \
204 (PERF_MEM_S(OP, NA) |\
205 PERF_MEM_S(LVL, NA) |\
206 PERF_MEM_S(SNOOP, NA) |\
207 PERF_MEM_S(LOCK, NA) |\
210 struct build_id_event {
211 struct perf_event_header header;
213 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
217 enum perf_user_event_type { /* above any possible kernel type */
218 PERF_RECORD_USER_TYPE_START = 64,
219 PERF_RECORD_HEADER_ATTR = 64,
220 PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
221 PERF_RECORD_HEADER_TRACING_DATA = 66,
222 PERF_RECORD_HEADER_BUILD_ID = 67,
223 PERF_RECORD_FINISHED_ROUND = 68,
224 PERF_RECORD_ID_INDEX = 69,
225 PERF_RECORD_AUXTRACE_INFO = 70,
226 PERF_RECORD_AUXTRACE = 71,
227 PERF_RECORD_AUXTRACE_ERROR = 72,
228 PERF_RECORD_HEADER_MAX
231 enum auxtrace_error_type {
232 PERF_AUXTRACE_ERROR_ITRACE = 1,
233 PERF_AUXTRACE_ERROR_MAX
237 * The kernel collects the number of events it couldn't send in a stretch and
238 * when possible sends this number in a PERF_RECORD_LOST event. The number of
239 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
240 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
241 * the sum of all struct lost_event.lost fields reported.
243 * The kernel discards mixed up samples and sends the number in a
244 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
245 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
246 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
247 * all struct lost_samples_event.lost fields reported.
249 * The total_period is needed because auto-freq is used by default, so
250 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency does not give
251 * the total number of low level events; it is necessary to sum all struct
252 * sample_event.period and stash the result in total_period.
254 struct events_stats {
256 u64 total_non_filtered_period;
258 u64 total_lost_samples;
259 u64 total_invalid_chains;
260 u32 nr_events[PERF_RECORD_HEADER_MAX];
261 u32 nr_non_filtered_samples;
263 u32 nr_unknown_events;
264 u32 nr_invalid_chains;
266 u32 nr_unprocessable_samples;
267 u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
268 u32 nr_proc_map_timeout;
272 struct perf_event_header header;
273 struct perf_event_attr attr;
/* Size of the perf_trace_event_type::name buffer below. */
277 #define MAX_EVENT_NAME 64
279 struct perf_trace_event_type {
281 char name[MAX_EVENT_NAME];
284 struct event_type_event {
285 struct perf_event_header header;
286 struct perf_trace_event_type event_type;
289 struct tracing_data_event {
290 struct perf_event_header header;
294 struct id_index_entry {
301 struct id_index_event {
302 struct perf_event_header header;
304 struct id_index_entry entries[0];
307 struct auxtrace_info_event {
308 struct perf_event_header header;
310 u32 reserved__; /* For alignment */
314 struct auxtrace_event {
315 struct perf_event_header header;
322 u32 reserved__; /* For alignment */
/* Size of the auxtrace_error_event::msg buffer below. */
325 #define MAX_AUXTRACE_ERROR_MSG 64
327 struct auxtrace_error_event {
328 struct perf_event_header header;
334 u32 reserved__; /* For alignment */
336 char msg[MAX_AUXTRACE_ERROR_MSG];
340 struct perf_event_header header;
346 struct itrace_start_event {
347 struct perf_event_header header;
352 struct perf_event_header header;
353 struct mmap_event mmap;
354 struct mmap2_event mmap2;
355 struct comm_event comm;
356 struct fork_event fork;
357 struct lost_event lost;
358 struct lost_samples_event lost_samples;
359 struct read_event read;
360 struct throttle_event throttle;
361 struct sample_event sample;
362 struct attr_event attr;
363 struct event_type_event event_type;
364 struct tracing_data_event tracing_data;
365 struct build_id_event build_id;
366 struct id_index_event id_index;
367 struct auxtrace_info_event auxtrace_info;
368 struct auxtrace_event auxtrace;
369 struct auxtrace_error_event auxtrace_error;
370 struct aux_event aux;
371 struct itrace_start_event itrace_start;
374 void perf_event__print_totals(void);
/*
 * Callback type used by the synthesize_*() routines below: invoked once
 * per generated event.  Returns an int status — NOTE(review): presumably
 * non-zero aborts the iteration; confirm in the callers.
 */
379 typedef int (*perf_event__handler_t)(struct perf_tool *tool,
380 union perf_event *event,
381 struct perf_sample *sample,
382 struct machine *machine);
/*
 * Synthesizers: fabricate event records from already-existing system
 * state and hand each one to 'process'.  NOTE(review): the exact record
 * types emitted (and the meaning of mmap_data/proc_map_timeout) are
 * defined in the corresponding .c — confirm there before relying on them.
 */
384 int perf_event__synthesize_thread_map(struct perf_tool *tool,
385 struct thread_map *threads,
386 perf_event__handler_t process,
387 struct machine *machine, bool mmap_data,
388 unsigned int proc_map_timeout);
389 int perf_event__synthesize_threads(struct perf_tool *tool,
390 perf_event__handler_t process,
391 struct machine *machine, bool mmap_data,
392 unsigned int proc_map_timeout);
393 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
394 perf_event__handler_t process,
395 struct machine *machine);
397 int perf_event__synthesize_modules(struct perf_tool *tool,
398 perf_event__handler_t process,
399 struct machine *machine);
/*
 * Default perf_tool callbacks (all match perf_event__handler_t).  Each
 * name indicates the event kind it handles — presumably the matching
 * PERF_RECORD_* record; perf_event__process() is the generic entry.
 * All return an int status code.
 */
401 int perf_event__process_comm(struct perf_tool *tool,
402 union perf_event *event,
403 struct perf_sample *sample,
404 struct machine *machine);
405 int perf_event__process_lost(struct perf_tool *tool,
406 union perf_event *event,
407 struct perf_sample *sample,
408 struct machine *machine);
409 int perf_event__process_lost_samples(struct perf_tool *tool,
410 union perf_event *event,
411 struct perf_sample *sample,
412 struct machine *machine);
413 int perf_event__process_aux(struct perf_tool *tool,
414 union perf_event *event,
415 struct perf_sample *sample,
416 struct machine *machine);
417 int perf_event__process_itrace_start(struct perf_tool *tool,
418 union perf_event *event,
419 struct perf_sample *sample,
420 struct machine *machine);
421 int perf_event__process_mmap(struct perf_tool *tool,
422 union perf_event *event,
423 struct perf_sample *sample,
424 struct machine *machine);
425 int perf_event__process_mmap2(struct perf_tool *tool,
426 union perf_event *event,
427 struct perf_sample *sample,
428 struct machine *machine);
429 int perf_event__process_fork(struct perf_tool *tool,
430 union perf_event *event,
431 struct perf_sample *sample,
432 struct machine *machine);
433 int perf_event__process_exit(struct perf_tool *tool,
434 union perf_event *event,
435 struct perf_sample *sample,
436 struct machine *machine);
437 int perf_event__process(struct perf_tool *tool,
438 union perf_event *event,
439 struct perf_sample *sample,
440 struct machine *machine);
/* Opaque here; defined elsewhere — only pointers are used below. */
442 struct addr_location;
/*
 * Resolve a sample event to an addr_location 'al'.
 * NOTE(review): pair with addr_location__put() below — looks like 'al'
 * holds a reference that must be dropped; confirm against the .c.
 */
444 int perf_event__preprocess_sample(const union perf_event *event,
445 struct machine *machine,
446 struct addr_location *al,
447 struct perf_sample *sample);
/* Release whatever perf_event__preprocess_sample() pinned into 'al'. */
449 void addr_location__put(struct addr_location *al);
/* Attr-based predicates for events carrying a meaningful sample address. */
453 bool is_bts_event(struct perf_event_attr *attr);
454 bool sample_addr_correlates_sym(struct perf_event_attr *attr);
455 void perf_event__preprocess_sample_addr(union perf_event *event,
456 struct perf_sample *sample,
457 struct thread *thread,
458 struct addr_location *al);
/* Human-readable name for a PERF_RECORD_*-style event type id. */
460 const char *perf_event__name(unsigned int id);
462 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
464 int perf_event__synthesize_sample(union perf_event *event, u64 type,
466 const struct perf_sample *sample,
469 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
470 union perf_event *event,
471 pid_t pid, pid_t tgid,
472 perf_event__handler_t process,
473 struct machine *machine,
475 unsigned int proc_map_timeout);
/*
 * Debug dumpers: print one event to 'fp'.  The size_t return is
 * presumably the number of characters written (fprintf-style) — confirm
 * in the implementations.  perf_event__fprintf() dispatches on type.
 */
477 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
478 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
479 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
480 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
481 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
482 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
483 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
/*
 * Look up the start address of 'symbol_name' in the given kallsyms file.
 * NOTE(review): behavior when the symbol is absent (0? error?) is not
 * visible here — check the definition before relying on it.
 */
485 u64 kallsyms__get_function_start(const char *kallsyms_filename,
486 const char *symbol_name);
488 #endif /* __PERF_RECORD_H */