#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif
	__TRACE_FIRST_TYPE = 0,
#define __field(type, item)		type	item;

#define __field_struct(type, item)	__field(type, item)

#define __field_desc(type, container, item)

#define __array(type, item, size)	type	item[size];

#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#define F_STRUCT(args...)		args
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
struct struct_name {						\
	struct trace_entry	ent;				\
	tstruct							\
}
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
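
/*
 * Illustrative sketch (hypothetical entry, not one of the real entries in
 * trace_entries.h): given the helper definitions above, an invocation such as
 *
 *	FTRACE_ENTRY(example, example_entry, TRACE_EXAMPLE,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__array(char, buf, 16)
 *		),
 *		F_printk("%lx", __entry->ip),
 *		FILTER_OTHER
 *	);
 *
 * expands to roughly:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		char			buf[16];
 *	};
 */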
/*
 * syscalls are special, and need special handling. This is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};
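
/*
 * Illustrative sketch (assumed consumer, mirroring the latency-format output
 * code): these bits live in a trace_entry's ->flags and are typically mapped
 * to single characters when a trace line is printed, e.g.:
 *
 *	irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
 *		   (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
 */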
#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};
struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * they continue examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	struct tracer		*current_trace;
	raw_spinlock_t		start_lock;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};
extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
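
/*
 * Illustrative sketch (assumed caller): per-CPU state hangs off
 * tr->trace_buffer.data, so code dealing with one CPU of a trace array
 * typically does something like:
 *
 *	struct trace_array_cpu *data;
 *
 *	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 *	seq_printf(m, "entries: %lu\n", data->entries);
 */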
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
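
/*
 * Illustrative sketch (assumed output-path caller): a print handler that
 * knows it was registered for TRACE_FN entries uses the verifier like this:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 *
 * A mismatch between the entry's type id and TRACE_FN would trigger the
 * WARN_ON() in IF_ASSIGN() above.
 */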
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
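
/*
 * Illustrative sketch (hypothetical names): a tracer declares its private
 * options as a zero-terminated tracer_opt array and points a tracer_flags
 * at it, e.g.:
 *
 *	static struct tracer_opt example_opts[] = {
 *		{ TRACER_OPT(example-verbose, 0x1) },
 *		{ }	<-- terminator
 *	};
 *
 *	static struct tracer_flags example_flags = {
 *		.val  = 0,
 *		.opts = example_opts,
 *	};
 *
 * The struct tracer below then sets .flags = &example_flags and handles
 * changes in its set_flag() callback.
 */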
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer_flags	*flags;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If the arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If the callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    Then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	/*
	 * Abuse of the trace_recursion:
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq context, because we may want to trace a particular
	 * function that was called from irq context while irq tracing is
	 * off. Since trace_recursion can only be modified by current, we
	 * can reuse it for this.
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
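
/*
 * Illustrative sketch (assumed caller, mirroring the function tracer): a
 * callback protects itself with the helpers defined below roughly like this:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 */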
static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	current->trace_recursion = val;
}
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
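
/*
 * Illustrative sketch (hypothetical tracer, not defined in this header): a
 * minimal tracer only needs a name plus init/reset callbacks before being
 * handed to register_tracer(), e.g.:
 *
 *	static struct tracer example_tracer __tracer_data = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static int __init init_example_trace(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */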
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
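
/*
 * Sketch (assumed caller): iterate only the CPUs covered by tracing, e.g.
 * when resetting a buffer by hand:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */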
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif /* CONFIG_FUNCTION_TRACER */

int ftrace_event_is_function(struct ftrace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return !!(parser->cont);
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
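
/*
 * Illustrative sketch (assumed write() handler): the parser helpers above are
 * typically used to pull one space-separated token out of a user write:
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		... act on the token in parser.buffer ...
 *	}
 *
 *	trace_parser_put(&parser);
 */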
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO	= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
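
/*
 * Sketch (assumed consumer): output code tests these bits against the global
 * trace_flags value declared above, e.g.:
 *
 *	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
 *		... print the pid/cpu/timestamp columns ...
 *
 *	if (!(trace_flags & TRACE_ITER_SYM_MASK))
 *		... print raw addresses instead of symbol names ...
 */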
extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);
struct ftrace_event_field {
	struct list_head	link;

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
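
/*
 * Illustrative sketch (assumed caller): glob-style filter strings are
 * classified before matching, roughly:
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(buff, strlen(buff), &search, &not);
 *
 * "search" is then matched against candidates according to "type", and the
 * result is inverted if "not" is set.
 */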
extern void print_event_filter(struct ftrace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct ftrace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
						 const char *system,
						 const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);
struct event_trigger_data {
	unsigned long			count;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	struct list_head		list;
};
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
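
/*
 * Illustrative sketch (hypothetical trigger): a concrete trigger fills this
 * structure in and returns it from its event_command get_trigger_ops()
 * callback, e.g.:
 *
 *	static struct event_trigger_ops example_trigger_ops = {
 *		.func	= example_trigger,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *		.print	= example_trigger_print,
 *	};
 *
 * event_trigger_init()/event_trigger_free() are the generic helpers referred
 * to in the comment above.
 */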
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/ftrace_event.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct ftrace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct ftrace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct ftrace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
extern int trace_event_enable_disable(struct ftrace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
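
/*
 * Sketch (assumed caller): used for rare, warning-style messages that must
 * not take the normal trace_printk() setup path, e.g.:
 *
 *	internal_trace_puts("*** EXAMPLE WARNING: snapshot not allocated ***\n");
 */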
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */