/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
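
/*
 * Note: allocate_ftrace_ops() wires ops->private back to the
 * trace_array so that the callbacks registered on these ops (see
 * function_trace_call() below) can recover the instance they are
 * tracing via op->private.
 */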

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated
		 * at instance creation. Unless it failed
		 * the allocation.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
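
/*
 * Illustrative usage (not part of this file): both the global and the
 * instance paths above are normally exercised from tracefs, e.g.
 * (paths relative to a typical mount at /sys/kernel/debug/tracing):
 *
 *   echo function > current_tracer               # global trace_array
 *   mkdir instances/foo                          # allocates foo's tr->ops
 *   echo function > instances/foo/current_tracer # per-instance tracing
 */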

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
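
/*
 * Illustrative usage: with the function tracer active, the option
 * defined above appears as a tracefs knob (paths relative to a
 * typical mount at /sys/kernel/debug/tracing):
 *
 *   echo function > current_tracer
 *   echo 1 > options/func_stack_trace
 */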

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops = &trace_stack_ops;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops = &trace_ops;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.wait_pipe = poll_wait_pipe,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
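
/*
 * Illustrative check that the tracer registered (tracer name taken
 * from this file, paths from a typical tracefs mount):
 *
 *   cat available_tracers        # should list "function"
 *   echo function > current_tracer
 *   head trace
 */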

#ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
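
/*
 * Illustrative output of ftrace_probe_print(), as seen when reading
 * set_ftrace_filter with probes installed (function names are just
 * examples):
 *
 *   schedule:traceoff:count=4
 *   wake_up_process:stacktrace:unlimited
 */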

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
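
/*
 * Illustrative command syntax handled by this callback, written to
 * set_ftrace_filter (the function name is just an example):
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter    # fire on every hit
 *   echo 'schedule:traceoff:5' > set_ftrace_filter  # fire 5 times
 *   echo '!schedule:traceoff' > set_ftrace_filter   # remove the probe
 */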

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
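
/*
 * Illustrative usage of the dump commands (the function name is just
 * an example); both install a one-shot probe, per the "1" count above:
 *
 *   echo 'oops_enter:dump' > set_ftrace_filter     # dump all CPU buffers
 *   echo 'oops_enter:cpudump' > set_ftrace_filter  # dump current CPU only
 */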

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);