 * ring buffer based function tracer
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace_output.h"
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
bool ring_buffer_expanded;

 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
static bool __read_mostly tracing_selftest_running;

 * If a tracer is running, we do not want to run SELFTEST.
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
static struct tracer_flags dummy_tracer_flags = {
	.opts = dummy_tracer_opt
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)

 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
static DEFINE_PER_CPU(bool, trace_cmdline_save);

 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
enum ftrace_dump_mode ftrace_dump_on_oops;
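/*
 * For example (illustrative, not part of the original source): booting
 * with "ftrace_dump_on_oops" selects DUMP_ALL, booting with
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG, and at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * enables the all-CPU dump.
 */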
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	unsigned long		length;
union trace_enum_map_item;

struct trace_enum_map_tail {
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "enum_string"
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */

static DEFINE_MUTEX(trace_enum_mutex);

 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;

static union trace_enum_map_item *trace_enum_maps;
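/*
 * Illustrative layout only (hypothetical values): after saving three maps
 * for one module, the array would look roughly like:
 *
 *	trace_enum_maps[0].head.length == 3
 *	trace_enum_maps[0].head.mod    == the owning module (or NULL)
 *	trace_enum_maps[1..3].map      == the saved trace_enum_map entries
 *	trace_enum_maps[4].tail.next   == the next saved array, if any
 */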
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
	mutex_unlock(&trace_types_lock);

static void __trace_array_put(struct trace_array *this_tr)
	WARN_ON(!this_tr->ref);

void trace_array_put(struct trace_array *this_tr)
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
	/* Early boot up does not have a buffer yet */
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

cycle_t ftrace_now(int cpu)
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 * tracing_is_enabled - Show if global_trace has been enabled
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
int tracing_is_enabled(void)
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	return !global_trace.buffer_disabled;

 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * not to have to wait for all that output. Anyway, this can be
 * boot-time and run-time configurable.
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

 * trace_types_lock is used to protect the trace_types list.
DEFINE_MUTEX(trace_types_lock);

 * serialize the access of the ring buffer
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 * These primitives allow multi-process access to different cpu ring
 * buffers.
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
		/* gain it for accessing a cpu ring buffer. */

		/* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Second, block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));

static inline void trace_access_unlock(int cpu)
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);

static inline void trace_access_lock_init(void)
	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
	mutex_lock(&access_lock);

static inline void trace_access_unlock(int cpu)
	mutex_unlock(&access_lock);

static inline void trace_access_lock_init(void)
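/*
 * A minimal usage sketch (illustrative only): a reader of one CPU's
 * buffer brackets its consuming reads like this:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock
 * exclusively, blocking all per-cpu readers for the duration.
 */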
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      int skip, int pc, struct pt_regs *regs);
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					int skip, int pc, struct pt_regs *regs)
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      int skip, int pc, struct pt_regs *regs)

static void tracer_tracing_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */

 * tracing_on - enable tracing buffers
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
void tracing_on(void)
	tracer_tracing_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_on);
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
int __trace_puts(unsigned long ip, const char *str, int size)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
	entry = ring_buffer_event_data(event);

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
EXPORT_SYMBOL_GPL(__trace_puts);
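/*
 * Illustrative use only: callers normally reach this through the
 * trace_puts() macro, which supplies the caller address and length,
 * roughly equivalent to:
 *
 *	__trace_puts(_THIS_IP_, "hello\n", 6);
 */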
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
int __trace_bputs(unsigned long ip, const char *str)
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
	entry = ring_buffer_event_data(event);

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
 * tracing_snapshot - take a snapshot of the current buffer.
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
void tracing_snapshot(void)
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;

		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");

	/* Note, the snapshot cannot be used while the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
EXPORT_SYMBOL_GPL(tracing_snapshot);
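/*
 * A minimal sketch (illustrative; "interesting_condition" is
 * hypothetical): freeze the interesting data without stopping the live
 * trace:
 *
 *	if (interesting_condition)
 *		tracing_snapshot();
 *
 * The snapshot can then be read from
 * /sys/kernel/debug/tracing/snapshot while tracing continues.
 */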
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);

		tr->allocated_snapshot = true;

static void free_snapshot(struct trace_array *tr)
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;

 * tracing_alloc_snapshot - allocate snapshot buffer.
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
int tracing_alloc_snapshot(void)
	struct trace_array *tr = &global_trace;

	ret = alloc_snapshot(tr);
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
void tracing_snapshot_alloc(void)
	ret = tracing_alloc_snapshot();
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
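/*
 * Illustrative pairing only: when the snapshot must be triggered from a
 * context that cannot sleep, allocate the spare buffer ahead of time
 * from a sleepable context:
 *
 *	tracing_alloc_snapshot();	// may sleep, do this early
 *	...
 *	tracing_snapshot();		// safe later, even from atomic context
 */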
void tracing_snapshot(void)
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */

 * tracing_off - turn off tracing buffers
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
void tracing_off(void)
	tracer_tracing_off(&global_trace);
EXPORT_SYMBOL_GPL(tracing_off);
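/*
 * Illustrative only ("broken_condition" is hypothetical): a common
 * debugging pattern is to stop recording right where a problem is
 * detected, so the ring buffer preserves the events leading up to it:
 *
 *	if (unlikely(broken_condition))
 *		tracing_off();
 */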
void disable_trace_on_warning(void)
	if (__disable_trace_on_warning)

 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 * Shows the real state of the ring buffer: whether it is enabled or not.
static int tracer_tracing_is_on(struct trace_array *tr)
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;

 * tracing_is_on - show state of ring buffers enabled
int tracing_is_on(void)
	return tracer_tracing_is_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
	unsigned long buf_size;

	buf_size = memparse(str, &str);
	/* nr_entries cannot be zero */
	trace_buf_size = buf_size;
__setup("trace_buf_size=", set_buf_size);
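/*
 * For example (illustrative): booting with "trace_buf_size=1M" requests
 * a one-megabyte per-cpu buffer; memparse() accepts the usual K/M/G
 * suffixes.
 */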
static int __init set_tracing_thresh(char *str)
	unsigned long threshold;

	ret = kstrtoul(str, 0, &threshold);
	tracing_thresh = threshold * 1000;
__setup("tracing_thresh=", set_tracing_thresh);
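/*
 * For example (illustrative): booting with "tracing_thresh=100" sets the
 * threshold to 100 microseconds; the value is stored internally in
 * nanoseconds, hence the multiplication by 1000 above.
 */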
unsigned long nsecs_to_usecs(unsigned long nsecs)

 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {

	int in_ns;	/* is this clock in nanoseconds? */
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
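/*
 * Illustrative only: the trace clock is selected at run time by writing
 * one of the names above, e.g.:
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 *
 * or at boot with the "trace_clock=" parameter handled earlier.
 */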
 * trace_parser_get_init - gets the buffer for trace parser
int trace_parser_get_init(struct trace_parser *parser, int size)
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);

 * trace_parser_put - frees the buffer for trace parser
void trace_parser_put(struct trace_parser *parser)
	kfree(parser->buffer);

 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 * Returns number of bytes read.
 * See kernel/trace/trace.h for 'struct trace_parser' details.
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);

	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);

	/* only spaces were written */

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;

		ret = get_user(ch, ubuf++);

	/* We either got finished input or we have to wait for another call. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->buffer[parser->idx++] = ch;
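/*
 * Illustrative caller sketch (error handling elided; assumes the usual
 * trace_parser helpers): a write() handler typically pulls one
 * space-separated word at a time:
 *
 *	struct trace_parser parser;
 *
 *	trace_parser_get_init(&parser, PAGE_SIZE);
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		// consume parser.buffer here
 *	trace_parser_put(&parser);
 */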
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
	if (trace_seq_used(s) <= s->seq.readpos)

	len = trace_seq_used(s) - s->seq.readpos;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
		max_data->uid = current_uid();
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);

 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	struct ring_buffer *buf;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);

 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk: the task with the latency
 * @cpu: the cpu of the buffer to copy.
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;

	if (!type->selftest || tracing_selftest_disabled)

	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */

	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);

	printk(KERN_CONT "PASSED\n");
static inline int run_tracer_selftest(struct tracer *type)
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 * Register a new plugin tracer.
int __init register_tracer(struct tracer *type)
		pr_info("Tracer must have a name\n");

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			pr_info("Tracer %s already registered\n",

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
		type->flags = &dummy_tracer_flags;
	if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);

	type->next = trace_types;
	add_tracer_options(&global_trace, type);

	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this tracer will break them */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
void tracing_reset(struct trace_buffer *buf, int cpu)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);

void tracing_reset_online_cpus(struct trace_buffer *buf)
	struct ring_buffer *buffer = buf->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	char *saved_cmdlines;
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];

static inline void set_cmdline(int idx, const char *cmdline)
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
	if (!s->map_cmdline_to_pid)

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);

	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

static int trace_create_savedcmd(void)
	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);

int is_tracing_stopped(void)
	return global_trace.stop_count;

 * tracing_start - quick start of the tracer
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
void tracing_start(void)
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			global_trace.stop_count = 0;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&global_trace.max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_start_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_enable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);

 * tracing_stop - quick stop of the tracer
 * Lightweight way to stop tracing. Use in conjunction with
void tracing_stop(void)
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&global_trace.max_lock);

	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_stop_tr(struct trace_array *tr)
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)

	buffer = tr->trace_buffer.buffer;
		ring_buffer_record_disable(buffer);

	raw_spin_unlock_irqrestore(&tr->start_lock, flags);

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))

	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	if (!arch_spin_trylock(&trace_cmdline_lock))

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;

	set_cmdline(idx, tsk->comm);
	saved_tgids[idx] = tsk->tgid;
	arch_spin_unlock(&trace_cmdline_lock);
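/*
 * Illustrative example (hypothetical values): if pid 1234 running "bash"
 * is saved into slot idx, the tables above end up cross-linked as:
 *
 *	savedcmd->map_pid_to_cmdline[1234] == idx
 *	savedcmd->map_cmdline_to_pid[idx]  == 1234
 *	get_saved_cmdlines(idx)            == "bash"
 *	saved_tgids[idx]                   == the task's tgid
 */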
static void __trace_find_cmdline(int pid, char comm[])
		strcpy(comm, "<idle>");

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
		strcpy(comm, "<...>");

void trace_find_cmdline(int pid, char comm[])
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);

int trace_find_tgid(int pid)
	arch_spin_lock(&trace_cmdline_lock);
	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		tgid = saved_tgids[map];

	arch_spin_unlock(&trace_cmdline_lock);

void tracing_record_cmdline(struct task_struct *tsk)
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())

	if (!__this_cpu_read(trace_cmdline_save))

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);

tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		TRACE_FLAG_IRQS_NOSUPPORT |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  unsigned long flags, int pc)
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);

__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
	ring_buffer_discard_commit(buffer, event);
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;

	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;

		/* From now on, use_stack is a boolean */

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
			save_stack_trace_regs(regs, &trace);
			save_stack_trace(&trace);

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	/* Again, don't let gcc optimize things here */
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);

 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
void trace_dump_stack(int skip)
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)

	local_save_flags(flags);

	 * Skip 3 more, seems to get us at the caller of
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);

static DEFINE_PER_CPU(int, user_stack_count);

ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))

	 * NMIs cannot handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	if (unlikely(in_nmi()))

	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	if (__this_cpu_read(user_stack_count))

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	__this_cpu_dec(user_stack_count);

static void __trace_userstack(struct trace_array *tr, unsigned long flags)
	ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * NMI context. This allows for lockless recording.
 * Note, if the buffers failed to be allocated, then this returns NULL
static char *get_trace_buf(void)
	struct trace_buffer_struct *percpu_buffer;

	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
		percpu_buffer = trace_percpu_nmi_buffer;
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
		percpu_buffer = trace_percpu_buffer;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
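/*
 * Illustrative selection only (assuming the usual context tests):
 * in_nmi() maps to trace_percpu_nmi_buffer, hard irq context to
 * trace_percpu_irq_buffer, in_softirq() to trace_percpu_sirq_buffer,
 * and everything else to trace_percpu_buffer.
 */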
static int alloc_percpu_trace_buffer(void)
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);

	irq_buffers = alloc_percpu(struct trace_buffer_struct);

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	free_percpu(irq_buffers);
	free_percpu(sirq_buffers);
	free_percpu(buffers);
	WARN(1, "Could not allocate percpu trace_printk buffer");

static int buffers_allocated;

void trace_printk_init_buffers(void)
	if (buffers_allocated)

	if (alloc_percpu_trace_buffer())

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();

void trace_printk_start_comm(void)
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
	tracing_start_cmdline_record();

static void trace_printk_start_stop_comm(int enabled)
	if (!buffers_allocated)

		tracing_start_cmdline_record();
		tracing_stop_cmdline_record();

 * trace_vbprintk - write binary msg to tracing buffer
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
	entry = ring_buffer_event_data(event);

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);

	preempt_enable_notrace();
	unpause_graph_tracing();
EXPORT_SYMBOL_GPL(trace_vbprintk);
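/*
 * Illustrative note: this is normally reached through the trace_printk()
 * macro when the format is a compile-time constant, e.g.:
 *
 *	trace_printk("x=%d y=%d\n", x, y);
 *
 * Only the format pointer and the binary arguments are recorded here;
 * the text is formatted later, when the buffer is read.
 */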
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
	entry = ring_buffer_event_data(event);

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);

	preempt_enable_notrace();
	unpause_graph_tracing();

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	ret = trace_array_vprintk(tr, ip, fmt, ap);

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))

	ret = __trace_array_vprintk(buffer, ip, fmt, ap);

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
	return trace_array_vprintk(&global_trace, ip, fmt, args);
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

		ring_buffer_read(buf_iter, NULL);

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

		event = ring_buffer_iter_peek(buf_iter, ts);
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,

		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;

	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all cpus; peek directly.
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
			*ent_cpu = cpu_file;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		 * Pick the entry with the smallest timestamp:
		if (ent && (!next || ts < next_ts)) {
			next_lost = lost_events;
			next_size = iter->ent_size;

	iter->ent_size = next_size;

		*ent_cpu = next_cpu;

		*missing_events = next_lost;
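/*
 * Descriptive note: the loop above is effectively a k-way merge. Every
 * CPU buffer is peeked and the entry with the smallest timestamp wins,
 * so the combined stream comes out time-ordered even though each CPU
 * records independently.
 */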
2386 /* Find the next real entry, without updating the iterator itself */
2387 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2388 int *ent_cpu, u64 *ent_ts)
2390 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2393 /* Find the next real entry, and increment the iterator to the next entry */
2394 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2396 iter->ent = __find_next_entry(iter, &iter->cpu,
2397 &iter->lost_events, &iter->ts);
2400 trace_iterator_increment(iter);
2402 return iter->ent ? iter : NULL;
2405 static void trace_consume(struct trace_iterator *iter)
2407 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2408 &iter->lost_events);
2411 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2413 struct trace_iterator *iter = m->private;
2417 WARN_ON_ONCE(iter->leftover);
2421 /* can't go backwards */
2426 ent = trace_find_next_entry_inc(iter);
2430 while (ent && iter->idx < i)
2431 ent = trace_find_next_entry_inc(iter);
2438 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2440 struct ring_buffer_event *event;
2441 struct ring_buffer_iter *buf_iter;
2442 unsigned long entries = 0;
2445 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2447 buf_iter = trace_buffer_iter(iter, cpu);
2451 ring_buffer_iter_reset(buf_iter);
2454 * With the max latency tracers we can hit the case where a reset
2455 * never took place on a cpu. This shows up as the timestamp being
2456 * before the start of the buffer.
2458 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2459 if (ts >= iter->trace_buffer->time_start)
2462 ring_buffer_read(buf_iter, NULL);
2465 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2469 * The current tracer is copied to avoid a global locking
2472 static void *s_start(struct seq_file *m, loff_t *pos)
2474 struct trace_iterator *iter = m->private;
2475 struct trace_array *tr = iter->tr;
2476 int cpu_file = iter->cpu_file;
2482 * Copy the tracer to avoid using a global lock all around.
2483 * iter->trace is a copy of current_trace; comparing the name
2484 * pointers is enough (no strcmp() needed), as iter->trace->name
2485 * will point to the same string as current_trace->name.
2487 mutex_lock(&trace_types_lock);
2488 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2489 *iter->trace = *tr->current_trace;
2490 mutex_unlock(&trace_types_lock);
2492 #ifdef CONFIG_TRACER_MAX_TRACE
2493 if (iter->snapshot && iter->trace->use_max_tr)
2494 return ERR_PTR(-EBUSY);
2497 if (!iter->snapshot)
2498 atomic_inc(&trace_record_cmdline_disabled);
2500 if (*pos != iter->pos) {
2505 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2506 for_each_tracing_cpu(cpu)
2507 tracing_iter_reset(iter, cpu);
2509 tracing_iter_reset(iter, cpu_file);
2512 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2517 * If we overflowed the seq_file before, then we want
2518 * to just reuse the trace_seq buffer again.
2524 p = s_next(m, p, &l);
2528 trace_event_read_lock();
2529 trace_access_lock(cpu_file);
2533 static void s_stop(struct seq_file *m, void *p)
2535 struct trace_iterator *iter = m->private;
2537 #ifdef CONFIG_TRACER_MAX_TRACE
2538 if (iter->snapshot && iter->trace->use_max_tr)
2542 if (!iter->snapshot)
2543 atomic_dec(&trace_record_cmdline_disabled);
2545 trace_access_unlock(iter->cpu_file);
2546 trace_event_read_unlock();
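/*
 * How the callbacks above fit together (a sketch of the generic
 * seq_file contract, not tracing-specific): for a read of the "trace"
 * file the seq_file core roughly does
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * so s_start()/s_stop() are where the locks that s_show() relies on
 * are taken and dropped.
 */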
2550 get_total_entries(struct trace_buffer *buf,
2551 unsigned long *total, unsigned long *entries)
2553 unsigned long count;
2559 for_each_tracing_cpu(cpu) {
2560 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2562 * If this buffer has skipped entries, then we hold all
2563 * entries for the trace and we need to ignore the
2564 * ones before the time stamp.
2566 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2567 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2568 /* total is the same as the entries */
2572 ring_buffer_overrun_cpu(buf->buffer, cpu);
2577 static void print_lat_help_header(struct seq_file *m)
2579 seq_puts(m, "# _------=> CPU# \n"
2580 "# / _-----=> irqs-off \n"
2581 "# | / _----=> need-resched \n"
2582 "# || / _---=> hardirq/softirq \n"
2583 "# ||| / _--=> preempt-depth \n"
2585 "# cmd pid ||||| time | caller \n"
2586 "# \\ / ||||| \\ | / \n");
2589 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2591 unsigned long total;
2592 unsigned long entries;
2594 get_total_entries(buf, &total, &entries);
2595 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2596 entries, total, num_online_cpus());
2600 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2602 print_event_info(buf, m);
2603 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2607 static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
2609 print_event_info(buf, m);
2610 seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n");
2611 seq_puts(m, "# | | | | | |\n");
2614 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2616 print_event_info(buf, m);
2617 seq_puts(m, "# _-----=> irqs-off\n"
2618 "# / _----=> need-resched\n"
2619 "# | / _---=> hardirq/softirq\n"
2620 "# || / _--=> preempt-depth\n"
2622 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2623 "# | | | |||| | |\n");
2626 static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
2628 print_event_info(buf, m);
2629 seq_puts(m, "# _-----=> irqs-off\n");
2630 seq_puts(m, "# / _----=> need-resched\n");
2631 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2632 seq_puts(m, "# || / _--=> preempt-depth\n");
2633 seq_puts(m, "# ||| / delay\n");
2634 seq_puts(m, "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n");
2635 seq_puts(m, "# | | | | |||| | |\n");
2639 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2641 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2642 struct trace_buffer *buf = iter->trace_buffer;
2643 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2644 struct tracer *type = iter->trace;
2645 unsigned long entries;
2646 unsigned long total;
2647 const char *name = "preemption";
2651 get_total_entries(buf, &total, &entries);
2653 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2655 seq_puts(m, "# -----------------------------------"
2656 "---------------------------------\n");
2657 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2658 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2659 nsecs_to_usecs(data->saved_latency),
2663 #if defined(CONFIG_PREEMPT_NONE)
2665 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2667 #elif defined(CONFIG_PREEMPT)
2672 /* These are reserved for later use */
2675 seq_printf(m, " #P:%d)\n", num_online_cpus());
2679 seq_puts(m, "# -----------------\n");
2680 seq_printf(m, "# | task: %.16s-%d "
2681 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2682 data->comm, data->pid,
2683 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2684 data->policy, data->rt_priority);
2685 seq_puts(m, "# -----------------\n");
2687 if (data->critical_start) {
2688 seq_puts(m, "# => started at: ");
2689 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2690 trace_print_seq(m, &iter->seq);
2691 seq_puts(m, "\n# => ended at: ");
2692 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2693 trace_print_seq(m, &iter->seq);
2694 seq_puts(m, "\n#\n");
2700 static void test_cpu_buff_start(struct trace_iterator *iter)
2702 struct trace_seq *s = &iter->seq;
2703 struct trace_array *tr = iter->tr;
2705 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2708 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2711 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2714 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2718 cpumask_set_cpu(iter->cpu, iter->started);
2720 /* Don't print started cpu buffer for the first entry of the trace */
2722 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2726 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2728 struct trace_array *tr = iter->tr;
2729 struct trace_seq *s = &iter->seq;
2730 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2731 struct trace_entry *entry;
2732 struct trace_event *event;
2736 test_cpu_buff_start(iter);
2738 event = ftrace_find_event(entry->type);
2740 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2741 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2742 trace_print_lat_context(iter);
2744 trace_print_context(iter);
2747 if (trace_seq_has_overflowed(s))
2748 return TRACE_TYPE_PARTIAL_LINE;
2751 return event->funcs->trace(iter, sym_flags, event);
2753 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2755 return trace_handle_return(s);
2758 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2760 struct trace_array *tr = iter->tr;
2761 struct trace_seq *s = &iter->seq;
2762 struct trace_entry *entry;
2763 struct trace_event *event;
2767 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2768 trace_seq_printf(s, "%d %d %llu ",
2769 entry->pid, iter->cpu, iter->ts);
2771 if (trace_seq_has_overflowed(s))
2772 return TRACE_TYPE_PARTIAL_LINE;
2774 event = ftrace_find_event(entry->type);
2776 return event->funcs->raw(iter, 0, event);
2778 trace_seq_printf(s, "%d ?\n", entry->type);
2780 return trace_handle_return(s);
2783 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2785 struct trace_array *tr = iter->tr;
2786 struct trace_seq *s = &iter->seq;
2787 unsigned char newline = '\n';
2788 struct trace_entry *entry;
2789 struct trace_event *event;
2793 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2794 SEQ_PUT_HEX_FIELD(s, entry->pid);
2795 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2796 SEQ_PUT_HEX_FIELD(s, iter->ts);
2797 if (trace_seq_has_overflowed(s))
2798 return TRACE_TYPE_PARTIAL_LINE;
2801 event = ftrace_find_event(entry->type);
2803 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2804 if (ret != TRACE_TYPE_HANDLED)
2808 SEQ_PUT_FIELD(s, newline);
2810 return trace_handle_return(s);
2813 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2815 struct trace_array *tr = iter->tr;
2816 struct trace_seq *s = &iter->seq;
2817 struct trace_entry *entry;
2818 struct trace_event *event;
2822 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2823 SEQ_PUT_FIELD(s, entry->pid);
2824 SEQ_PUT_FIELD(s, iter->cpu);
2825 SEQ_PUT_FIELD(s, iter->ts);
2826 if (trace_seq_has_overflowed(s))
2827 return TRACE_TYPE_PARTIAL_LINE;
2830 event = ftrace_find_event(entry->type);
2831 return event ? event->funcs->binary(iter, 0, event) :
2835 int trace_empty(struct trace_iterator *iter)
2837 struct ring_buffer_iter *buf_iter;
2840 /* If we are looking at one CPU buffer, only check that one */
2841 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2842 cpu = iter->cpu_file;
2843 buf_iter = trace_buffer_iter(iter, cpu);
2845 if (!ring_buffer_iter_empty(buf_iter))
2848 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2854 for_each_tracing_cpu(cpu) {
2855 buf_iter = trace_buffer_iter(iter, cpu);
2857 if (!ring_buffer_iter_empty(buf_iter))
2860 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2868 /* Called with trace_event_read_lock() held. */
2869 enum print_line_t print_trace_line(struct trace_iterator *iter)
2871 struct trace_array *tr = iter->tr;
2872 unsigned long trace_flags = tr->trace_flags;
2873 enum print_line_t ret;
2875 if (iter->lost_events) {
2876 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2877 iter->cpu, iter->lost_events);
2878 if (trace_seq_has_overflowed(&iter->seq))
2879 return TRACE_TYPE_PARTIAL_LINE;
2882 if (iter->trace && iter->trace->print_line) {
2883 ret = iter->trace->print_line(iter);
2884 if (ret != TRACE_TYPE_UNHANDLED)
2888 if (iter->ent->type == TRACE_BPUTS &&
2889 trace_flags & TRACE_ITER_PRINTK &&
2890 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2891 return trace_print_bputs_msg_only(iter);
2893 if (iter->ent->type == TRACE_BPRINT &&
2894 trace_flags & TRACE_ITER_PRINTK &&
2895 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2896 return trace_print_bprintk_msg_only(iter);
2898 if (iter->ent->type == TRACE_PRINT &&
2899 trace_flags & TRACE_ITER_PRINTK &&
2900 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2901 return trace_print_printk_msg_only(iter);
2903 if (trace_flags & TRACE_ITER_BIN)
2904 return print_bin_fmt(iter);
2906 if (trace_flags & TRACE_ITER_HEX)
2907 return print_hex_fmt(iter);
2909 if (trace_flags & TRACE_ITER_RAW)
2910 return print_raw_fmt(iter);
2912 return print_trace_fmt(iter);
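/*
 * The dispatch above is driven from user space through the
 * trace_options file.  Assuming a mounted tracefs (paths may vary):
 *
 *	echo bin > trace_options	# print_bin_fmt()
 *	echo hex > trace_options	# print_hex_fmt()
 *	echo nohex > trace_options	# back toward the default
 *	echo raw > trace_options	# print_raw_fmt()
 */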
2915 void trace_latency_header(struct seq_file *m)
2917 struct trace_iterator *iter = m->private;
2918 struct trace_array *tr = iter->tr;
2920 /* print nothing if the buffers are empty */
2921 if (trace_empty(iter))
2924 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2925 print_trace_header(m, iter);
2927 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2928 print_lat_help_header(m);
2931 void trace_default_header(struct seq_file *m)
2933 struct trace_iterator *iter = m->private;
2934 struct trace_array *tr = iter->tr;
2935 unsigned long trace_flags = tr->trace_flags;
2937 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2940 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2941 /* print nothing if the buffers are empty */
2942 if (trace_empty(iter))
2944 print_trace_header(m, iter);
2945 if (!(trace_flags & TRACE_ITER_VERBOSE))
2946 print_lat_help_header(m);
2948 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2949 if (trace_flags & TRACE_ITER_IRQ_INFO)
2950 if (trace_flags & TRACE_ITER_TGID)
2951 print_func_help_header_irq_tgid(iter->trace_buffer, m);
2953 print_func_help_header_irq(iter->trace_buffer, m);
2955 if (trace_flags & TRACE_ITER_TGID)
2956 print_func_help_header_tgid(iter->trace_buffer, m);
2958 print_func_help_header(iter->trace_buffer, m);
2963 static void test_ftrace_alive(struct seq_file *m)
2965 if (!ftrace_is_dead())
2967 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2968 "# MAY BE MISSING FUNCTION EVENTS\n");
2971 #ifdef CONFIG_TRACER_MAX_TRACE
2972 static void show_snapshot_main_help(struct seq_file *m)
2974 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2975 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2976 "# Takes a snapshot of the main buffer.\n"
2977 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2978 "# (Doesn't have to be '2' works with any number that\n"
2979 "# is not a '0' or '1')\n");
2982 static void show_snapshot_percpu_help(struct seq_file *m)
2984 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2985 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2986 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2987 "# Takes a snapshot of the main buffer for this cpu.\n");
2989 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2990 "# Must use main snapshot file to allocate.\n");
2992 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2993 "# (Doesn't have to be '2' works with any number that\n"
2994 "# is not a '0' or '1')\n");
2997 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2999 if (iter->tr->allocated_snapshot)
3000 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3002 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3004 seq_puts(m, "# Snapshot commands:\n");
3005 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3006 show_snapshot_main_help(m);
3008 show_snapshot_percpu_help(m);
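/*
 * A typical session matching the help text above (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	echo 1 > snapshot	# allocate (if needed) and take a snapshot
 *	cat snapshot		# read the frozen copy while tracing runs
 *	echo 2 > snapshot	# clear it, keeping the buffer allocated
 *	echo 0 > snapshot	# free the snapshot buffer
 */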
3011 /* Should never be called */
3012 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3015 static int s_show(struct seq_file *m, void *v)
3017 struct trace_iterator *iter = v;
3020 if (iter->ent == NULL) {
3022 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3024 test_ftrace_alive(m);
3026 if (iter->snapshot && trace_empty(iter))
3027 print_snapshot_help(m, iter);
3028 else if (iter->trace && iter->trace->print_header)
3029 iter->trace->print_header(m);
3031 trace_default_header(m);
3033 } else if (iter->leftover) {
3035 * If we filled the seq_file buffer earlier, we
3036 * want to just show it now.
3038 ret = trace_print_seq(m, &iter->seq);
3040 /* ret should this time be zero, but you never know */
3041 iter->leftover = ret;
3044 print_trace_line(iter);
3045 ret = trace_print_seq(m, &iter->seq);
3047 * If we overflow the seq_file buffer, then it will
3048 * ask us for this data again at start up.
3050 * ret is 0 if seq_file write succeeded.
3053 iter->leftover = ret;
3060 * Should be used after trace_array_get(); trace_types_lock
3061 * ensures that i_cdev was already initialized.
3063 static inline int tracing_get_cpu(struct inode *inode)
3065 if (inode->i_cdev) /* See trace_create_cpu_file() */
3066 return (long)inode->i_cdev - 1;
3067 return RING_BUFFER_ALL_CPUS;
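/*
 * The encoding counterpart lives in trace_create_cpu_file(), which
 * stashes "cpu + 1" in the inode (a sketch of that one line):
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 *
 * so a NULL i_cdev, the default for every non-per-cpu tracing file,
 * decodes to RING_BUFFER_ALL_CPUS here.
 */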
3070 static const struct seq_operations tracer_seq_ops = {
3077 static struct trace_iterator *
3078 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3080 struct trace_array *tr = inode->i_private;
3081 struct trace_iterator *iter;
3084 if (tracing_disabled)
3085 return ERR_PTR(-ENODEV);
3087 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3089 return ERR_PTR(-ENOMEM);
3091 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3093 if (!iter->buffer_iter)
3097 * We make a copy of the current tracer to avoid concurrent
3098 * changes on it while we are reading.
3100 mutex_lock(&trace_types_lock);
3101 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3105 *iter->trace = *tr->current_trace;
3107 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3112 #ifdef CONFIG_TRACER_MAX_TRACE
3113 /* Currently only the top directory has a snapshot */
3114 if (tr->current_trace->print_max || snapshot)
3115 iter->trace_buffer = &tr->max_buffer;
3118 iter->trace_buffer = &tr->trace_buffer;
3119 iter->snapshot = snapshot;
3121 iter->cpu_file = tracing_get_cpu(inode);
3122 mutex_init(&iter->mutex);
3124 /* Notify the tracer early; before we stop tracing. */
3125 if (iter->trace && iter->trace->open)
3126 iter->trace->open(iter);
3128 /* Annotate start of buffers if we had overruns */
3129 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3130 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3132 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3133 if (trace_clocks[tr->clock_id].in_ns)
3134 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3136 /* stop the trace while dumping if we are not opening "snapshot" */
3137 if (!iter->snapshot)
3138 tracing_stop_tr(tr);
3140 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3141 for_each_tracing_cpu(cpu) {
3142 iter->buffer_iter[cpu] =
3143 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3145 ring_buffer_read_prepare_sync();
3146 for_each_tracing_cpu(cpu) {
3147 ring_buffer_read_start(iter->buffer_iter[cpu]);
3148 tracing_iter_reset(iter, cpu);
3151 cpu = iter->cpu_file;
3152 iter->buffer_iter[cpu] =
3153 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3154 ring_buffer_read_prepare_sync();
3155 ring_buffer_read_start(iter->buffer_iter[cpu]);
3156 tracing_iter_reset(iter, cpu);
3159 mutex_unlock(&trace_types_lock);
3164 mutex_unlock(&trace_types_lock);
3166 kfree(iter->buffer_iter);
3168 seq_release_private(inode, file);
3169 return ERR_PTR(-ENOMEM);
3172 int tracing_open_generic(struct inode *inode, struct file *filp)
3174 if (tracing_disabled)
3177 filp->private_data = inode->i_private;
3181 bool tracing_is_disabled(void)
3183 return tracing_disabled ? true : false;
3187 * Open and update trace_array ref count.
3188 * Must have the current trace_array passed to it.
3190 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3192 struct trace_array *tr = inode->i_private;
3194 if (tracing_disabled)
3197 if (trace_array_get(tr) < 0)
3200 filp->private_data = inode->i_private;
3205 static int tracing_release(struct inode *inode, struct file *file)
3207 struct trace_array *tr = inode->i_private;
3208 struct seq_file *m = file->private_data;
3209 struct trace_iterator *iter;
3212 if (!(file->f_mode & FMODE_READ)) {
3213 trace_array_put(tr);
3217 /* Writes do not use seq_file */
3219 mutex_lock(&trace_types_lock);
3221 for_each_tracing_cpu(cpu) {
3222 if (iter->buffer_iter[cpu])
3223 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3226 if (iter->trace && iter->trace->close)
3227 iter->trace->close(iter);
3229 if (!iter->snapshot)
3230 /* reenable tracing if it was previously enabled */
3231 tracing_start_tr(tr);
3233 __trace_array_put(tr);
3235 mutex_unlock(&trace_types_lock);
3237 mutex_destroy(&iter->mutex);
3238 free_cpumask_var(iter->started);
3240 kfree(iter->buffer_iter);
3241 seq_release_private(inode, file);
3246 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3248 struct trace_array *tr = inode->i_private;
3250 trace_array_put(tr);
3254 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3256 struct trace_array *tr = inode->i_private;
3258 trace_array_put(tr);
3260 return single_release(inode, file);
3263 static int tracing_open(struct inode *inode, struct file *file)
3265 struct trace_array *tr = inode->i_private;
3266 struct trace_iterator *iter;
3269 if (trace_array_get(tr) < 0)
3272 /* If this file was open for write, then erase contents */
3273 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3274 int cpu = tracing_get_cpu(inode);
3276 if (cpu == RING_BUFFER_ALL_CPUS)
3277 tracing_reset_online_cpus(&tr->trace_buffer);
3279 tracing_reset(&tr->trace_buffer, cpu);
3282 if (file->f_mode & FMODE_READ) {
3283 iter = __tracing_open(inode, file, false);
3285 ret = PTR_ERR(iter);
3286 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3287 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3291 trace_array_put(tr);
3297 * Some tracers are not suitable for instance buffers.
3298 * A tracer is always available for the global array (toplevel)
3299 * or if it explicitly states that it is.
3302 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3304 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3307 /* Find the next tracer that this trace array may use */
3308 static struct tracer *
3309 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3311 while (t && !trace_ok_for_array(t, tr))
3318 t_next(struct seq_file *m, void *v, loff_t *pos)
3320 struct trace_array *tr = m->private;
3321 struct tracer *t = v;
3326 t = get_tracer_for_array(tr, t->next);
3331 static void *t_start(struct seq_file *m, loff_t *pos)
3333 struct trace_array *tr = m->private;
3337 mutex_lock(&trace_types_lock);
3339 t = get_tracer_for_array(tr, trace_types);
3340 for (; t && l < *pos; t = t_next(m, t, &l))
3346 static void t_stop(struct seq_file *m, void *p)
3348 mutex_unlock(&trace_types_lock);
3351 static int t_show(struct seq_file *m, void *v)
3353 struct tracer *t = v;
3358 seq_puts(m, t->name);
3367 static const struct seq_operations show_traces_seq_ops = {
3374 static int show_traces_open(struct inode *inode, struct file *file)
3376 struct trace_array *tr = inode->i_private;
3380 if (tracing_disabled)
3383 ret = seq_open(file, &show_traces_seq_ops);
3387 m = file->private_data;
3394 tracing_write_stub(struct file *filp, const char __user *ubuf,
3395 size_t count, loff_t *ppos)
3400 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3404 if (file->f_mode & FMODE_READ)
3405 ret = seq_lseek(file, offset, whence);
3407 file->f_pos = ret = 0;
3412 static const struct file_operations tracing_fops = {
3413 .open = tracing_open,
3415 .write = tracing_write_stub,
3416 .llseek = tracing_lseek,
3417 .release = tracing_release,
3420 static const struct file_operations show_traces_fops = {
3421 .open = show_traces_open,
3423 .release = seq_release,
3424 .llseek = seq_lseek,
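/*
 * tracing_fops backs the "trace" file and show_traces_fops backs
 * "available_tracers".  Typical shell use (tracefs mount point may
 * vary):
 *
 *	cat available_tracers		# e.g. "function_graph function nop"
 *	echo function > current_tracer
 *	cat trace | head
 *	echo > trace			# O_TRUNC write clears the buffer
 */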
3428 * The tracer itself will not take this lock, but we still want
3429 * to provide a consistent cpumask to user-space:
3431 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3434 * Temporary storage for the character representation of the
3435 * CPU bitmask (and one more byte for the newline):
3437 static char mask_str[NR_CPUS + 1];
3440 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3441 size_t count, loff_t *ppos)
3443 struct trace_array *tr = file_inode(filp)->i_private;
3446 mutex_lock(&tracing_cpumask_update_lock);
3448 len = snprintf(mask_str, count, "%*pb\n",
3449 cpumask_pr_args(tr->tracing_cpumask));
3454 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3457 mutex_unlock(&tracing_cpumask_update_lock);
3463 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3464 size_t count, loff_t *ppos)
3466 struct trace_array *tr = file_inode(filp)->i_private;
3467 cpumask_var_t tracing_cpumask_new;
3470 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3473 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3477 mutex_lock(&tracing_cpumask_update_lock);
3479 local_irq_disable();
3480 arch_spin_lock(&tr->max_lock);
3481 for_each_tracing_cpu(cpu) {
3483 * Increase/decrease the disabled counter if we are
3484 * about to flip a bit in the cpumask:
3486 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3487 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3488 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3489 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3491 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3492 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3493 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3494 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3497 arch_spin_unlock(&tr->max_lock);
3500 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3502 mutex_unlock(&tracing_cpumask_update_lock);
3503 free_cpumask_var(tracing_cpumask_new);
3508 free_cpumask_var(tracing_cpumask_new);
3513 static const struct file_operations tracing_cpumask_fops = {
3514 .open = tracing_open_generic_tr,
3515 .read = tracing_cpumask_read,
3516 .write = tracing_cpumask_write,
3517 .release = tracing_release_generic_tr,
3518 .llseek = generic_file_llseek,
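/*
 * tracing_cpumask takes the usual hex cpumask format accepted by
 * cpumask_parse_user().  For example, to restrict tracing to CPUs 0-3
 * and read the mask back (output width depends on nr_cpu_ids):
 *
 *	echo f > tracing_cpumask
 *	cat tracing_cpumask		# e.g. "0f" on an 8-CPU box
 */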
3521 static int tracing_trace_options_show(struct seq_file *m, void *v)
3523 struct tracer_opt *trace_opts;
3524 struct trace_array *tr = m->private;
3528 mutex_lock(&trace_types_lock);
3529 tracer_flags = tr->current_trace->flags->val;
3530 trace_opts = tr->current_trace->flags->opts;
3532 for (i = 0; trace_options[i]; i++) {
3533 if (tr->trace_flags & (1 << i))
3534 seq_printf(m, "%s\n", trace_options[i]);
3536 seq_printf(m, "no%s\n", trace_options[i]);
3539 for (i = 0; trace_opts[i].name; i++) {
3540 if (tracer_flags & trace_opts[i].bit)
3541 seq_printf(m, "%s\n", trace_opts[i].name);
3543 seq_printf(m, "no%s\n", trace_opts[i].name);
3545 mutex_unlock(&trace_types_lock);
3550 static int __set_tracer_option(struct trace_array *tr,
3551 struct tracer_flags *tracer_flags,
3552 struct tracer_opt *opts, int neg)
3554 struct tracer *trace = tr->current_trace;
3557 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3562 tracer_flags->val &= ~opts->bit;
3564 tracer_flags->val |= opts->bit;
3568 /* Try to assign a tracer specific option */
3569 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3571 struct tracer *trace = tr->current_trace;
3572 struct tracer_flags *tracer_flags = trace->flags;
3573 struct tracer_opt *opts = NULL;
3576 for (i = 0; tracer_flags->opts[i].name; i++) {
3577 opts = &tracer_flags->opts[i];
3579 if (strcmp(cmp, opts->name) == 0)
3580 return __set_tracer_option(tr, trace->flags, opts, neg);
3586 /* Some tracers require overwrite to stay enabled */
3587 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3589 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3595 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3597 /* do nothing if flag is already set */
3598 if (!!(tr->trace_flags & mask) == !!enabled)
3601 /* Give the tracer a chance to approve the change */
3602 if (tr->current_trace->flag_changed)
3603 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3607 tr->trace_flags |= mask;
3609 tr->trace_flags &= ~mask;
3611 if (mask == TRACE_ITER_RECORD_CMD)
3612 trace_event_enable_cmd_record(enabled);
3614 if (mask == TRACE_ITER_OVERWRITE) {
3615 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3616 #ifdef CONFIG_TRACER_MAX_TRACE
3617 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3621 if (mask == TRACE_ITER_PRINTK) {
3622 trace_printk_start_stop_comm(enabled);
3623 trace_printk_control(enabled);
3629 static int trace_set_options(struct trace_array *tr, char *option)
3635 size_t orig_len = strlen(option);
3637 cmp = strstrip(option);
3639 if (strncmp(cmp, "no", 2) == 0) {
3644 mutex_lock(&trace_types_lock);
3646 for (i = 0; trace_options[i]; i++) {
3647 if (strcmp(cmp, trace_options[i]) == 0) {
3648 ret = set_tracer_flag(tr, 1 << i, !neg);
3653 /* If no option could be set, test the specific tracer options */
3654 if (!trace_options[i])
3655 ret = set_tracer_option(tr, cmp, neg);
3657 mutex_unlock(&trace_types_lock);
3660 * If the first trailing whitespace is replaced with '\0' by strstrip,
3661 * turn it back into a space.
3663 if (orig_len > strlen(option))
3664 option[strlen(option)] = ' ';
3669 static void __init apply_trace_boot_options(void)
3671 char *buf = trace_boot_options_buf;
3675 option = strsep(&buf, ",");
3681 trace_set_options(&global_trace, option);
3683 /* Put back the comma to allow this to be called again */
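/*
 * This consumes the buffer filled from the "trace_options=" kernel
 * command line parameter; each comma-separated token is handled
 * exactly as if it had been written to the trace_options file.  An
 * illustrative command line:
 *
 *	trace_options=sym-offset,noprint-parent
 */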
3690 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3691 size_t cnt, loff_t *ppos)
3693 struct seq_file *m = filp->private_data;
3694 struct trace_array *tr = m->private;
3698 if (cnt >= sizeof(buf))
3701 if (copy_from_user(&buf, ubuf, cnt))
3706 ret = trace_set_options(tr, buf);
3715 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3717 struct trace_array *tr = inode->i_private;
3720 if (tracing_disabled)
3723 if (trace_array_get(tr) < 0)
3726 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3728 trace_array_put(tr);
3733 static const struct file_operations tracing_iter_fops = {
3734 .open = tracing_trace_options_open,
3736 .llseek = seq_lseek,
3737 .release = tracing_single_release_tr,
3738 .write = tracing_trace_options_write,
3741 static const char readme_msg[] =
3742 "tracing mini-HOWTO:\n\n"
3743 "# echo 0 > tracing_on : quick way to disable tracing\n"
3744 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3745 " Important files:\n"
3746 " trace\t\t\t- The static contents of the buffer\n"
3747 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3748 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3749 " current_tracer\t- function and latency tracers\n"
3750 " available_tracers\t- list of configured tracers for current_tracer\n"
3751 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3752 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3753 " trace_clock\t\t-change the clock used to order events\n"
3754 " local: Per cpu clock but may not be synced across CPUs\n"
3755 " global: Synced across CPUs but slows tracing down.\n"
3756 " counter: Not a clock, but just an increment\n"
3757 " uptime: Jiffy counter from time of boot\n"
3758 " perf: Same clock that perf events use\n"
3759 #ifdef CONFIG_X86_64
3760 " x86-tsc: TSC cycle counter\n"
3762 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3763 " tracing_cpumask\t- Limit which CPUs to trace\n"
3764 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3765 "\t\t\t Remove sub-buffer with rmdir\n"
3766 " trace_options\t\t- Set format or modify how tracing happens\n"
3767 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3768 "\t\t\t option name\n"
3769 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3770 #ifdef CONFIG_DYNAMIC_FTRACE
3771 "\n available_filter_functions - list of functions that can be filtered on\n"
3772 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3773 "\t\t\t functions\n"
3774 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3775 "\t modules: Can select a group via module\n"
3776 "\t Format: :mod:<module-name>\n"
3777 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3778 "\t triggers: a command to perform when function is hit\n"
3779 "\t Format: <function>:<trigger>[:count]\n"
3780 "\t trigger: traceon, traceoff\n"
3781 "\t\t enable_event:<system>:<event>\n"
3782 "\t\t disable_event:<system>:<event>\n"
3783 #ifdef CONFIG_STACKTRACE
3786 #ifdef CONFIG_TRACER_SNAPSHOT
3791 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3792 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3793 "\t The first one will disable tracing every time do_fault is hit\n"
3794 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3795 "\t The first time do trap is hit and it disables tracing, the\n"
3796 "\t counter will decrement to 2. If tracing is already disabled,\n"
3797 "\t the counter will not decrement. It only decrements when the\n"
3798 "\t trigger did work\n"
3799 "\t To remove trigger without count:\n"
3800 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3801 "\t To remove trigger with a count:\n"
3802 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3803 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3804 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3805 "\t modules: Can select a group via module command :mod:\n"
3806 "\t Does not accept triggers\n"
3807 #endif /* CONFIG_DYNAMIC_FTRACE */
3808 #ifdef CONFIG_FUNCTION_TRACER
3809 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3812 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3813 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3814 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3815 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3817 #ifdef CONFIG_TRACER_SNAPSHOT
3818 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3819 "\t\t\t snapshot buffer. Read the contents for more\n"
3820 "\t\t\t information\n"
3822 #ifdef CONFIG_STACK_TRACER
3823 " stack_trace\t\t- Shows the max stack trace when active\n"
3824 " stack_max_size\t- Shows current max stack size that was traced\n"
3825 "\t\t\t Write into this file to reset the max size (trigger a\n"
3826 "\t\t\t new trace)\n"
3827 #ifdef CONFIG_DYNAMIC_FTRACE
3828 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3831 #endif /* CONFIG_STACK_TRACER */
3832 " events/\t\t- Directory containing all trace event subsystems:\n"
3833 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3834 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3835 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3837 " filter\t\t- If set, only events passing filter are traced\n"
3838 " events/<system>/<event>/\t- Directory containing control files for\n"
3840 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3841 " filter\t\t- If set, only events passing filter are traced\n"
3842 " trigger\t\t- If set, a command to perform when event is hit\n"
3843 "\t Format: <trigger>[:count][if <filter>]\n"
3844 "\t trigger: traceon, traceoff\n"
3845 "\t enable_event:<system>:<event>\n"
3846 "\t disable_event:<system>:<event>\n"
3847 #ifdef CONFIG_STACKTRACE
3850 #ifdef CONFIG_TRACER_SNAPSHOT
3853 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3854 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3855 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3856 "\t events/block/block_unplug/trigger\n"
3857 "\t The first disables tracing every time block_unplug is hit.\n"
3858 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3859 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3860 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3861 "\t Like function triggers, the counter is only decremented if it\n"
3862 "\t enabled or disabled tracing.\n"
3863 "\t To remove a trigger without a count:\n"
3864 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3865 "\t To remove a trigger with a count:\n"
3866 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3867 "\t Filters can be ignored when removing a trigger.\n"
3871 tracing_readme_read(struct file *filp, char __user *ubuf,
3872 size_t cnt, loff_t *ppos)
3874 return simple_read_from_buffer(ubuf, cnt, ppos,
3875 readme_msg, strlen(readme_msg));
3878 static const struct file_operations tracing_readme_fops = {
3879 .open = tracing_open_generic,
3880 .read = tracing_readme_read,
3881 .llseek = generic_file_llseek,
3884 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3886 unsigned int *ptr = v;
3888 if (*pos || m->count)
3893 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3895 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3904 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3910 arch_spin_lock(&trace_cmdline_lock);
3912 v = &savedcmd->map_cmdline_to_pid[0];
3914 v = saved_cmdlines_next(m, v, &l);
3922 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3924 arch_spin_unlock(&trace_cmdline_lock);
3928 static int saved_cmdlines_show(struct seq_file *m, void *v)
3930 char buf[TASK_COMM_LEN];
3931 unsigned int *pid = v;
3933 __trace_find_cmdline(*pid, buf);
3934 seq_printf(m, "%d %s\n", *pid, buf);
3938 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3939 .start = saved_cmdlines_start,
3940 .next = saved_cmdlines_next,
3941 .stop = saved_cmdlines_stop,
3942 .show = saved_cmdlines_show,
3945 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3947 if (tracing_disabled)
3950 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3953 static const struct file_operations tracing_saved_cmdlines_fops = {
3954 .open = tracing_saved_cmdlines_open,
3956 .llseek = seq_lseek,
3957 .release = seq_release,
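/*
 * saved_cmdlines maps recorded pids back to their comms.  Example
 * session (output illustrative):
 *
 *	cat saved_cmdlines		# lines of "<pid> <comm>"
 *	echo 1024 > saved_cmdlines_size	# keep up to 1024 entries
 *	cat saved_cmdlines_size		# "1024"
 */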
3961 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3962 size_t cnt, loff_t *ppos)
3967 arch_spin_lock(&trace_cmdline_lock);
3968 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3969 arch_spin_unlock(&trace_cmdline_lock);
3971 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3974 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3976 kfree(s->saved_cmdlines);
3977 kfree(s->map_cmdline_to_pid);
3981 static int tracing_resize_saved_cmdlines(unsigned int val)
3983 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3985 s = kmalloc(sizeof(*s), GFP_KERNEL);
3989 if (allocate_cmdlines_buffer(val, s) < 0) {
3994 arch_spin_lock(&trace_cmdline_lock);
3995 savedcmd_temp = savedcmd;
3997 arch_spin_unlock(&trace_cmdline_lock);
3998 free_saved_cmdlines_buffer(savedcmd_temp);
4004 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4005 size_t cnt, loff_t *ppos)
4010 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4014 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
4015 if (!val || val > PID_MAX_DEFAULT)
4018 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4027 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4028 .open = tracing_open_generic,
4029 .read = tracing_saved_cmdlines_size_read,
4030 .write = tracing_saved_cmdlines_size_write,
4033 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4034 static union trace_enum_map_item *
4035 update_enum_map(union trace_enum_map_item *ptr)
4037 if (!ptr->map.enum_string) {
4038 if (ptr->tail.next) {
4039 ptr = ptr->tail.next;
4040 /* Set ptr to the next real item (skip head) */
4048 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4050 union trace_enum_map_item *ptr = v;
4053 * Paranoid! If ptr points to end, we don't want to increment past it.
4054 * This really should never happen.
4056 ptr = update_enum_map(ptr);
4057 if (WARN_ON_ONCE(!ptr))
4064 ptr = update_enum_map(ptr);
4069 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4071 union trace_enum_map_item *v;
4074 mutex_lock(&trace_enum_mutex);
4076 v = trace_enum_maps;
4080 while (v && l < *pos) {
4081 v = enum_map_next(m, v, &l);
4087 static void enum_map_stop(struct seq_file *m, void *v)
4089 mutex_unlock(&trace_enum_mutex);
4092 static int enum_map_show(struct seq_file *m, void *v)
4094 union trace_enum_map_item *ptr = v;
4096 seq_printf(m, "%s %ld (%s)\n",
4097 ptr->map.enum_string, ptr->map.enum_value,
4103 static const struct seq_operations tracing_enum_map_seq_ops = {
4104 .start = enum_map_start,
4105 .next = enum_map_next,
4106 .stop = enum_map_stop,
4107 .show = enum_map_show,
4110 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4112 if (tracing_disabled)
4115 return seq_open(filp, &tracing_enum_map_seq_ops);
4118 static const struct file_operations tracing_enum_map_fops = {
4119 .open = tracing_enum_map_open,
4121 .llseek = seq_lseek,
4122 .release = seq_release,
4125 static inline union trace_enum_map_item *
4126 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4128 /* Return tail of array given the head */
4129 return ptr + ptr->head.length + 1;
4133 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4136 struct trace_enum_map **stop;
4137 struct trace_enum_map **map;
4138 union trace_enum_map_item *map_array;
4139 union trace_enum_map_item *ptr;
4144 * The trace_enum_maps contains the map plus a head and tail item,
4145 * where the head holds the module and the length of the array, and
4146 * the tail holds a pointer to the next list.
4148 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4150 pr_warning("Unable to allocate trace enum mapping\n");
4154 mutex_lock(&trace_enum_mutex);
4156 if (!trace_enum_maps)
4157 trace_enum_maps = map_array;
4159 ptr = trace_enum_maps;
4161 ptr = trace_enum_jmp_to_tail(ptr);
4162 if (!ptr->tail.next)
4164 ptr = ptr->tail.next;
4167 ptr->tail.next = map_array;
4169 map_array->head.mod = mod;
4170 map_array->head.length = len;
4173 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4174 map_array->map = **map;
4177 memset(map_array, 0, sizeof(*map_array));
4179 mutex_unlock(&trace_enum_mutex);
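/*
 * Layout of one chunk appended above (a sketch; each cell is a
 * union trace_enum_map_item):
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * enum_map_start()/enum_map_next() walk the map cells and hop from
 * chunk to chunk through tail.next, skipping the head cells.
 */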
4182 static void trace_create_enum_file(struct dentry *d_tracer)
4184 trace_create_file("enum_map", 0444, d_tracer,
4185 NULL, &tracing_enum_map_fops);
4188 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4189 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4190 static inline void trace_insert_enum_map_file(struct module *mod,
4191 struct trace_enum_map **start, int len) { }
4192 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4194 static void trace_insert_enum_map(struct module *mod,
4195 struct trace_enum_map **start, int len)
4197 struct trace_enum_map **map;
4204 trace_event_enum_update(map, len);
4206 trace_insert_enum_map_file(mod, start, len);
4210 tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4211 size_t cnt, loff_t *ppos)
4219 file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
4225 for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
4229 pid = savedcmd->map_cmdline_to_pid[i];
4230 if (pid == -1 || pid == NO_CMDLINE_MAP)
4233 tgid = trace_find_tgid(pid);
4234 r = sprintf(buf, "%d %d\n", pid, tgid);
4239 len = simple_read_from_buffer(ubuf, cnt, ppos,
4247 static const struct file_operations tracing_saved_tgids_fops = {
4248 .open = tracing_open_generic,
4249 .read = tracing_saved_tgids_read,
4250 .llseek = generic_file_llseek,
4254 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4255 size_t cnt, loff_t *ppos)
4257 struct trace_array *tr = filp->private_data;
4258 char buf[MAX_TRACER_SIZE+2];
4261 mutex_lock(&trace_types_lock);
4262 r = sprintf(buf, "%s\n", tr->current_trace->name);
4263 mutex_unlock(&trace_types_lock);
4265 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4268 int tracer_init(struct tracer *t, struct trace_array *tr)
4270 tracing_reset_online_cpus(&tr->trace_buffer);
4274 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4278 for_each_tracing_cpu(cpu)
4279 per_cpu_ptr(buf->data, cpu)->entries = val;
4282 #ifdef CONFIG_TRACER_MAX_TRACE
4283 /* resize @trace_buf's entries to the size of @size_buf's per-cpu entries */
4284 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4285 struct trace_buffer *size_buf, int cpu_id)
4289 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4290 for_each_tracing_cpu(cpu) {
4291 ret = ring_buffer_resize(trace_buf->buffer,
4292 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4295 per_cpu_ptr(trace_buf->data, cpu)->entries =
4296 per_cpu_ptr(size_buf->data, cpu)->entries;
4299 ret = ring_buffer_resize(trace_buf->buffer,
4300 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4302 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4303 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4308 #endif /* CONFIG_TRACER_MAX_TRACE */
4310 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4311 unsigned long size, int cpu)
4316 * If the kernel or the user changes the size of the ring buffer,
4317 * we use the size that was given, and we can forget about
4318 * expanding it later.
4320 ring_buffer_expanded = true;
4322 /* May be called before buffers are initialized */
4323 if (!tr->trace_buffer.buffer)
4326 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4330 #ifdef CONFIG_TRACER_MAX_TRACE
4331 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4332 !tr->current_trace->use_max_tr)
4335 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4337 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4338 &tr->trace_buffer, cpu);
4341 * AARGH! We are left with different
4342 * sized max buffers!!!!
4343 * The max buffer is our "snapshot" buffer.
4344 * When a tracer needs a snapshot (one of the
4345 * latency tracers), it swaps the max buffer
4346 * with the saved snapshot. We succeeded in
4347 * updating the size of the main buffer, but failed to
4348 * update the size of the max buffer. And when we tried
4349 * to reset the main buffer to the original size, we
4350 * failed there too. This is very unlikely to
4351 * happen, but if it does, warn and kill all
4355 tracing_disabled = 1;
4360 if (cpu == RING_BUFFER_ALL_CPUS)
4361 set_buffer_entries(&tr->max_buffer, size);
4363 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4366 #endif /* CONFIG_TRACER_MAX_TRACE */
4368 if (cpu == RING_BUFFER_ALL_CPUS)
4369 set_buffer_entries(&tr->trace_buffer, size);
4371 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4376 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4377 unsigned long size, int cpu_id)
4381 mutex_lock(&trace_types_lock);
4383 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4384 /* make sure this cpu is enabled in the mask */
4385 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4391 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4396 mutex_unlock(&trace_types_lock);
4403 * tracing_update_buffers - used by tracing facility to expand ring buffers
4405 * To save memory when tracing is never used on a system that has it
4406 * configured in, the ring buffers are set to a minimum size. Once a
4407 * user starts to use the tracing facility, the buffers need to grow
4408 * to their default size.
4410 * This function is to be called when a tracer is about to be used.
4412 int tracing_update_buffers(void)
4416 mutex_lock(&trace_types_lock);
4417 if (!ring_buffer_expanded)
4418 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4419 RING_BUFFER_ALL_CPUS);
4420 mutex_unlock(&trace_types_lock);
4425 struct trace_option_dentry;
4428 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4431 * Used to clear out the tracer before deletion of an instance.
4432 * Must have trace_types_lock held.
4434 static void tracing_set_nop(struct trace_array *tr)
4436 if (tr->current_trace == &nop_trace)
4439 tr->current_trace->enabled--;
4441 if (tr->current_trace->reset)
4442 tr->current_trace->reset(tr);
4444 tr->current_trace = &nop_trace;
4447 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4449 /* Only enable if the directory has been created already. */
4453 create_trace_option_files(tr, t);
4456 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4459 #ifdef CONFIG_TRACER_MAX_TRACE
4464 mutex_lock(&trace_types_lock);
4466 if (!ring_buffer_expanded) {
4467 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4468 RING_BUFFER_ALL_CPUS);
4474 for (t = trace_types; t; t = t->next) {
4475 if (strcmp(t->name, buf) == 0)
4482 if (t == tr->current_trace)
4485 /* Some tracers are only allowed for the top level buffer */
4486 if (!trace_ok_for_array(t, tr)) {
4491 /* If trace pipe files are being read, we can't change the tracer */
4492 if (tr->current_trace->ref) {
4497 trace_branch_disable();
4499 tr->current_trace->enabled--;
4501 if (tr->current_trace->reset)
4502 tr->current_trace->reset(tr);
4504 /* Current trace needs to be nop_trace before synchronize_sched */
4505 tr->current_trace = &nop_trace;
4507 #ifdef CONFIG_TRACER_MAX_TRACE
4508 had_max_tr = tr->allocated_snapshot;
4510 if (had_max_tr && !t->use_max_tr) {
4512 * We need to make sure that the update_max_tr sees that
4513 * current_trace changed to nop_trace to keep it from
4514 * swapping the buffers after we resize it.
4515 * update_max_tr() is called with interrupts disabled,
4516 * so a synchronize_sched() is sufficient.
4518 synchronize_sched();
4523 #ifdef CONFIG_TRACER_MAX_TRACE
4524 if (t->use_max_tr && !had_max_tr) {
4525 ret = alloc_snapshot(tr);
4532 ret = tracer_init(t, tr);
4537 tr->current_trace = t;
4538 tr->current_trace->enabled++;
4539 trace_branch_enable(tr);
4541 mutex_unlock(&trace_types_lock);
4547 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4548 size_t cnt, loff_t *ppos)
4550 struct trace_array *tr = filp->private_data;
4551 char buf[MAX_TRACER_SIZE+1];
4558 if (cnt > MAX_TRACER_SIZE)
4559 cnt = MAX_TRACER_SIZE;
4561 if (copy_from_user(&buf, ubuf, cnt))
4566 /* strip ending whitespace. */
4567 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4570 err = tracing_set_tracer(tr, buf);
4580 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4581 size_t cnt, loff_t *ppos)
4586 r = snprintf(buf, sizeof(buf), "%ld\n",
4587 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4588 if (r > sizeof(buf))
4590 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4594 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4595 size_t cnt, loff_t *ppos)
4600 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4610 tracing_thresh_read(struct file *filp, char __user *ubuf,
4611 size_t cnt, loff_t *ppos)
4613 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4617 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4618 size_t cnt, loff_t *ppos)
4620 struct trace_array *tr = filp->private_data;
4623 mutex_lock(&trace_types_lock);
4624 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4628 if (tr->current_trace->update_thresh) {
4629 ret = tr->current_trace->update_thresh(tr);
4636 mutex_unlock(&trace_types_lock);
4641 #ifdef CONFIG_TRACER_MAX_TRACE
4644 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4645 size_t cnt, loff_t *ppos)
4647 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4651 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4652 size_t cnt, loff_t *ppos)
4654 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4659 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4661 struct trace_array *tr = inode->i_private;
4662 struct trace_iterator *iter;
4665 if (tracing_disabled)
4668 if (trace_array_get(tr) < 0)
4671 mutex_lock(&trace_types_lock);
4673 /* create a buffer to store the information to pass to userspace */
4674 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4677 __trace_array_put(tr);
4681 trace_seq_init(&iter->seq);
4682 iter->trace = tr->current_trace;
4684 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4689 /* trace pipe does not show start of buffer */
4690 cpumask_setall(iter->started);
4692 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4693 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4695 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4696 if (trace_clocks[tr->clock_id].in_ns)
4697 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4700 iter->trace_buffer = &tr->trace_buffer;
4701 iter->cpu_file = tracing_get_cpu(inode);
4702 mutex_init(&iter->mutex);
4703 filp->private_data = iter;
4705 if (iter->trace->pipe_open)
4706 iter->trace->pipe_open(iter);
4708 nonseekable_open(inode, filp);
4710 tr->current_trace->ref++;
4712 mutex_unlock(&trace_types_lock);
4718 __trace_array_put(tr);
4719 mutex_unlock(&trace_types_lock);
4723 static int tracing_release_pipe(struct inode *inode, struct file *file)
4725 struct trace_iterator *iter = file->private_data;
4726 struct trace_array *tr = inode->i_private;
4728 mutex_lock(&trace_types_lock);
4730 tr->current_trace->ref--;
4732 if (iter->trace->pipe_close)
4733 iter->trace->pipe_close(iter);
4735 mutex_unlock(&trace_types_lock);
4737 free_cpumask_var(iter->started);
4738 mutex_destroy(&iter->mutex);
4741 trace_array_put(tr);
4747 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4749 struct trace_array *tr = iter->tr;
4751 /* Iterators are static, they should be filled or empty */
4752 if (trace_buffer_iter(iter, iter->cpu_file))
4753 return POLLIN | POLLRDNORM;
4755 if (tr->trace_flags & TRACE_ITER_BLOCK)
4757 * Always select as readable when in blocking mode
4759 return POLLIN | POLLRDNORM;
4761 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4766 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4768 struct trace_iterator *iter = filp->private_data;
4770 return trace_poll(iter, filp, poll_table);
4773 /* Must be called with iter->mutex held. */
4774 static int tracing_wait_pipe(struct file *filp)
4776 struct trace_iterator *iter = filp->private_data;
4779 while (trace_empty(iter)) {
4781 if ((filp->f_flags & O_NONBLOCK)) {
4786 * We only return EOF once we have read something and tracing
4787 * has been disabled. If tracing is disabled but we have never
4788 * read anything, we keep blocking. This allows a user to cat
4789 * this file, and then enable tracing. But after we have read
4790 * something, we give an EOF when tracing is disabled again.
4792 * iter->pos will be 0 if we haven't read anything.
4794 if (!tracing_is_on() && iter->pos)
4797 mutex_unlock(&iter->mutex);
4799 ret = wait_on_pipe(iter, false);
4801 mutex_lock(&iter->mutex);
4814 tracing_read_pipe(struct file *filp, char __user *ubuf,
4815 size_t cnt, loff_t *ppos)
4817 struct trace_iterator *iter = filp->private_data;
4821 * Avoid more than one consumer on a single file descriptor
4822 * This is just a matter of trace coherency; the ring buffer itself
4825 mutex_lock(&iter->mutex);
4827 /* return any leftover data */
4828 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4832 trace_seq_init(&iter->seq);
4834 if (iter->trace->read) {
4835 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4841 sret = tracing_wait_pipe(filp);
4845 /* stop when tracing is finished */
4846 if (trace_empty(iter)) {
4851 if (cnt >= PAGE_SIZE)
4852 cnt = PAGE_SIZE - 1;
4854 /* reset all but tr, trace, and overruns */
4855 memset(&iter->seq, 0,
4856 sizeof(struct trace_iterator) -
4857 offsetof(struct trace_iterator, seq));
4858 cpumask_clear(iter->started);
4861 trace_event_read_lock();
4862 trace_access_lock(iter->cpu_file);
4863 while (trace_find_next_entry_inc(iter) != NULL) {
4864 enum print_line_t ret;
4865 int save_len = iter->seq.seq.len;
4867 ret = print_trace_line(iter);
4868 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4869 /* don't print partial lines */
4870 iter->seq.seq.len = save_len;
4873 if (ret != TRACE_TYPE_NO_CONSUME)
4874 trace_consume(iter);
4876 if (trace_seq_used(&iter->seq) >= cnt)
4880 * Setting the full flag means we reached the trace_seq buffer
4881 * size and should have left via the partial-output condition above.
4882 * One of the trace_seq_* functions is not being used properly.
4884 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4887 trace_access_unlock(iter->cpu_file);
4888 trace_event_read_unlock();
4890 /* Now copy what we have to the user */
4891 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4892 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4893 trace_seq_init(&iter->seq);
4896 * If there was nothing to send to the user, in spite of consuming
4897 * trace entries, go back to wait for more entries.
4903 mutex_unlock(&iter->mutex);
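/*
 * A minimal user-space consumer matching the semantics above (a
 * sketch; error handling trimmed, path assumes tracefs mounted at
 * /sys/kernel/tracing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 *
 * read() blocks while the buffer is empty and returns 0 (EOF) only
 * once something has been read and tracing has been disabled.
 */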
4908 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4911 __free_page(spd->pages[idx]);
4914 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4916 .confirm = generic_pipe_buf_confirm,
4917 .release = generic_pipe_buf_release,
4918 .steal = generic_pipe_buf_steal,
4919 .get = generic_pipe_buf_get,
4923 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4929 /* Seq buffer is page-sized, exactly what we need. */
4931 save_len = iter->seq.seq.len;
4932 ret = print_trace_line(iter);
4934 if (trace_seq_has_overflowed(&iter->seq)) {
4935 iter->seq.seq.len = save_len;
4940 * This should not be hit, because it should only
4941 * be set if the iter->seq overflowed. But check it
4942 * anyway to be safe.
4944 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4945 iter->seq.seq.len = save_len;
4949 count = trace_seq_used(&iter->seq) - save_len;
4952 iter->seq.seq.len = save_len;
4956 if (ret != TRACE_TYPE_NO_CONSUME)
4957 trace_consume(iter);
4959 if (!trace_find_next_entry_inc(iter)) {
4969 static ssize_t tracing_splice_read_pipe(struct file *filp,
4971 struct pipe_inode_info *pipe,
4975 struct page *pages_def[PIPE_DEF_BUFFERS];
4976 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4977 struct trace_iterator *iter = filp->private_data;
4978 struct splice_pipe_desc spd = {
4980 .partial = partial_def,
4981 .nr_pages = 0, /* This gets updated below. */
4982 .nr_pages_max = PIPE_DEF_BUFFERS,
4984 .ops = &tracing_pipe_buf_ops,
4985 .spd_release = tracing_spd_release_pipe,
4991 if (splice_grow_spd(pipe, &spd))
4994 mutex_lock(&iter->mutex);
4996 if (iter->trace->splice_read) {
4997 ret = iter->trace->splice_read(iter, filp,
4998 ppos, pipe, len, flags);
5003 ret = tracing_wait_pipe(filp);
5007 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5012 trace_event_read_lock();
5013 trace_access_lock(iter->cpu_file);
5015 /* Fill as many pages as possible. */
5016 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5017 spd.pages[i] = alloc_page(GFP_KERNEL);
5021 rem = tracing_fill_pipe_page(rem, iter);
5023 /* Copy the data into the page, so we can start over. */
5024 ret = trace_seq_to_buffer(&iter->seq,
5025 page_address(spd.pages[i]),
5026 trace_seq_used(&iter->seq));
5028 __free_page(spd.pages[i]);
5031 spd.partial[i].offset = 0;
5032 spd.partial[i].len = trace_seq_used(&iter->seq);
5034 trace_seq_init(&iter->seq);
5037 trace_access_unlock(iter->cpu_file);
5038 trace_event_read_unlock();
5039 mutex_unlock(&iter->mutex);
5044 ret = splice_to_pipe(pipe, &spd);
5048 splice_shrink_spd(&spd);
5052 mutex_unlock(&iter->mutex);
5057 tracing_entries_read(struct file *filp, char __user *ubuf,
5058 size_t cnt, loff_t *ppos)
5060 struct inode *inode = file_inode(filp);
5061 struct trace_array *tr = inode->i_private;
5062 int cpu = tracing_get_cpu(inode);
5067 mutex_lock(&trace_types_lock);
5069 if (cpu == RING_BUFFER_ALL_CPUS) {
5070 int cpu, buf_size_same;
5075 /* check if all cpu sizes are the same */
5076 for_each_tracing_cpu(cpu) {
5077 /* fill in the size from first enabled cpu */
5079 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5080 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5086 if (buf_size_same) {
5087 if (!ring_buffer_expanded)
5088 r = sprintf(buf, "%lu (expanded: %lu)\n",
5090 trace_buf_size >> 10);
5092 r = sprintf(buf, "%lu\n", size >> 10);
5094 r = sprintf(buf, "X\n");
5096 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5098 mutex_unlock(&trace_types_lock);
5100 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5105 tracing_entries_write(struct file *filp, const char __user *ubuf,
5106 size_t cnt, loff_t *ppos)
5108 struct inode *inode = file_inode(filp);
5109 struct trace_array *tr = inode->i_private;
5113 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5117 /* must have at least 1 entry */
5121 /* value is in KB */
5123 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
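/*
 * buffer_size_kb usage sketch (illustrative; the values shown are
 * examples, not fixed output):
 *
 *   # cat buffer_size_kb           <- per-cpu size in KB, "X" if the CPUs
 *                                     disagree, e.g. "7 (expanded: 1408)"
 *                                     while the buffer is at its boot size
 *   # echo 4096 > buffer_size_kb   <- resize every per-cpu buffer to 4 MB
 *
 * The per_cpu/cpuN/buffer_size_kb variant resizes only that CPU's buffer.
 */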
5133 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5134 size_t cnt, loff_t *ppos)
5136 struct trace_array *tr = filp->private_data;
5139 unsigned long size = 0, expanded_size = 0;
5141 mutex_lock(&trace_types_lock);
5142 for_each_tracing_cpu(cpu) {
5143 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5144 if (!ring_buffer_expanded)
5145 expanded_size += trace_buf_size >> 10;
5147 if (ring_buffer_expanded)
5148 r = sprintf(buf, "%lu\n", size);
5150 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5151 mutex_unlock(&trace_types_lock);
5153 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5157 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5158 size_t cnt, loff_t *ppos)
5161 * There is no need to read what the user has written, this function
5162 * is just to make sure that there is no error when "echo" is used
5171 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5173 struct trace_array *tr = inode->i_private;
5175 /* disable tracing ? */
5176 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5177 tracer_tracing_off(tr);
5178 /* resize the ring buffer to 0 */
5179 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5181 trace_array_put(tr);
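/*
 * free_buffer usage sketch: the written bytes are ignored; the resize
 * to zero happens on release, i.e. when the writer closes the file:
 *
 *   # echo > free_buffer
 *
 * If the option behind TRACE_ITER_STOP_ON_FREE is set, tracing is
 * turned off first.
 */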
5187 tracing_mark_write(struct file *filp, const char __user *ubuf,
5188 size_t cnt, loff_t *fpos)
5190 unsigned long addr = (unsigned long)ubuf;
5191 struct trace_array *tr = filp->private_data;
5192 struct ring_buffer_event *event;
5193 struct ring_buffer *buffer;
5194 struct print_entry *entry;
5195 unsigned long irq_flags;
5196 struct page *pages[2];
5206 if (tracing_disabled)
5209 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5212 if (cnt > TRACE_BUF_SIZE)
5213 cnt = TRACE_BUF_SIZE;
5216 * Userspace is injecting traces into the kernel trace buffer.
5217 * We want to be as non-intrusive as possible.
5218 * To do so, we do not want to allocate any special buffers
5219 * or take any locks, but instead write the userspace data
5220 * straight into the ring buffer.
5222 * First we need to pin the userspace buffer into memory,
5223 * which it most likely already is, because userspace just referenced it.
5224 * But there's no guarantee that it is. By using get_user_pages_fast()
5225 * and kmap_atomic/kunmap_atomic() we can get access to the
5226 * pages directly. We then write the data directly into the
5229 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5231 /* check if we cross pages */
5232 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5235 offset = addr & (PAGE_SIZE - 1);
5238 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5239 if (ret < nr_pages) {
5241 put_page(pages[ret]);
5246 for (i = 0; i < nr_pages; i++)
5247 map_page[i] = kmap_atomic(pages[i]);
5249 local_save_flags(irq_flags);
5250 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5251 buffer = tr->trace_buffer.buffer;
5252 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5253 irq_flags, preempt_count());
5255 /* Ring buffer disabled, return as if not open for write */
5260 entry = ring_buffer_event_data(event);
5261 entry->ip = _THIS_IP_;
5263 if (nr_pages == 2) {
5264 len = PAGE_SIZE - offset;
5265 memcpy(&entry->buf, map_page[0] + offset, len);
5266 memcpy(&entry->buf[len], map_page[1], cnt - len);
5268 memcpy(&entry->buf, map_page[0] + offset, cnt);
5270 if (entry->buf[cnt - 1] != '\n') {
5271 entry->buf[cnt] = '\n';
5272 entry->buf[cnt + 1] = '\0';
5274 entry->buf[cnt] = '\0';
5276 __buffer_unlock_commit(buffer, event);
5283 for (i = nr_pages - 1; i >= 0; i--) {
5284 kunmap_atomic(map_page[i]);
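/*
 * Minimal userspace sketch of writing a marker (a hypothetical example,
 * not part of this file): a single write() below TRACE_BUF_SIZE lands
 * in the ring buffer through the pinned pages above:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0)
 *		write(fd, "hello from userspace\n", 21);
 */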
5291 static int tracing_clock_show(struct seq_file *m, void *v)
5293 struct trace_array *tr = m->private;
5296 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5298 "%s%s%s%s", i ? " " : "",
5299 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5300 i == tr->clock_id ? "]" : "");
5306 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5310 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5311 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5314 if (i == ARRAY_SIZE(trace_clocks))
5317 mutex_lock(&trace_types_lock);
5321 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5324 * New clock may not be consistent with the previous clock.
5325 * Reset the buffer so that it doesn't have incomparable timestamps.
5327 tracing_reset_online_cpus(&tr->trace_buffer);
5329 #ifdef CONFIG_TRACER_MAX_TRACE
5330 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5331 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5332 tracing_reset_online_cpus(&tr->max_buffer);
5335 mutex_unlock(&trace_types_lock);
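/*
 * trace_clock usage sketch (the available names come from the
 * trace_clocks[] table; the listing below is an example):
 *
 *   # cat trace_clock
 *   [local] global counter ...
 *   # echo global > trace_clock   <- switches clocks and, as noted
 *                                    above, resets the buffers
 */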
5340 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5341 size_t cnt, loff_t *fpos)
5343 struct seq_file *m = filp->private_data;
5344 struct trace_array *tr = m->private;
5346 const char *clockstr;
5349 if (cnt >= sizeof(buf))
5352 if (copy_from_user(&buf, ubuf, cnt))
5357 clockstr = strstrip(buf);
5359 ret = tracing_set_clock(tr, clockstr);
5368 static int tracing_clock_open(struct inode *inode, struct file *file)
5370 struct trace_array *tr = inode->i_private;
5373 if (tracing_disabled)
5376 if (trace_array_get(tr))
5379 ret = single_open(file, tracing_clock_show, inode->i_private);
5381 trace_array_put(tr);
5386 struct ftrace_buffer_info {
5387 struct trace_iterator iter;
5392 #ifdef CONFIG_TRACER_SNAPSHOT
5393 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5395 struct trace_array *tr = inode->i_private;
5396 struct trace_iterator *iter;
5400 if (trace_array_get(tr) < 0)
5403 if (file->f_mode & FMODE_READ) {
5404 iter = __tracing_open(inode, file, true);
5406 ret = PTR_ERR(iter);
5408 /* Writes still need the seq_file to hold the private data */
5410 m = kzalloc(sizeof(*m), GFP_KERNEL);
5413 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5421 iter->trace_buffer = &tr->max_buffer;
5422 iter->cpu_file = tracing_get_cpu(inode);
5424 file->private_data = m;
5428 trace_array_put(tr);
5434 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5437 struct seq_file *m = filp->private_data;
5438 struct trace_iterator *iter = m->private;
5439 struct trace_array *tr = iter->tr;
5443 ret = tracing_update_buffers();
5447 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5451 mutex_lock(&trace_types_lock);
5453 if (tr->current_trace->use_max_tr) {
5460 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5464 if (tr->allocated_snapshot)
5468 /* Only allow per-cpu swap if the ring buffer supports it */
5469 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5470 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5475 if (!tr->allocated_snapshot) {
5476 ret = alloc_snapshot(tr);
5480 local_irq_disable();
5481 /* Now, we're going to swap */
5482 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5483 update_max_tr(tr, current, smp_processor_id());
5485 update_max_tr_single(tr, current, iter->cpu_file);
5489 if (tr->allocated_snapshot) {
5490 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5491 tracing_reset_online_cpus(&tr->max_buffer);
5493 tracing_reset(&tr->max_buffer, iter->cpu_file);
5503 mutex_unlock(&trace_types_lock);
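/*
 * Snapshot file usage sketch, matching the cases above:
 *
 *   # echo 0 > snapshot   <- free the snapshot buffer
 *   # echo 1 > snapshot   <- allocate (if needed) and take a snapshot
 *   # echo 2 > snapshot   <- clear the snapshot contents without freeing
 */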
5507 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5509 struct seq_file *m = file->private_data;
5512 ret = tracing_release(inode, file);
5514 if (file->f_mode & FMODE_READ)
5517 /* If write only, the seq_file is just a stub */
5525 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5526 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5527 size_t count, loff_t *ppos);
5528 static int tracing_buffers_release(struct inode *inode, struct file *file);
5529 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5530 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5532 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5534 struct ftrace_buffer_info *info;
5537 ret = tracing_buffers_open(inode, filp);
5541 info = filp->private_data;
5543 if (info->iter.trace->use_max_tr) {
5544 tracing_buffers_release(inode, filp);
5548 info->iter.snapshot = true;
5549 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5554 #endif /* CONFIG_TRACER_SNAPSHOT */
5557 static const struct file_operations tracing_thresh_fops = {
5558 .open = tracing_open_generic,
5559 .read = tracing_thresh_read,
5560 .write = tracing_thresh_write,
5561 .llseek = generic_file_llseek,
5564 #ifdef CONFIG_TRACER_MAX_TRACE
5565 static const struct file_operations tracing_max_lat_fops = {
5566 .open = tracing_open_generic,
5567 .read = tracing_max_lat_read,
5568 .write = tracing_max_lat_write,
5569 .llseek = generic_file_llseek,
5573 static const struct file_operations set_tracer_fops = {
5574 .open = tracing_open_generic,
5575 .read = tracing_set_trace_read,
5576 .write = tracing_set_trace_write,
5577 .llseek = generic_file_llseek,
5580 static const struct file_operations tracing_pipe_fops = {
5581 .open = tracing_open_pipe,
5582 .poll = tracing_poll_pipe,
5583 .read = tracing_read_pipe,
5584 .splice_read = tracing_splice_read_pipe,
5585 .release = tracing_release_pipe,
5586 .llseek = no_llseek,
5589 static const struct file_operations tracing_entries_fops = {
5590 .open = tracing_open_generic_tr,
5591 .read = tracing_entries_read,
5592 .write = tracing_entries_write,
5593 .llseek = generic_file_llseek,
5594 .release = tracing_release_generic_tr,
5597 static const struct file_operations tracing_total_entries_fops = {
5598 .open = tracing_open_generic_tr,
5599 .read = tracing_total_entries_read,
5600 .llseek = generic_file_llseek,
5601 .release = tracing_release_generic_tr,
5604 static const struct file_operations tracing_free_buffer_fops = {
5605 .open = tracing_open_generic_tr,
5606 .write = tracing_free_buffer_write,
5607 .release = tracing_free_buffer_release,
5610 static const struct file_operations tracing_mark_fops = {
5611 .open = tracing_open_generic_tr,
5612 .write = tracing_mark_write,
5613 .llseek = generic_file_llseek,
5614 .release = tracing_release_generic_tr,
5617 static const struct file_operations trace_clock_fops = {
5618 .open = tracing_clock_open,
5620 .llseek = seq_lseek,
5621 .release = tracing_single_release_tr,
5622 .write = tracing_clock_write,
5625 #ifdef CONFIG_TRACER_SNAPSHOT
5626 static const struct file_operations snapshot_fops = {
5627 .open = tracing_snapshot_open,
5629 .write = tracing_snapshot_write,
5630 .llseek = tracing_lseek,
5631 .release = tracing_snapshot_release,
5634 static const struct file_operations snapshot_raw_fops = {
5635 .open = snapshot_raw_open,
5636 .read = tracing_buffers_read,
5637 .release = tracing_buffers_release,
5638 .splice_read = tracing_buffers_splice_read,
5639 .llseek = no_llseek,
5642 #endif /* CONFIG_TRACER_SNAPSHOT */
5644 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5646 struct trace_array *tr = inode->i_private;
5647 struct ftrace_buffer_info *info;
5650 if (tracing_disabled)
5653 if (trace_array_get(tr) < 0)
5656 info = kzalloc(sizeof(*info), GFP_KERNEL);
5658 trace_array_put(tr);
5662 mutex_lock(&trace_types_lock);
5665 info->iter.cpu_file = tracing_get_cpu(inode);
5666 info->iter.trace = tr->current_trace;
5667 info->iter.trace_buffer = &tr->trace_buffer;
5669 /* Force reading ring buffer for first read */
5670 info->read = (unsigned int)-1;
5672 filp->private_data = info;
5674 tr->current_trace->ref++;
5676 mutex_unlock(&trace_types_lock);
5678 ret = nonseekable_open(inode, filp);
5680 trace_array_put(tr);
5686 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5688 struct ftrace_buffer_info *info = filp->private_data;
5689 struct trace_iterator *iter = &info->iter;
5691 return trace_poll(iter, filp, poll_table);
5695 tracing_buffers_read(struct file *filp, char __user *ubuf,
5696 size_t count, loff_t *ppos)
5698 struct ftrace_buffer_info *info = filp->private_data;
5699 struct trace_iterator *iter = &info->iter;
5706 #ifdef CONFIG_TRACER_MAX_TRACE
5707 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5712 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5717 /* Do we have previous read data to read? */
5718 if (info->read < PAGE_SIZE)
5722 trace_access_lock(iter->cpu_file);
5723 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5727 trace_access_unlock(iter->cpu_file);
5730 if (trace_empty(iter)) {
5731 if ((filp->f_flags & O_NONBLOCK))
5734 ret = wait_on_pipe(iter, false);
5745 size = PAGE_SIZE - info->read;
5749 ret = copy_to_user(ubuf, info->spare + info->read, size);
5761 static int tracing_buffers_release(struct inode *inode, struct file *file)
5763 struct ftrace_buffer_info *info = file->private_data;
5764 struct trace_iterator *iter = &info->iter;
5766 mutex_lock(&trace_types_lock);
5768 iter->tr->current_trace->ref--;
5770 __trace_array_put(iter->tr);
5773 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5776 mutex_unlock(&trace_types_lock);
5782 struct ring_buffer *buffer;
5787 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5788 struct pipe_buffer *buf)
5790 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5795 ring_buffer_free_read_page(ref->buffer, ref->page);
5800 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5801 struct pipe_buffer *buf)
5803 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5808 /* Pipe buffer operations for a buffer. */
5809 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5811 .confirm = generic_pipe_buf_confirm,
5812 .release = buffer_pipe_buf_release,
5813 .steal = generic_pipe_buf_steal,
5814 .get = buffer_pipe_buf_get,
5818 * Callback from splice_to_pipe(), if we need to release some pages
5819 * at the end of the spd in case we errored out while filling the pipe.
5821 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5823 struct buffer_ref *ref =
5824 (struct buffer_ref *)spd->partial[i].private;
5829 ring_buffer_free_read_page(ref->buffer, ref->page);
5831 spd->partial[i].private = 0;
5835 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5836 struct pipe_inode_info *pipe, size_t len,
5839 struct ftrace_buffer_info *info = file->private_data;
5840 struct trace_iterator *iter = &info->iter;
5841 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5842 struct page *pages_def[PIPE_DEF_BUFFERS];
5843 struct splice_pipe_desc spd = {
5845 .partial = partial_def,
5846 .nr_pages_max = PIPE_DEF_BUFFERS,
5848 .ops = &buffer_pipe_buf_ops,
5849 .spd_release = buffer_spd_release,
5851 struct buffer_ref *ref;
5852 int entries, size, i;
5855 #ifdef CONFIG_TRACER_MAX_TRACE
5856 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5860 if (*ppos & (PAGE_SIZE - 1))
5863 if (len & (PAGE_SIZE - 1)) {
5864 if (len < PAGE_SIZE)
5869 if (splice_grow_spd(pipe, &spd))
5873 trace_access_lock(iter->cpu_file);
5874 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5876 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5880 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5887 ref->buffer = iter->trace_buffer->buffer;
5888 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5895 r = ring_buffer_read_page(ref->buffer, &ref->page,
5896 len, iter->cpu_file, 1);
5898 ring_buffer_free_read_page(ref->buffer, ref->page);
5904 * zero out any left over data, this is going to
5907 size = ring_buffer_page_len(ref->page);
5908 if (size < PAGE_SIZE)
5909 memset(ref->page + size, 0, PAGE_SIZE - size);
5911 page = virt_to_page(ref->page);
5913 spd.pages[i] = page;
5914 spd.partial[i].len = PAGE_SIZE;
5915 spd.partial[i].offset = 0;
5916 spd.partial[i].private = (unsigned long)ref;
5920 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5923 trace_access_unlock(iter->cpu_file);
5926 /* did we read anything? */
5927 if (!spd.nr_pages) {
5932 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5935 ret = wait_on_pipe(iter, true);
5942 ret = splice_to_pipe(pipe, &spd);
5944 splice_shrink_spd(&spd);
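/*
 * Note the constraints enforced above: *ppos must be page aligned, and
 * len is rounded down to a multiple of PAGE_SIZE (anything below one
 * page is rejected), because whole ring buffer pages are handed to the
 * pipe without copying.
 */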
5949 static const struct file_operations tracing_buffers_fops = {
5950 .open = tracing_buffers_open,
5951 .read = tracing_buffers_read,
5952 .poll = tracing_buffers_poll,
5953 .release = tracing_buffers_release,
5954 .splice_read = tracing_buffers_splice_read,
5955 .llseek = no_llseek,
5959 tracing_stats_read(struct file *filp, char __user *ubuf,
5960 size_t count, loff_t *ppos)
5962 struct inode *inode = file_inode(filp);
5963 struct trace_array *tr = inode->i_private;
5964 struct trace_buffer *trace_buf = &tr->trace_buffer;
5965 int cpu = tracing_get_cpu(inode);
5966 struct trace_seq *s;
5968 unsigned long long t;
5969 unsigned long usec_rem;
5971 s = kmalloc(sizeof(*s), GFP_KERNEL);
5977 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5978 trace_seq_printf(s, "entries: %ld\n", cnt);
5980 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5981 trace_seq_printf(s, "overrun: %ld\n", cnt);
5983 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5984 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5986 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5987 trace_seq_printf(s, "bytes: %ld\n", cnt);
5989 if (trace_clocks[tr->clock_id].in_ns) {
5990 /* local or global for trace_clock */
5991 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5992 usec_rem = do_div(t, USEC_PER_SEC);
5993 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5996 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5997 usec_rem = do_div(t, USEC_PER_SEC);
5998 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6000 /* counter or tsc mode for trace_clock */
6001 trace_seq_printf(s, "oldest event ts: %llu\n",
6002 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6004 trace_seq_printf(s, "now ts: %llu\n",
6005 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6008 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6009 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6011 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6012 trace_seq_printf(s, "read events: %ld\n", cnt);
6014 count = simple_read_from_buffer(ubuf, count, ppos,
6015 s->buffer, trace_seq_used(s));
6022 static const struct file_operations tracing_stats_fops = {
6023 .open = tracing_open_generic_tr,
6024 .read = tracing_stats_read,
6025 .llseek = generic_file_llseek,
6026 .release = tracing_release_generic_tr,
6029 #ifdef CONFIG_DYNAMIC_FTRACE
6031 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
6037 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6038 size_t cnt, loff_t *ppos)
6040 static char ftrace_dyn_info_buffer[1024];
6041 static DEFINE_MUTEX(dyn_info_mutex);
6042 unsigned long *p = filp->private_data;
6043 char *buf = ftrace_dyn_info_buffer;
6044 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
6047 mutex_lock(&dyn_info_mutex);
6048 r = sprintf(buf, "%ld ", *p);
6050 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
6053 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6055 mutex_unlock(&dyn_info_mutex);
6060 static const struct file_operations tracing_dyn_info_fops = {
6061 .open = tracing_open_generic,
6062 .read = tracing_read_dyn_info,
6063 .llseek = generic_file_llseek,
6065 #endif /* CONFIG_DYNAMIC_FTRACE */
6067 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6069 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6075 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6077 unsigned long *count = (long *)data;
6089 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6090 struct ftrace_probe_ops *ops, void *data)
6092 long count = (long)data;
6094 seq_printf(m, "%ps:", (void *)ip);
6096 seq_puts(m, "snapshot");
6099 seq_puts(m, ":unlimited\n");
6101 seq_printf(m, ":count=%ld\n", count);
6106 static struct ftrace_probe_ops snapshot_probe_ops = {
6107 .func = ftrace_snapshot,
6108 .print = ftrace_snapshot_print,
6111 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6112 .func = ftrace_count_snapshot,
6113 .print = ftrace_snapshot_print,
6117 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6118 char *glob, char *cmd, char *param, int enable)
6120 struct ftrace_probe_ops *ops;
6121 void *count = (void *)-1;
6125 /* hash funcs only work with set_ftrace_filter */
6129 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6131 if (glob[0] == '!') {
6132 unregister_ftrace_function_probe_func(glob+1, ops);
6139 number = strsep(&param, ":");
6141 if (!strlen(number))
6145 * We use the callback data field (which is a pointer)
6148 ret = kstrtoul(number, 0, (unsigned long *)&count);
6153 ret = register_ftrace_function_probe(glob, ops, count);
6156 alloc_snapshot(&global_trace);
6158 return ret < 0 ? ret : 0;
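/*
 * set_ftrace_filter command sketch for the probe registered below
 * (the function name is an example):
 *
 *   # echo 'some_function:snapshot' > set_ftrace_filter     <- unlimited
 *   # echo 'some_function:snapshot:3' > set_ftrace_filter   <- 3 hits max
 *   # echo '!some_function:snapshot' > set_ftrace_filter    <- remove
 */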
6161 static struct ftrace_func_command ftrace_snapshot_cmd = {
6163 .func = ftrace_trace_snapshot_callback,
6166 static __init int register_snapshot_cmd(void)
6168 return register_ftrace_command(&ftrace_snapshot_cmd);
6171 static inline __init int register_snapshot_cmd(void) { return 0; }
6172 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6174 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6176 if (WARN_ON(!tr->dir))
6177 return ERR_PTR(-ENODEV);
6179 /* Top directory uses NULL as the parent */
6180 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6183 /* All sub buffers have a descriptor */
6187 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6189 struct dentry *d_tracer;
6192 return tr->percpu_dir;
6194 d_tracer = tracing_get_dentry(tr);
6195 if (IS_ERR(d_tracer))
6198 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6200 WARN_ONCE(!tr->percpu_dir,
6201 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6203 return tr->percpu_dir;
6206 static struct dentry *
6207 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6208 void *data, long cpu, const struct file_operations *fops)
6210 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6212 if (ret) /* See tracing_get_cpu() */
6213 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6218 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6220 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6221 struct dentry *d_cpu;
6222 char cpu_dir[30]; /* 30 characters should be more than enough */
6227 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6228 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6230 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6234 /* per cpu trace_pipe */
6235 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6236 tr, cpu, &tracing_pipe_fops);
6239 trace_create_cpu_file("trace", 0644, d_cpu,
6240 tr, cpu, &tracing_fops);
6242 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6243 tr, cpu, &tracing_buffers_fops);
6245 trace_create_cpu_file("stats", 0444, d_cpu,
6246 tr, cpu, &tracing_stats_fops);
6248 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6249 tr, cpu, &tracing_entries_fops);
6251 #ifdef CONFIG_TRACER_SNAPSHOT
6252 trace_create_cpu_file("snapshot", 0644, d_cpu,
6253 tr, cpu, &snapshot_fops);
6255 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6256 tr, cpu, &snapshot_raw_fops);
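/*
 * The resulting per-cpu layout, e.g. for CPU 1:
 *
 *   per_cpu/cpu1/{trace,trace_pipe,trace_pipe_raw,stats,buffer_size_kb}
 *
 * plus snapshot and snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */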
6260 #ifdef CONFIG_FTRACE_SELFTEST
6261 /* Let selftest have access to static functions in this file */
6262 #include "trace_selftest.c"
6266 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6269 struct trace_option_dentry *topt = filp->private_data;
6272 if (topt->flags->val & topt->opt->bit)
6277 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6281 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6284 struct trace_option_dentry *topt = filp->private_data;
6288 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6292 if (val != 0 && val != 1)
6295 if (!!(topt->flags->val & topt->opt->bit) != val) {
6296 mutex_lock(&trace_types_lock);
6297 ret = __set_tracer_option(topt->tr, topt->flags,
6299 mutex_unlock(&trace_types_lock);
6310 static const struct file_operations trace_options_fops = {
6311 .open = tracing_open_generic,
6312 .read = trace_options_read,
6313 .write = trace_options_write,
6314 .llseek = generic_file_llseek,
6318 * In order to pass in both the trace_array descriptor as well as the index
6319 * to the flag that the trace option file represents, the trace_array
6320 * has a character array of trace_flags_index[], which holds the index
6321 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6322 * The address of this character array is passed to the flag option file
6323 * read/write callbacks.
6325 * In order to extract both the index and the trace_array descriptor,
6326 * get_tr_index() uses the following algorithm.
6330 * As the pointer itself contains the address of the index (remember
6333 * Then to get the trace_array descriptor, by subtracting that index
6334 * from the ptr, we get to the start of the index itself.
6336 * ptr - idx == &index[0]
6338 * Then a simple container_of() from that pointer gets us to the
6339 * trace_array descriptor.
6341 static void get_tr_index(void *data, struct trace_array **ptr,
6342 unsigned int *pindex)
6344 *pindex = *(unsigned char *)data;
6346 *ptr = container_of(data - *pindex, struct trace_array,
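/*
 * A sketch of the pointer arithmetic above: with data pointing at
 * &tr->trace_flags_index[idx], the byte stored there is idx itself, so:
 *
 *	unsigned char *p = &tr->trace_flags_index[5];	// *p == 5
 *	p - *p;			// == &tr->trace_flags_index[0]
 *	// container_of() on that recovers the enclosing trace_array.
 */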
6351 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6354 void *tr_index = filp->private_data;
6355 struct trace_array *tr;
6359 get_tr_index(tr_index, &tr, &index);
6361 if (tr->trace_flags & (1 << index))
6366 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6370 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6373 void *tr_index = filp->private_data;
6374 struct trace_array *tr;
6379 get_tr_index(tr_index, &tr, &index);
6381 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6385 if (val != 0 && val != 1)
6388 mutex_lock(&trace_types_lock);
6389 ret = set_tracer_flag(tr, 1 << index, val);
6390 mutex_unlock(&trace_types_lock);
6400 static const struct file_operations trace_options_core_fops = {
6401 .open = tracing_open_generic,
6402 .read = trace_options_core_read,
6403 .write = trace_options_core_write,
6404 .llseek = generic_file_llseek,
6407 struct dentry *trace_create_file(const char *name,
6409 struct dentry *parent,
6411 const struct file_operations *fops)
6415 ret = tracefs_create_file(name, mode, parent, data, fops);
6417 pr_warning("Could not create tracefs '%s' entry\n", name);
6423 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6425 struct dentry *d_tracer;
6430 d_tracer = tracing_get_dentry(tr);
6431 if (IS_ERR(d_tracer))
6434 tr->options = tracefs_create_dir("options", d_tracer);
6436 pr_warning("Could not create tracefs directory 'options'\n");
6444 create_trace_option_file(struct trace_array *tr,
6445 struct trace_option_dentry *topt,
6446 struct tracer_flags *flags,
6447 struct tracer_opt *opt)
6449 struct dentry *t_options;
6451 t_options = trace_options_init_dentry(tr);
6455 topt->flags = flags;
6459 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6460 &trace_options_fops);
6465 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6467 struct trace_option_dentry *topts;
6468 struct trace_options *tr_topts;
6469 struct tracer_flags *flags;
6470 struct tracer_opt *opts;
6477 flags = tracer->flags;
6479 if (!flags || !flags->opts)
6483 * If this is an instance, only create flags for tracers
6484 * the instance may have.
6486 if (!trace_ok_for_array(tracer, tr))
6489 for (i = 0; i < tr->nr_topts; i++) {
6491 * Check if these flags have already been added.
6492 * Some tracers share flags.
6494 if (tr->topts[i].tracer->flags == tracer->flags)
6500 for (cnt = 0; opts[cnt].name; cnt++)
6503 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6507 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6514 tr->topts = tr_topts;
6515 tr->topts[tr->nr_topts].tracer = tracer;
6516 tr->topts[tr->nr_topts].topts = topts;
6519 for (cnt = 0; opts[cnt].name; cnt++) {
6520 create_trace_option_file(tr, &topts[cnt], flags,
6522 WARN_ONCE(topts[cnt].entry == NULL,
6523 "Failed to create trace option: %s",
6528 static struct dentry *
6529 create_trace_option_core_file(struct trace_array *tr,
6530 const char *option, long index)
6532 struct dentry *t_options;
6534 t_options = trace_options_init_dentry(tr);
6538 return trace_create_file(option, 0644, t_options,
6539 (void *)&tr->trace_flags_index[index],
6540 &trace_options_core_fops);
6543 static void create_trace_options_dir(struct trace_array *tr)
6545 struct dentry *t_options;
6546 bool top_level = tr == &global_trace;
6549 t_options = trace_options_init_dentry(tr);
6553 for (i = 0; trace_options[i]; i++) {
6555 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6556 create_trace_option_core_file(tr, trace_options[i], i);
6561 rb_simple_read(struct file *filp, char __user *ubuf,
6562 size_t cnt, loff_t *ppos)
6564 struct trace_array *tr = filp->private_data;
6568 r = tracer_tracing_is_on(tr);
6569 r = sprintf(buf, "%d\n", r);
6571 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6575 rb_simple_write(struct file *filp, const char __user *ubuf,
6576 size_t cnt, loff_t *ppos)
6578 struct trace_array *tr = filp->private_data;
6579 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6583 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6588 mutex_lock(&trace_types_lock);
6590 tracer_tracing_on(tr);
6591 if (tr->current_trace->start)
6592 tr->current_trace->start(tr);
6594 tracer_tracing_off(tr);
6595 if (tr->current_trace->stop)
6596 tr->current_trace->stop(tr);
6598 mutex_unlock(&trace_types_lock);
6606 static const struct file_operations rb_simple_fops = {
6607 .open = tracing_open_generic_tr,
6608 .read = rb_simple_read,
6609 .write = rb_simple_write,
6610 .release = tracing_release_generic_tr,
6611 .llseek = default_llseek,
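/*
 * tracing_on usage sketch (this toggles recording without tearing
 * anything down):
 *
 *   # echo 0 > tracing_on   <- stop recording, buffer contents remain
 *   # echo 1 > tracing_on   <- resume recording
 *   # cat tracing_on        <- "0" or "1"
 */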
6614 struct dentry *trace_instance_dir;
6617 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6620 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6622 enum ring_buffer_flags rb_flags;
6624 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6628 buf->buffer = ring_buffer_alloc(size, rb_flags);
6632 buf->data = alloc_percpu(struct trace_array_cpu);
6634 ring_buffer_free(buf->buffer);
6638 /* Allocate the first page for all buffers */
6639 set_buffer_entries(&tr->trace_buffer,
6640 ring_buffer_size(tr->trace_buffer.buffer, 0));
6645 static int allocate_trace_buffers(struct trace_array *tr, int size)
6649 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6653 #ifdef CONFIG_TRACER_MAX_TRACE
6654 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6655 allocate_snapshot ? size : 1);
6657 ring_buffer_free(tr->trace_buffer.buffer);
6658 free_percpu(tr->trace_buffer.data);
6661 tr->allocated_snapshot = allocate_snapshot;
6664 * Only the top level trace array gets its snapshot allocated
6665 * from the kernel command line.
6667 allocate_snapshot = false;
6672 static void free_trace_buffer(struct trace_buffer *buf)
6675 ring_buffer_free(buf->buffer);
6677 free_percpu(buf->data);
6682 static void free_trace_buffers(struct trace_array *tr)
6687 free_trace_buffer(&tr->trace_buffer);
6689 #ifdef CONFIG_TRACER_MAX_TRACE
6690 free_trace_buffer(&tr->max_buffer);
6694 static void init_trace_flags_index(struct trace_array *tr)
6698 /* Used by the trace options files */
6699 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6700 tr->trace_flags_index[i] = i;
6703 static void __update_tracer_options(struct trace_array *tr)
6707 for (t = trace_types; t; t = t->next)
6708 add_tracer_options(tr, t);
6711 static void update_tracer_options(struct trace_array *tr)
6713 mutex_lock(&trace_types_lock);
6714 __update_tracer_options(tr);
6715 mutex_unlock(&trace_types_lock);
6718 static int instance_mkdir(const char *name)
6720 struct trace_array *tr;
6723 mutex_lock(&trace_types_lock);
6726 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6727 if (tr->name && strcmp(tr->name, name) == 0)
6732 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6736 tr->name = kstrdup(name, GFP_KERNEL);
6740 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6743 tr->trace_flags = global_trace.trace_flags;
6745 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6747 raw_spin_lock_init(&tr->start_lock);
6749 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6751 tr->current_trace = &nop_trace;
6753 INIT_LIST_HEAD(&tr->systems);
6754 INIT_LIST_HEAD(&tr->events);
6756 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6759 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6763 ret = event_trace_add_tracer(tr->dir, tr);
6765 tracefs_remove_recursive(tr->dir);
6769 init_tracer_tracefs(tr, tr->dir);
6770 init_trace_flags_index(tr);
6771 __update_tracer_options(tr);
6773 list_add(&tr->list, &ftrace_trace_arrays);
6775 mutex_unlock(&trace_types_lock);
6780 free_trace_buffers(tr);
6781 free_cpumask_var(tr->tracing_cpumask);
6786 mutex_unlock(&trace_types_lock);
6792 static int instance_rmdir(const char *name)
6794 struct trace_array *tr;
6799 mutex_lock(&trace_types_lock);
6802 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6803 if (tr->name && strcmp(tr->name, name) == 0) {
6812 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6815 list_del(&tr->list);
6817 tracing_set_nop(tr);
6818 event_trace_del_tracer(tr);
6819 ftrace_destroy_function_files(tr);
6820 tracefs_remove_recursive(tr->dir);
6821 free_trace_buffers(tr);
6823 for (i = 0; i < tr->nr_topts; i++) {
6824 kfree(tr->topts[i].topts);
6834 mutex_unlock(&trace_types_lock);
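/*
 * Instances usage sketch: each instance is an independent trace array
 * with its own buffer, events and option files:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo
 *   # rmdir /sys/kernel/tracing/instances/foo   <- -EBUSY while in use
 */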
6839 static __init void create_trace_instances(struct dentry *d_tracer)
6841 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6844 if (WARN_ON(!trace_instance_dir))
6849 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6853 trace_create_file("available_tracers", 0444, d_tracer,
6854 tr, &show_traces_fops);
6856 trace_create_file("current_tracer", 0644, d_tracer,
6857 tr, &set_tracer_fops);
6859 trace_create_file("tracing_cpumask", 0644, d_tracer,
6860 tr, &tracing_cpumask_fops);
6862 trace_create_file("trace_options", 0644, d_tracer,
6863 tr, &tracing_iter_fops);
6865 trace_create_file("trace", 0644, d_tracer,
6868 trace_create_file("trace_pipe", 0444, d_tracer,
6869 tr, &tracing_pipe_fops);
6871 trace_create_file("buffer_size_kb", 0644, d_tracer,
6872 tr, &tracing_entries_fops);
6874 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6875 tr, &tracing_total_entries_fops);
6877 trace_create_file("free_buffer", 0200, d_tracer,
6878 tr, &tracing_free_buffer_fops);
6880 trace_create_file("trace_marker", 0220, d_tracer,
6881 tr, &tracing_mark_fops);
6883 trace_create_file("saved_tgids", 0444, d_tracer,
6884 tr, &tracing_saved_tgids_fops);
6886 trace_create_file("trace_clock", 0644, d_tracer, tr,
6889 trace_create_file("tracing_on", 0644, d_tracer,
6890 tr, &rb_simple_fops);
6892 create_trace_options_dir(tr);
6894 #ifdef CONFIG_TRACER_MAX_TRACE
6895 trace_create_file("tracing_max_latency", 0644, d_tracer,
6896 &tr->max_latency, &tracing_max_lat_fops);
6899 if (ftrace_create_function_files(tr, d_tracer))
6900 WARN(1, "Could not allocate function filter files");
6902 #ifdef CONFIG_TRACER_SNAPSHOT
6903 trace_create_file("snapshot", 0644, d_tracer,
6904 tr, &snapshot_fops);
6907 for_each_tracing_cpu(cpu)
6908 tracing_init_tracefs_percpu(tr, cpu);
6912 static struct vfsmount *trace_automount(void *ignore)
6914 struct vfsmount *mnt;
6915 struct file_system_type *type;
6918 * To maintain backward compatibility for tools that mount
6919 * debugfs to get to the tracing facility, tracefs is automatically
6920 * mounted to the debugfs/tracing directory.
6922 type = get_fs_type("tracefs");
6925 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6926 put_filesystem(type);
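/*
 * With this automount in place, both paths reach the same files
 * (assuming debugfs is mounted at the usual location):
 *
 *   /sys/kernel/tracing/            <- tracefs proper
 *   /sys/kernel/debug/tracing/      <- automounted for older tools
 */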
6935 * tracing_init_dentry - initialize top level trace array
6937 * This is called when creating files or directories in the tracing
6938 * directory. It is called via fs_initcall() by any of the boot up code
6939 * and expects to return the dentry of the top level tracing directory.
6941 struct dentry *tracing_init_dentry(void)
6943 struct trace_array *tr = &global_trace;
6945 /* The top level trace array uses NULL as parent */
6949 if (WARN_ON(!tracefs_initialized()) ||
6950 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6951 WARN_ON(!debugfs_initialized())))
6952 return ERR_PTR(-ENODEV);
6955 * As there may still be users that expect the tracing
6956 * files to exist in debugfs/tracing, we must automount
6957 * the tracefs file system there, so older tools still
6958 * work with the newer kernel.
6960 tr->dir = debugfs_create_automount("tracing", NULL,
6961 trace_automount, NULL);
6963 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6964 return ERR_PTR(-ENOMEM);
6970 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6971 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6973 static void __init trace_enum_init(void)
6977 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6978 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6981 #ifdef CONFIG_MODULES
6982 static void trace_module_add_enums(struct module *mod)
6984 if (!mod->num_trace_enums)
6988 * Modules with bad taint do not have events created, do
6989 * not bother with enums either.
6991 if (trace_module_has_bad_taint(mod))
6994 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6997 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6998 static void trace_module_remove_enums(struct module *mod)
7000 union trace_enum_map_item *map;
7001 union trace_enum_map_item **last = &trace_enum_maps;
7003 if (!mod->num_trace_enums)
7006 mutex_lock(&trace_enum_mutex);
7008 map = trace_enum_maps;
7011 if (map->head.mod == mod)
7013 map = trace_enum_jmp_to_tail(map);
7014 last = &map->tail.next;
7015 map = map->tail.next;
7020 *last = trace_enum_jmp_to_tail(map)->tail.next;
7023 mutex_unlock(&trace_enum_mutex);
7026 static inline void trace_module_remove_enums(struct module *mod) { }
7027 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7029 static int trace_module_notify(struct notifier_block *self,
7030 unsigned long val, void *data)
7032 struct module *mod = data;
7035 case MODULE_STATE_COMING:
7036 trace_module_add_enums(mod);
7038 case MODULE_STATE_GOING:
7039 trace_module_remove_enums(mod);
7046 static struct notifier_block trace_module_nb = {
7047 .notifier_call = trace_module_notify,
7050 #endif /* CONFIG_MODULES */
7052 static __init int tracer_init_tracefs(void)
7054 struct dentry *d_tracer;
7056 trace_access_lock_init();
7058 d_tracer = tracing_init_dentry();
7059 if (IS_ERR(d_tracer))
7062 init_tracer_tracefs(&global_trace, d_tracer);
7064 trace_create_file("tracing_thresh", 0644, d_tracer,
7065 &global_trace, &tracing_thresh_fops);
7067 trace_create_file("README", 0444, d_tracer,
7068 NULL, &tracing_readme_fops);
7070 trace_create_file("saved_cmdlines", 0444, d_tracer,
7071 NULL, &tracing_saved_cmdlines_fops);
7073 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7074 NULL, &tracing_saved_cmdlines_size_fops);
7078 trace_create_enum_file(d_tracer);
7080 #ifdef CONFIG_MODULES
7081 register_module_notifier(&trace_module_nb);
7084 #ifdef CONFIG_DYNAMIC_FTRACE
7085 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7086 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7089 create_trace_instances(d_tracer);
7091 update_tracer_options(&global_trace);
7096 static int trace_panic_handler(struct notifier_block *this,
7097 unsigned long event, void *unused)
7099 if (ftrace_dump_on_oops)
7100 ftrace_dump(ftrace_dump_on_oops);
7104 static struct notifier_block trace_panic_notifier = {
7105 .notifier_call = trace_panic_handler,
7107 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7110 static int trace_die_handler(struct notifier_block *self,
7116 if (ftrace_dump_on_oops)
7117 ftrace_dump(ftrace_dump_on_oops);
7125 static struct notifier_block trace_die_notifier = {
7126 .notifier_call = trace_die_handler,
7131 * printk is set to max of 1024, we really don't need it that big.
7132 * Nothing should be printing 1000 characters anyway.
7134 #define TRACE_MAX_PRINT 1000
7137 * Define here KERN_TRACE so that we have one place to modify
7138 * it if we decide to change what log level the ftrace dump
7141 #define KERN_TRACE KERN_EMERG
7144 trace_printk_seq(struct trace_seq *s)
7146 /* Probably should print a warning here. */
7147 if (s->seq.len >= TRACE_MAX_PRINT)
7148 s->seq.len = TRACE_MAX_PRINT;
7151 * More paranoid code. Although the buffer size is set to
7152 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7153 * an extra layer of protection.
7155 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7156 s->seq.len = s->seq.size - 1;
7158 /* should be NUL terminated, but we are paranoid. */
7159 s->buffer[s->seq.len] = 0;
7161 printk(KERN_TRACE "%s", s->buffer);
7166 void trace_init_global_iter(struct trace_iterator *iter)
7168 iter->tr = &global_trace;
7169 iter->trace = iter->tr->current_trace;
7170 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7171 iter->trace_buffer = &global_trace.trace_buffer;
7173 if (iter->trace && iter->trace->open)
7174 iter->trace->open(iter);
7176 /* Annotate start of buffers if we had overruns */
7177 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7178 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7180 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7181 if (trace_clocks[iter->tr->clock_id].in_ns)
7182 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7185 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7187 /* use static because iter can be a bit big for the stack */
7188 static struct trace_iterator iter;
7189 static atomic_t dump_running;
7190 struct trace_array *tr = &global_trace;
7191 unsigned int old_userobj;
7192 unsigned long flags;
7195 /* Only allow one dump user at a time. */
7196 if (atomic_inc_return(&dump_running) != 1) {
7197 atomic_dec(&dump_running);
7202 * Always turn off tracing when we dump.
7203 * We don't need to show trace output of what happens
7204 * between multiple crashes.
7206 * If the user does a sysrq-z, then they can re-enable
7207 * tracing with echo 1 > tracing_on.
7211 local_irq_save(flags);
7213 /* Simulate the iterator */
7214 trace_init_global_iter(&iter);
7216 for_each_tracing_cpu(cpu) {
7217 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7220 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7222 /* don't look at user memory in panic mode */
7223 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7225 switch (oops_dump_mode) {
7227 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7230 iter.cpu_file = raw_smp_processor_id();
7235 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7236 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7239 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7241 /* Did function tracer already get disabled? */
7242 if (ftrace_is_dead()) {
7243 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7244 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7248 * We need to stop all tracing on all CPUs to read
7249 * the next buffer. This is a bit expensive, but is
7250 * not done often. We fill in all that we can read,
7251 * and then release the locks again.
7254 while (!trace_empty(&iter)) {
7257 printk(KERN_TRACE "---------------------------------\n");
7261 /* reset all but tr, trace, and overruns */
7262 memset(&iter.seq, 0,
7263 sizeof(struct trace_iterator) -
7264 offsetof(struct trace_iterator, seq));
7265 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7268 if (trace_find_next_entry_inc(&iter) != NULL) {
7271 ret = print_trace_line(&iter);
7272 if (ret != TRACE_TYPE_NO_CONSUME)
7273 trace_consume(&iter);
7275 touch_nmi_watchdog();
7277 trace_printk_seq(&iter.seq);
7281 printk(KERN_TRACE " (ftrace buffer empty)\n");
7283 printk(KERN_TRACE "---------------------------------\n");
7286 tr->trace_flags |= old_userobj;
7288 for_each_tracing_cpu(cpu) {
7289 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7291 atomic_dec(&dump_running);
7292 local_irq_restore(flags);
7294 EXPORT_SYMBOL_GPL(ftrace_dump);
7296 __init static int tracer_alloc_buffers(void)
7302 * Make sure we don't accidentally add more trace options
7303 * than we have bits for.
7305 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7307 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7310 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7311 goto out_free_buffer_mask;
7313 /* Only allocate trace_printk buffers if a trace_printk exists */
7314 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7315 /* Must be called before global_trace.buffer is allocated */
7316 trace_printk_init_buffers();
7318 /* To save memory, keep the ring buffer size to its minimum */
7319 if (ring_buffer_expanded)
7320 ring_buf_size = trace_buf_size;
7324 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7325 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7327 raw_spin_lock_init(&global_trace.start_lock);
7329 /* Used for event triggers */
7330 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7332 goto out_free_cpumask;
7334 if (trace_create_savedcmd() < 0)
7335 goto out_free_temp_buffer;
7337 /* TODO: make the number of buffers hot pluggable with CPUS */
7338 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7339 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7341 goto out_free_savedcmd;
7344 if (global_trace.buffer_disabled)
7347 if (trace_boot_clock) {
7348 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7350 pr_warning("Trace clock %s not defined, going back to default\n",
7355 * register_tracer() might reference current_trace, so it
7356 * needs to be set before we register anything. This is
7357 * just a bootstrap of current_trace anyway.
7359 global_trace.current_trace = &nop_trace;
7361 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7363 ftrace_init_global_array_ops(&global_trace);
7365 init_trace_flags_index(&global_trace);
7367 register_tracer(&nop_trace);
7369 /* All seems OK, enable tracing */
7370 tracing_disabled = 0;
7372 atomic_notifier_chain_register(&panic_notifier_list,
7373 &trace_panic_notifier);
7375 register_die_notifier(&trace_die_notifier);
7377 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7379 INIT_LIST_HEAD(&global_trace.systems);
7380 INIT_LIST_HEAD(&global_trace.events);
7381 list_add(&global_trace.list, &ftrace_trace_arrays);
7383 apply_trace_boot_options();
7385 register_snapshot_cmd();
7390 free_saved_cmdlines_buffer(savedcmd);
7391 out_free_temp_buffer:
7392 ring_buffer_free(temp_buffer);
7394 free_cpumask_var(global_trace.tracing_cpumask);
7395 out_free_buffer_mask:
7396 free_cpumask_var(tracing_buffer_mask);
7401 void __init trace_init(void)
7403 if (tracepoint_printk) {
7404 tracepoint_print_iter =
7405 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7406 if (WARN_ON(!tracepoint_print_iter))
7407 tracepoint_printk = 0;
7409 tracer_alloc_buffers();
7413 __init static int clear_boot_tracer(void)
7416 * The default tracer set at boot is in an init section.
7417 * This function is called from a late_initcall. If we did not
7418 * find the boot tracer, then clear it out, to prevent
7419 * later registration from accessing the buffer that is
7420 * about to be freed.
7422 if (!default_bootup_tracer)
7425 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7426 default_bootup_tracer);
7427 default_bootup_tracer = NULL;
7432 fs_initcall(tracer_init_tracefs);
7433 late_initcall(clear_boot_tracer);