2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/sched/rt.h>
46 #include "trace_output.h"
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
52 bool ring_buffer_expanded;
55 * We need to change this state when a selftest is running.
56 * A selftest will look into the ring buffer to count the
57 * entries inserted during the selftest, although concurrent
58 * insertions into the ring buffer, such as trace_printk(),
59 * could occur at the same time, giving false positive or negative results.
61 static bool __read_mostly tracing_selftest_running;
64 * If a tracer is running, we do not want to run SELFTEST.
66 bool __read_mostly tracing_selftest_disabled;
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
77 static struct tracer_flags dummy_tracer_flags = {
79 .opts = dummy_tracer_opt
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
101 static int tracing_disabled = 1;
103 cpumask_var_t __read_mostly tracing_buffer_mask;
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
114 * It is off by default, but you can enable it either by specifying
115 * "ftrace_dump_on_oops" on the kernel command line, or by setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
121 enum ftrace_dump_mode ftrace_dump_on_oops;
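/*
 * Illustrative example (not part of the original file), based on the
 * parsing in set_ftrace_dump_on_oops() below and the comment above:
 *
 *	ftrace_dump_on_oops		dump the buffers of all CPUs on oops
 *	ftrace_dump_on_oops=orig_cpu	dump only the CPU that triggered it
 *
 * or, at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */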
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
126 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
127 /* Map of enums to their values, for "enum_map" file */
128 struct trace_enum_map_head {
130 unsigned long length;
133 union trace_enum_map_item;
135 struct trace_enum_map_tail {
137 * "end" is first and points to NULL as it must be different
138 * from "mod" or "enum_string"
140 union trace_enum_map_item *next;
141 const char *end; /* points to NULL */
144 static DEFINE_MUTEX(trace_enum_mutex);
147 * The trace_enum_maps are saved in an array with two extra elements,
148 * one at the beginning, and one at the end. The beginning item contains
149 * the count of the saved maps (head.length), and the module they
150 * belong to if not built in (head.mod). The ending item contains a
151 * pointer to the next array of saved enum_map items.
153 union trace_enum_map_item {
154 struct trace_enum_map map;
155 struct trace_enum_map_head head;
156 struct trace_enum_map_tail tail;
159 static union trace_enum_map_item *trace_enum_maps;
160 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
162 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
164 #define MAX_TRACER_SIZE 100
165 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
166 static char *default_bootup_tracer;
168 static bool allocate_snapshot;
170 static int __init set_cmdline_ftrace(char *str)
172 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
173 default_bootup_tracer = bootup_tracer_buf;
174 /* We are using ftrace early, expand it */
175 ring_buffer_expanded = true;
178 __setup("ftrace=", set_cmdline_ftrace);
180 static int __init set_ftrace_dump_on_oops(char *str)
182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
194 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
196 static int __init stop_trace_on_warning(char *str)
198 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
199 __disable_trace_on_warning = 1;
202 __setup("traceoff_on_warning", stop_trace_on_warning);
204 static int __init boot_alloc_snapshot(char *str)
206 allocate_snapshot = true;
207 /* We also need the main ring buffer expanded */
208 ring_buffer_expanded = true;
211 __setup("alloc_snapshot", boot_alloc_snapshot);
214 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
216 static int __init set_trace_boot_options(char *str)
218 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
221 __setup("trace_options=", set_trace_boot_options);
223 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
224 static char *trace_boot_clock __initdata;
226 static int __init set_trace_boot_clock(char *str)
228 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
229 trace_boot_clock = trace_boot_clock_buf;
232 __setup("trace_clock=", set_trace_boot_clock);
234 static int __init set_tracepoint_printk(char *str)
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
240 __setup("tp_printk", set_tracepoint_printk);
242 unsigned long long ns2usecs(cycle_t nsec)
249 /* trace_flags holds trace_options default values */
250 #define TRACE_DEFAULT_FLAGS \
251 (FUNCTION_DEFAULT_FLAGS | \
252 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
253 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
254 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
255 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
257 /* trace_options that are only supported by global_trace */
258 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
259 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
263 * The global_trace is the descriptor that holds the tracing
264 * buffers for the live tracing. For each CPU, it contains
265 * a linked list of pages that will store trace entries. The
266 * page descriptor of the pages in memory is used to hold
267 * the linked list by linking the lru item in the page descriptor
268 * to each of the pages in the per-CPU buffer.
270 * For each active CPU there is a data field that holds the
271 * pages for the buffer for that CPU. Each CPU has the same number
272 * of pages allocated for its buffer.
274 static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
278 LIST_HEAD(ftrace_trace_arrays);
280 int trace_array_get(struct trace_array *this_tr)
282 struct trace_array *tr;
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
293 mutex_unlock(&trace_types_lock);
298 static void __trace_array_put(struct trace_array *this_tr)
300 WARN_ON(!this_tr->ref);
304 void trace_array_put(struct trace_array *this_tr)
306 mutex_lock(&trace_types_lock);
307 __trace_array_put(this_tr);
308 mutex_unlock(&trace_types_lock);
311 int filter_check_discard(struct trace_event_file *file, void *rec,
312 struct ring_buffer *buffer,
313 struct ring_buffer_event *event)
315 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
316 !filter_match_preds(file->filter, rec)) {
317 ring_buffer_discard_commit(buffer, event);
323 EXPORT_SYMBOL_GPL(filter_check_discard);
325 int call_filter_check_discard(struct trace_event_call *call, void *rec,
326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 ring_buffer_discard_commit(buffer, event);
337 EXPORT_SYMBOL_GPL(call_filter_check_discard);
339 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
343 /* Early boot up does not have a buffer yet */
345 return trace_clock_local();
347 ts = ring_buffer_time_stamp(buf->buffer, cpu);
348 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
353 cycle_t ftrace_now(int cpu)
355 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
359 * tracing_is_enabled - Show if global_trace has been disabled
361 * Shows if the global trace has been enabled or not. It uses the
362 * mirror flag "buffer_disabled", which can be used in fast paths such
363 * as by the irqsoff tracer. But it may be inaccurate due to races. If you
364 * need to know the accurate state, use tracing_is_on() which is a little
365 * slower, but accurate.
367 int tracing_is_enabled(void)
370 * For quick access (irqsoff uses this in fast path), just
371 * return the mirror variable of the state of the ring buffer.
372 * It's a little racy, but we don't really care.
375 return !global_trace.buffer_disabled;
379 * trace_buf_size is the size in bytes that is allocated
380 * for a buffer. Note, the number of bytes is always rounded
383 * This number is purposely set to a low value of 16384 so that,
384 * if a dump on oops happens, we do not have to wait for an
385 * enormous amount of output. In any case, this is configurable
386 * at both boot time and run time.
388 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
390 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
392 /* trace_types holds a linked list of available tracers. */
393 static struct tracer *trace_types __read_mostly;
396 * trace_types_lock is used to protect the trace_types list.
398 DEFINE_MUTEX(trace_types_lock);
401 * serialize the access of the ring buffer
403 * The ring buffer serializes readers, but that is only low level protection.
404 * The validity of the events (returned by ring_buffer_peek() etc.)
405 * is not protected by the ring buffer.
407 * The content of events may become garbage if we allow other processes
408 * to consume these events concurrently:
409 * A) the page of the consumed events may become a normal page
410 * (not a reader page) in the ring buffer, and this page will be rewritten
411 * by the events producer.
412 * B) The page of the consumed events may become a page for splice_read,
413 * and this page will be returned to the system.
415 * These primitives allow multi-process access to different cpu ring buffers.
418 * These primitives don't distinguish read-only and read-consume access.
419 * Multiple read-only accesses are also serialized.
423 static DECLARE_RWSEM(all_cpu_access_lock);
424 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
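/*
 * Sketch of the intended reader-side usage (illustrative, not from the
 * original source). A reader of a single cpu buffer takes the per-cpu
 * lock; an operation that touches every cpu buffer passes
 * RING_BUFFER_ALL_CPUS instead:
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu buffer, e.g. via
 *	    ring_buffer_consume() or a splice read ...
 *	trace_access_unlock(cpu);
 *
 * trace_access_lock(RING_BUFFER_ALL_CPUS) excludes all per-cpu readers
 * by write-locking all_cpu_access_lock.
 */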
426 static inline void trace_access_lock(int cpu)
428 if (cpu == RING_BUFFER_ALL_CPUS) {
429 /* gain it for accessing the whole ring buffer. */
430 down_write(&all_cpu_access_lock);
432 /* gain it for accessing a cpu ring buffer. */
434 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
435 down_read(&all_cpu_access_lock);
437 /* Secondly block other access to this @cpu ring buffer. */
438 mutex_lock(&per_cpu(cpu_access_lock, cpu));
442 static inline void trace_access_unlock(int cpu)
444 if (cpu == RING_BUFFER_ALL_CPUS) {
445 up_write(&all_cpu_access_lock);
447 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
448 up_read(&all_cpu_access_lock);
452 static inline void trace_access_lock_init(void)
456 for_each_possible_cpu(cpu)
457 mutex_init(&per_cpu(cpu_access_lock, cpu));
462 static DEFINE_MUTEX(access_lock);
464 static inline void trace_access_lock(int cpu)
467 mutex_lock(&access_lock);
470 static inline void trace_access_unlock(int cpu)
473 mutex_unlock(&access_lock);
476 static inline void trace_access_lock_init(void)
482 #ifdef CONFIG_STACKTRACE
483 static void __ftrace_trace_stack(struct ring_buffer *buffer,
485 int skip, int pc, struct pt_regs *regs);
486 static inline void ftrace_trace_stack(struct trace_array *tr,
487 struct ring_buffer *buffer,
489 int skip, int pc, struct pt_regs *regs);
492 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
494 int skip, int pc, struct pt_regs *regs)
497 static inline void ftrace_trace_stack(struct trace_array *tr,
498 struct ring_buffer *buffer,
500 int skip, int pc, struct pt_regs *regs)
506 static void tracer_tracing_on(struct trace_array *tr)
508 if (tr->trace_buffer.buffer)
509 ring_buffer_record_on(tr->trace_buffer.buffer);
511 * This flag is looked at when buffers haven't been allocated
512 * yet, or by some tracers (like irqsoff), that just want to
513 * know if the ring buffer has been disabled, but it can handle
514 * races where it gets disabled but we still do a record.
515 * As the check is in the fast path of the tracers, it is more
516 * important to be fast than accurate.
518 tr->buffer_disabled = 0;
519 /* Make the flag seen by readers */
524 * tracing_on - enable tracing buffers
526 * This function enables tracing buffers that may have been
527 * disabled with tracing_off.
529 void tracing_on(void)
531 tracer_tracing_on(&global_trace);
533 EXPORT_SYMBOL_GPL(tracing_on);
536 * __trace_puts - write a constant string into the trace buffer.
537 * @ip: The address of the caller
538 * @str: The constant string to write
539 * @size: The size of the string.
541 int __trace_puts(unsigned long ip, const char *str, int size)
543 struct ring_buffer_event *event;
544 struct ring_buffer *buffer;
545 struct print_entry *entry;
546 unsigned long irq_flags;
550 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
553 pc = preempt_count();
555 if (unlikely(tracing_selftest_running || tracing_disabled))
558 alloc = sizeof(*entry) + size + 2; /* possible \n added */
560 local_save_flags(irq_flags);
561 buffer = global_trace.trace_buffer.buffer;
562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
567 entry = ring_buffer_event_data(event);
570 memcpy(&entry->buf, str, size);
572 /* Add a newline if necessary */
573 if (entry->buf[size - 1] != '\n') {
574 entry->buf[size] = '\n';
575 entry->buf[size + 1] = '\0';
577 entry->buf[size] = '\0';
579 __buffer_unlock_commit(buffer, event);
580 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
584 EXPORT_SYMBOL_GPL(__trace_puts);
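/*
 * Illustrative note (not part of the original file): __trace_puts() and
 * __trace_bputs() below are normally reached through the trace_puts()
 * helper, e.g.
 *
 *	trace_puts("reached the slow path\n");
 *
 * which, roughly speaking, records only the string's address when the
 * argument is a compile-time constant (the bputs form) and copies the
 * string into the buffer otherwise (the puts form above).
 */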
587 * __trace_bputs - write the pointer to a constant string into trace buffer
588 * @ip: The address of the caller
589 * @str: The constant string to write to the buffer to
591 int __trace_bputs(unsigned long ip, const char *str)
593 struct ring_buffer_event *event;
594 struct ring_buffer *buffer;
595 struct bputs_entry *entry;
596 unsigned long irq_flags;
597 int size = sizeof(struct bputs_entry);
600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
603 pc = preempt_count();
605 if (unlikely(tracing_selftest_running || tracing_disabled))
608 local_save_flags(irq_flags);
609 buffer = global_trace.trace_buffer.buffer;
610 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
615 entry = ring_buffer_event_data(event);
619 __buffer_unlock_commit(buffer, event);
620 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
624 EXPORT_SYMBOL_GPL(__trace_bputs);
626 #ifdef CONFIG_TRACER_SNAPSHOT
628 * trace_snapshot - take a snapshot of the current buffer.
630 * This causes a swap between the snapshot buffer and the current live
631 * tracing buffer. You can use this to take snapshots of the live
632 * trace when some condition is triggered, but continue to trace.
634 * Note, make sure to allocate the snapshot with either
635 * a tracing_snapshot_alloc(), or by doing it manually
636 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
638 * If the snapshot buffer is not allocated, it will stop tracing.
639 * Basically making a permanent snapshot.
641 void tracing_snapshot(void)
643 struct trace_array *tr = &global_trace;
644 struct tracer *tracer = tr->current_trace;
648 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
649 internal_trace_puts("*** snapshot is being ignored ***\n");
653 if (!tr->allocated_snapshot) {
654 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
655 internal_trace_puts("*** stopping trace here! ***\n");
660 /* Note, snapshot can not be used when the tracer uses it */
661 if (tracer->use_max_tr) {
662 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
663 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
667 local_irq_save(flags);
668 update_max_tr(tr, current, smp_processor_id());
669 local_irq_restore(flags);
671 EXPORT_SYMBOL_GPL(tracing_snapshot);
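/*
 * Illustrative usage of the snapshot API described above (not from the
 * original source). A debugging hack might allocate the snapshot buffer
 * once from sleepable context and then capture it when some condition
 * of interest fires:
 *
 *	// during setup, where sleeping is allowed
 *	tracing_snapshot_alloc();
 *
 *	// later, possibly from atomic context
 *	if (something_went_wrong)
 *		tracing_snapshot();
 *
 * something_went_wrong is a stand-in condition. If the snapshot buffer
 * was never allocated, tracing_snapshot() stops tracing instead, as
 * noted in the comment above.
 */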
673 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
674 struct trace_buffer *size_buf, int cpu_id);
675 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
677 static int alloc_snapshot(struct trace_array *tr)
681 if (!tr->allocated_snapshot) {
683 /* allocate spare buffer */
684 ret = resize_buffer_duplicate_size(&tr->max_buffer,
685 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
689 tr->allocated_snapshot = true;
695 static void free_snapshot(struct trace_array *tr)
698 * We don't free the ring buffer. Instead, we resize it because
699 * the max_tr ring buffer has some state (e.g. ring->clock) and
700 * we want to preserve it.
702 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
703 set_buffer_entries(&tr->max_buffer, 1);
704 tracing_reset_online_cpus(&tr->max_buffer);
705 tr->allocated_snapshot = false;
709 * tracing_alloc_snapshot - allocate snapshot buffer.
711 * This only allocates the snapshot buffer if it isn't already
712 * allocated - it doesn't also take a snapshot.
714 * This is meant to be used in cases where the snapshot buffer needs
715 * to be set up for events that can't sleep but need to be able to
716 * trigger a snapshot.
718 int tracing_alloc_snapshot(void)
720 struct trace_array *tr = &global_trace;
723 ret = alloc_snapshot(tr);
728 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
731 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
733 * This is similar to trace_snapshot(), but it will allocate the
734 * snapshot buffer if it isn't already allocated. Use this only
735 * where it is safe to sleep, as the allocation may sleep.
737 * This causes a swap between the snapshot buffer and the current live
738 * tracing buffer. You can use this to take snapshots of the live
739 * trace when some condition is triggered, but continue to trace.
741 void tracing_snapshot_alloc(void)
745 ret = tracing_alloc_snapshot();
751 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
753 void tracing_snapshot(void)
755 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
757 EXPORT_SYMBOL_GPL(tracing_snapshot);
758 int tracing_alloc_snapshot(void)
760 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
763 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
764 void tracing_snapshot_alloc(void)
769 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
770 #endif /* CONFIG_TRACER_SNAPSHOT */
772 static void tracer_tracing_off(struct trace_array *tr)
774 if (tr->trace_buffer.buffer)
775 ring_buffer_record_off(tr->trace_buffer.buffer);
777 * This flag is looked at when buffers haven't been allocated
778 * yet, or by some tracers (like irqsoff), that just want to
779 * know if the ring buffer has been disabled, but it can handle
780 * races where it gets disabled but we still do a record.
781 * As the check is in the fast path of the tracers, it is more
782 * important to be fast than accurate.
784 tr->buffer_disabled = 1;
785 /* Make the flag seen by readers */
790 * tracing_off - turn off tracing buffers
792 * This function stops the tracing buffers from recording data.
793 * It does not disable any overhead the tracers themselves may
794 * be causing. This function simply causes all recording to
795 * the ring buffers to fail.
797 void tracing_off(void)
799 tracer_tracing_off(&global_trace);
801 EXPORT_SYMBOL_GPL(tracing_off);
803 void disable_trace_on_warning(void)
805 if (__disable_trace_on_warning)
810 * tracer_tracing_is_on - show real state of ring buffer enabled
811 * @tr : the trace array to know if ring buffer is enabled
813 * Shows real state of the ring buffer if it is enabled or not.
815 static int tracer_tracing_is_on(struct trace_array *tr)
817 if (tr->trace_buffer.buffer)
818 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
819 return !tr->buffer_disabled;
823 * tracing_is_on - show state of ring buffers enabled
825 int tracing_is_on(void)
827 return tracer_tracing_is_on(&global_trace);
829 EXPORT_SYMBOL_GPL(tracing_is_on);
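/*
 * Illustrative example (not part of the original file): these exported
 * helpers let a driver freeze the trace around an interesting event so
 * the buffer is not overwritten before it can be read:
 *
 *	if (hit_the_bug()) {
 *		tracing_off();
 *		pr_info("tracing stopped, state: %d\n", tracing_is_on());
 *	}
 *
 * hit_the_bug() is a made-up condition; tracing can be re-enabled later
 * with tracing_on() or by writing 1 to the tracefs "tracing_on" file.
 */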
831 static int __init set_buf_size(char *str)
833 unsigned long buf_size;
837 buf_size = memparse(str, &str);
838 /* nr_entries can not be zero */
841 trace_buf_size = buf_size;
844 __setup("trace_buf_size=", set_buf_size);
846 static int __init set_tracing_thresh(char *str)
848 unsigned long threshold;
853 ret = kstrtoul(str, 0, &threshold);
856 tracing_thresh = threshold * 1000;
859 __setup("tracing_thresh=", set_tracing_thresh);
861 unsigned long nsecs_to_usecs(unsigned long nsecs)
867 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
868 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
869 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
870 * of strings in the order that the enums were defined.
875 /* These must match the bit positions in trace_iterator_flags */
876 static const char *trace_options[] = {
884 int in_ns; /* is this clock in nanoseconds? */
886 { trace_clock_local, "local", 1 },
887 { trace_clock_global, "global", 1 },
888 { trace_clock_counter, "counter", 0 },
889 { trace_clock_jiffies, "uptime", 0 },
890 { trace_clock, "perf", 1 },
891 { ktime_get_mono_fast_ns, "mono", 1 },
892 { ktime_get_raw_fast_ns, "mono_raw", 1 },
897 * trace_parser_get_init - gets the buffer for trace parser
899 int trace_parser_get_init(struct trace_parser *parser, int size)
901 memset(parser, 0, sizeof(*parser));
903 parser->buffer = kmalloc(size, GFP_KERNEL);
912 * trace_parser_put - frees the buffer for trace parser
914 void trace_parser_put(struct trace_parser *parser)
916 kfree(parser->buffer);
920 * trace_get_user - reads the user input string separated by space
921 * (matched by isspace(ch))
923 * For each string found the 'struct trace_parser' is updated,
924 * and the function returns.
926 * Returns number of bytes read.
928 * See kernel/trace/trace.h for 'struct trace_parser' details.
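 *
 * Illustrative usage (not part of the original comment): a tracefs
 * write handler typically does something like
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		... act on the NUL-terminated token in parser.buffer ...
 *	trace_parser_put(&parser);
 *
 * trace_parser_loaded() is declared in kernel/trace/trace.h.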
930 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
931 size_t cnt, loff_t *ppos)
938 trace_parser_clear(parser);
940 ret = get_user(ch, ubuf++);
948 * The parser is not finished with the last write,
949 * continue reading the user input without skipping spaces.
952 /* skip white space */
953 while (cnt && isspace(ch)) {
954 ret = get_user(ch, ubuf++);
961 /* only spaces were written */
971 /* read the non-space input */
972 while (cnt && !isspace(ch)) {
973 if (parser->idx < parser->size - 1)
974 parser->buffer[parser->idx++] = ch;
979 ret = get_user(ch, ubuf++);
986 /* We either got finished input or we have to wait for another call. */
988 parser->buffer[parser->idx] = 0;
989 parser->cont = false;
990 } else if (parser->idx < parser->size - 1) {
992 parser->buffer[parser->idx++] = ch;
1005 /* TODO add a seq_buf_to_buffer() */
1006 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1010 if (trace_seq_used(s) <= s->seq.readpos)
1013 len = trace_seq_used(s) - s->seq.readpos;
1016 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1018 s->seq.readpos += cnt;
1022 unsigned long __read_mostly tracing_thresh;
1024 #ifdef CONFIG_TRACER_MAX_TRACE
1026 * Copy the new maximum trace into the separate maximum-trace
1027 * structure. (this way the maximum trace is permanently saved,
1028 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1031 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1033 struct trace_buffer *trace_buf = &tr->trace_buffer;
1034 struct trace_buffer *max_buf = &tr->max_buffer;
1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1039 max_buf->time_start = data->preempt_timestamp;
1041 max_data->saved_latency = tr->max_latency;
1042 max_data->critical_start = data->critical_start;
1043 max_data->critical_end = data->critical_end;
1045 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1046 max_data->pid = tsk->pid;
1048 * If tsk == current, then use current_uid(), as that does not use
1049 * RCU. The irq tracer can be called out of RCU scope.
1052 max_data->uid = current_uid();
1054 max_data->uid = task_uid(tsk);
1056 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1057 max_data->policy = tsk->policy;
1058 max_data->rt_priority = tsk->rt_priority;
1060 /* record this task's comm */
1061 tracing_record_cmdline(tsk);
1065 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1067 * @tsk: the task with the latency
1068 * @cpu: The cpu that initiated the trace.
1070 * Flip the buffers between the @tr and the max_tr and record information
1071 * about which task was the cause of this latency.
1074 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1076 struct ring_buffer *buf;
1081 WARN_ON_ONCE(!irqs_disabled());
1083 if (!tr->allocated_snapshot) {
1084 /* Only the nop tracer should hit this when disabling */
1085 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1089 arch_spin_lock(&tr->max_lock);
1091 buf = tr->trace_buffer.buffer;
1092 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1093 tr->max_buffer.buffer = buf;
1095 __update_max_tr(tr, tsk, cpu);
1096 arch_spin_unlock(&tr->max_lock);
1100 * update_max_tr_single - only copy one trace over, and reset the rest
1102 * @tsk: task with the latency
1103 * @cpu: the cpu of the buffer to copy.
1105 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1108 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1115 WARN_ON_ONCE(!irqs_disabled());
1116 if (!tr->allocated_snapshot) {
1117 /* Only the nop tracer should hit this when disabling */
1118 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1122 arch_spin_lock(&tr->max_lock);
1124 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1126 if (ret == -EBUSY) {
1128 * We failed to swap the buffer due to a commit taking
1129 * place on this CPU. We fail to record, but we reset
1130 * the max trace buffer (no one writes directly to it)
1131 * and flag that it failed.
1133 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1134 "Failed to swap buffers due to commit in progress\n");
1137 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1139 __update_max_tr(tr, tsk, cpu);
1140 arch_spin_unlock(&tr->max_lock);
1142 #endif /* CONFIG_TRACER_MAX_TRACE */
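/*
 * Illustrative example (not from the original source): the latency
 * tracers are the typical callers of update_max_tr(). When a tracer
 * measures a new worst-case latency it records it roughly like
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id());
 *	}
 *
 * where delta is a made-up variable holding the measured latency. The
 * max_buffer then holds the trace that produced the maximum, while live
 * tracing continues in the swapped-in buffer.
 */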
1144 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1146 /* Iterators are static, they should be filled or empty */
1147 if (trace_buffer_iter(iter, iter->cpu_file))
1150 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1154 #ifdef CONFIG_FTRACE_STARTUP_TEST
1155 static int run_tracer_selftest(struct tracer *type)
1157 struct trace_array *tr = &global_trace;
1158 struct tracer *saved_tracer = tr->current_trace;
1161 if (!type->selftest || tracing_selftest_disabled)
1165 * Run a selftest on this tracer.
1166 * Here we reset the trace buffer, and set the current
1167 * tracer to be this tracer. The tracer can then run some
1168 * internal tracing to verify that everything is in order.
1169 * If we fail, we do not register this tracer.
1171 tracing_reset_online_cpus(&tr->trace_buffer);
1173 tr->current_trace = type;
1175 #ifdef CONFIG_TRACER_MAX_TRACE
1176 if (type->use_max_tr) {
1177 /* If we expanded the buffers, make sure the max is expanded too */
1178 if (ring_buffer_expanded)
1179 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1180 RING_BUFFER_ALL_CPUS);
1181 tr->allocated_snapshot = true;
1185 /* the test is responsible for initializing and enabling */
1186 pr_info("Testing tracer %s: ", type->name);
1187 ret = type->selftest(type, tr);
1188 /* the test is responsible for resetting too */
1189 tr->current_trace = saved_tracer;
1191 printk(KERN_CONT "FAILED!\n");
1192 /* Add the warning after printing 'FAILED' */
1196 /* Only reset on passing, to avoid touching corrupted buffers */
1197 tracing_reset_online_cpus(&tr->trace_buffer);
1199 #ifdef CONFIG_TRACER_MAX_TRACE
1200 if (type->use_max_tr) {
1201 tr->allocated_snapshot = false;
1203 /* Shrink the max buffer again */
1204 if (ring_buffer_expanded)
1205 ring_buffer_resize(tr->max_buffer.buffer, 1,
1206 RING_BUFFER_ALL_CPUS);
1210 printk(KERN_CONT "PASSED\n");
1214 static inline int run_tracer_selftest(struct tracer *type)
1218 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1220 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1222 static void __init apply_trace_boot_options(void);
1225 * register_tracer - register a tracer with the ftrace system.
1226 * @type: the plugin for the tracer
1228 * Register a new plugin tracer.
1230 int __init register_tracer(struct tracer *type)
1236 pr_info("Tracer must have a name\n");
1240 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1241 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1245 mutex_lock(&trace_types_lock);
1247 tracing_selftest_running = true;
1249 for (t = trace_types; t; t = t->next) {
1250 if (strcmp(type->name, t->name) == 0) {
1252 pr_info("Tracer %s already registered\n",
1259 if (!type->set_flag)
1260 type->set_flag = &dummy_set_flag;
1262 type->flags = &dummy_tracer_flags;
1264 if (!type->flags->opts)
1265 type->flags->opts = dummy_tracer_opt;
1267 ret = run_tracer_selftest(type);
1271 type->next = trace_types;
1273 add_tracer_options(&global_trace, type);
1276 tracing_selftest_running = false;
1277 mutex_unlock(&trace_types_lock);
1279 if (ret || !default_bootup_tracer)
1282 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1285 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1286 /* Do we want this tracer to start on bootup? */
1287 tracing_set_tracer(&global_trace, type->name);
1288 default_bootup_tracer = NULL;
1290 apply_trace_boot_options();
1292 /* disable other selftests, since this will break it. */
1293 tracing_selftest_disabled = true;
1294 #ifdef CONFIG_FTRACE_STARTUP_TEST
1295 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1303 void tracing_reset(struct trace_buffer *buf, int cpu)
1305 struct ring_buffer *buffer = buf->buffer;
1310 ring_buffer_record_disable(buffer);
1312 /* Make sure all commits have finished */
1313 synchronize_sched();
1314 ring_buffer_reset_cpu(buffer, cpu);
1316 ring_buffer_record_enable(buffer);
1319 void tracing_reset_online_cpus(struct trace_buffer *buf)
1321 struct ring_buffer *buffer = buf->buffer;
1327 ring_buffer_record_disable(buffer);
1329 /* Make sure all commits have finished */
1330 synchronize_sched();
1332 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1334 for_each_online_cpu(cpu)
1335 ring_buffer_reset_cpu(buffer, cpu);
1337 ring_buffer_record_enable(buffer);
1340 /* Must have trace_types_lock held */
1341 void tracing_reset_all_online_cpus(void)
1343 struct trace_array *tr;
1345 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1346 tracing_reset_online_cpus(&tr->trace_buffer);
1347 #ifdef CONFIG_TRACER_MAX_TRACE
1348 tracing_reset_online_cpus(&tr->max_buffer);
1353 #define SAVED_CMDLINES_DEFAULT 128
1354 #define NO_CMDLINE_MAP UINT_MAX
1355 static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
1356 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1357 struct saved_cmdlines_buffer {
1358 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1359 unsigned *map_cmdline_to_pid;
1360 unsigned cmdline_num;
1362 char *saved_cmdlines;
1364 static struct saved_cmdlines_buffer *savedcmd;
1366 /* temporarily disable recording */
1367 static atomic_t trace_record_cmdline_disabled __read_mostly;
1369 static inline char *get_saved_cmdlines(int idx)
1371 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1374 static inline void set_cmdline(int idx, const char *cmdline)
1376 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1379 static int allocate_cmdlines_buffer(unsigned int val,
1380 struct saved_cmdlines_buffer *s)
1382 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1384 if (!s->map_cmdline_to_pid)
1387 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1388 if (!s->saved_cmdlines) {
1389 kfree(s->map_cmdline_to_pid);
1394 s->cmdline_num = val;
1395 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1396 sizeof(s->map_pid_to_cmdline));
1397 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1398 val * sizeof(*s->map_cmdline_to_pid));
1403 static int trace_create_savedcmd(void)
1407 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1411 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1421 int is_tracing_stopped(void)
1423 return global_trace.stop_count;
1427 * tracing_start - quick start of the tracer
1429 * If tracing is enabled but was stopped by tracing_stop,
1430 * this will start the tracer back up.
1432 void tracing_start(void)
1434 struct ring_buffer *buffer;
1435 unsigned long flags;
1437 if (tracing_disabled)
1440 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1441 if (--global_trace.stop_count) {
1442 if (global_trace.stop_count < 0) {
1443 /* Someone screwed up their debugging */
1445 global_trace.stop_count = 0;
1450 /* Prevent the buffers from switching */
1451 arch_spin_lock(&global_trace.max_lock);
1453 buffer = global_trace.trace_buffer.buffer;
1455 ring_buffer_record_enable(buffer);
1457 #ifdef CONFIG_TRACER_MAX_TRACE
1458 buffer = global_trace.max_buffer.buffer;
1460 ring_buffer_record_enable(buffer);
1463 arch_spin_unlock(&global_trace.max_lock);
1466 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1469 static void tracing_start_tr(struct trace_array *tr)
1471 struct ring_buffer *buffer;
1472 unsigned long flags;
1474 if (tracing_disabled)
1477 /* If global, we need to also start the max tracer */
1478 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1479 return tracing_start();
1481 raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 if (--tr->stop_count) {
1484 if (tr->stop_count < 0) {
1485 /* Someone screwed up their debugging */
1492 buffer = tr->trace_buffer.buffer;
1494 ring_buffer_record_enable(buffer);
1497 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1501 * tracing_stop - quick stop of the tracer
1503 * Light weight way to stop tracing. Use in conjunction with
1506 void tracing_stop(void)
1508 struct ring_buffer *buffer;
1509 unsigned long flags;
1511 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1512 if (global_trace.stop_count++)
1515 /* Prevent the buffers from switching */
1516 arch_spin_lock(&global_trace.max_lock);
1518 buffer = global_trace.trace_buffer.buffer;
1520 ring_buffer_record_disable(buffer);
1522 #ifdef CONFIG_TRACER_MAX_TRACE
1523 buffer = global_trace.max_buffer.buffer;
1525 ring_buffer_record_disable(buffer);
1528 arch_spin_unlock(&global_trace.max_lock);
1531 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1534 static void tracing_stop_tr(struct trace_array *tr)
1536 struct ring_buffer *buffer;
1537 unsigned long flags;
1539 /* If global, we need to also stop the max tracer */
1540 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1541 return tracing_stop();
1543 raw_spin_lock_irqsave(&tr->start_lock, flags);
1544 if (tr->stop_count++)
1547 buffer = tr->trace_buffer.buffer;
1549 ring_buffer_record_disable(buffer);
1552 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1555 void trace_stop_cmdline_recording(void);
1557 static int trace_save_cmdline(struct task_struct *tsk)
1561 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1565 * It's not the end of the world if we don't get
1566 * the lock, but we also don't want to spin
1567 * nor do we want to disable interrupts,
1568 * so if we miss here, then better luck next time.
1570 if (!arch_spin_trylock(&trace_cmdline_lock))
1573 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1574 if (idx == NO_CMDLINE_MAP) {
1575 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1578 * Check whether the cmdline buffer at idx has a pid
1579 * mapped. We are going to overwrite that entry so we
1580 * need to clear the map_pid_to_cmdline. Otherwise we
1581 * would read the new comm for the old pid.
1583 pid = savedcmd->map_cmdline_to_pid[idx];
1584 if (pid != NO_CMDLINE_MAP)
1585 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1587 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1588 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1590 savedcmd->cmdline_idx = idx;
1593 set_cmdline(idx, tsk->comm);
1594 saved_tgids[idx] = tsk->tgid;
1595 arch_spin_unlock(&trace_cmdline_lock);
1600 static void __trace_find_cmdline(int pid, char comm[])
1605 strcpy(comm, "<idle>");
1609 if (WARN_ON_ONCE(pid < 0)) {
1610 strcpy(comm, "<XXX>");
1614 if (pid > PID_MAX_DEFAULT) {
1615 strcpy(comm, "<...>");
1619 map = savedcmd->map_pid_to_cmdline[pid];
1620 if (map != NO_CMDLINE_MAP)
1621 strcpy(comm, get_saved_cmdlines(map));
1623 strcpy(comm, "<...>");
1626 void trace_find_cmdline(int pid, char comm[])
1629 arch_spin_lock(&trace_cmdline_lock);
1631 __trace_find_cmdline(pid, comm);
1633 arch_spin_unlock(&trace_cmdline_lock);
1637 int trace_find_tgid(int pid)
1643 arch_spin_lock(&trace_cmdline_lock);
1644 map = savedcmd->map_pid_to_cmdline[pid];
1645 if (map != NO_CMDLINE_MAP)
1646 tgid = saved_tgids[map];
1650 arch_spin_unlock(&trace_cmdline_lock);
1656 void tracing_record_cmdline(struct task_struct *tsk)
1658 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1661 if (!__this_cpu_read(trace_cmdline_save))
1664 if (trace_save_cmdline(tsk))
1665 __this_cpu_write(trace_cmdline_save, false);
1669 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1672 struct task_struct *tsk = current;
1674 entry->preempt_count = pc & 0xff;
1675 entry->pid = (tsk) ? tsk->pid : 0;
1677 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1678 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1680 TRACE_FLAG_IRQS_NOSUPPORT |
1682 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1683 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1684 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1685 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1687 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1689 struct ring_buffer_event *
1690 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1693 unsigned long flags, int pc)
1695 struct ring_buffer_event *event;
1697 event = ring_buffer_lock_reserve(buffer, len);
1698 if (event != NULL) {
1699 struct trace_entry *ent = ring_buffer_event_data(event);
1701 tracing_generic_entry_update(ent, flags, pc);
1709 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1711 __this_cpu_write(trace_cmdline_save, true);
1712 ring_buffer_unlock_commit(buffer, event);
1715 void trace_buffer_unlock_commit(struct trace_array *tr,
1716 struct ring_buffer *buffer,
1717 struct ring_buffer_event *event,
1718 unsigned long flags, int pc)
1720 __buffer_unlock_commit(buffer, event);
1722 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1723 ftrace_trace_userstack(buffer, flags, pc);
1725 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1727 static struct ring_buffer *temp_buffer;
1729 struct ring_buffer_event *
1730 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1731 struct trace_event_file *trace_file,
1732 int type, unsigned long len,
1733 unsigned long flags, int pc)
1735 struct ring_buffer_event *entry;
1737 *current_rb = trace_file->tr->trace_buffer.buffer;
1738 entry = trace_buffer_lock_reserve(*current_rb,
1739 type, len, flags, pc);
1741 * If tracing is off, but we have triggers enabled,
1742 * we still need to look at the event data. Use the temp_buffer
1743 * to store the trace event for the trigger to use. It's recursion
1744 * safe and will not be recorded anywhere.
1746 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1747 *current_rb = temp_buffer;
1748 entry = trace_buffer_lock_reserve(*current_rb,
1749 type, len, flags, pc);
1753 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1755 struct ring_buffer_event *
1756 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1757 int type, unsigned long len,
1758 unsigned long flags, int pc)
1760 *current_rb = global_trace.trace_buffer.buffer;
1761 return trace_buffer_lock_reserve(*current_rb,
1762 type, len, flags, pc);
1764 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1766 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1767 struct ring_buffer *buffer,
1768 struct ring_buffer_event *event,
1769 unsigned long flags, int pc,
1770 struct pt_regs *regs)
1772 __buffer_unlock_commit(buffer, event);
1774 ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
1775 ftrace_trace_userstack(buffer, flags, pc);
1777 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1779 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1780 struct ring_buffer_event *event)
1782 ring_buffer_discard_commit(buffer, event);
1784 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1787 trace_function(struct trace_array *tr,
1788 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1791 struct trace_event_call *call = &event_function;
1792 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1793 struct ring_buffer_event *event;
1794 struct ftrace_entry *entry;
1796 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1800 entry = ring_buffer_event_data(event);
1802 entry->parent_ip = parent_ip;
1804 if (!call_filter_check_discard(call, entry, buffer, event))
1805 __buffer_unlock_commit(buffer, event);
1808 #ifdef CONFIG_STACKTRACE
1810 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1811 struct ftrace_stack {
1812 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1815 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1816 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1818 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1819 unsigned long flags,
1820 int skip, int pc, struct pt_regs *regs)
1822 struct trace_event_call *call = &event_kernel_stack;
1823 struct ring_buffer_event *event;
1824 struct stack_entry *entry;
1825 struct stack_trace trace;
1827 int size = FTRACE_STACK_ENTRIES;
1829 trace.nr_entries = 0;
1833 * Since events can happen in NMIs there's no safe way to
1834 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1835 * or NMI comes in, it will just have to use the default
1836 * FTRACE_STACK_SIZE.
1838 preempt_disable_notrace();
1840 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1842 * We don't need any atomic variables, just a barrier.
1843 * If an interrupt comes in, we don't care, because it would
1844 * have exited and put the counter back to what we want.
1845 * We just need a barrier to keep gcc from moving things
1849 if (use_stack == 1) {
1850 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1851 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1854 save_stack_trace_regs(regs, &trace);
1856 save_stack_trace(&trace);
1858 if (trace.nr_entries > size)
1859 size = trace.nr_entries;
1861 /* From now on, use_stack is a boolean */
1864 size *= sizeof(unsigned long);
1866 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1867 sizeof(*entry) + size, flags, pc);
1870 entry = ring_buffer_event_data(event);
1872 memset(&entry->caller, 0, size);
1875 memcpy(&entry->caller, trace.entries,
1876 trace.nr_entries * sizeof(unsigned long));
1878 trace.max_entries = FTRACE_STACK_ENTRIES;
1879 trace.entries = entry->caller;
1881 save_stack_trace_regs(regs, &trace);
1883 save_stack_trace(&trace);
1886 entry->size = trace.nr_entries;
1888 if (!call_filter_check_discard(call, entry, buffer, event))
1889 __buffer_unlock_commit(buffer, event);
1892 /* Again, don't let gcc optimize things here */
1894 __this_cpu_dec(ftrace_stack_reserve);
1895 preempt_enable_notrace();
1899 static inline void ftrace_trace_stack(struct trace_array *tr,
1900 struct ring_buffer *buffer,
1901 unsigned long flags,
1902 int skip, int pc, struct pt_regs *regs)
1904 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1907 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1910 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1913 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1917 * trace_dump_stack - record a stack back trace in the trace buffer
1918 * @skip: Number of functions to skip (helper handlers)
1920 void trace_dump_stack(int skip)
1922 unsigned long flags;
1924 if (tracing_disabled || tracing_selftest_running)
1927 local_save_flags(flags);
1930 * Skip 3 more, seems to get us at the caller of
1934 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1935 flags, skip, preempt_count(), NULL);
1938 static DEFINE_PER_CPU(int, user_stack_count);
1941 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1943 struct trace_event_call *call = &event_user_stack;
1944 struct ring_buffer_event *event;
1945 struct userstack_entry *entry;
1946 struct stack_trace trace;
1948 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1952 * NMIs can not handle page faults, even with fix-ups.
1953 * Saving the user stack can (and often does) fault.
1955 if (unlikely(in_nmi()))
1959 * prevent recursion, since the user stack tracing may
1960 * trigger other kernel events.
1963 if (__this_cpu_read(user_stack_count))
1966 __this_cpu_inc(user_stack_count);
1968 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1969 sizeof(*entry), flags, pc);
1971 goto out_drop_count;
1972 entry = ring_buffer_event_data(event);
1974 entry->tgid = current->tgid;
1975 memset(&entry->caller, 0, sizeof(entry->caller));
1977 trace.nr_entries = 0;
1978 trace.max_entries = FTRACE_STACK_ENTRIES;
1980 trace.entries = entry->caller;
1982 save_stack_trace_user(&trace);
1983 if (!call_filter_check_discard(call, entry, buffer, event))
1984 __buffer_unlock_commit(buffer, event);
1987 __this_cpu_dec(user_stack_count);
1993 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1995 ftrace_trace_userstack(tr, flags, preempt_count());
1999 #endif /* CONFIG_STACKTRACE */
2001 /* created for use with alloc_percpu */
2002 struct trace_buffer_struct {
2003 char buffer[TRACE_BUF_SIZE];
2006 static struct trace_buffer_struct *trace_percpu_buffer;
2007 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
2008 static struct trace_buffer_struct *trace_percpu_irq_buffer;
2009 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
2012 * The buffer used is dependent on the context. There is a per cpu
2013 * buffer for normal context, softirq context, hard irq context and
2014 * for NMI context. This allows for lockless recording.
2016 * Note, if the buffers failed to be allocated, then this returns NULL
2018 static char *get_trace_buf(void)
2020 struct trace_buffer_struct *percpu_buffer;
2023 * If we have allocated per cpu buffers, then we do not
2024 * need to do any locking.
2027 percpu_buffer = trace_percpu_nmi_buffer;
2029 percpu_buffer = trace_percpu_irq_buffer;
2030 else if (in_softirq())
2031 percpu_buffer = trace_percpu_sirq_buffer;
2033 percpu_buffer = trace_percpu_buffer;
2038 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2041 static int alloc_percpu_trace_buffer(void)
2043 struct trace_buffer_struct *buffers;
2044 struct trace_buffer_struct *sirq_buffers;
2045 struct trace_buffer_struct *irq_buffers;
2046 struct trace_buffer_struct *nmi_buffers;
2048 buffers = alloc_percpu(struct trace_buffer_struct);
2052 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2056 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2060 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2064 trace_percpu_buffer = buffers;
2065 trace_percpu_sirq_buffer = sirq_buffers;
2066 trace_percpu_irq_buffer = irq_buffers;
2067 trace_percpu_nmi_buffer = nmi_buffers;
2072 free_percpu(irq_buffers);
2074 free_percpu(sirq_buffers);
2076 free_percpu(buffers);
2078 WARN(1, "Could not allocate percpu trace_printk buffer");
2082 static int buffers_allocated;
2084 void trace_printk_init_buffers(void)
2086 if (buffers_allocated)
2089 if (alloc_percpu_trace_buffer())
2092 /* trace_printk() is for debug use only. Don't use it in production. */
2095 pr_warning("**********************************************************\n");
2096 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2097 pr_warning("** **\n");
2098 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2099 pr_warning("** **\n");
2100 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2101 pr_warning("** unsafe for production use. **\n");
2102 pr_warning("** **\n");
2103 pr_warning("** If you see this message and you are not debugging **\n");
2104 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2105 pr_warning("** **\n");
2106 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2107 pr_warning("**********************************************************\n");
2109 /* Expand the buffers to set size */
2110 tracing_update_buffers();
2112 buffers_allocated = 1;
2115 * trace_printk_init_buffers() can be called by modules.
2116 * If that happens, then we need to start cmdline recording
2117 * directly here. If the global_trace.buffer is already
2118 * allocated here, then this was called by module code.
2120 if (global_trace.trace_buffer.buffer)
2121 tracing_start_cmdline_record();
2124 void trace_printk_start_comm(void)
2126 /* Start tracing comms if trace printk is set */
2127 if (!buffers_allocated)
2129 tracing_start_cmdline_record();
2132 static void trace_printk_start_stop_comm(int enabled)
2134 if (!buffers_allocated)
2138 tracing_start_cmdline_record();
2140 tracing_stop_cmdline_record();
2144 * trace_vbprintk - write binary msg to tracing buffer
2147 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2149 struct trace_event_call *call = &event_bprint;
2150 struct ring_buffer_event *event;
2151 struct ring_buffer *buffer;
2152 struct trace_array *tr = &global_trace;
2153 struct bprint_entry *entry;
2154 unsigned long flags;
2156 int len = 0, size, pc;
2158 if (unlikely(tracing_selftest_running || tracing_disabled))
2161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2164 pc = preempt_count();
2165 preempt_disable_notrace();
2167 tbuffer = get_trace_buf();
2173 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2175 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2178 local_save_flags(flags);
2179 size = sizeof(*entry) + sizeof(u32) * len;
2180 buffer = tr->trace_buffer.buffer;
2181 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2185 entry = ring_buffer_event_data(event);
2189 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2190 if (!call_filter_check_discard(call, entry, buffer, event)) {
2191 __buffer_unlock_commit(buffer, event);
2192 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2196 preempt_enable_notrace();
2197 unpause_graph_tracing();
2201 EXPORT_SYMBOL_GPL(trace_vbprintk);
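/*
 * Illustrative note (not part of the original file): trace_vbprintk() is
 * the backend of the binary form of trace_printk(). A debug statement
 * such as
 *
 *	trace_printk("page %p refcount %d\n", page, count);
 *
 * stores only the format pointer and the binary arguments in the ring
 * buffer; the string is rendered later, when the trace is read. As the
 * banner in trace_printk_init_buffers() says, this is for debugging only.
 */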
2204 __trace_array_vprintk(struct ring_buffer *buffer,
2205 unsigned long ip, const char *fmt, va_list args)
2207 struct trace_event_call *call = &event_print;
2208 struct ring_buffer_event *event;
2209 int len = 0, size, pc;
2210 struct print_entry *entry;
2211 unsigned long flags;
2214 if (tracing_disabled || tracing_selftest_running)
2217 /* Don't pollute graph traces with trace_vprintk internals */
2218 pause_graph_tracing();
2220 pc = preempt_count();
2221 preempt_disable_notrace();
2224 tbuffer = get_trace_buf();
2230 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2232 local_save_flags(flags);
2233 size = sizeof(*entry) + len + 1;
2234 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2238 entry = ring_buffer_event_data(event);
2241 memcpy(&entry->buf, tbuffer, len + 1);
2242 if (!call_filter_check_discard(call, entry, buffer, event)) {
2243 __buffer_unlock_commit(buffer, event);
2244 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2247 preempt_enable_notrace();
2248 unpause_graph_tracing();
2253 int trace_array_vprintk(struct trace_array *tr,
2254 unsigned long ip, const char *fmt, va_list args)
2256 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2259 int trace_array_printk(struct trace_array *tr,
2260 unsigned long ip, const char *fmt, ...)
2265 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2269 ret = trace_array_vprintk(tr, ip, fmt, ap);
2274 int trace_array_printk_buf(struct ring_buffer *buffer,
2275 unsigned long ip, const char *fmt, ...)
2280 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2284 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2289 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2291 return trace_array_vprintk(&global_trace, ip, fmt, args);
2293 EXPORT_SYMBOL_GPL(trace_vprintk);
2295 static void trace_iterator_increment(struct trace_iterator *iter)
2297 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2301 ring_buffer_read(buf_iter, NULL);
2304 static struct trace_entry *
2305 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2306 unsigned long *lost_events)
2308 struct ring_buffer_event *event;
2309 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2312 event = ring_buffer_iter_peek(buf_iter, ts);
2314 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2318 iter->ent_size = ring_buffer_event_length(event);
2319 return ring_buffer_event_data(event);
2325 static struct trace_entry *
2326 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2327 unsigned long *missing_events, u64 *ent_ts)
2329 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2330 struct trace_entry *ent, *next = NULL;
2331 unsigned long lost_events = 0, next_lost = 0;
2332 int cpu_file = iter->cpu_file;
2333 u64 next_ts = 0, ts;
2339 * If we are in a per_cpu trace file, don't bother iterating over
2340 * all CPUs; peek directly.
2342 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2343 if (ring_buffer_empty_cpu(buffer, cpu_file))
2345 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2347 *ent_cpu = cpu_file;
2352 for_each_tracing_cpu(cpu) {
2354 if (ring_buffer_empty_cpu(buffer, cpu))
2357 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2360 * Pick the entry with the smallest timestamp:
2362 if (ent && (!next || ts < next_ts)) {
2366 next_lost = lost_events;
2367 next_size = iter->ent_size;
2371 iter->ent_size = next_size;
2374 *ent_cpu = next_cpu;
2380 *missing_events = next_lost;
2385 /* Find the next real entry, without updating the iterator itself */
2386 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2387 int *ent_cpu, u64 *ent_ts)
2389 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2392 /* Find the next real entry, and increment the iterator to the next entry */
2393 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2395 iter->ent = __find_next_entry(iter, &iter->cpu,
2396 &iter->lost_events, &iter->ts);
2399 trace_iterator_increment(iter);
2401 return iter->ent ? iter : NULL;
2404 static void trace_consume(struct trace_iterator *iter)
2406 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2407 &iter->lost_events);
2410 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2412 struct trace_iterator *iter = m->private;
2416 WARN_ON_ONCE(iter->leftover);
2420 /* can't go backwards */
2425 ent = trace_find_next_entry_inc(iter);
2429 while (ent && iter->idx < i)
2430 ent = trace_find_next_entry_inc(iter);
2437 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2439 struct ring_buffer_event *event;
2440 struct ring_buffer_iter *buf_iter;
2441 unsigned long entries = 0;
2444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2446 buf_iter = trace_buffer_iter(iter, cpu);
2450 ring_buffer_iter_reset(buf_iter);
2453 * We could have the case with the max latency tracers
2454 * that a reset never took place on a cpu. This is evident
2455 * by the timestamp being before the start of the buffer.
2457 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2458 if (ts >= iter->trace_buffer->time_start)
2461 ring_buffer_read(buf_iter, NULL);
2464 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2468 * The current tracer is copied to avoid a global locking
2471 static void *s_start(struct seq_file *m, loff_t *pos)
2473 struct trace_iterator *iter = m->private;
2474 struct trace_array *tr = iter->tr;
2475 int cpu_file = iter->cpu_file;
2481 * copy the tracer to avoid using a global lock all around.
2482 * iter->trace is a copy of current_trace, the pointer to the
2483 * name may be used instead of a strcmp(), as iter->trace->name
2484 * will point to the same string as current_trace->name.
2486 mutex_lock(&trace_types_lock);
2487 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2488 *iter->trace = *tr->current_trace;
2489 mutex_unlock(&trace_types_lock);
2491 #ifdef CONFIG_TRACER_MAX_TRACE
2492 if (iter->snapshot && iter->trace->use_max_tr)
2493 return ERR_PTR(-EBUSY);
2496 if (!iter->snapshot)
2497 atomic_inc(&trace_record_cmdline_disabled);
2499 if (*pos != iter->pos) {
2504 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2505 for_each_tracing_cpu(cpu)
2506 tracing_iter_reset(iter, cpu);
2508 tracing_iter_reset(iter, cpu_file);
2511 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2516 * If we overflowed the seq_file before, then we want
2517 * to just reuse the trace_seq buffer again.
2523 p = s_next(m, p, &l);
2527 trace_event_read_lock();
2528 trace_access_lock(cpu_file);
2532 static void s_stop(struct seq_file *m, void *p)
2534 struct trace_iterator *iter = m->private;
2536 #ifdef CONFIG_TRACER_MAX_TRACE
2537 if (iter->snapshot && iter->trace->use_max_tr)
2541 if (!iter->snapshot)
2542 atomic_dec(&trace_record_cmdline_disabled);
2544 trace_access_unlock(iter->cpu_file);
2545 trace_event_read_unlock();
2549 get_total_entries(struct trace_buffer *buf,
2550 unsigned long *total, unsigned long *entries)
2552 unsigned long count;
2558 for_each_tracing_cpu(cpu) {
2559 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2561 * If this buffer has skipped entries, then we hold all
2562 * entries for the trace and we need to ignore the
2563 * ones before the time stamp.
2565 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2566 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2567 /* total is the same as the entries */
2571 ring_buffer_overrun_cpu(buf->buffer, cpu);
2576 static void print_lat_help_header(struct seq_file *m)
2578 seq_puts(m, "# _------=> CPU# \n"
2579 "# / _-----=> irqs-off \n"
2580 "# | / _----=> need-resched \n"
2581 "# || / _---=> hardirq/softirq \n"
2582 "# ||| / _--=> preempt-depth \n"
2584 "# cmd pid ||||| time | caller \n"
2585 "# \\ / ||||| \\ | / \n");
2588 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2590 unsigned long total;
2591 unsigned long entries;
2593 get_total_entries(buf, &total, &entries);
2594 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2595 entries, total, num_online_cpus());
2599 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2601 print_event_info(buf, m);
2602 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2606 static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
2608 print_event_info(buf, m);
2609 seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n");
2610 seq_puts(m, "# | | | | | |\n");
2613 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2615 print_event_info(buf, m);
2616 seq_puts(m, "# _-----=> irqs-off\n"
2617 "# / _----=> need-resched\n"
2618 "# | / _---=> hardirq/softirq\n"
2619 "# || / _--=> preempt-depth\n"
2621 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2622 "# | | | |||| | |\n");
2625 static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
2627 print_event_info(buf, m);
2628 seq_puts(m, "# _-----=> irqs-off\n");
2629 seq_puts(m, "# / _----=> need-resched\n");
2630 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2631 seq_puts(m, "# || / _--=> preempt-depth\n");
2632 seq_puts(m, "# ||| / delay\n");
2633 seq_puts(m, "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n");
2634 seq_puts(m, "# | | | | |||| | |\n");
2638 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2640 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2641 struct trace_buffer *buf = iter->trace_buffer;
2642 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2643 struct tracer *type = iter->trace;
2644 unsigned long entries;
2645 unsigned long total;
2646 const char *name = "preemption";
2650 get_total_entries(buf, &total, &entries);
2652 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2654 seq_puts(m, "# -----------------------------------"
2655 "---------------------------------\n");
2656 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2657 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2658 nsecs_to_usecs(data->saved_latency),
2662 #if defined(CONFIG_PREEMPT_NONE)
2664 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2666 #elif defined(CONFIG_PREEMPT)
2671 /* These are reserved for later use */
2674 seq_printf(m, " #P:%d)\n", num_online_cpus());
2678 seq_puts(m, "# -----------------\n");
2679 seq_printf(m, "# | task: %.16s-%d "
2680 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2681 data->comm, data->pid,
2682 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2683 data->policy, data->rt_priority);
2684 seq_puts(m, "# -----------------\n");
2686 if (data->critical_start) {
2687 seq_puts(m, "# => started at: ");
2688 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2689 trace_print_seq(m, &iter->seq);
2690 seq_puts(m, "\n# => ended at: ");
2691 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2692 trace_print_seq(m, &iter->seq);
2693 seq_puts(m, "\n#\n");
2699 static void test_cpu_buff_start(struct trace_iterator *iter)
2701 struct trace_seq *s = &iter->seq;
2702 struct trace_array *tr = iter->tr;
2704 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2707 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2710 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2713 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2717 cpumask_set_cpu(iter->cpu, iter->started);
2719 /* Don't print started cpu buffer for the first entry of the trace */
2721 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2725 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2727 struct trace_array *tr = iter->tr;
2728 struct trace_seq *s = &iter->seq;
2729 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2730 struct trace_entry *entry;
2731 struct trace_event *event;
2735 test_cpu_buff_start(iter);
2737 event = ftrace_find_event(entry->type);
2739 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2740 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2741 trace_print_lat_context(iter);
2743 trace_print_context(iter);
2746 if (trace_seq_has_overflowed(s))
2747 return TRACE_TYPE_PARTIAL_LINE;
2750 return event->funcs->trace(iter, sym_flags, event);
2752 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2754 return trace_handle_return(s);
2757 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2759 struct trace_array *tr = iter->tr;
2760 struct trace_seq *s = &iter->seq;
2761 struct trace_entry *entry;
2762 struct trace_event *event;
2766 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2767 trace_seq_printf(s, "%d %d %llu ",
2768 entry->pid, iter->cpu, iter->ts);
2770 if (trace_seq_has_overflowed(s))
2771 return TRACE_TYPE_PARTIAL_LINE;
2773 event = ftrace_find_event(entry->type);
2775 return event->funcs->raw(iter, 0, event);
2777 trace_seq_printf(s, "%d ?\n", entry->type);
2779 return trace_handle_return(s);
2782 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2784 struct trace_array *tr = iter->tr;
2785 struct trace_seq *s = &iter->seq;
2786 unsigned char newline = '\n';
2787 struct trace_entry *entry;
2788 struct trace_event *event;
2792 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2793 SEQ_PUT_HEX_FIELD(s, entry->pid);
2794 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2795 SEQ_PUT_HEX_FIELD(s, iter->ts);
2796 if (trace_seq_has_overflowed(s))
2797 return TRACE_TYPE_PARTIAL_LINE;
2800 event = ftrace_find_event(entry->type);
2802 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2803 if (ret != TRACE_TYPE_HANDLED)
2807 SEQ_PUT_FIELD(s, newline);
2809 return trace_handle_return(s);
2812 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2814 struct trace_array *tr = iter->tr;
2815 struct trace_seq *s = &iter->seq;
2816 struct trace_entry *entry;
2817 struct trace_event *event;
2821 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2822 SEQ_PUT_FIELD(s, entry->pid);
2823 SEQ_PUT_FIELD(s, iter->cpu);
2824 SEQ_PUT_FIELD(s, iter->ts);
2825 if (trace_seq_has_overflowed(s))
2826 return TRACE_TYPE_PARTIAL_LINE;
2829 event = ftrace_find_event(entry->type);
2830 return event ? event->funcs->binary(iter, 0, event) :
2834 int trace_empty(struct trace_iterator *iter)
2836 struct ring_buffer_iter *buf_iter;
2839 /* If we are looking at one CPU buffer, only check that one */
2840 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2841 cpu = iter->cpu_file;
2842 buf_iter = trace_buffer_iter(iter, cpu);
2844 if (!ring_buffer_iter_empty(buf_iter))
2847 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2853 for_each_tracing_cpu(cpu) {
2854 buf_iter = trace_buffer_iter(iter, cpu);
2856 if (!ring_buffer_iter_empty(buf_iter))
2859 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2867 /* Called with trace_event_read_lock() held. */
2868 enum print_line_t print_trace_line(struct trace_iterator *iter)
2870 struct trace_array *tr = iter->tr;
2871 unsigned long trace_flags = tr->trace_flags;
2872 enum print_line_t ret;
2874 if (iter->lost_events) {
2875 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2876 iter->cpu, iter->lost_events);
2877 if (trace_seq_has_overflowed(&iter->seq))
2878 return TRACE_TYPE_PARTIAL_LINE;
2881 if (iter->trace && iter->trace->print_line) {
2882 ret = iter->trace->print_line(iter);
2883 if (ret != TRACE_TYPE_UNHANDLED)
2887 if (iter->ent->type == TRACE_BPUTS &&
2888 trace_flags & TRACE_ITER_PRINTK &&
2889 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2890 return trace_print_bputs_msg_only(iter);
2892 if (iter->ent->type == TRACE_BPRINT &&
2893 trace_flags & TRACE_ITER_PRINTK &&
2894 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2895 return trace_print_bprintk_msg_only(iter);
2897 if (iter->ent->type == TRACE_PRINT &&
2898 trace_flags & TRACE_ITER_PRINTK &&
2899 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2900 return trace_print_printk_msg_only(iter);
2902 if (trace_flags & TRACE_ITER_BIN)
2903 return print_bin_fmt(iter);
2905 if (trace_flags & TRACE_ITER_HEX)
2906 return print_hex_fmt(iter);
2908 if (trace_flags & TRACE_ITER_RAW)
2909 return print_raw_fmt(iter);
2911 return print_trace_fmt(iter);
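/*
 * Dispatch order above: a tracer-specific print_line() wins, then the
 * printk msg-only shortcuts, then the bin/hex/raw output modes selected
 * through trace_options, and finally the default human readable format.
 */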
2914 void trace_latency_header(struct seq_file *m)
2916 struct trace_iterator *iter = m->private;
2917 struct trace_array *tr = iter->tr;
2919 /* print nothing if the buffers are empty */
2920 if (trace_empty(iter))
2923 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2924 print_trace_header(m, iter);
2926 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2927 print_lat_help_header(m);
2930 void trace_default_header(struct seq_file *m)
2932 struct trace_iterator *iter = m->private;
2933 struct trace_array *tr = iter->tr;
2934 unsigned long trace_flags = tr->trace_flags;
2936 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2939 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2940 /* print nothing if the buffers are empty */
2941 if (trace_empty(iter))
2943 print_trace_header(m, iter);
2944 if (!(trace_flags & TRACE_ITER_VERBOSE))
2945 print_lat_help_header(m);
2947 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2948 if (trace_flags & TRACE_ITER_IRQ_INFO)
2949 if (trace_flags & TRACE_ITER_TGID)
2950 print_func_help_header_irq_tgid(iter->trace_buffer, m);
2952 print_func_help_header_irq(iter->trace_buffer, m);
2954 if (trace_flags & TRACE_ITER_TGID)
2955 print_func_help_header_tgid(iter->trace_buffer, m);
2957 print_func_help_header(iter->trace_buffer, m);
2962 static void test_ftrace_alive(struct seq_file *m)
2964 if (!ftrace_is_dead())
2966 seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
2967 "# MAY BE MISSING FUNCTION EVENTS\n");
2970 #ifdef CONFIG_TRACER_MAX_TRACE
2971 static void show_snapshot_main_help(struct seq_file *m)
2973 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2974 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2975 "# Takes a snapshot of the main buffer.\n"
2976 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2977 "# (Doesn't have to be '2' works with any number that\n"
2978 "# is not a '0' or '1')\n");
2981 static void show_snapshot_percpu_help(struct seq_file *m)
2983 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2984 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2985 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2986 "# Takes a snapshot of the main buffer for this cpu.\n");
2988 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2989 "# Must use main snapshot file to allocate.\n");
2991 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2992 "# (Doesn't have to be '2' works with any number that\n"
2993 "# is not a '0' or '1')\n");
2996 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2998 if (iter->tr->allocated_snapshot)
2999 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3001 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3003 seq_puts(m, "# Snapshot commands:\n");
3004 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3005 show_snapshot_main_help(m);
3007 show_snapshot_percpu_help(m);
3010 /* Should never be called */
3011 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3014 static int s_show(struct seq_file *m, void *v)
3016 struct trace_iterator *iter = v;
3019 if (iter->ent == NULL) {
3021 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3023 test_ftrace_alive(m);
3025 if (iter->snapshot && trace_empty(iter))
3026 print_snapshot_help(m, iter);
3027 else if (iter->trace && iter->trace->print_header)
3028 iter->trace->print_header(m);
3030 trace_default_header(m);
3032 } else if (iter->leftover) {
3034 * If we filled the seq_file buffer earlier, we
3035 * want to just show it now.
3037 ret = trace_print_seq(m, &iter->seq);
3039 /* ret should this time be zero, but you never know */
3040 iter->leftover = ret;
3043 print_trace_line(iter);
3044 ret = trace_print_seq(m, &iter->seq);
3046 * If we overflow the seq_file buffer, then it will
3047 * ask us for this data again at start up.
3049 * ret is 0 if seq_file write succeeded.
3052 iter->leftover = ret;
3059 * Should be used after trace_array_get(), trace_types_lock
3060 * ensures that i_cdev was already initialized.
3062 static inline int tracing_get_cpu(struct inode *inode)
3064 if (inode->i_cdev) /* See trace_create_cpu_file() */
3065 return (long)inode->i_cdev - 1;
3066 return RING_BUFFER_ALL_CPUS;
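/*
 * This relies on trace_create_cpu_file() stashing the CPU number plus
 * one in i_cdev (roughly: inode->i_cdev = (void *)(long)(cpu + 1)), so
 * that a NULL i_cdev keeps meaning "all CPUs" for the top level files.
 */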
3069 static const struct seq_operations tracer_seq_ops = {
3076 static struct trace_iterator *
3077 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3079 struct trace_array *tr = inode->i_private;
3080 struct trace_iterator *iter;
3083 if (tracing_disabled)
3084 return ERR_PTR(-ENODEV);
3086 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3088 return ERR_PTR(-ENOMEM);
3090 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3092 if (!iter->buffer_iter)
3096 * We make a copy of the current tracer to avoid concurrent
3097 * changes on it while we are reading.
3099 mutex_lock(&trace_types_lock);
3100 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3104 *iter->trace = *tr->current_trace;
3106 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3111 #ifdef CONFIG_TRACER_MAX_TRACE
3112 /* Currently only the top directory has a snapshot */
3113 if (tr->current_trace->print_max || snapshot)
3114 iter->trace_buffer = &tr->max_buffer;
3117 iter->trace_buffer = &tr->trace_buffer;
3118 iter->snapshot = snapshot;
3120 iter->cpu_file = tracing_get_cpu(inode);
3121 mutex_init(&iter->mutex);
3123 /* Notify the tracer early; before we stop tracing. */
3124 if (iter->trace && iter->trace->open)
3125 iter->trace->open(iter);
3127 /* Annotate start of buffers if we had overruns */
3128 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3129 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3131 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3132 if (trace_clocks[tr->clock_id].in_ns)
3133 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3135 /* stop the trace while dumping if we are not opening "snapshot" */
3136 if (!iter->snapshot)
3137 tracing_stop_tr(tr);
3139 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3140 for_each_tracing_cpu(cpu) {
3141 iter->buffer_iter[cpu] =
3142 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3144 ring_buffer_read_prepare_sync();
3145 for_each_tracing_cpu(cpu) {
3146 ring_buffer_read_start(iter->buffer_iter[cpu]);
3147 tracing_iter_reset(iter, cpu);
3150 cpu = iter->cpu_file;
3151 iter->buffer_iter[cpu] =
3152 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3153 ring_buffer_read_prepare_sync();
3154 ring_buffer_read_start(iter->buffer_iter[cpu]);
3155 tracing_iter_reset(iter, cpu);
3158 mutex_unlock(&trace_types_lock);
3163 mutex_unlock(&trace_types_lock);
3165 kfree(iter->buffer_iter);
3167 seq_release_private(inode, file);
3168 return ERR_PTR(-ENOMEM);
3171 int tracing_open_generic(struct inode *inode, struct file *filp)
3173 if (tracing_disabled)
3176 filp->private_data = inode->i_private;
3180 bool tracing_is_disabled(void)
3182 return (tracing_disabled) ? true : false;
3186 * Open and update trace_array ref count.
3187 * Must have the current trace_array passed to it.
3189 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3191 struct trace_array *tr = inode->i_private;
3193 if (tracing_disabled)
3196 if (trace_array_get(tr) < 0)
3199 filp->private_data = inode->i_private;
3204 static int tracing_release(struct inode *inode, struct file *file)
3206 struct trace_array *tr = inode->i_private;
3207 struct seq_file *m = file->private_data;
3208 struct trace_iterator *iter;
3211 if (!(file->f_mode & FMODE_READ)) {
3212 trace_array_put(tr);
3216 /* Writes do not use seq_file */
3218 mutex_lock(&trace_types_lock);
3220 for_each_tracing_cpu(cpu) {
3221 if (iter->buffer_iter[cpu])
3222 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3225 if (iter->trace && iter->trace->close)
3226 iter->trace->close(iter);
3228 if (!iter->snapshot)
3229 /* reenable tracing if it was previously enabled */
3230 tracing_start_tr(tr);
3232 __trace_array_put(tr);
3234 mutex_unlock(&trace_types_lock);
3236 mutex_destroy(&iter->mutex);
3237 free_cpumask_var(iter->started);
3239 kfree(iter->buffer_iter);
3240 seq_release_private(inode, file);
3245 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3247 struct trace_array *tr = inode->i_private;
3249 trace_array_put(tr);
3253 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3255 struct trace_array *tr = inode->i_private;
3257 trace_array_put(tr);
3259 return single_release(inode, file);
3262 static int tracing_open(struct inode *inode, struct file *file)
3264 struct trace_array *tr = inode->i_private;
3265 struct trace_iterator *iter;
3268 if (trace_array_get(tr) < 0)
3271 /* If this file was open for write, then erase contents */
3272 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3273 int cpu = tracing_get_cpu(inode);
3275 if (cpu == RING_BUFFER_ALL_CPUS)
3276 tracing_reset_online_cpus(&tr->trace_buffer);
3278 tracing_reset(&tr->trace_buffer, cpu);
3281 if (file->f_mode & FMODE_READ) {
3282 iter = __tracing_open(inode, file, false);
3284 ret = PTR_ERR(iter);
3285 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3286 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3290 trace_array_put(tr);
3296 * Some tracers are not suitable for instance buffers.
3297 * A tracer is always available for the global array (toplevel)
3298 * or if it explicitly states that it is.
3301 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3303 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3306 /* Find the next tracer that this trace array may use */
3307 static struct tracer *
3308 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3310 while (t && !trace_ok_for_array(t, tr))
3317 t_next(struct seq_file *m, void *v, loff_t *pos)
3319 struct trace_array *tr = m->private;
3320 struct tracer *t = v;
3325 t = get_tracer_for_array(tr, t->next);
3330 static void *t_start(struct seq_file *m, loff_t *pos)
3332 struct trace_array *tr = m->private;
3336 mutex_lock(&trace_types_lock);
3338 t = get_tracer_for_array(tr, trace_types);
3339 for (; t && l < *pos; t = t_next(m, t, &l))
3345 static void t_stop(struct seq_file *m, void *p)
3347 mutex_unlock(&trace_types_lock);
3350 static int t_show(struct seq_file *m, void *v)
3352 struct tracer *t = v;
3357 seq_puts(m, t->name);
3366 static const struct seq_operations show_traces_seq_ops = {
3373 static int show_traces_open(struct inode *inode, struct file *file)
3375 struct trace_array *tr = inode->i_private;
3379 if (tracing_disabled)
3382 ret = seq_open(file, &show_traces_seq_ops);
3386 m = file->private_data;
3393 tracing_write_stub(struct file *filp, const char __user *ubuf,
3394 size_t count, loff_t *ppos)
3399 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3403 if (file->f_mode & FMODE_READ)
3404 ret = seq_lseek(file, offset, whence);
3406 file->f_pos = ret = 0;
3411 static const struct file_operations tracing_fops = {
3412 .open = tracing_open,
3414 .write = tracing_write_stub,
3415 .llseek = tracing_lseek,
3416 .release = tracing_release,
3419 static const struct file_operations show_traces_fops = {
3420 .open = show_traces_open,
3422 .release = seq_release,
3423 .llseek = seq_lseek,
3427 * The tracer itself will not take this lock, but still we want
3428 * to provide a consistent cpumask to user-space:
3430 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3433 * Temporary storage for the character representation of the
3434 * CPU bitmask (and one more byte for the newline):
3436 static char mask_str[NR_CPUS + 1];
3439 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3440 size_t count, loff_t *ppos)
3442 struct trace_array *tr = file_inode(filp)->i_private;
3445 mutex_lock(&tracing_cpumask_update_lock);
3447 len = snprintf(mask_str, count, "%*pb\n",
3448 cpumask_pr_args(tr->tracing_cpumask));
3453 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3456 mutex_unlock(&tracing_cpumask_update_lock);
3462 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3463 size_t count, loff_t *ppos)
3465 struct trace_array *tr = file_inode(filp)->i_private;
3466 cpumask_var_t tracing_cpumask_new;
3469 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3472 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3476 mutex_lock(&tracing_cpumask_update_lock);
3478 local_irq_disable();
3479 arch_spin_lock(&tr->max_lock);
3480 for_each_tracing_cpu(cpu) {
3482 * Increase/decrease the disabled counter if we are
3483 * about to flip a bit in the cpumask:
3485 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3486 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3487 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3488 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3490 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3491 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3492 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3493 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3496 arch_spin_unlock(&tr->max_lock);
3499 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3501 mutex_unlock(&tracing_cpumask_update_lock);
3502 free_cpumask_var(tracing_cpumask_new);
3507 free_cpumask_var(tracing_cpumask_new);
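/*
 * Illustrative usage from user space (tracefs may be mounted at
 * /sys/kernel/tracing or /sys/kernel/debug/tracing):
 *
 *	# echo 3 > tracing_cpumask	(trace only CPUs 0 and 1)
 *
 * Every bit cleared by such a write bumps the per-cpu "disabled" count
 * and disables ring buffer recording for that CPU, as done above.
 */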
3512 static const struct file_operations tracing_cpumask_fops = {
3513 .open = tracing_open_generic_tr,
3514 .read = tracing_cpumask_read,
3515 .write = tracing_cpumask_write,
3516 .release = tracing_release_generic_tr,
3517 .llseek = generic_file_llseek,
3520 static int tracing_trace_options_show(struct seq_file *m, void *v)
3522 struct tracer_opt *trace_opts;
3523 struct trace_array *tr = m->private;
3527 mutex_lock(&trace_types_lock);
3528 tracer_flags = tr->current_trace->flags->val;
3529 trace_opts = tr->current_trace->flags->opts;
3531 for (i = 0; trace_options[i]; i++) {
3532 if (tr->trace_flags & (1 << i))
3533 seq_printf(m, "%s\n", trace_options[i]);
3535 seq_printf(m, "no%s\n", trace_options[i]);
3538 for (i = 0; trace_opts[i].name; i++) {
3539 if (tracer_flags & trace_opts[i].bit)
3540 seq_printf(m, "%s\n", trace_opts[i].name);
3542 seq_printf(m, "no%s\n", trace_opts[i].name);
3544 mutex_unlock(&trace_types_lock);
3549 static int __set_tracer_option(struct trace_array *tr,
3550 struct tracer_flags *tracer_flags,
3551 struct tracer_opt *opts, int neg)
3553 struct tracer *trace = tr->current_trace;
3556 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3561 tracer_flags->val &= ~opts->bit;
3563 tracer_flags->val |= opts->bit;
3567 /* Try to assign a tracer specific option */
3568 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3570 struct tracer *trace = tr->current_trace;
3571 struct tracer_flags *tracer_flags = trace->flags;
3572 struct tracer_opt *opts = NULL;
3575 for (i = 0; tracer_flags->opts[i].name; i++) {
3576 opts = &tracer_flags->opts[i];
3578 if (strcmp(cmp, opts->name) == 0)
3579 return __set_tracer_option(tr, trace->flags, opts, neg);
3585 /* Some tracers require overwrite to stay enabled */
3586 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3588 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3594 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3596 /* do nothing if flag is already set */
3597 if (!!(tr->trace_flags & mask) == !!enabled)
3600 /* Give the tracer a chance to approve the change */
3601 if (tr->current_trace->flag_changed)
3602 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3606 tr->trace_flags |= mask;
3608 tr->trace_flags &= ~mask;
3610 if (mask == TRACE_ITER_RECORD_CMD)
3611 trace_event_enable_cmd_record(enabled);
3613 if (mask == TRACE_ITER_OVERWRITE) {
3614 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3615 #ifdef CONFIG_TRACER_MAX_TRACE
3616 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3620 if (mask == TRACE_ITER_PRINTK) {
3621 trace_printk_start_stop_comm(enabled);
3622 trace_printk_control(enabled);
3628 static int trace_set_options(struct trace_array *tr, char *option)
3634 size_t orig_len = strlen(option);
3636 cmp = strstrip(option);
3638 if (strncmp(cmp, "no", 2) == 0) {
3643 mutex_lock(&trace_types_lock);
3645 for (i = 0; trace_options[i]; i++) {
3646 if (strcmp(cmp, trace_options[i]) == 0) {
3647 ret = set_tracer_flag(tr, 1 << i, !neg);
3652 /* If no option could be set, test the specific tracer options */
3653 if (!trace_options[i])
3654 ret = set_tracer_option(tr, cmp, neg);
3656 mutex_unlock(&trace_types_lock);
3659 * If the first trailing whitespace is replaced with '\0' by strstrip,
3660 * turn it back into a space.
3662 if (orig_len > strlen(option))
3663 option[strlen(option)] = ' ';
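/*
 * Illustrative usage of the trace_options file handled here:
 *
 *	# echo sym-addr > trace_options		(set a flag)
 *	# echo nosym-addr > trace_options	(clear it again)
 *
 * The "no" prefix parsing above is what makes the second form work;
 * tracer specific options are tried only if no core option matched.
 */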
3668 static void __init apply_trace_boot_options(void)
3670 char *buf = trace_boot_options_buf;
3674 option = strsep(&buf, ",");
3680 trace_set_options(&global_trace, option);
3682 /* Put back the comma to allow this to be called again */
3689 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3690 size_t cnt, loff_t *ppos)
3692 struct seq_file *m = filp->private_data;
3693 struct trace_array *tr = m->private;
3697 if (cnt >= sizeof(buf))
3700 if (copy_from_user(&buf, ubuf, cnt))
3705 ret = trace_set_options(tr, buf);
3714 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3716 struct trace_array *tr = inode->i_private;
3719 if (tracing_disabled)
3722 if (trace_array_get(tr) < 0)
3725 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3727 trace_array_put(tr);
3732 static const struct file_operations tracing_iter_fops = {
3733 .open = tracing_trace_options_open,
3735 .llseek = seq_lseek,
3736 .release = tracing_single_release_tr,
3737 .write = tracing_trace_options_write,
3740 static const char readme_msg[] =
3741 "tracing mini-HOWTO:\n\n"
3742 "# echo 0 > tracing_on : quick way to disable tracing\n"
3743 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3744 " Important files:\n"
3745 " trace\t\t\t- The static contents of the buffer\n"
3746 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3747 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3748 " current_tracer\t- function and latency tracers\n"
3749 " available_tracers\t- list of configured tracers for current_tracer\n"
3750 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3751 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3752 " trace_clock\t\t-change the clock used to order events\n"
3753 " local: Per cpu clock but may not be synced across CPUs\n"
3754 " global: Synced across CPUs but slows tracing down.\n"
3755 " counter: Not a clock, but just an increment\n"
3756 " uptime: Jiffy counter from time of boot\n"
3757 " perf: Same clock that perf events use\n"
3758 #ifdef CONFIG_X86_64
3759 " x86-tsc: TSC cycle counter\n"
3761 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3762 " tracing_cpumask\t- Limit which CPUs to trace\n"
3763 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3764 "\t\t\t Remove sub-buffer with rmdir\n"
3765 " trace_options\t\t- Set format or modify how tracing happens\n"
3766 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3767 "\t\t\t option name\n"
3768 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3769 #ifdef CONFIG_DYNAMIC_FTRACE
3770 "\n available_filter_functions - list of functions that can be filtered on\n"
3771 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3772 "\t\t\t functions\n"
3773 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3774 "\t modules: Can select a group via module\n"
3775 "\t Format: :mod:<module-name>\n"
3776 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3777 "\t triggers: a command to perform when function is hit\n"
3778 "\t Format: <function>:<trigger>[:count]\n"
3779 "\t trigger: traceon, traceoff\n"
3780 "\t\t enable_event:<system>:<event>\n"
3781 "\t\t disable_event:<system>:<event>\n"
3782 #ifdef CONFIG_STACKTRACE
3785 #ifdef CONFIG_TRACER_SNAPSHOT
3790 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3791 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3792 "\t The first one will disable tracing every time do_fault is hit\n"
3793 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3794 "\t The first time do trap is hit and it disables tracing, the\n"
3795 "\t counter will decrement to 2. If tracing is already disabled,\n"
3796 "\t the counter will not decrement. It only decrements when the\n"
3797 "\t trigger did work\n"
3798 "\t To remove trigger without count:\n"
3799 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3800 "\t To remove trigger with a count:\n"
3801 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3802 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3803 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3804 "\t modules: Can select a group via module command :mod:\n"
3805 "\t Does not accept triggers\n"
3806 #endif /* CONFIG_DYNAMIC_FTRACE */
3807 #ifdef CONFIG_FUNCTION_TRACER
3808 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3811 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3812 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3813 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3814 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3816 #ifdef CONFIG_TRACER_SNAPSHOT
3817 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3818 "\t\t\t snapshot buffer. Read the contents for more\n"
3819 "\t\t\t information\n"
3821 #ifdef CONFIG_STACK_TRACER
3822 " stack_trace\t\t- Shows the max stack trace when active\n"
3823 " stack_max_size\t- Shows current max stack size that was traced\n"
3824 "\t\t\t Write into this file to reset the max size (trigger a\n"
3825 "\t\t\t new trace)\n"
3826 #ifdef CONFIG_DYNAMIC_FTRACE
3827 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3830 #endif /* CONFIG_STACK_TRACER */
3831 " events/\t\t- Directory containing all trace event subsystems:\n"
3832 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3833 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3834 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3836 " filter\t\t- If set, only events passing filter are traced\n"
3837 " events/<system>/<event>/\t- Directory containing control files for\n"
3839 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3840 " filter\t\t- If set, only events passing filter are traced\n"
3841 " trigger\t\t- If set, a command to perform when event is hit\n"
3842 "\t Format: <trigger>[:count][if <filter>]\n"
3843 "\t trigger: traceon, traceoff\n"
3844 "\t enable_event:<system>:<event>\n"
3845 "\t disable_event:<system>:<event>\n"
3846 #ifdef CONFIG_STACKTRACE
3849 #ifdef CONFIG_TRACER_SNAPSHOT
3852 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3853 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3854 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3855 "\t events/block/block_unplug/trigger\n"
3856 "\t The first disables tracing every time block_unplug is hit.\n"
3857 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3858 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3859 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3860 "\t Like function triggers, the counter is only decremented if it\n"
3861 "\t enabled or disabled tracing.\n"
3862 "\t To remove a trigger without a count:\n"
3863 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3864 "\t To remove a trigger with a count:\n"
3865 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3866 "\t Filters can be ignored when removing a trigger.\n"
3870 tracing_readme_read(struct file *filp, char __user *ubuf,
3871 size_t cnt, loff_t *ppos)
3873 return simple_read_from_buffer(ubuf, cnt, ppos,
3874 readme_msg, strlen(readme_msg));
3877 static const struct file_operations tracing_readme_fops = {
3878 .open = tracing_open_generic,
3879 .read = tracing_readme_read,
3880 .llseek = generic_file_llseek,
3883 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3885 unsigned int *ptr = v;
3887 if (*pos || m->count)
3892 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3894 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3903 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3909 arch_spin_lock(&trace_cmdline_lock);
3911 v = &savedcmd->map_cmdline_to_pid[0];
3913 v = saved_cmdlines_next(m, v, &l);
3921 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3923 arch_spin_unlock(&trace_cmdline_lock);
3927 static int saved_cmdlines_show(struct seq_file *m, void *v)
3929 char buf[TASK_COMM_LEN];
3930 unsigned int *pid = v;
3932 __trace_find_cmdline(*pid, buf);
3933 seq_printf(m, "%d %s\n", *pid, buf);
3937 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3938 .start = saved_cmdlines_start,
3939 .next = saved_cmdlines_next,
3940 .stop = saved_cmdlines_stop,
3941 .show = saved_cmdlines_show,
3944 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3946 if (tracing_disabled)
3949 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3952 static const struct file_operations tracing_saved_cmdlines_fops = {
3953 .open = tracing_saved_cmdlines_open,
3955 .llseek = seq_lseek,
3956 .release = seq_release,
3960 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3961 size_t cnt, loff_t *ppos)
3966 arch_spin_lock(&trace_cmdline_lock);
3967 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3968 arch_spin_unlock(&trace_cmdline_lock);
3970 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3973 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3975 kfree(s->saved_cmdlines);
3976 kfree(s->map_cmdline_to_pid);
3980 static int tracing_resize_saved_cmdlines(unsigned int val)
3982 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3984 s = kmalloc(sizeof(*s), GFP_KERNEL);
3988 if (allocate_cmdlines_buffer(val, s) < 0) {
3993 arch_spin_lock(&trace_cmdline_lock);
3994 savedcmd_temp = savedcmd;
3996 arch_spin_unlock(&trace_cmdline_lock);
3997 free_saved_cmdlines_buffer(savedcmd_temp);
4003 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4004 size_t cnt, loff_t *ppos)
4009 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4013 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4014 if (!val || val > PID_MAX_DEFAULT)
4017 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4026 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4027 .open = tracing_open_generic,
4028 .read = tracing_saved_cmdlines_size_read,
4029 .write = tracing_saved_cmdlines_size_write,
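/*
 * Illustrative: the comm cache can be resized at run time with e.g.
 *
 *	# echo 4096 > saved_cmdlines_size
 *
 * which ends up in tracing_resize_saved_cmdlines() above; the old
 * buffer is swapped out under trace_cmdline_lock and then freed.
 */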
4032 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
4033 static union trace_enum_map_item *
4034 update_enum_map(union trace_enum_map_item *ptr)
4036 if (!ptr->map.enum_string) {
4037 if (ptr->tail.next) {
4038 ptr = ptr->tail.next;
4039 /* Set ptr to the next real item (skip head) */
4047 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4049 union trace_enum_map_item *ptr = v;
4052 * Paranoid! If ptr points to end, we don't want to increment past it.
4053 * This really should never happen.
4055 ptr = update_enum_map(ptr);
4056 if (WARN_ON_ONCE(!ptr))
4063 ptr = update_enum_map(ptr);
4068 static void *enum_map_start(struct seq_file *m, loff_t *pos)
4070 union trace_enum_map_item *v;
4073 mutex_lock(&trace_enum_mutex);
4075 v = trace_enum_maps;
4079 while (v && l < *pos) {
4080 v = enum_map_next(m, v, &l);
4086 static void enum_map_stop(struct seq_file *m, void *v)
4088 mutex_unlock(&trace_enum_mutex);
4091 static int enum_map_show(struct seq_file *m, void *v)
4093 union trace_enum_map_item *ptr = v;
4095 seq_printf(m, "%s %ld (%s)\n",
4096 ptr->map.enum_string, ptr->map.enum_value,
4102 static const struct seq_operations tracing_enum_map_seq_ops = {
4103 .start = enum_map_start,
4104 .next = enum_map_next,
4105 .stop = enum_map_stop,
4106 .show = enum_map_show,
4109 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4111 if (tracing_disabled)
4114 return seq_open(filp, &tracing_enum_map_seq_ops);
4117 static const struct file_operations tracing_enum_map_fops = {
4118 .open = tracing_enum_map_open,
4120 .llseek = seq_lseek,
4121 .release = seq_release,
4124 static inline union trace_enum_map_item *
4125 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4127 /* Return tail of array given the head */
4128 return ptr + ptr->head.length + 1;
4132 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4135 struct trace_enum_map **stop;
4136 struct trace_enum_map **map;
4137 union trace_enum_map_item *map_array;
4138 union trace_enum_map_item *ptr;
4143 * The trace_enum_maps contains the map plus a head and tail item,
4144 * where the head holds the module and length of array, and the
4145 * tail holds a pointer to the next list.
4147 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4149 pr_warning("Unable to allocate trace enum mapping\n");
4153 mutex_lock(&trace_enum_mutex);
4155 if (!trace_enum_maps)
4156 trace_enum_maps = map_array;
4158 ptr = trace_enum_maps;
4160 ptr = trace_enum_jmp_to_tail(ptr);
4161 if (!ptr->tail.next)
4163 ptr = ptr->tail.next;
4166 ptr->tail.next = map_array;
4168 map_array->head.mod = mod;
4169 map_array->head.length = len;
4172 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4173 map_array->map = **map;
4176 memset(map_array, 0, sizeof(*map_array));
4178 mutex_unlock(&trace_enum_mutex);
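/*
 * Resulting layout of one chunk allocated above (len + 2 items):
 *
 *	map_array[0]		head { mod, length = len }
 *	map_array[1..len]	one trace_enum_map copied per entry
 *	map_array[len + 1]	tail { next -> next chunk, or zeroed }
 *
 * trace_enum_jmp_to_tail() uses head.length to hop from chunk to chunk
 * when walking the list for the enum_map seq_file.
 */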
4181 static void trace_create_enum_file(struct dentry *d_tracer)
4183 trace_create_file("enum_map", 0444, d_tracer,
4184 NULL, &tracing_enum_map_fops);
4187 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4188 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4189 static inline void trace_insert_enum_map_file(struct module *mod,
4190 struct trace_enum_map **start, int len) { }
4191 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4193 static void trace_insert_enum_map(struct module *mod,
4194 struct trace_enum_map **start, int len)
4196 struct trace_enum_map **map;
4203 trace_event_enum_update(map, len);
4205 trace_insert_enum_map_file(mod, start, len);
4209 tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4210 size_t cnt, loff_t *ppos)
4218 file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
4224 for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
4228 pid = savedcmd->map_cmdline_to_pid[i];
4229 if (pid == -1 || pid == NO_CMDLINE_MAP)
4232 tgid = trace_find_tgid(pid);
4233 r = sprintf(buf, "%d %d\n", pid, tgid);
4238 len = simple_read_from_buffer(ubuf, cnt, ppos,
4246 static const struct file_operations tracing_saved_tgids_fops = {
4247 .open = tracing_open_generic,
4248 .read = tracing_saved_tgids_read,
4249 .llseek = generic_file_llseek,
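/*
 * Illustrative output of the saved_tgids file, one "<pid> <tgid>" pair
 * per cached comm (values are examples only):
 *
 *	1023 1023
 *	1045 987
 */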
4253 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4254 size_t cnt, loff_t *ppos)
4256 struct trace_array *tr = filp->private_data;
4257 char buf[MAX_TRACER_SIZE+2];
4260 mutex_lock(&trace_types_lock);
4261 r = sprintf(buf, "%s\n", tr->current_trace->name);
4262 mutex_unlock(&trace_types_lock);
4264 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4267 int tracer_init(struct tracer *t, struct trace_array *tr)
4269 tracing_reset_online_cpus(&tr->trace_buffer);
4273 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4277 for_each_tracing_cpu(cpu)
4278 per_cpu_ptr(buf->data, cpu)->entries = val;
4281 #ifdef CONFIG_TRACER_MAX_TRACE
4282 /* resize @trace_buf to the size of @size_buf's entries */
4283 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4284 struct trace_buffer *size_buf, int cpu_id)
4288 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4289 for_each_tracing_cpu(cpu) {
4290 ret = ring_buffer_resize(trace_buf->buffer,
4291 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4294 per_cpu_ptr(trace_buf->data, cpu)->entries =
4295 per_cpu_ptr(size_buf->data, cpu)->entries;
4298 ret = ring_buffer_resize(trace_buf->buffer,
4299 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4301 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4302 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4307 #endif /* CONFIG_TRACER_MAX_TRACE */
4309 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4310 unsigned long size, int cpu)
4315 * If kernel or user changes the size of the ring buffer
4316 * we use the size that was given, and we can forget about
4317 * expanding it later.
4319 ring_buffer_expanded = true;
4321 /* May be called before buffers are initialized */
4322 if (!tr->trace_buffer.buffer)
4325 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4329 #ifdef CONFIG_TRACER_MAX_TRACE
4330 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4331 !tr->current_trace->use_max_tr)
4334 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4336 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4337 &tr->trace_buffer, cpu);
4340 * AARGH! We are left with different
4341 * size max buffer!!!!
4342 * The max buffer is our "snapshot" buffer.
4343 * When a tracer needs a snapshot (one of the
4344 * latency tracers), it swaps the max buffer
4345 * with the saved snap shot. We succeeded to
4346 * update the size of the main buffer, but failed to
4347 * update the size of the max buffer. But when we tried
4348 * to reset the main buffer to the original size, we
4349 * failed there too. This is very unlikely to
4350 * happen, but if it does, warn and kill all
4354 tracing_disabled = 1;
4359 if (cpu == RING_BUFFER_ALL_CPUS)
4360 set_buffer_entries(&tr->max_buffer, size);
4362 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4365 #endif /* CONFIG_TRACER_MAX_TRACE */
4367 if (cpu == RING_BUFFER_ALL_CPUS)
4368 set_buffer_entries(&tr->trace_buffer, size);
4370 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4375 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4376 unsigned long size, int cpu_id)
4380 mutex_lock(&trace_types_lock);
4382 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4383 /* make sure this cpu is enabled in the mask */
4384 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4390 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4395 mutex_unlock(&trace_types_lock);
4402 * tracing_update_buffers - used by tracing facility to expand ring buffers
4404 * To save memory when tracing is never used on a system that has it
4405 * configured in, the ring buffers are set to a minimum size. Once
4406 * a user starts to use the tracing facility, they need to grow
4407 * to their default size.
4409 * This function is to be called when a tracer is about to be used.
4411 int tracing_update_buffers(void)
4415 mutex_lock(&trace_types_lock);
4416 if (!ring_buffer_expanded)
4417 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4418 RING_BUFFER_ALL_CPUS);
4419 mutex_unlock(&trace_types_lock);
4424 struct trace_option_dentry;
4427 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4430 * Used to clear out the tracer before deletion of an instance.
4431 * Must have trace_types_lock held.
4433 static void tracing_set_nop(struct trace_array *tr)
4435 if (tr->current_trace == &nop_trace)
4438 tr->current_trace->enabled--;
4440 if (tr->current_trace->reset)
4441 tr->current_trace->reset(tr);
4443 tr->current_trace = &nop_trace;
4446 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4448 /* Only enable if the directory has been created already. */
4452 create_trace_option_files(tr, t);
4455 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4458 #ifdef CONFIG_TRACER_MAX_TRACE
4463 mutex_lock(&trace_types_lock);
4465 if (!ring_buffer_expanded) {
4466 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4467 RING_BUFFER_ALL_CPUS);
4473 for (t = trace_types; t; t = t->next) {
4474 if (strcmp(t->name, buf) == 0)
4481 if (t == tr->current_trace)
4484 /* Some tracers are only allowed for the top level buffer */
4485 if (!trace_ok_for_array(t, tr)) {
4490 /* If trace pipe files are being read, we can't change the tracer */
4491 if (tr->current_trace->ref) {
4496 trace_branch_disable();
4498 tr->current_trace->enabled--;
4500 if (tr->current_trace->reset)
4501 tr->current_trace->reset(tr);
4503 /* Current trace needs to be nop_trace before synchronize_sched */
4504 tr->current_trace = &nop_trace;
4506 #ifdef CONFIG_TRACER_MAX_TRACE
4507 had_max_tr = tr->allocated_snapshot;
4509 if (had_max_tr && !t->use_max_tr) {
4511 * We need to make sure that the update_max_tr sees that
4512 * current_trace changed to nop_trace to keep it from
4513 * swapping the buffers after we resize it.
4514 * update_max_tr() is called with interrupts disabled,
4515 * so a synchronize_sched() is sufficient.
4517 synchronize_sched();
4522 #ifdef CONFIG_TRACER_MAX_TRACE
4523 if (t->use_max_tr && !had_max_tr) {
4524 ret = alloc_snapshot(tr);
4531 ret = tracer_init(t, tr);
4536 tr->current_trace = t;
4537 tr->current_trace->enabled++;
4538 trace_branch_enable(tr);
4540 mutex_unlock(&trace_types_lock);
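/*
 * Illustrative: this is the backend of the current_tracer file, e.g.
 *
 *	# cat available_tracers
 *	# echo function_graph > current_tracer
 *
 * The write handler below strips trailing whitespace from the buffer
 * before calling tracing_set_tracer().
 */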
4546 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4547 size_t cnt, loff_t *ppos)
4549 struct trace_array *tr = filp->private_data;
4550 char buf[MAX_TRACER_SIZE+1];
4557 if (cnt > MAX_TRACER_SIZE)
4558 cnt = MAX_TRACER_SIZE;
4560 if (copy_from_user(&buf, ubuf, cnt))
4565 /* strip ending whitespace. */
4566 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4569 err = tracing_set_tracer(tr, buf);
4579 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4580 size_t cnt, loff_t *ppos)
4585 r = snprintf(buf, sizeof(buf), "%ld\n",
4586 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4587 if (r > sizeof(buf))
4589 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4593 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4594 size_t cnt, loff_t *ppos)
4599 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4609 tracing_thresh_read(struct file *filp, char __user *ubuf,
4610 size_t cnt, loff_t *ppos)
4612 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4616 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4617 size_t cnt, loff_t *ppos)
4619 struct trace_array *tr = filp->private_data;
4622 mutex_lock(&trace_types_lock);
4623 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4627 if (tr->current_trace->update_thresh) {
4628 ret = tr->current_trace->update_thresh(tr);
4635 mutex_unlock(&trace_types_lock);
4640 #ifdef CONFIG_TRACER_MAX_TRACE
4643 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4644 size_t cnt, loff_t *ppos)
4646 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4650 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4651 size_t cnt, loff_t *ppos)
4653 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4658 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4660 struct trace_array *tr = inode->i_private;
4661 struct trace_iterator *iter;
4664 if (tracing_disabled)
4667 if (trace_array_get(tr) < 0)
4670 mutex_lock(&trace_types_lock);
4672 /* create a buffer to store the information to pass to userspace */
4673 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4676 __trace_array_put(tr);
4680 trace_seq_init(&iter->seq);
4681 iter->trace = tr->current_trace;
4683 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4688 /* trace pipe does not show start of buffer */
4689 cpumask_setall(iter->started);
4691 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4692 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4694 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4695 if (trace_clocks[tr->clock_id].in_ns)
4696 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4699 iter->trace_buffer = &tr->trace_buffer;
4700 iter->cpu_file = tracing_get_cpu(inode);
4701 mutex_init(&iter->mutex);
4702 filp->private_data = iter;
4704 if (iter->trace->pipe_open)
4705 iter->trace->pipe_open(iter);
4707 nonseekable_open(inode, filp);
4709 tr->current_trace->ref++;
4711 mutex_unlock(&trace_types_lock);
4717 __trace_array_put(tr);
4718 mutex_unlock(&trace_types_lock);
4722 static int tracing_release_pipe(struct inode *inode, struct file *file)
4724 struct trace_iterator *iter = file->private_data;
4725 struct trace_array *tr = inode->i_private;
4727 mutex_lock(&trace_types_lock);
4729 tr->current_trace->ref--;
4731 if (iter->trace->pipe_close)
4732 iter->trace->pipe_close(iter);
4734 mutex_unlock(&trace_types_lock);
4736 free_cpumask_var(iter->started);
4737 mutex_destroy(&iter->mutex);
4740 trace_array_put(tr);
4746 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4748 struct trace_array *tr = iter->tr;
4750 /* Iterators are static, they should be filled or empty */
4751 if (trace_buffer_iter(iter, iter->cpu_file))
4752 return POLLIN | POLLRDNORM;
4754 if (tr->trace_flags & TRACE_ITER_BLOCK)
4756 * Always select as readable when in blocking mode
4758 return POLLIN | POLLRDNORM;
4760 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4765 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4767 struct trace_iterator *iter = filp->private_data;
4769 return trace_poll(iter, filp, poll_table);
4772 /* Must be called with iter->mutex held. */
4773 static int tracing_wait_pipe(struct file *filp)
4775 struct trace_iterator *iter = filp->private_data;
4778 while (trace_empty(iter)) {
4780 if ((filp->f_flags & O_NONBLOCK)) {
4785 * We block until we read something and tracing is disabled.
4786 * We still block if tracing is disabled, but we have never
4787 * read anything. This allows a user to cat this file, and
4788 * then enable tracing. But after we have read something,
4789 * we give an EOF when tracing is again disabled.
4791 * iter->pos will be 0 if we haven't read anything.
4793 if (!tracing_is_on() && iter->pos)
4796 mutex_unlock(&iter->mutex);
4798 ret = wait_on_pipe(iter, false);
4800 mutex_lock(&iter->mutex);
4813 tracing_read_pipe(struct file *filp, char __user *ubuf,
4814 size_t cnt, loff_t *ppos)
4816 struct trace_iterator *iter = filp->private_data;
4819 /* return any leftover data */
4820 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4824 trace_seq_init(&iter->seq);
4827 * Avoid more than one consumer on a single file descriptor
4828 * This is just a matter of trace coherency; the ring buffer itself
4831 mutex_lock(&iter->mutex);
4832 if (iter->trace->read) {
4833 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4839 sret = tracing_wait_pipe(filp);
4843 /* stop when tracing is finished */
4844 if (trace_empty(iter)) {
4849 if (cnt >= PAGE_SIZE)
4850 cnt = PAGE_SIZE - 1;
4852 /* reset all but tr, trace, and overruns */
4853 memset(&iter->seq, 0,
4854 sizeof(struct trace_iterator) -
4855 offsetof(struct trace_iterator, seq));
4856 cpumask_clear(iter->started);
4859 trace_event_read_lock();
4860 trace_access_lock(iter->cpu_file);
4861 while (trace_find_next_entry_inc(iter) != NULL) {
4862 enum print_line_t ret;
4863 int save_len = iter->seq.seq.len;
4865 ret = print_trace_line(iter);
4866 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4867 /* don't print partial lines */
4868 iter->seq.seq.len = save_len;
4871 if (ret != TRACE_TYPE_NO_CONSUME)
4872 trace_consume(iter);
4874 if (trace_seq_used(&iter->seq) >= cnt)
4878 * Setting the full flag means we reached the trace_seq buffer
4879 * size and we should leave by partial output condition above.
4880 * One of the trace_seq_* functions is not used properly.
4882 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4885 trace_access_unlock(iter->cpu_file);
4886 trace_event_read_unlock();
4888 /* Now copy what we have to the user */
4889 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4890 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4891 trace_seq_init(&iter->seq);
4894 * If there was nothing to send to user, in spite of consuming trace
4895 * entries, go back to wait for more entries.
4901 mutex_unlock(&iter->mutex);
4906 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4909 __free_page(spd->pages[idx]);
4912 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4914 .confirm = generic_pipe_buf_confirm,
4915 .release = generic_pipe_buf_release,
4916 .steal = generic_pipe_buf_steal,
4917 .get = generic_pipe_buf_get,
4921 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4927 /* Seq buffer is page-sized, exactly what we need. */
4929 save_len = iter->seq.seq.len;
4930 ret = print_trace_line(iter);
4932 if (trace_seq_has_overflowed(&iter->seq)) {
4933 iter->seq.seq.len = save_len;
4938 * This should not be hit, because it should only
4939 * be set if the iter->seq overflowed. But check it
4940 * anyway to be safe.
4942 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4943 iter->seq.seq.len = save_len;
4947 count = trace_seq_used(&iter->seq) - save_len;
4950 iter->seq.seq.len = save_len;
4954 if (ret != TRACE_TYPE_NO_CONSUME)
4955 trace_consume(iter);
4957 if (!trace_find_next_entry_inc(iter)) {
4967 static ssize_t tracing_splice_read_pipe(struct file *filp,
4969 struct pipe_inode_info *pipe,
4973 struct page *pages_def[PIPE_DEF_BUFFERS];
4974 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4975 struct trace_iterator *iter = filp->private_data;
4976 struct splice_pipe_desc spd = {
4978 .partial = partial_def,
4979 .nr_pages = 0, /* This gets updated below. */
4980 .nr_pages_max = PIPE_DEF_BUFFERS,
4982 .ops = &tracing_pipe_buf_ops,
4983 .spd_release = tracing_spd_release_pipe,
4989 if (splice_grow_spd(pipe, &spd))
4992 mutex_lock(&iter->mutex);
4994 if (iter->trace->splice_read) {
4995 ret = iter->trace->splice_read(iter, filp,
4996 ppos, pipe, len, flags);
5001 ret = tracing_wait_pipe(filp);
5005 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5010 trace_event_read_lock();
5011 trace_access_lock(iter->cpu_file);
5013 /* Fill as many pages as possible. */
5014 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5015 spd.pages[i] = alloc_page(GFP_KERNEL);
5019 rem = tracing_fill_pipe_page(rem, iter);
5021 /* Copy the data into the page, so we can start over. */
5022 ret = trace_seq_to_buffer(&iter->seq,
5023 page_address(spd.pages[i]),
5024 trace_seq_used(&iter->seq));
5026 __free_page(spd.pages[i]);
5029 spd.partial[i].offset = 0;
5030 spd.partial[i].len = trace_seq_used(&iter->seq);
5032 trace_seq_init(&iter->seq);
5035 trace_access_unlock(iter->cpu_file);
5036 trace_event_read_unlock();
5037 mutex_unlock(&iter->mutex);
5041 ret = splice_to_pipe(pipe, &spd);
5043 splice_shrink_spd(&spd);
5047 mutex_unlock(&iter->mutex);
5052 tracing_entries_read(struct file *filp, char __user *ubuf,
5053 size_t cnt, loff_t *ppos)
5055 struct inode *inode = file_inode(filp);
5056 struct trace_array *tr = inode->i_private;
5057 int cpu = tracing_get_cpu(inode);
5062 mutex_lock(&trace_types_lock);
5064 if (cpu == RING_BUFFER_ALL_CPUS) {
5065 int cpu, buf_size_same;
5070 /* check if all cpu sizes are same */
5071 for_each_tracing_cpu(cpu) {
5072 /* fill in the size from first enabled cpu */
5074 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5075 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5081 if (buf_size_same) {
5082 if (!ring_buffer_expanded)
5083 r = sprintf(buf, "%lu (expanded: %lu)\n",
5085 trace_buf_size >> 10);
5087 r = sprintf(buf, "%lu\n", size >> 10);
5089 r = sprintf(buf, "X\n");
5091 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5093 mutex_unlock(&trace_types_lock);
5095 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5100 tracing_entries_write(struct file *filp, const char __user *ubuf,
5101 size_t cnt, loff_t *ppos)
5103 struct inode *inode = file_inode(filp);
5104 struct trace_array *tr = inode->i_private;
5108 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5112 /* must have at least 1 entry */
5116 /* value is in KB */
5118 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
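/*
 * Illustrative usage (not part of the original file; path assumes
 * tracefs is mounted at /sys/kernel/tracing): buffer_size_kb takes
 * and reports sizes in kilobytes, matching the "value is in KB"
 * conversion above:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# cat /sys/kernel/tracing/buffer_size_kb
 *
 * The read side above prints "X" when the per-cpu sizes differ, and
 * "<size> (expanded: <boot size>)" while the ring buffer is still at
 * its minimal boot-time size.
 */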
5128 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5129 size_t cnt, loff_t *ppos)
5131 struct trace_array *tr = filp->private_data;
5134 unsigned long size = 0, expanded_size = 0;
5136 mutex_lock(&trace_types_lock);
5137 for_each_tracing_cpu(cpu) {
5138 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5139 if (!ring_buffer_expanded)
5140 expanded_size += trace_buf_size >> 10;
5142 if (ring_buffer_expanded)
5143 r = sprintf(buf, "%lu\n", size);
5145 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5146 mutex_unlock(&trace_types_lock);
5148 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5152 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5153 size_t cnt, loff_t *ppos)
5156 * There is no need to read what the user has written; this function
5157 * is just to make sure that there is no error when "echo" is used
5166 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5168 struct trace_array *tr = inode->i_private;
5170 /* disable tracing ? */
5171 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5172 tracer_tracing_off(tr);
5173 /* resize the ring buffer to 0 */
5174 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5176 trace_array_put(tr);
5182 tracing_mark_write(struct file *filp, const char __user *ubuf,
5183 size_t cnt, loff_t *fpos)
5185 unsigned long addr = (unsigned long)ubuf;
5186 struct trace_array *tr = filp->private_data;
5187 struct ring_buffer_event *event;
5188 struct ring_buffer *buffer;
5189 struct print_entry *entry;
5190 unsigned long irq_flags;
5191 struct page *pages[2];
5201 if (tracing_disabled)
5204 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5207 if (cnt > TRACE_BUF_SIZE)
5208 cnt = TRACE_BUF_SIZE;
5211 * Userspace is injecting traces into the kernel trace buffer.
5212 * We want to be as non-intrusive as possible.
5213 * To do so, we do not want to allocate any special buffers
5214 * or take any locks, but instead write the userspace data
5215 * straight into the ring buffer.
5217 * First we need to pin the userspace buffer into memory,
5218 * which it most likely already is, because userspace just referenced it.
5219 * But there's no guarantee that it is. By using get_user_pages_fast()
5220 * and kmap_atomic/kunmap_atomic() we can get access to the
5221 * pages directly. We then write the data directly into the
5224 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
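/*
 * Illustrative user-space sketch (not part of the original file): the
 * buffer written here normally comes from a plain write(2) on the
 * trace_marker file created in init_tracer_tracefs(), e.g. (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hit checkpoint A\n", 17);
 *		close(fd);
 *	}
 *
 * The string then shows up inline in the trace output as a print event.
 */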
5226 /* check if we cross pages */
5227 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5230 offset = addr & (PAGE_SIZE - 1);
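/*
 * Worked example (illustrative, not part of the original file): with
 * 4K pages, a write of cnt = 16 bytes starting at an address ending
 * in 0xff8 spans two pages, since
 *
 *	(0x...0ff8 & PAGE_MASK)        == 0x...0000
 *	((0x...0ff8 + 16) & PAGE_MASK) == 0x...1000
 *
 * In that case two pages are pinned below and the copy is split at
 * the page boundary (len = PAGE_SIZE - offset).
 */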
5233 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5234 if (ret < nr_pages) {
5236 put_page(pages[ret]);
5241 for (i = 0; i < nr_pages; i++)
5242 map_page[i] = kmap_atomic(pages[i]);
5244 local_save_flags(irq_flags);
5245 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5246 buffer = tr->trace_buffer.buffer;
5247 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5248 irq_flags, preempt_count());
5250 /* Ring buffer disabled, return as if not open for write */
5255 entry = ring_buffer_event_data(event);
5256 entry->ip = _THIS_IP_;
5258 if (nr_pages == 2) {
5259 len = PAGE_SIZE - offset;
5260 memcpy(&entry->buf, map_page[0] + offset, len);
5261 memcpy(&entry->buf[len], map_page[1], cnt - len);
5263 memcpy(&entry->buf, map_page[0] + offset, cnt);
5265 if (entry->buf[cnt - 1] != '\n') {
5266 entry->buf[cnt] = '\n';
5267 entry->buf[cnt + 1] = '\0';
5269 entry->buf[cnt] = '\0';
5271 __buffer_unlock_commit(buffer, event);
5278 for (i = nr_pages - 1; i >= 0; i--) {
5279 kunmap_atomic(map_page[i]);
5286 static int tracing_clock_show(struct seq_file *m, void *v)
5288 struct trace_array *tr = m->private;
5291 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5293 "%s%s%s%s", i ? " " : "",
5294 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5295 i == tr->clock_id ? "]" : "");
5301 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5305 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5306 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5309 if (i == ARRAY_SIZE(trace_clocks))
5312 mutex_lock(&trace_types_lock);
5316 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5319 * New clock may not be consistent with the previous clock.
5320 * Reset the buffer so that it doesn't have incomparable timestamps.
5322 tracing_reset_online_cpus(&tr->trace_buffer);
5324 #ifdef CONFIG_TRACER_MAX_TRACE
5325 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5326 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5327 tracing_reset_online_cpus(&tr->max_buffer);
5330 mutex_unlock(&trace_types_lock);
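/*
 * Illustrative usage (not part of the original file; path assumes
 * tracefs is mounted at /sys/kernel/tracing): tracing_clock_show()
 * brackets the currently selected clock, and a write switches clocks
 * and, as noted above, resets the buffers:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 */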
5335 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5336 size_t cnt, loff_t *fpos)
5338 struct seq_file *m = filp->private_data;
5339 struct trace_array *tr = m->private;
5341 const char *clockstr;
5344 if (cnt >= sizeof(buf))
5347 if (copy_from_user(&buf, ubuf, cnt))
5352 clockstr = strstrip(buf);
5354 ret = tracing_set_clock(tr, clockstr);
5363 static int tracing_clock_open(struct inode *inode, struct file *file)
5365 struct trace_array *tr = inode->i_private;
5368 if (tracing_disabled)
5371 if (trace_array_get(tr))
5374 ret = single_open(file, tracing_clock_show, inode->i_private);
5376 trace_array_put(tr);
5381 struct ftrace_buffer_info {
5382 struct trace_iterator iter;
5387 #ifdef CONFIG_TRACER_SNAPSHOT
5388 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5390 struct trace_array *tr = inode->i_private;
5391 struct trace_iterator *iter;
5395 if (trace_array_get(tr) < 0)
5398 if (file->f_mode & FMODE_READ) {
5399 iter = __tracing_open(inode, file, true);
5401 ret = PTR_ERR(iter);
5403 /* Writes still need the seq_file to hold the private data */
5405 m = kzalloc(sizeof(*m), GFP_KERNEL);
5408 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5416 iter->trace_buffer = &tr->max_buffer;
5417 iter->cpu_file = tracing_get_cpu(inode);
5419 file->private_data = m;
5423 trace_array_put(tr);
5429 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5432 struct seq_file *m = filp->private_data;
5433 struct trace_iterator *iter = m->private;
5434 struct trace_array *tr = iter->tr;
5438 ret = tracing_update_buffers();
5442 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5446 mutex_lock(&trace_types_lock);
5448 if (tr->current_trace->use_max_tr) {
5455 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5459 if (tr->allocated_snapshot)
5463 /* Only allow per-cpu swap if the ring buffer supports it */
5464 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5465 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5470 if (!tr->allocated_snapshot) {
5471 ret = alloc_snapshot(tr);
5475 local_irq_disable();
5476 /* Now, we're going to swap */
5477 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5478 update_max_tr(tr, current, smp_processor_id());
5480 update_max_tr_single(tr, current, iter->cpu_file);
5484 if (tr->allocated_snapshot) {
5485 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5486 tracing_reset_online_cpus(&tr->max_buffer);
5488 tracing_reset(&tr->max_buffer, iter->cpu_file);
5498 mutex_unlock(&trace_types_lock);
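/*
 * Illustrative usage (not part of the original file; path assumes
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot
 *	# cat /sys/kernel/tracing/snapshot
 *
 * Writing 1 allocates the max buffer on first use (alloc_snapshot()
 * above) and swaps it with the live buffer via update_max_tr();
 * reading the file then shows the frozen copy while tracing continues
 * in the main buffer.
 */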
5502 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5504 struct seq_file *m = file->private_data;
5507 ret = tracing_release(inode, file);
5509 if (file->f_mode & FMODE_READ)
5512 /* If write only, the seq_file is just a stub */
5520 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5521 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5522 size_t count, loff_t *ppos);
5523 static int tracing_buffers_release(struct inode *inode, struct file *file);
5524 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5525 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5527 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5529 struct ftrace_buffer_info *info;
5532 ret = tracing_buffers_open(inode, filp);
5536 info = filp->private_data;
5538 if (info->iter.trace->use_max_tr) {
5539 tracing_buffers_release(inode, filp);
5543 info->iter.snapshot = true;
5544 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5549 #endif /* CONFIG_TRACER_SNAPSHOT */
5552 static const struct file_operations tracing_thresh_fops = {
5553 .open = tracing_open_generic,
5554 .read = tracing_thresh_read,
5555 .write = tracing_thresh_write,
5556 .llseek = generic_file_llseek,
5559 #ifdef CONFIG_TRACER_MAX_TRACE
5560 static const struct file_operations tracing_max_lat_fops = {
5561 .open = tracing_open_generic,
5562 .read = tracing_max_lat_read,
5563 .write = tracing_max_lat_write,
5564 .llseek = generic_file_llseek,
5568 static const struct file_operations set_tracer_fops = {
5569 .open = tracing_open_generic,
5570 .read = tracing_set_trace_read,
5571 .write = tracing_set_trace_write,
5572 .llseek = generic_file_llseek,
5575 static const struct file_operations tracing_pipe_fops = {
5576 .open = tracing_open_pipe,
5577 .poll = tracing_poll_pipe,
5578 .read = tracing_read_pipe,
5579 .splice_read = tracing_splice_read_pipe,
5580 .release = tracing_release_pipe,
5581 .llseek = no_llseek,
5584 static const struct file_operations tracing_entries_fops = {
5585 .open = tracing_open_generic_tr,
5586 .read = tracing_entries_read,
5587 .write = tracing_entries_write,
5588 .llseek = generic_file_llseek,
5589 .release = tracing_release_generic_tr,
5592 static const struct file_operations tracing_total_entries_fops = {
5593 .open = tracing_open_generic_tr,
5594 .read = tracing_total_entries_read,
5595 .llseek = generic_file_llseek,
5596 .release = tracing_release_generic_tr,
5599 static const struct file_operations tracing_free_buffer_fops = {
5600 .open = tracing_open_generic_tr,
5601 .write = tracing_free_buffer_write,
5602 .release = tracing_free_buffer_release,
5605 static const struct file_operations tracing_mark_fops = {
5606 .open = tracing_open_generic_tr,
5607 .write = tracing_mark_write,
5608 .llseek = generic_file_llseek,
5609 .release = tracing_release_generic_tr,
5612 static const struct file_operations trace_clock_fops = {
5613 .open = tracing_clock_open,
5615 .llseek = seq_lseek,
5616 .release = tracing_single_release_tr,
5617 .write = tracing_clock_write,
5620 #ifdef CONFIG_TRACER_SNAPSHOT
5621 static const struct file_operations snapshot_fops = {
5622 .open = tracing_snapshot_open,
5624 .write = tracing_snapshot_write,
5625 .llseek = tracing_lseek,
5626 .release = tracing_snapshot_release,
5629 static const struct file_operations snapshot_raw_fops = {
5630 .open = snapshot_raw_open,
5631 .read = tracing_buffers_read,
5632 .release = tracing_buffers_release,
5633 .splice_read = tracing_buffers_splice_read,
5634 .llseek = no_llseek,
5637 #endif /* CONFIG_TRACER_SNAPSHOT */
5639 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5641 struct trace_array *tr = inode->i_private;
5642 struct ftrace_buffer_info *info;
5645 if (tracing_disabled)
5648 if (trace_array_get(tr) < 0)
5651 info = kzalloc(sizeof(*info), GFP_KERNEL);
5653 trace_array_put(tr);
5657 mutex_lock(&trace_types_lock);
5660 info->iter.cpu_file = tracing_get_cpu(inode);
5661 info->iter.trace = tr->current_trace;
5662 info->iter.trace_buffer = &tr->trace_buffer;
5664 /* Force reading ring buffer for first read */
5665 info->read = (unsigned int)-1;
5667 filp->private_data = info;
5669 tr->current_trace->ref++;
5671 mutex_unlock(&trace_types_lock);
5673 ret = nonseekable_open(inode, filp);
5675 trace_array_put(tr);
5681 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5683 struct ftrace_buffer_info *info = filp->private_data;
5684 struct trace_iterator *iter = &info->iter;
5686 return trace_poll(iter, filp, poll_table);
5690 tracing_buffers_read(struct file *filp, char __user *ubuf,
5691 size_t count, loff_t *ppos)
5693 struct ftrace_buffer_info *info = filp->private_data;
5694 struct trace_iterator *iter = &info->iter;
5701 #ifdef CONFIG_TRACER_MAX_TRACE
5702 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5707 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5712 /* Do we have previous read data to read? */
5713 if (info->read < PAGE_SIZE)
5717 trace_access_lock(iter->cpu_file);
5718 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5722 trace_access_unlock(iter->cpu_file);
5725 if (trace_empty(iter)) {
5726 if ((filp->f_flags & O_NONBLOCK))
5729 ret = wait_on_pipe(iter, false);
5740 size = PAGE_SIZE - info->read;
5744 ret = copy_to_user(ubuf, info->spare + info->read, size);
5756 static int tracing_buffers_release(struct inode *inode, struct file *file)
5758 struct ftrace_buffer_info *info = file->private_data;
5759 struct trace_iterator *iter = &info->iter;
5761 mutex_lock(&trace_types_lock);
5763 iter->tr->current_trace->ref--;
5765 __trace_array_put(iter->tr);
5768 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5771 mutex_unlock(&trace_types_lock);
5777 struct ring_buffer *buffer;
5782 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5783 struct pipe_buffer *buf)
5785 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5790 ring_buffer_free_read_page(ref->buffer, ref->page);
5795 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5796 struct pipe_buffer *buf)
5798 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5803 /* Pipe buffer operations for a buffer. */
5804 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5806 .confirm = generic_pipe_buf_confirm,
5807 .release = buffer_pipe_buf_release,
5808 .steal = generic_pipe_buf_steal,
5809 .get = buffer_pipe_buf_get,
5813 * Callback from splice_to_pipe(); used if we need to release some pages
5814 * at the end of the spd in case we errored out while filling the pipe.
5816 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5818 struct buffer_ref *ref =
5819 (struct buffer_ref *)spd->partial[i].private;
5824 ring_buffer_free_read_page(ref->buffer, ref->page);
5826 spd->partial[i].private = 0;
5830 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5831 struct pipe_inode_info *pipe, size_t len,
5834 struct ftrace_buffer_info *info = file->private_data;
5835 struct trace_iterator *iter = &info->iter;
5836 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5837 struct page *pages_def[PIPE_DEF_BUFFERS];
5838 struct splice_pipe_desc spd = {
5840 .partial = partial_def,
5841 .nr_pages_max = PIPE_DEF_BUFFERS,
5843 .ops = &buffer_pipe_buf_ops,
5844 .spd_release = buffer_spd_release,
5846 struct buffer_ref *ref;
5847 int entries, size, i;
5850 #ifdef CONFIG_TRACER_MAX_TRACE
5851 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5855 if (splice_grow_spd(pipe, &spd))
5858 if (*ppos & (PAGE_SIZE - 1))
5861 if (len & (PAGE_SIZE - 1)) {
5862 if (len < PAGE_SIZE)
5868 trace_access_lock(iter->cpu_file);
5869 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5871 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5875 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5882 ref->buffer = iter->trace_buffer->buffer;
5883 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5890 r = ring_buffer_read_page(ref->buffer, &ref->page,
5891 len, iter->cpu_file, 1);
5893 ring_buffer_free_read_page(ref->buffer, ref->page);
5899 * zero out any left over data, this is going to
5902 size = ring_buffer_page_len(ref->page);
5903 if (size < PAGE_SIZE)
5904 memset(ref->page + size, 0, PAGE_SIZE - size);
5906 page = virt_to_page(ref->page);
5908 spd.pages[i] = page;
5909 spd.partial[i].len = PAGE_SIZE;
5910 spd.partial[i].offset = 0;
5911 spd.partial[i].private = (unsigned long)ref;
5915 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5918 trace_access_unlock(iter->cpu_file);
5921 /* did we read anything? */
5922 if (!spd.nr_pages) {
5926 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5929 ret = wait_on_pipe(iter, true);
5936 ret = splice_to_pipe(pipe, &spd);
5937 splice_shrink_spd(&spd);
5942 static const struct file_operations tracing_buffers_fops = {
5943 .open = tracing_buffers_open,
5944 .read = tracing_buffers_read,
5945 .poll = tracing_buffers_poll,
5946 .release = tracing_buffers_release,
5947 .splice_read = tracing_buffers_splice_read,
5948 .llseek = no_llseek,
5952 tracing_stats_read(struct file *filp, char __user *ubuf,
5953 size_t count, loff_t *ppos)
5955 struct inode *inode = file_inode(filp);
5956 struct trace_array *tr = inode->i_private;
5957 struct trace_buffer *trace_buf = &tr->trace_buffer;
5958 int cpu = tracing_get_cpu(inode);
5959 struct trace_seq *s;
5961 unsigned long long t;
5962 unsigned long usec_rem;
5964 s = kmalloc(sizeof(*s), GFP_KERNEL);
5970 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5971 trace_seq_printf(s, "entries: %ld\n", cnt);
5973 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5974 trace_seq_printf(s, "overrun: %ld\n", cnt);
5976 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5977 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5979 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5980 trace_seq_printf(s, "bytes: %ld\n", cnt);
5982 if (trace_clocks[tr->clock_id].in_ns) {
5983 /* local or global for trace_clock */
5984 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5985 usec_rem = do_div(t, USEC_PER_SEC);
5986 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5989 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5990 usec_rem = do_div(t, USEC_PER_SEC);
5991 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5993 /* counter or tsc mode for trace_clock */
5994 trace_seq_printf(s, "oldest event ts: %llu\n",
5995 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5997 trace_seq_printf(s, "now ts: %llu\n",
5998 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6001 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6002 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6004 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6005 trace_seq_printf(s, "read events: %ld\n", cnt);
6007 count = simple_read_from_buffer(ubuf, count, ppos,
6008 s->buffer, trace_seq_used(s));
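/*
 * Illustrative output (not part of the original file; the numbers are
 * made up): reading per_cpu/cpuN/stats yields one "name: value" line
 * per counter printed above, e.g.:
 *
 *	entries: 102
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 4920
 *	oldest event ts:  1132.623456
 *	now ts:  1145.168136
 *	dropped events: 0
 *	read events: 102
 */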
6015 static const struct file_operations tracing_stats_fops = {
6016 .open = tracing_open_generic_tr,
6017 .read = tracing_stats_read,
6018 .llseek = generic_file_llseek,
6019 .release = tracing_release_generic_tr,
6022 #ifdef CONFIG_DYNAMIC_FTRACE
6024 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
6030 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6031 size_t cnt, loff_t *ppos)
6033 static char ftrace_dyn_info_buffer[1024];
6034 static DEFINE_MUTEX(dyn_info_mutex);
6035 unsigned long *p = filp->private_data;
6036 char *buf = ftrace_dyn_info_buffer;
6037 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
6040 mutex_lock(&dyn_info_mutex);
6041 r = sprintf(buf, "%ld ", *p);
6043 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
6046 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6048 mutex_unlock(&dyn_info_mutex);
6053 static const struct file_operations tracing_dyn_info_fops = {
6054 .open = tracing_open_generic,
6055 .read = tracing_read_dyn_info,
6056 .llseek = generic_file_llseek,
6058 #endif /* CONFIG_DYNAMIC_FTRACE */
6060 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6062 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6068 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6070 unsigned long *count = (long *)data;
6082 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6083 struct ftrace_probe_ops *ops, void *data)
6085 long count = (long)data;
6087 seq_printf(m, "%ps:", (void *)ip);
6089 seq_puts(m, "snapshot");
6092 seq_puts(m, ":unlimited\n");
6094 seq_printf(m, ":count=%ld\n", count);
6099 static struct ftrace_probe_ops snapshot_probe_ops = {
6100 .func = ftrace_snapshot,
6101 .print = ftrace_snapshot_print,
6104 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6105 .func = ftrace_count_snapshot,
6106 .print = ftrace_snapshot_print,
6110 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6111 char *glob, char *cmd, char *param, int enable)
6113 struct ftrace_probe_ops *ops;
6114 void *count = (void *)-1;
6118 /* hash funcs only work with set_ftrace_filter */
6122 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6124 if (glob[0] == '!') {
6125 unregister_ftrace_function_probe_func(glob+1, ops);
6132 number = strsep(¶m, ":");
6134 if (!strlen(number))
6138 * We use the callback data field (which is a pointer)
6141 ret = kstrtoul(number, 0, (unsigned long *)&count);
6146 ret = register_ftrace_function_probe(glob, ops, count);
6149 alloc_snapshot(&global_trace);
6151 return ret < 0 ? ret : 0;
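/*
 * Illustrative usage (not part of the original file): this callback is
 * registered as a set_ftrace_filter command below, so it is driven by
 * writes of the form "<function>:<command>[:<count>]", e.g. (schedule
 * here is just an example function):
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:3' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The optional count is parsed above with strsep()/kstrtoul(), and a
 * leading '!' unregisters the probe.
 */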
6154 static struct ftrace_func_command ftrace_snapshot_cmd = {
6156 .func = ftrace_trace_snapshot_callback,
6159 static __init int register_snapshot_cmd(void)
6161 return register_ftrace_command(&ftrace_snapshot_cmd);
6164 static inline __init int register_snapshot_cmd(void) { return 0; }
6165 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6167 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6169 if (WARN_ON(!tr->dir))
6170 return ERR_PTR(-ENODEV);
6172 /* Top directory uses NULL as the parent */
6173 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6176 /* All sub buffers have a descriptor */
6180 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6182 struct dentry *d_tracer;
6185 return tr->percpu_dir;
6187 d_tracer = tracing_get_dentry(tr);
6188 if (IS_ERR(d_tracer))
6191 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6193 WARN_ONCE(!tr->percpu_dir,
6194 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6196 return tr->percpu_dir;
6199 static struct dentry *
6200 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6201 void *data, long cpu, const struct file_operations *fops)
6203 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6205 if (ret) /* See tracing_get_cpu() */
6206 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6211 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6213 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6214 struct dentry *d_cpu;
6215 char cpu_dir[30]; /* 30 characters should be more than enough */
6220 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6221 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6223 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6227 /* per cpu trace_pipe */
6228 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6229 tr, cpu, &tracing_pipe_fops);
6232 trace_create_cpu_file("trace", 0644, d_cpu,
6233 tr, cpu, &tracing_fops);
6235 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6236 tr, cpu, &tracing_buffers_fops);
6238 trace_create_cpu_file("stats", 0444, d_cpu,
6239 tr, cpu, &tracing_stats_fops);
6241 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6242 tr, cpu, &tracing_entries_fops);
6244 #ifdef CONFIG_TRACER_SNAPSHOT
6245 trace_create_cpu_file("snapshot", 0644, d_cpu,
6246 tr, cpu, &snapshot_fops);
6248 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6249 tr, cpu, &snapshot_raw_fops);
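/*
 * Illustrative layout (not part of the original file): the calls above
 * give each CPU its own directory under the instance, e.g.:
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot		(CONFIG_TRACER_SNAPSHOT only)
 *	per_cpu/cpu0/snapshot_raw	(CONFIG_TRACER_SNAPSHOT only)
 */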
6253 #ifdef CONFIG_FTRACE_SELFTEST
6254 /* Let selftest have access to static functions in this file */
6255 #include "trace_selftest.c"
6259 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6262 struct trace_option_dentry *topt = filp->private_data;
6265 if (topt->flags->val & topt->opt->bit)
6270 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6274 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6277 struct trace_option_dentry *topt = filp->private_data;
6281 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6285 if (val != 0 && val != 1)
6288 if (!!(topt->flags->val & topt->opt->bit) != val) {
6289 mutex_lock(&trace_types_lock);
6290 ret = __set_tracer_option(topt->tr, topt->flags,
6292 mutex_unlock(&trace_types_lock);
6303 static const struct file_operations trace_options_fops = {
6304 .open = tracing_open_generic,
6305 .read = trace_options_read,
6306 .write = trace_options_write,
6307 .llseek = generic_file_llseek,
6311 * In order to pass in both the trace_array descriptor as well as the index
6312 * to the flag that the trace option file represents, the trace_array
6313 * has a character array of trace_flags_index[], which holds the index
6314 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6315 * The address of this character array is passed to the flag option file
6316 * read/write callbacks.
6318 * In order to extract both the index and the trace_array descriptor,
6319 * get_tr_index() uses the following algorithm.
6323 * As the pointer itself contains the address of the index (remember
6326 * Then, to get the trace_array descriptor, we subtract that index
6327 * from the ptr, which gives us the start of the index array itself.
6329 * ptr - idx == &index[0]
6331 * Then a simple container_of() from that pointer gets us to the
6332 * trace_array descriptor.
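/*
 * Worked example (illustrative, not part of the original file): for
 * the option file of flag bit 3, the file's private data is
 * &tr->trace_flags_index[3], and trace_flags_index[3] == 3, so:
 *
 *	idx = *(unsigned char *)data;	-> 3
 *	data - idx			-> &tr->trace_flags_index[0]
 *	container_of(data - idx, struct trace_array, trace_flags_index)
 *					-> tr
 */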
6334 static void get_tr_index(void *data, struct trace_array **ptr,
6335 unsigned int *pindex)
6337 *pindex = *(unsigned char *)data;
6339 *ptr = container_of(data - *pindex, struct trace_array,
6344 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6347 void *tr_index = filp->private_data;
6348 struct trace_array *tr;
6352 get_tr_index(tr_index, &tr, &index);
6354 if (tr->trace_flags & (1 << index))
6359 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6363 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6366 void *tr_index = filp->private_data;
6367 struct trace_array *tr;
6372 get_tr_index(tr_index, &tr, &index);
6374 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6378 if (val != 0 && val != 1)
6381 mutex_lock(&trace_types_lock);
6382 ret = set_tracer_flag(tr, 1 << index, val);
6383 mutex_unlock(&trace_types_lock);
6393 static const struct file_operations trace_options_core_fops = {
6394 .open = tracing_open_generic,
6395 .read = trace_options_core_read,
6396 .write = trace_options_core_write,
6397 .llseek = generic_file_llseek,
6400 struct dentry *trace_create_file(const char *name,
6402 struct dentry *parent,
6404 const struct file_operations *fops)
6408 ret = tracefs_create_file(name, mode, parent, data, fops);
6410 pr_warning("Could not create tracefs '%s' entry\n", name);
6416 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6418 struct dentry *d_tracer;
6423 d_tracer = tracing_get_dentry(tr);
6424 if (IS_ERR(d_tracer))
6427 tr->options = tracefs_create_dir("options", d_tracer);
6429 pr_warning("Could not create tracefs directory 'options'\n");
6437 create_trace_option_file(struct trace_array *tr,
6438 struct trace_option_dentry *topt,
6439 struct tracer_flags *flags,
6440 struct tracer_opt *opt)
6442 struct dentry *t_options;
6444 t_options = trace_options_init_dentry(tr);
6448 topt->flags = flags;
6452 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6453 &trace_options_fops);
6458 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6460 struct trace_option_dentry *topts;
6461 struct trace_options *tr_topts;
6462 struct tracer_flags *flags;
6463 struct tracer_opt *opts;
6470 flags = tracer->flags;
6472 if (!flags || !flags->opts)
6476 * If this is an instance, only create flags for tracers
6477 * the instance may have.
6479 if (!trace_ok_for_array(tracer, tr))
6482 for (i = 0; i < tr->nr_topts; i++) {
6484 * Check if these flags have already been added.
6485 * Some tracers share flags.
6487 if (tr->topts[i].tracer->flags == tracer->flags)
6493 for (cnt = 0; opts[cnt].name; cnt++)
6496 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6500 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6507 tr->topts = tr_topts;
6508 tr->topts[tr->nr_topts].tracer = tracer;
6509 tr->topts[tr->nr_topts].topts = topts;
6512 for (cnt = 0; opts[cnt].name; cnt++) {
6513 create_trace_option_file(tr, &topts[cnt], flags,
6515 WARN_ONCE(topts[cnt].entry == NULL,
6516 "Failed to create trace option: %s",
6521 static struct dentry *
6522 create_trace_option_core_file(struct trace_array *tr,
6523 const char *option, long index)
6525 struct dentry *t_options;
6527 t_options = trace_options_init_dentry(tr);
6531 return trace_create_file(option, 0644, t_options,
6532 (void *)&tr->trace_flags_index[index],
6533 &trace_options_core_fops);
6536 static void create_trace_options_dir(struct trace_array *tr)
6538 struct dentry *t_options;
6539 bool top_level = tr == &global_trace;
6542 t_options = trace_options_init_dentry(tr);
6546 for (i = 0; trace_options[i]; i++) {
6548 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6549 create_trace_option_core_file(tr, trace_options[i], i);
6554 rb_simple_read(struct file *filp, char __user *ubuf,
6555 size_t cnt, loff_t *ppos)
6557 struct trace_array *tr = filp->private_data;
6561 r = tracer_tracing_is_on(tr);
6562 r = sprintf(buf, "%d\n", r);
6564 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6568 rb_simple_write(struct file *filp, const char __user *ubuf,
6569 size_t cnt, loff_t *ppos)
6571 struct trace_array *tr = filp->private_data;
6572 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6576 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6581 mutex_lock(&trace_types_lock);
6583 tracer_tracing_on(tr);
6584 if (tr->current_trace->start)
6585 tr->current_trace->start(tr);
6587 tracer_tracing_off(tr);
6588 if (tr->current_trace->stop)
6589 tr->current_trace->stop(tr);
6591 mutex_unlock(&trace_types_lock);
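/*
 * Illustrative usage (not part of the original file; path assumes
 * tracefs is mounted at /sys/kernel/tracing): this pair backs the
 * "tracing_on" file created in init_tracer_tracefs() below:
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on	(stop recording)
 *	# echo 1 > /sys/kernel/tracing/tracing_on	(resume recording)
 *	# cat /sys/kernel/tracing/tracing_on		(prints 0 or 1)
 */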
6599 static const struct file_operations rb_simple_fops = {
6600 .open = tracing_open_generic_tr,
6601 .read = rb_simple_read,
6602 .write = rb_simple_write,
6603 .release = tracing_release_generic_tr,
6604 .llseek = default_llseek,
6607 struct dentry *trace_instance_dir;
6610 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6613 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6615 enum ring_buffer_flags rb_flags;
6617 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6621 buf->buffer = ring_buffer_alloc(size, rb_flags);
6625 buf->data = alloc_percpu(struct trace_array_cpu);
6627 ring_buffer_free(buf->buffer);
6631 /* Allocate the first page for all buffers */
6632 set_buffer_entries(&tr->trace_buffer,
6633 ring_buffer_size(tr->trace_buffer.buffer, 0));
6638 static int allocate_trace_buffers(struct trace_array *tr, int size)
6642 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6646 #ifdef CONFIG_TRACER_MAX_TRACE
6647 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6648 allocate_snapshot ? size : 1);
6650 ring_buffer_free(tr->trace_buffer.buffer);
6651 free_percpu(tr->trace_buffer.data);
6654 tr->allocated_snapshot = allocate_snapshot;
6657 * Only the top level trace array gets its snapshot allocated
6658 * from the kernel command line.
6660 allocate_snapshot = false;
6665 static void free_trace_buffer(struct trace_buffer *buf)
6668 ring_buffer_free(buf->buffer);
6670 free_percpu(buf->data);
6675 static void free_trace_buffers(struct trace_array *tr)
6680 free_trace_buffer(&tr->trace_buffer);
6682 #ifdef CONFIG_TRACER_MAX_TRACE
6683 free_trace_buffer(&tr->max_buffer);
6687 static void init_trace_flags_index(struct trace_array *tr)
6691 /* Used by the trace options files */
6692 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6693 tr->trace_flags_index[i] = i;
6696 static void __update_tracer_options(struct trace_array *tr)
6700 for (t = trace_types; t; t = t->next)
6701 add_tracer_options(tr, t);
6704 static void update_tracer_options(struct trace_array *tr)
6706 mutex_lock(&trace_types_lock);
6707 __update_tracer_options(tr);
6708 mutex_unlock(&trace_types_lock);
6711 static int instance_mkdir(const char *name)
6713 struct trace_array *tr;
6716 mutex_lock(&trace_types_lock);
6719 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6720 if (tr->name && strcmp(tr->name, name) == 0)
6725 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6729 tr->name = kstrdup(name, GFP_KERNEL);
6733 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6736 tr->trace_flags = global_trace.trace_flags;
6738 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6740 raw_spin_lock_init(&tr->start_lock);
6742 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6744 tr->current_trace = &nop_trace;
6746 INIT_LIST_HEAD(&tr->systems);
6747 INIT_LIST_HEAD(&tr->events);
6749 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6752 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6756 ret = event_trace_add_tracer(tr->dir, tr);
6758 tracefs_remove_recursive(tr->dir);
6762 init_tracer_tracefs(tr, tr->dir);
6763 init_trace_flags_index(tr);
6764 __update_tracer_options(tr);
6766 list_add(&tr->list, &ftrace_trace_arrays);
6768 mutex_unlock(&trace_types_lock);
6773 free_trace_buffers(tr);
6774 free_cpumask_var(tr->tracing_cpumask);
6779 mutex_unlock(&trace_types_lock);
6785 static int instance_rmdir(const char *name)
6787 struct trace_array *tr;
6792 mutex_lock(&trace_types_lock);
6795 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6796 if (tr->name && strcmp(tr->name, name) == 0) {
6805 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6808 list_del(&tr->list);
6810 tracing_set_nop(tr);
6811 event_trace_del_tracer(tr);
6812 ftrace_destroy_function_files(tr);
6813 tracefs_remove_recursive(tr->dir);
6814 free_trace_buffers(tr);
6816 for (i = 0; i < tr->nr_topts; i++) {
6817 kfree(tr->topts[i].topts);
6827 mutex_unlock(&trace_types_lock);
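/*
 * Illustrative usage (not part of the original file; path assumes
 * tracefs is mounted at /sys/kernel/tracing): instance_mkdir() and
 * instance_rmdir() are driven by plain mkdir/rmdir on the "instances"
 * directory created below, e.g. (function is just an example tracer):
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *	# rmdir /sys/kernel/tracing/instances/foo
 *
 * Each instance gets its own ring buffer and the per-instance files
 * created by init_tracer_tracefs(); the rmdir is refused while the
 * instance is still referenced.
 */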
6832 static __init void create_trace_instances(struct dentry *d_tracer)
6834 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6837 if (WARN_ON(!trace_instance_dir))
6842 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6846 trace_create_file("available_tracers", 0444, d_tracer,
6847 tr, &show_traces_fops);
6849 trace_create_file("current_tracer", 0644, d_tracer,
6850 tr, &set_tracer_fops);
6852 trace_create_file("tracing_cpumask", 0644, d_tracer,
6853 tr, &tracing_cpumask_fops);
6855 trace_create_file("trace_options", 0644, d_tracer,
6856 tr, &tracing_iter_fops);
6858 trace_create_file("trace", 0644, d_tracer,
6861 trace_create_file("trace_pipe", 0444, d_tracer,
6862 tr, &tracing_pipe_fops);
6864 trace_create_file("buffer_size_kb", 0644, d_tracer,
6865 tr, &tracing_entries_fops);
6867 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6868 tr, &tracing_total_entries_fops);
6870 trace_create_file("free_buffer", 0200, d_tracer,
6871 tr, &tracing_free_buffer_fops);
6873 trace_create_file("trace_marker", 0220, d_tracer,
6874 tr, &tracing_mark_fops);
6876 trace_create_file("saved_tgids", 0444, d_tracer,
6877 tr, &tracing_saved_tgids_fops);
6879 trace_create_file("trace_clock", 0644, d_tracer, tr,
6882 trace_create_file("tracing_on", 0644, d_tracer,
6883 tr, &rb_simple_fops);
6885 create_trace_options_dir(tr);
6887 #ifdef CONFIG_TRACER_MAX_TRACE
6888 trace_create_file("tracing_max_latency", 0644, d_tracer,
6889 &tr->max_latency, &tracing_max_lat_fops);
6892 if (ftrace_create_function_files(tr, d_tracer))
6893 WARN(1, "Could not allocate function filter files");
6895 #ifdef CONFIG_TRACER_SNAPSHOT
6896 trace_create_file("snapshot", 0644, d_tracer,
6897 tr, &snapshot_fops);
6900 for_each_tracing_cpu(cpu)
6901 tracing_init_tracefs_percpu(tr, cpu);
6905 static struct vfsmount *trace_automount(void *ignore)
6907 struct vfsmount *mnt;
6908 struct file_system_type *type;
6911 * To maintain backward compatibility for tools that mount
6912 * debugfs to get to the tracing facility, tracefs is automatically
6913 * mounted to the debugfs/tracing directory.
6915 type = get_fs_type("tracefs");
6918 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6919 put_filesystem(type);
6928 * tracing_init_dentry - initialize top level trace array
6930 * This is called when creating files or directories in the tracing
6931 * directory. It is called via fs_initcall() by any of the boot up code
6932 * and expects to return the dentry of the top level tracing directory.
6934 struct dentry *tracing_init_dentry(void)
6936 struct trace_array *tr = &global_trace;
6938 /* The top level trace array uses NULL as parent */
6942 if (WARN_ON(!tracefs_initialized()) ||
6943 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6944 WARN_ON(!debugfs_initialized())))
6945 return ERR_PTR(-ENODEV);
6948 * As there may still be users that expect the tracing
6949 * files to exist in debugfs/tracing, we must automount
6950 * the tracefs file system there, so older tools still
6951 * work with the newer kernel.
6953 tr->dir = debugfs_create_automount("tracing", NULL,
6954 trace_automount, NULL);
6956 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6957 return ERR_PTR(-ENOMEM);
6963 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6964 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6966 static void __init trace_enum_init(void)
6970 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6971 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6974 #ifdef CONFIG_MODULES
6975 static void trace_module_add_enums(struct module *mod)
6977 if (!mod->num_trace_enums)
6981 * Modules with bad taint do not have events created; do
6982 * not bother with enums either.
6984 if (trace_module_has_bad_taint(mod))
6987 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6990 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6991 static void trace_module_remove_enums(struct module *mod)
6993 union trace_enum_map_item *map;
6994 union trace_enum_map_item **last = &trace_enum_maps;
6996 if (!mod->num_trace_enums)
6999 mutex_lock(&trace_enum_mutex);
7001 map = trace_enum_maps;
7004 if (map->head.mod == mod)
7006 map = trace_enum_jmp_to_tail(map);
7007 last = &map->tail.next;
7008 map = map->tail.next;
7013 *last = trace_enum_jmp_to_tail(map)->tail.next;
7016 mutex_unlock(&trace_enum_mutex);
7019 static inline void trace_module_remove_enums(struct module *mod) { }
7020 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7022 static int trace_module_notify(struct notifier_block *self,
7023 unsigned long val, void *data)
7025 struct module *mod = data;
7028 case MODULE_STATE_COMING:
7029 trace_module_add_enums(mod);
7031 case MODULE_STATE_GOING:
7032 trace_module_remove_enums(mod);
7039 static struct notifier_block trace_module_nb = {
7040 .notifier_call = trace_module_notify,
7043 #endif /* CONFIG_MODULES */
7045 static __init int tracer_init_tracefs(void)
7047 struct dentry *d_tracer;
7049 trace_access_lock_init();
7051 d_tracer = tracing_init_dentry();
7052 if (IS_ERR(d_tracer))
7055 init_tracer_tracefs(&global_trace, d_tracer);
7057 trace_create_file("tracing_thresh", 0644, d_tracer,
7058 &global_trace, &tracing_thresh_fops);
7060 trace_create_file("README", 0444, d_tracer,
7061 NULL, &tracing_readme_fops);
7063 trace_create_file("saved_cmdlines", 0444, d_tracer,
7064 NULL, &tracing_saved_cmdlines_fops);
7066 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7067 NULL, &tracing_saved_cmdlines_size_fops);
7071 trace_create_enum_file(d_tracer);
7073 #ifdef CONFIG_MODULES
7074 register_module_notifier(&trace_module_nb);
7077 #ifdef CONFIG_DYNAMIC_FTRACE
7078 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7079 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7082 create_trace_instances(d_tracer);
7084 update_tracer_options(&global_trace);
7089 static int trace_panic_handler(struct notifier_block *this,
7090 unsigned long event, void *unused)
7092 if (ftrace_dump_on_oops)
7093 ftrace_dump(ftrace_dump_on_oops);
7097 static struct notifier_block trace_panic_notifier = {
7098 .notifier_call = trace_panic_handler,
7100 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7103 static int trace_die_handler(struct notifier_block *self,
7109 if (ftrace_dump_on_oops)
7110 ftrace_dump(ftrace_dump_on_oops);
7118 static struct notifier_block trace_die_notifier = {
7119 .notifier_call = trace_die_handler,
7124 * printk is set to a max of 1024; we really don't need it that big.
7125 * Nothing should be printing 1000 characters anyway.
7127 #define TRACE_MAX_PRINT 1000
7130 * Define here KERN_TRACE so that we have one place to modify
7131 * it if we decide to change what log level the ftrace dump
7134 #define KERN_TRACE KERN_EMERG
7137 trace_printk_seq(struct trace_seq *s)
7139 /* Probably should print a warning here. */
7140 if (s->seq.len >= TRACE_MAX_PRINT)
7141 s->seq.len = TRACE_MAX_PRINT;
7144 * More paranoid code. Although the buffer size is set to
7145 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7146 * an extra layer of protection.
7148 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7149 s->seq.len = s->seq.size - 1;
7151 /* should already be zero terminated, but we are paranoid. */
7152 s->buffer[s->seq.len] = 0;
7154 printk(KERN_TRACE "%s", s->buffer);
7159 void trace_init_global_iter(struct trace_iterator *iter)
7161 iter->tr = &global_trace;
7162 iter->trace = iter->tr->current_trace;
7163 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7164 iter->trace_buffer = &global_trace.trace_buffer;
7166 if (iter->trace && iter->trace->open)
7167 iter->trace->open(iter);
7169 /* Annotate start of buffers if we had overruns */
7170 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7171 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7173 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7174 if (trace_clocks[iter->tr->clock_id].in_ns)
7175 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7178 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7180 /* use static because iter can be a bit big for the stack */
7181 static struct trace_iterator iter;
7182 static atomic_t dump_running;
7183 struct trace_array *tr = &global_trace;
7184 unsigned int old_userobj;
7185 unsigned long flags;
7188 /* Only allow one dump user at a time. */
7189 if (atomic_inc_return(&dump_running) != 1) {
7190 atomic_dec(&dump_running);
7195 * Always turn off tracing when we dump.
7196 * We don't need to show trace output of what happens
7197 * between multiple crashes.
7199 * If the user does a sysrq-z, then they can re-enable
7200 * tracing with echo 1 > tracing_on.
7204 local_irq_save(flags);
7206 /* Simulate the iterator */
7207 trace_init_global_iter(&iter);
7209 for_each_tracing_cpu(cpu) {
7210 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7213 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7215 /* don't look at user memory in panic mode */
7216 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7218 switch (oops_dump_mode) {
7220 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7223 iter.cpu_file = raw_smp_processor_id();
7228 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7229 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7232 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7234 /* Did function tracer already get disabled? */
7235 if (ftrace_is_dead()) {
7236 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7237 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7241 * We need to stop all tracing on all CPUs to read
7242 * the next buffer. This is a bit expensive, but is
7243 * not done often. We fill all that we can read,
7244 * and then release the locks again.
7247 while (!trace_empty(&iter)) {
7250 printk(KERN_TRACE "---------------------------------\n");
7254 /* reset all but tr, trace, and overruns */
7255 memset(&iter.seq, 0,
7256 sizeof(struct trace_iterator) -
7257 offsetof(struct trace_iterator, seq));
7258 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7261 if (trace_find_next_entry_inc(&iter) != NULL) {
7264 ret = print_trace_line(&iter);
7265 if (ret != TRACE_TYPE_NO_CONSUME)
7266 trace_consume(&iter);
7268 touch_nmi_watchdog();
7270 trace_printk_seq(&iter.seq);
7274 printk(KERN_TRACE " (ftrace buffer empty)\n");
7276 printk(KERN_TRACE "---------------------------------\n");
7279 tr->trace_flags |= old_userobj;
7281 for_each_tracing_cpu(cpu) {
7282 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7284 atomic_dec(&dump_running);
7285 local_irq_restore(flags);
7287 EXPORT_SYMBOL_GPL(ftrace_dump);
7289 __init static int tracer_alloc_buffers(void)
7295 * Make sure we don't accidentally add more trace options
7296 * than we have bits for.
7298 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7300 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7303 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7304 goto out_free_buffer_mask;
7306 /* Only allocate trace_printk buffers if a trace_printk exists */
7307 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7308 /* Must be called before global_trace.buffer is allocated */
7309 trace_printk_init_buffers();
7311 /* To save memory, keep the ring buffer size to its minimum */
7312 if (ring_buffer_expanded)
7313 ring_buf_size = trace_buf_size;
7317 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7318 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7320 raw_spin_lock_init(&global_trace.start_lock);
7322 /* Used for event triggers */
7323 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7325 goto out_free_cpumask;
7327 if (trace_create_savedcmd() < 0)
7328 goto out_free_temp_buffer;
7330 /* TODO: make the number of buffers hot pluggable with CPUs */
7331 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7332 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7334 goto out_free_savedcmd;
7337 if (global_trace.buffer_disabled)
7340 if (trace_boot_clock) {
7341 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7343 pr_warning("Trace clock %s not defined, going back to default\n",
7348 * register_tracer() might reference current_trace, so it
7349 * needs to be set before we register anything. This is
7350 * just a bootstrap of current_trace anyway.
7352 global_trace.current_trace = &nop_trace;
7354 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7356 ftrace_init_global_array_ops(&global_trace);
7358 init_trace_flags_index(&global_trace);
7360 register_tracer(&nop_trace);
7362 /* All seems OK, enable tracing */
7363 tracing_disabled = 0;
7365 atomic_notifier_chain_register(&panic_notifier_list,
7366 &trace_panic_notifier);
7368 register_die_notifier(&trace_die_notifier);
7370 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7372 INIT_LIST_HEAD(&global_trace.systems);
7373 INIT_LIST_HEAD(&global_trace.events);
7374 list_add(&global_trace.list, &ftrace_trace_arrays);
7376 apply_trace_boot_options();
7378 register_snapshot_cmd();
7383 free_saved_cmdlines_buffer(savedcmd);
7384 out_free_temp_buffer:
7385 ring_buffer_free(temp_buffer);
7387 free_cpumask_var(global_trace.tracing_cpumask);
7388 out_free_buffer_mask:
7389 free_cpumask_var(tracing_buffer_mask);
7394 void __init trace_init(void)
7396 if (tracepoint_printk) {
7397 tracepoint_print_iter =
7398 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7399 if (WARN_ON(!tracepoint_print_iter))
7400 tracepoint_printk = 0;
7402 tracer_alloc_buffers();
7406 __init static int clear_boot_tracer(void)
7409 * The name of the default bootup tracer points into an init
7410 * section that is freed after boot. This function is called at
7411 * late_initcall time. If we did not find the boot tracer, clear
7412 * the pointer out, to prevent a later registration from accessing
7413 * memory that is about to be freed.
7415 if (!default_bootup_tracer)
7418 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7419 default_bootup_tracer);
7420 default_bootup_tracer = NULL;
7425 fs_initcall(tracer_init_tracefs);
7426 late_initcall(clear_boot_tracer);