2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
41 #include <linux/sched/rt.h>
44 #include "trace_output.h"
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
50 bool ring_buffer_expanded;
53 * We need to change this state when a selftest is running.
54 * A selftest will look into the ring buffer to count the
55 * entries inserted during the selftest, although concurrent
56 * insertions into the ring buffer, such as trace_printk, could occur
57 * at the same time, giving false positive or negative results.
59 static bool __read_mostly tracing_selftest_running;
62 * If a tracer is running, we do not want to run SELFTEST.
64 bool __read_mostly tracing_selftest_disabled;
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
71 static struct tracer_flags dummy_tracer_flags = {
73 .opts = dummy_tracer_opt
77 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
83 * To prevent the comm cache from being overwritten when no
84 * tracing is active, only save the comm when a trace event occurred.
87 static DEFINE_PER_CPU(bool, trace_cmdline_save);
90 * Kill all tracing for good (never come back).
91 * It is initialized to 1 but will turn to zero if the initialization
92 * of the tracer is successful. But that is the only place that sets this back to zero.
95 static int tracing_disabled = 1;
97 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
99 cpumask_var_t __read_mostly tracing_buffer_mask;
102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105 * is set, then ftrace_dump is called. This will output the contents
106 * of the ftrace buffers to the console. This is very useful for
107 * capturing traces that lead to crashes and outputting them to a serial console.
110 * It is off by default, but you can enable it either by specifying
111 * "ftrace_dump_on_oops" on the kernel command line, or by setting
112 * /proc/sys/kernel/ftrace_dump_on_oops
113 * Set to 1 if you want to dump the buffers of all CPUs
114 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
117 enum ftrace_dump_mode ftrace_dump_on_oops;
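/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the runtime knob described above can be flipped through procfs, e.g.
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   (dump all CPUs)
 *   echo 2 > /proc/sys/kernel/ftrace_dump_on_oops   (dump only the oopsing CPU)
 */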
119 /* When set, tracing will stop when a WARN*() is hit */
120 int __disable_trace_on_warning;
122 static int tracing_set_tracer(const char *buf);
124 #define MAX_TRACER_SIZE 100
125 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
126 static char *default_bootup_tracer;
128 static bool allocate_snapshot;
130 static int __init set_cmdline_ftrace(char *str)
132 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
133 default_bootup_tracer = bootup_tracer_buf;
134 /* We are using ftrace early, expand it */
135 ring_buffer_expanded = true;
138 __setup("ftrace=", set_cmdline_ftrace);
140 static int __init set_ftrace_dump_on_oops(char *str)
142 if (*str++ != '=' || !*str) {
143 ftrace_dump_on_oops = DUMP_ALL;
147 if (!strcmp("orig_cpu", str)) {
148 ftrace_dump_on_oops = DUMP_ORIG;
154 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
156 static int __init stop_trace_on_warning(char *str)
158 __disable_trace_on_warning = 1;
161 __setup("traceoff_on_warning=", stop_trace_on_warning);
163 static int __init boot_alloc_snapshot(char *str)
165 allocate_snapshot = true;
166 /* We also need the main ring buffer expanded */
167 ring_buffer_expanded = true;
170 __setup("alloc_snapshot", boot_alloc_snapshot);
173 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
174 static char *trace_boot_options __initdata;
176 static int __init set_trace_boot_options(char *str)
178 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
179 trace_boot_options = trace_boot_options_buf;
182 __setup("trace_options=", set_trace_boot_options);
185 unsigned long long ns2usecs(cycle_t nsec)
193 * The global_trace is the descriptor that holds the tracing
194 * buffers for the live tracing. For each CPU, it contains
195 * a linked list of pages that will store trace entries. The
196 * page descriptor of the pages in memory is used to hold
197 * the linked list by linking the lru item in the page descriptor
198 * to each of the pages in the buffer per CPU.
200 * For each active CPU there is a data field that holds the
201 * pages for the buffer for that CPU. Each CPU has the same number
202 * of pages allocated for its buffer.
204 static struct trace_array global_trace;
206 LIST_HEAD(ftrace_trace_arrays);
208 int trace_array_get(struct trace_array *this_tr)
210 struct trace_array *tr;
213 mutex_lock(&trace_types_lock);
214 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
221 mutex_unlock(&trace_types_lock);
226 static void __trace_array_put(struct trace_array *this_tr)
228 WARN_ON(!this_tr->ref);
232 void trace_array_put(struct trace_array *this_tr)
234 mutex_lock(&trace_types_lock);
235 __trace_array_put(this_tr);
236 mutex_unlock(&trace_types_lock);
239 int filter_check_discard(struct ftrace_event_file *file, void *rec,
240 struct ring_buffer *buffer,
241 struct ring_buffer_event *event)
243 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
244 !filter_match_preds(file->filter, rec)) {
245 ring_buffer_discard_commit(buffer, event);
251 EXPORT_SYMBOL_GPL(filter_check_discard);
253 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
254 struct ring_buffer *buffer,
255 struct ring_buffer_event *event)
257 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
258 !filter_match_preds(call->filter, rec)) {
259 ring_buffer_discard_commit(buffer, event);
265 EXPORT_SYMBOL_GPL(call_filter_check_discard);
267 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
271 /* Early boot up does not have a buffer yet */
273 return trace_clock_local();
275 ts = ring_buffer_time_stamp(buf->buffer, cpu);
276 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
281 cycle_t ftrace_now(int cpu)
283 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
287 * tracing_is_enabled - Show if global_trace has been disabled
289 * Shows if the global trace has been enabled or not. It uses the
290 * mirror flag "buffer_disabled" to be used in fast paths such as for
291 * the irqsoff tracer. But it may be inaccurate due to races. If you
292 * need to know the accurate state, use tracing_is_on() which is a little
293 * slower, but accurate.
295 int tracing_is_enabled(void)
298 * For quick access (irqsoff uses this in fast path), just
299 * return the mirror variable of the state of the ring buffer.
300 * It's a little racy, but we don't really care.
303 return !global_trace.buffer_disabled;
307 * trace_buf_size is the size in bytes that is allocated
308 * for a buffer. Note, the number of bytes is always rounded
311 * This number is purposely set to a low number of 16384.
312 * If the dump on oops happens, it will be much appreciated
313 * to not have to wait for all that output. Anyway, this can be
314 * configured at boot time and at run time.
316 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
318 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
320 /* trace_types holds a linked list of available tracers. */
321 static struct tracer *trace_types __read_mostly;
324 * trace_types_lock is used to protect the trace_types list.
326 DEFINE_MUTEX(trace_types_lock);
329 * serialize the access of the ring buffer
331 * The ring buffer serializes readers, but that is only low-level protection.
332 * The validity of the events (which are returned by ring_buffer_peek() etc.)
333 * is not protected by the ring buffer.
335 * The content of events may become garbage if we allow another process to consume
336 * these events concurrently:
337 * A) the page of the consumed events may become a normal page
338 * (not a reader page) in the ring buffer, and this page will be rewritten
339 * by the events producer.
340 * B) The page of the consumed events may become a page for splice_read,
341 * and this page will be returned to the system.
343 * These primitives allow multiple processes to access different cpu ring buffers
346 * These primitives don't distinguish between read-only and read-consume access.
347 * Multiple read-only accesses are also serialized.
351 static DECLARE_RWSEM(all_cpu_access_lock);
352 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
354 static inline void trace_access_lock(int cpu)
356 if (cpu == RING_BUFFER_ALL_CPUS) {
357 /* gain it for accessing the whole ring buffer. */
358 down_write(&all_cpu_access_lock);
360 /* gain it for accessing a cpu ring buffer. */
362 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
363 down_read(&all_cpu_access_lock);
365 /* Secondly block other access to this @cpu ring buffer. */
366 mutex_lock(&per_cpu(cpu_access_lock, cpu));
370 static inline void trace_access_unlock(int cpu)
372 if (cpu == RING_BUFFER_ALL_CPUS) {
373 up_write(&all_cpu_access_lock);
375 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
376 up_read(&all_cpu_access_lock);
380 static inline void trace_access_lock_init(void)
384 for_each_possible_cpu(cpu)
385 mutex_init(&per_cpu(cpu_access_lock, cpu));
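/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a per-cpu reader and an all-cpu reader both follow the same pattern,
 * and the rwsem above keeps them from running concurrently.
 *
 *   trace_access_lock(cpu);		// or RING_BUFFER_ALL_CPUS
 *   ... peek/consume events of that cpu buffer ...
 *   trace_access_unlock(cpu);
 */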
390 static DEFINE_MUTEX(access_lock);
392 static inline void trace_access_lock(int cpu)
395 mutex_lock(&access_lock);
398 static inline void trace_access_unlock(int cpu)
401 mutex_unlock(&access_lock);
404 static inline void trace_access_lock_init(void)
410 /* trace_flags holds trace_options default values */
411 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
412 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
413 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
414 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
416 static void tracer_tracing_on(struct trace_array *tr)
418 if (tr->trace_buffer.buffer)
419 ring_buffer_record_on(tr->trace_buffer.buffer);
421 * This flag is looked at when buffers haven't been allocated
422 * yet, or by some tracers (like irqsoff), that just want to
423 * know if the ring buffer has been disabled, but it can handle
424 * races of where it gets disabled but we still do a record.
425 * As the check is in the fast path of the tracers, it is more
426 * important to be fast than accurate.
428 tr->buffer_disabled = 0;
429 /* Make the flag seen by readers */
434 * tracing_on - enable tracing buffers
436 * This function enables tracing buffers that may have been
437 * disabled with tracing_off.
439 void tracing_on(void)
441 tracer_tracing_on(&global_trace);
443 EXPORT_SYMBOL_GPL(tracing_on);
446 * __trace_puts - write a constant string into the trace buffer.
447 * @ip: The address of the caller
448 * @str: The constant string to write
449 * @size: The size of the string.
451 int __trace_puts(unsigned long ip, const char *str, int size)
453 struct ring_buffer_event *event;
454 struct ring_buffer *buffer;
455 struct print_entry *entry;
456 unsigned long irq_flags;
459 if (unlikely(tracing_selftest_running || tracing_disabled))
462 alloc = sizeof(*entry) + size + 2; /* possible \n added */
464 local_save_flags(irq_flags);
465 buffer = global_trace.trace_buffer.buffer;
466 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
467 irq_flags, preempt_count());
471 entry = ring_buffer_event_data(event);
474 memcpy(&entry->buf, str, size);
476 /* Add a newline if necessary */
477 if (entry->buf[size - 1] != '\n') {
478 entry->buf[size] = '\n';
479 entry->buf[size + 1] = '\0';
481 entry->buf[size] = '\0';
483 __buffer_unlock_commit(buffer, event);
487 EXPORT_SYMBOL_GPL(__trace_puts);
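/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers normally reach this through the trace_puts() macro, which
 * supplies the caller's address; a direct call looks roughly like
 *
 *   __trace_puts(_THIS_IP_, "hit the slow path\n", 18);
 */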
490 * __trace_bputs - write the pointer to a constant string into trace buffer
491 * @ip: The address of the caller
492 * @str: The constant string to write to the buffer to
494 int __trace_bputs(unsigned long ip, const char *str)
496 struct ring_buffer_event *event;
497 struct ring_buffer *buffer;
498 struct bputs_entry *entry;
499 unsigned long irq_flags;
500 int size = sizeof(struct bputs_entry);
502 if (unlikely(tracing_selftest_running || tracing_disabled))
505 local_save_flags(irq_flags);
506 buffer = global_trace.trace_buffer.buffer;
507 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
508 irq_flags, preempt_count());
512 entry = ring_buffer_event_data(event);
516 __buffer_unlock_commit(buffer, event);
520 EXPORT_SYMBOL_GPL(__trace_bputs);
522 #ifdef CONFIG_TRACER_SNAPSHOT
524 * trace_snapshot - take a snapshot of the current buffer.
526 * This causes a swap between the snapshot buffer and the current live
527 * tracing buffer. You can use this to take snapshots of the live
528 * trace when some condition is triggered, but continue to trace.
530 * Note, make sure to allocate the snapshot with either
531 * a tracing_snapshot_alloc(), or by doing it manually
532 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
534 * If the snapshot buffer is not allocated, it will stop tracing.
535 * Basically making a permanent snapshot.
537 void tracing_snapshot(void)
539 struct trace_array *tr = &global_trace;
540 struct tracer *tracer = tr->current_trace;
544 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
545 internal_trace_puts("*** snapshot is being ignored ***\n");
549 if (!tr->allocated_snapshot) {
550 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
551 internal_trace_puts("*** stopping trace here! ***\n");
556 /* Note, snapshot can not be used when the tracer uses it */
557 if (tracer->use_max_tr) {
558 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
559 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
563 local_irq_save(flags);
564 update_max_tr(tr, current, smp_processor_id());
565 local_irq_restore(flags);
567 EXPORT_SYMBOL_GPL(tracing_snapshot);
569 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
570 struct trace_buffer *size_buf, int cpu_id);
571 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
573 static int alloc_snapshot(struct trace_array *tr)
577 if (!tr->allocated_snapshot) {
579 /* allocate spare buffer */
580 ret = resize_buffer_duplicate_size(&tr->max_buffer,
581 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
585 tr->allocated_snapshot = true;
591 void free_snapshot(struct trace_array *tr)
594 * We don't free the ring buffer; instead, we resize it because
595 * the max_tr ring buffer has some state (e.g. ring->clock) and
596 * we want to preserve it.
598 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
599 set_buffer_entries(&tr->max_buffer, 1);
600 tracing_reset_online_cpus(&tr->max_buffer);
601 tr->allocated_snapshot = false;
605 * tracing_alloc_snapshot - allocate snapshot buffer.
607 * This only allocates the snapshot buffer if it isn't already
608 * allocated - it doesn't also take a snapshot.
610 * This is meant to be used in cases where the snapshot buffer needs
611 * to be set up for events that can't sleep but need to be able to
612 * trigger a snapshot.
614 int tracing_alloc_snapshot(void)
616 struct trace_array *tr = &global_trace;
619 ret = alloc_snapshot(tr);
624 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
627 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
629 * This is similar to trace_snapshot(), but it will allocate the
630 * snapshot buffer if it isn't already allocated. Use this only
631 * where it is safe to sleep, as the allocation may sleep.
633 * This causes a swap between the snapshot buffer and the current live
634 * tracing buffer. You can use this to take snapshots of the live
635 * trace when some condition is triggered, but continue to trace.
637 void tracing_snapshot_alloc(void)
641 ret = tracing_alloc_snapshot();
647 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
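/*
 * Usage sketch (editor's illustration, not part of the original file):
 * how a caller might arm the snapshot buffer and later take a snapshot.
 * Guarded out since it is an example only.
 */
#if 0
static void example_arm_and_take_snapshot(void)
{
	/* Process context: allocating the spare buffer may sleep. */
	if (tracing_alloc_snapshot())
		return;

	/*
	 * The swap itself does not sleep, so this part could also run
	 * from an atomic context once the buffer is armed.
	 */
	tracing_snapshot();
}
#endif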
649 void tracing_snapshot(void)
651 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
653 EXPORT_SYMBOL_GPL(tracing_snapshot);
654 int tracing_alloc_snapshot(void)
656 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
659 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
660 void tracing_snapshot_alloc(void)
665 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
666 #endif /* CONFIG_TRACER_SNAPSHOT */
668 static void tracer_tracing_off(struct trace_array *tr)
670 if (tr->trace_buffer.buffer)
671 ring_buffer_record_off(tr->trace_buffer.buffer);
673 * This flag is looked at when buffers haven't been allocated
674 * yet, or by some tracers (like irqsoff), that just want to
675 * know if the ring buffer has been disabled, but it can handle
676 * races of where it gets disabled but we still do a record.
677 * As the check is in the fast path of the tracers, it is more
678 * important to be fast than accurate.
680 tr->buffer_disabled = 1;
681 /* Make the flag seen by readers */
686 * tracing_off - turn off tracing buffers
688 * This function stops the tracing buffers from recording data.
689 * It does not disable any overhead the tracers themselves may
690 * be causing. This function simply causes all recording to
691 * the ring buffers to fail.
693 void tracing_off(void)
695 tracer_tracing_off(&global_trace);
697 EXPORT_SYMBOL_GPL(tracing_off);
699 void disable_trace_on_warning(void)
701 if (__disable_trace_on_warning)
706 * tracer_tracing_is_on - show real state of ring buffer enabled
707 * @tr : the trace array to know if ring buffer is enabled
709 * Shows real state of the ring buffer if it is enabled or not.
711 static int tracer_tracing_is_on(struct trace_array *tr)
713 if (tr->trace_buffer.buffer)
714 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
715 return !tr->buffer_disabled;
719 * tracing_is_on - show state of ring buffers enabled
721 int tracing_is_on(void)
723 return tracer_tracing_is_on(&global_trace);
725 EXPORT_SYMBOL_GPL(tracing_is_on);
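/*
 * Usage sketch (editor's illustration, not part of the original file):
 * freeze the ring buffer when something interesting happens so the
 * events leading up to it are preserved for later inspection.
 */
#if 0
static void example_freeze_trace(bool hit_bug)
{
	if (hit_bug && tracing_is_on())
		tracing_off();	/* buffer keeps what was already recorded */
}
#endif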
727 static int __init set_buf_size(char *str)
729 unsigned long buf_size;
733 buf_size = memparse(str, &str);
734 /* nr_entries can not be zero */
737 trace_buf_size = buf_size;
740 __setup("trace_buf_size=", set_buf_size);
742 static int __init set_tracing_thresh(char *str)
744 unsigned long threshold;
749 ret = kstrtoul(str, 0, &threshold);
752 tracing_thresh = threshold * 1000;
755 __setup("tracing_thresh=", set_tracing_thresh);
757 unsigned long nsecs_to_usecs(unsigned long nsecs)
762 /* These must match the bit positions in trace_iterator_flags */
763 static const char *trace_options[] = {
796 int in_ns; /* is this clock in nanoseconds? */
798 { trace_clock_local, "local", 1 },
799 { trace_clock_global, "global", 1 },
800 { trace_clock_counter, "counter", 0 },
801 { trace_clock_jiffies, "uptime", 1 },
802 { trace_clock, "perf", 1 },
807 * trace_parser_get_init - gets the buffer for trace parser
809 int trace_parser_get_init(struct trace_parser *parser, int size)
811 memset(parser, 0, sizeof(*parser));
813 parser->buffer = kmalloc(size, GFP_KERNEL);
822 * trace_parser_put - frees the buffer for trace parser
824 void trace_parser_put(struct trace_parser *parser)
826 kfree(parser->buffer);
830 * trace_get_user - reads the user input string separated by space
831 * (matched by isspace(ch))
833 * For each string found the 'struct trace_parser' is updated,
834 * and the function returns.
836 * Returns number of bytes read.
838 * See kernel/trace/trace.h for 'struct trace_parser' details.
840 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
841 size_t cnt, loff_t *ppos)
848 trace_parser_clear(parser);
850 ret = get_user(ch, ubuf++);
858 * The parser is not finished with the last write,
859 * continue reading the user input without skipping spaces.
862 /* skip white space */
863 while (cnt && isspace(ch)) {
864 ret = get_user(ch, ubuf++);
871 /* only spaces were written */
881 /* read the non-space input */
882 while (cnt && !isspace(ch)) {
883 if (parser->idx < parser->size - 1)
884 parser->buffer[parser->idx++] = ch;
889 ret = get_user(ch, ubuf++);
896 /* We either got finished input or we have to wait for another call. */
898 parser->buffer[parser->idx] = 0;
899 parser->cont = false;
900 } else if (parser->idx < parser->size - 1) {
902 parser->buffer[parser->idx++] = ch;
915 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
923 if (s->len <= s->readpos)
926 len = s->len - s->readpos;
929 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
939 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
943 if (s->len <= s->readpos)
946 len = s->len - s->readpos;
949 memcpy(buf, s->buffer + s->readpos, cnt);
956 * ftrace_max_lock is used to protect the swapping of buffers
957 * when taking a max snapshot. The buffers themselves are
958 * protected by per_cpu spinlocks. But the action of the swap
959 * needs its own lock.
961 * This is defined as an arch_spinlock_t in order to help
962 * with performance when lockdep debugging is enabled.
964 * It is also used in other places outside of update_max_tr,
965 * so it needs to be defined outside of the
966 * CONFIG_TRACER_MAX_TRACE #ifdef.
968 static arch_spinlock_t ftrace_max_lock =
969 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
971 unsigned long __read_mostly tracing_thresh;
973 #ifdef CONFIG_TRACER_MAX_TRACE
974 unsigned long __read_mostly tracing_max_latency;
977 * Copy the new maximum trace into the separate maximum-trace
978 * structure. (this way the maximum trace is permanently saved,
979 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
982 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
984 struct trace_buffer *trace_buf = &tr->trace_buffer;
985 struct trace_buffer *max_buf = &tr->max_buffer;
986 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
987 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
990 max_buf->time_start = data->preempt_timestamp;
992 max_data->saved_latency = tracing_max_latency;
993 max_data->critical_start = data->critical_start;
994 max_data->critical_end = data->critical_end;
996 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
997 max_data->pid = tsk->pid;
999 * If tsk == current, then use current_uid(), as that does not use
1000 * RCU. The irq tracer can be called out of RCU scope.
1003 max_data->uid = current_uid();
1005 max_data->uid = task_uid(tsk);
1007 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1008 max_data->policy = tsk->policy;
1009 max_data->rt_priority = tsk->rt_priority;
1011 /* record this task's comm */
1012 tracing_record_cmdline(tsk);
1016 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1018 * @tsk: the task with the latency
1019 * @cpu: The cpu that initiated the trace.
1021 * Flip the buffers between the @tr and the max_tr and record information
1022 * about which task was the cause of this latency.
1025 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1027 struct ring_buffer *buf;
1032 WARN_ON_ONCE(!irqs_disabled());
1034 if (!tr->allocated_snapshot) {
1035 /* Only the nop tracer should hit this when disabling */
1036 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1040 arch_spin_lock(&ftrace_max_lock);
1042 buf = tr->trace_buffer.buffer;
1043 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1044 tr->max_buffer.buffer = buf;
1046 __update_max_tr(tr, tsk, cpu);
1047 arch_spin_unlock(&ftrace_max_lock);
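/*
 * Caller sketch (editor's illustration, not part of the original file):
 * a latency tracer typically calls this with interrupts disabled when
 * it observes a new maximum, along the lines of
 *
 *   if (delta > tracing_max_latency) {
 *           tracing_max_latency = delta;
 *           update_max_tr(tr, current, smp_processor_id());
 *   }
 */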
1051 * update_max_tr_single - only copy one trace over, and reset the rest
1053 * @tsk - task with the latency
1054 * @cpu - the cpu of the buffer to copy.
1056 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1059 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1066 WARN_ON_ONCE(!irqs_disabled());
1067 if (!tr->allocated_snapshot) {
1068 /* Only the nop tracer should hit this when disabling */
1069 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1073 arch_spin_lock(&ftrace_max_lock);
1075 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1077 if (ret == -EBUSY) {
1079 * We failed to swap the buffer due to a commit taking
1080 * place on this CPU. We fail to record, but we reset
1081 * the max trace buffer (no one writes directly to it)
1082 * and flag that it failed.
1084 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1085 "Failed to swap buffers due to commit in progress\n");
1088 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1090 __update_max_tr(tr, tsk, cpu);
1091 arch_spin_unlock(&ftrace_max_lock);
1093 #endif /* CONFIG_TRACER_MAX_TRACE */
1095 static void default_wait_pipe(struct trace_iterator *iter)
1097 /* Iterators are static, they should be filled or empty */
1098 if (trace_buffer_iter(iter, iter->cpu_file))
1101 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1104 #ifdef CONFIG_FTRACE_STARTUP_TEST
1105 static int run_tracer_selftest(struct tracer *type)
1107 struct trace_array *tr = &global_trace;
1108 struct tracer *saved_tracer = tr->current_trace;
1111 if (!type->selftest || tracing_selftest_disabled)
1115 * Run a selftest on this tracer.
1116 * Here we reset the trace buffer, and set the current
1117 * tracer to be this tracer. The tracer can then run some
1118 * internal tracing to verify that everything is in order.
1119 * If we fail, we do not register this tracer.
1121 tracing_reset_online_cpus(&tr->trace_buffer);
1123 tr->current_trace = type;
1125 #ifdef CONFIG_TRACER_MAX_TRACE
1126 if (type->use_max_tr) {
1127 /* If we expanded the buffers, make sure the max is expanded too */
1128 if (ring_buffer_expanded)
1129 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1130 RING_BUFFER_ALL_CPUS);
1131 tr->allocated_snapshot = true;
1135 /* the test is responsible for initializing and enabling */
1136 pr_info("Testing tracer %s: ", type->name);
1137 ret = type->selftest(type, tr);
1138 /* the test is responsible for resetting too */
1139 tr->current_trace = saved_tracer;
1141 printk(KERN_CONT "FAILED!\n");
1142 /* Add the warning after printing 'FAILED' */
1146 /* Only reset on passing, to avoid touching corrupted buffers */
1147 tracing_reset_online_cpus(&tr->trace_buffer);
1149 #ifdef CONFIG_TRACER_MAX_TRACE
1150 if (type->use_max_tr) {
1151 tr->allocated_snapshot = false;
1153 /* Shrink the max buffer again */
1154 if (ring_buffer_expanded)
1155 ring_buffer_resize(tr->max_buffer.buffer, 1,
1156 RING_BUFFER_ALL_CPUS);
1160 printk(KERN_CONT "PASSED\n");
1164 static inline int run_tracer_selftest(struct tracer *type)
1168 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1171 * register_tracer - register a tracer with the ftrace system.
1172 * @type - the plugin for the tracer
1174 * Register a new plugin tracer.
1176 int register_tracer(struct tracer *type)
1182 pr_info("Tracer must have a name\n");
1186 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1187 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1191 mutex_lock(&trace_types_lock);
1193 tracing_selftest_running = true;
1195 for (t = trace_types; t; t = t->next) {
1196 if (strcmp(type->name, t->name) == 0) {
1198 pr_info("Tracer %s already registered\n",
1205 if (!type->set_flag)
1206 type->set_flag = &dummy_set_flag;
1208 type->flags = &dummy_tracer_flags;
1210 if (!type->flags->opts)
1211 type->flags->opts = dummy_tracer_opt;
1212 if (!type->wait_pipe)
1213 type->wait_pipe = default_wait_pipe;
1215 ret = run_tracer_selftest(type);
1219 type->next = trace_types;
1223 tracing_selftest_running = false;
1224 mutex_unlock(&trace_types_lock);
1226 if (ret || !default_bootup_tracer)
1229 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1232 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1233 /* Do we want this tracer to start on bootup? */
1234 tracing_set_tracer(type->name);
1235 default_bootup_tracer = NULL;
1236 /* disable other selftests, since this will break it. */
1237 tracing_selftest_disabled = true;
1238 #ifdef CONFIG_FTRACE_STARTUP_TEST
1239 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1247 void tracing_reset(struct trace_buffer *buf, int cpu)
1249 struct ring_buffer *buffer = buf->buffer;
1254 ring_buffer_record_disable(buffer);
1256 /* Make sure all commits have finished */
1257 synchronize_sched();
1258 ring_buffer_reset_cpu(buffer, cpu);
1260 ring_buffer_record_enable(buffer);
1263 void tracing_reset_online_cpus(struct trace_buffer *buf)
1265 struct ring_buffer *buffer = buf->buffer;
1271 ring_buffer_record_disable(buffer);
1273 /* Make sure all commits have finished */
1274 synchronize_sched();
1276 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1278 for_each_online_cpu(cpu)
1279 ring_buffer_reset_cpu(buffer, cpu);
1281 ring_buffer_record_enable(buffer);
1284 /* Must have trace_types_lock held */
1285 void tracing_reset_all_online_cpus(void)
1287 struct trace_array *tr;
1289 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1290 tracing_reset_online_cpus(&tr->trace_buffer);
1291 #ifdef CONFIG_TRACER_MAX_TRACE
1292 tracing_reset_online_cpus(&tr->max_buffer);
1297 #define SAVED_CMDLINES 128
1298 #define NO_CMDLINE_MAP UINT_MAX
1299 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1300 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1301 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1302 static int cmdline_idx;
1303 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1305 /* temporarily disable recording */
1306 static atomic_t trace_record_cmdline_disabled __read_mostly;
1308 static void trace_init_cmdlines(void)
1310 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1311 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
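/*
 * Worked example (editor's illustration, not part of the original file):
 * saving the comm of pid 1234 claims the next free slot, say idx 7:
 *
 *   map_pid_to_cmdline[1234] = 7;
 *   map_cmdline_to_pid[7] = 1234;
 *   memcpy(&saved_cmdlines[7], tsk->comm, TASK_COMM_LEN);
 *
 * trace_find_cmdline(1234, comm) then reads slot 7 back; if the slot
 * has since been recycled for another pid, it reports "<...>" instead.
 */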
1315 int is_tracing_stopped(void)
1317 return global_trace.stop_count;
1321 * tracing_start - quick start of the tracer
1323 * If tracing is enabled but was stopped by tracing_stop,
1324 * this will start the tracer back up.
1326 void tracing_start(void)
1328 struct ring_buffer *buffer;
1329 unsigned long flags;
1331 if (tracing_disabled)
1334 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1335 if (--global_trace.stop_count) {
1336 if (global_trace.stop_count < 0) {
1337 /* Someone screwed up their debugging */
1339 global_trace.stop_count = 0;
1344 /* Prevent the buffers from switching */
1345 arch_spin_lock(&ftrace_max_lock);
1347 buffer = global_trace.trace_buffer.buffer;
1349 ring_buffer_record_enable(buffer);
1351 #ifdef CONFIG_TRACER_MAX_TRACE
1352 buffer = global_trace.max_buffer.buffer;
1354 ring_buffer_record_enable(buffer);
1357 arch_spin_unlock(&ftrace_max_lock);
1361 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1364 static void tracing_start_tr(struct trace_array *tr)
1366 struct ring_buffer *buffer;
1367 unsigned long flags;
1369 if (tracing_disabled)
1372 /* If global, we need to also start the max tracer */
1373 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1374 return tracing_start();
1376 raw_spin_lock_irqsave(&tr->start_lock, flags);
1378 if (--tr->stop_count) {
1379 if (tr->stop_count < 0) {
1380 /* Someone screwed up their debugging */
1387 buffer = tr->trace_buffer.buffer;
1389 ring_buffer_record_enable(buffer);
1392 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1396 * tracing_stop - quick stop of the tracer
1398 * Light weight way to stop tracing. Use in conjunction with
1401 void tracing_stop(void)
1403 struct ring_buffer *buffer;
1404 unsigned long flags;
1407 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1408 if (global_trace.stop_count++)
1411 /* Prevent the buffers from switching */
1412 arch_spin_lock(&ftrace_max_lock);
1414 buffer = global_trace.trace_buffer.buffer;
1416 ring_buffer_record_disable(buffer);
1418 #ifdef CONFIG_TRACER_MAX_TRACE
1419 buffer = global_trace.max_buffer.buffer;
1421 ring_buffer_record_disable(buffer);
1424 arch_spin_unlock(&ftrace_max_lock);
1427 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1430 static void tracing_stop_tr(struct trace_array *tr)
1432 struct ring_buffer *buffer;
1433 unsigned long flags;
1435 /* If global, we need to also stop the max tracer */
1436 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1437 return tracing_stop();
1439 raw_spin_lock_irqsave(&tr->start_lock, flags);
1440 if (tr->stop_count++)
1443 buffer = tr->trace_buffer.buffer;
1445 ring_buffer_record_disable(buffer);
1448 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1451 void trace_stop_cmdline_recording(void);
1453 static void trace_save_cmdline(struct task_struct *tsk)
1457 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1461 * It's not the end of the world if we don't get
1462 * the lock, but we also don't want to spin
1463 * nor do we want to disable interrupts,
1464 * so if we miss here, then better luck next time.
1466 if (!arch_spin_trylock(&trace_cmdline_lock))
1469 idx = map_pid_to_cmdline[tsk->pid];
1470 if (idx == NO_CMDLINE_MAP) {
1471 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1474 * Check whether the cmdline buffer at idx has a pid
1475 * mapped. We are going to overwrite that entry so we
1476 * need to clear the map_pid_to_cmdline. Otherwise we
1477 * would read the new comm for the old pid.
1479 pid = map_cmdline_to_pid[idx];
1480 if (pid != NO_CMDLINE_MAP)
1481 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1483 map_cmdline_to_pid[idx] = tsk->pid;
1484 map_pid_to_cmdline[tsk->pid] = idx;
1489 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1491 arch_spin_unlock(&trace_cmdline_lock);
1494 void trace_find_cmdline(int pid, char comm[])
1499 strcpy(comm, "<idle>");
1503 if (WARN_ON_ONCE(pid < 0)) {
1504 strcpy(comm, "<XXX>");
1508 if (pid > PID_MAX_DEFAULT) {
1509 strcpy(comm, "<...>");
1514 arch_spin_lock(&trace_cmdline_lock);
1515 map = map_pid_to_cmdline[pid];
1516 if (map != NO_CMDLINE_MAP)
1517 strcpy(comm, saved_cmdlines[map]);
1519 strcpy(comm, "<...>");
1521 arch_spin_unlock(&trace_cmdline_lock);
1525 void tracing_record_cmdline(struct task_struct *tsk)
1527 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1530 if (!__this_cpu_read(trace_cmdline_save))
1533 __this_cpu_write(trace_cmdline_save, false);
1535 trace_save_cmdline(tsk);
1539 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1542 struct task_struct *tsk = current;
1544 entry->preempt_count = pc & 0xff;
1545 entry->pid = (tsk) ? tsk->pid : 0;
1547 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1548 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1550 TRACE_FLAG_IRQS_NOSUPPORT |
1552 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1553 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1554 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1555 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1557 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1559 struct ring_buffer_event *
1560 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1563 unsigned long flags, int pc)
1565 struct ring_buffer_event *event;
1567 event = ring_buffer_lock_reserve(buffer, len);
1568 if (event != NULL) {
1569 struct trace_entry *ent = ring_buffer_event_data(event);
1571 tracing_generic_entry_update(ent, flags, pc);
1579 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1581 __this_cpu_write(trace_cmdline_save, true);
1582 ring_buffer_unlock_commit(buffer, event);
1586 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1587 struct ring_buffer_event *event,
1588 unsigned long flags, int pc)
1590 __buffer_unlock_commit(buffer, event);
1592 ftrace_trace_stack(buffer, flags, 6, pc);
1593 ftrace_trace_userstack(buffer, flags, pc);
1596 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1597 struct ring_buffer_event *event,
1598 unsigned long flags, int pc)
1600 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1602 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1604 struct ring_buffer_event *
1605 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1606 struct ftrace_event_file *ftrace_file,
1607 int type, unsigned long len,
1608 unsigned long flags, int pc)
1610 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1611 return trace_buffer_lock_reserve(*current_rb,
1612 type, len, flags, pc);
1614 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1616 struct ring_buffer_event *
1617 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1618 int type, unsigned long len,
1619 unsigned long flags, int pc)
1621 *current_rb = global_trace.trace_buffer.buffer;
1622 return trace_buffer_lock_reserve(*current_rb,
1623 type, len, flags, pc);
1625 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1627 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1628 struct ring_buffer_event *event,
1629 unsigned long flags, int pc)
1631 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1633 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1635 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1636 struct ring_buffer_event *event,
1637 unsigned long flags, int pc,
1638 struct pt_regs *regs)
1640 __buffer_unlock_commit(buffer, event);
1642 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1643 ftrace_trace_userstack(buffer, flags, pc);
1645 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1647 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1648 struct ring_buffer_event *event)
1650 ring_buffer_discard_commit(buffer, event);
1652 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1655 trace_function(struct trace_array *tr,
1656 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1659 struct ftrace_event_call *call = &event_function;
1660 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1661 struct ring_buffer_event *event;
1662 struct ftrace_entry *entry;
1664 /* If we are reading the ring buffer, don't trace */
1665 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1668 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1672 entry = ring_buffer_event_data(event);
1674 entry->parent_ip = parent_ip;
1676 if (!call_filter_check_discard(call, entry, buffer, event))
1677 __buffer_unlock_commit(buffer, event);
1680 #ifdef CONFIG_STACKTRACE
1682 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1683 struct ftrace_stack {
1684 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1687 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1688 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1690 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1691 unsigned long flags,
1692 int skip, int pc, struct pt_regs *regs)
1694 struct ftrace_event_call *call = &event_kernel_stack;
1695 struct ring_buffer_event *event;
1696 struct stack_entry *entry;
1697 struct stack_trace trace;
1699 int size = FTRACE_STACK_ENTRIES;
1701 trace.nr_entries = 0;
1705 * Since events can happen in NMIs there's no safe way to
1706 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1707 * or NMI comes in, it will just have to use the default
1708 * FTRACE_STACK_SIZE.
1710 preempt_disable_notrace();
1712 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1714 * We don't need any atomic variables, just a barrier.
1715 * If an interrupt comes in, we don't care, because it would
1716 * have exited and put the counter back to what we want.
1717 * We just need a barrier to keep gcc from moving things
1721 if (use_stack == 1) {
1722 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1723 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1726 save_stack_trace_regs(regs, &trace);
1728 save_stack_trace(&trace);
1730 if (trace.nr_entries > size)
1731 size = trace.nr_entries;
1733 /* From now on, use_stack is a boolean */
1736 size *= sizeof(unsigned long);
1738 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1739 sizeof(*entry) + size, flags, pc);
1742 entry = ring_buffer_event_data(event);
1744 memset(&entry->caller, 0, size);
1747 memcpy(&entry->caller, trace.entries,
1748 trace.nr_entries * sizeof(unsigned long));
1750 trace.max_entries = FTRACE_STACK_ENTRIES;
1751 trace.entries = entry->caller;
1753 save_stack_trace_regs(regs, &trace);
1755 save_stack_trace(&trace);
1758 entry->size = trace.nr_entries;
1760 if (!call_filter_check_discard(call, entry, buffer, event))
1761 __buffer_unlock_commit(buffer, event);
1764 /* Again, don't let gcc optimize things here */
1766 __this_cpu_dec(ftrace_stack_reserve);
1767 preempt_enable_notrace();
1771 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1772 int skip, int pc, struct pt_regs *regs)
1774 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1777 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1780 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1783 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1786 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1789 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1792 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1796 * trace_dump_stack - record a stack back trace in the trace buffer
1797 * @skip: Number of functions to skip (helper handlers)
1799 void trace_dump_stack(int skip)
1801 unsigned long flags;
1803 if (tracing_disabled || tracing_selftest_running)
1806 local_save_flags(flags);
1809 * Skip 3 more, seems to get us at the caller of
1813 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1814 flags, skip, preempt_count(), NULL);
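/*
 * Usage sketch (editor's illustration, not part of the original file):
 * record the current backtrace into the trace buffer at a suspect
 * call site; the argument skips that many extra helper frames.
 *
 *   trace_dump_stack(0);
 */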
1817 static DEFINE_PER_CPU(int, user_stack_count);
1820 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1822 struct ftrace_event_call *call = &event_user_stack;
1823 struct ring_buffer_event *event;
1824 struct userstack_entry *entry;
1825 struct stack_trace trace;
1827 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1831 * NMIs can not handle page faults, even with fix ups.
1832 * Saving the user stack can (and often does) fault.
1834 if (unlikely(in_nmi()))
1838 * prevent recursion, since the user stack tracing may
1839 * trigger other kernel events.
1842 if (__this_cpu_read(user_stack_count))
1845 __this_cpu_inc(user_stack_count);
1847 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1848 sizeof(*entry), flags, pc);
1850 goto out_drop_count;
1851 entry = ring_buffer_event_data(event);
1853 entry->tgid = current->tgid;
1854 memset(&entry->caller, 0, sizeof(entry->caller));
1856 trace.nr_entries = 0;
1857 trace.max_entries = FTRACE_STACK_ENTRIES;
1859 trace.entries = entry->caller;
1861 save_stack_trace_user(&trace);
1862 if (!call_filter_check_discard(call, entry, buffer, event))
1863 __buffer_unlock_commit(buffer, event);
1866 __this_cpu_dec(user_stack_count);
1872 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1874 ftrace_trace_userstack(tr, flags, preempt_count());
1878 #endif /* CONFIG_STACKTRACE */
1880 /* created for use with alloc_percpu */
1881 struct trace_buffer_struct {
1882 char buffer[TRACE_BUF_SIZE];
1885 static struct trace_buffer_struct *trace_percpu_buffer;
1886 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1887 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1888 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1891 * The buffer used is dependent on the context. There is a per cpu
1892 * buffer for normal context, softirq context, hard irq context and
1893 * for NMI context. This allows for lockless recording.
1895 * Note, if the buffers failed to be allocated, then this returns NULL
1897 static char *get_trace_buf(void)
1899 struct trace_buffer_struct *percpu_buffer;
1902 * If we have allocated per cpu buffers, then we do not
1903 * need to do any locking.
1906 percpu_buffer = trace_percpu_nmi_buffer;
1908 percpu_buffer = trace_percpu_irq_buffer;
1909 else if (in_softirq())
1910 percpu_buffer = trace_percpu_sirq_buffer;
1912 percpu_buffer = trace_percpu_buffer;
1917 return this_cpu_ptr(&percpu_buffer->buffer[0]);
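/*
 * Worked example (editor's illustration, not part of the original file):
 * if a trace_printk() in normal context is interrupted by a softirq,
 * an irq and then an NMI, each level picks its own per-cpu buffer
 * above (normal -> sirq -> irq -> nmi), so no level can scribble over
 * the half-built string of the context it interrupted and no locking
 * is required.
 */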
1920 static int alloc_percpu_trace_buffer(void)
1922 struct trace_buffer_struct *buffers;
1923 struct trace_buffer_struct *sirq_buffers;
1924 struct trace_buffer_struct *irq_buffers;
1925 struct trace_buffer_struct *nmi_buffers;
1927 buffers = alloc_percpu(struct trace_buffer_struct);
1931 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1935 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1939 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1943 trace_percpu_buffer = buffers;
1944 trace_percpu_sirq_buffer = sirq_buffers;
1945 trace_percpu_irq_buffer = irq_buffers;
1946 trace_percpu_nmi_buffer = nmi_buffers;
1951 free_percpu(irq_buffers);
1953 free_percpu(sirq_buffers);
1955 free_percpu(buffers);
1957 WARN(1, "Could not allocate percpu trace_printk buffer");
1961 static int buffers_allocated;
1963 void trace_printk_init_buffers(void)
1965 if (buffers_allocated)
1968 if (alloc_percpu_trace_buffer())
1971 pr_info("ftrace: Allocated trace_printk buffers\n");
1973 /* Expand the buffers to set size */
1974 tracing_update_buffers();
1976 buffers_allocated = 1;
1979 * trace_printk_init_buffers() can be called by modules.
1980 * If that happens, then we need to start cmdline recording
1981 * directly here. If the global_trace.buffer is already
1982 * allocated here, then this was called by module code.
1984 if (global_trace.trace_buffer.buffer)
1985 tracing_start_cmdline_record();
1988 void trace_printk_start_comm(void)
1990 /* Start tracing comms if trace printk is set */
1991 if (!buffers_allocated)
1993 tracing_start_cmdline_record();
1996 static void trace_printk_start_stop_comm(int enabled)
1998 if (!buffers_allocated)
2002 tracing_start_cmdline_record();
2004 tracing_stop_cmdline_record();
2008 * trace_vbprintk - write binary msg to tracing buffer
2011 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2013 struct ftrace_event_call *call = &event_bprint;
2014 struct ring_buffer_event *event;
2015 struct ring_buffer *buffer;
2016 struct trace_array *tr = &global_trace;
2017 struct bprint_entry *entry;
2018 unsigned long flags;
2020 int len = 0, size, pc;
2022 if (unlikely(tracing_selftest_running || tracing_disabled))
2025 /* Don't pollute graph traces with trace_vprintk internals */
2026 pause_graph_tracing();
2028 pc = preempt_count();
2029 preempt_disable_notrace();
2031 tbuffer = get_trace_buf();
2037 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2039 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2042 local_save_flags(flags);
2043 size = sizeof(*entry) + sizeof(u32) * len;
2044 buffer = tr->trace_buffer.buffer;
2045 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2049 entry = ring_buffer_event_data(event);
2053 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2054 if (!call_filter_check_discard(call, entry, buffer, event)) {
2055 __buffer_unlock_commit(buffer, event);
2056 ftrace_trace_stack(buffer, flags, 6, pc);
2060 preempt_enable_notrace();
2061 unpause_graph_tracing();
2065 EXPORT_SYMBOL_GPL(trace_vbprintk);
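/*
 * Usage sketch (editor's illustration, not part of the original file;
 * variable names are placeholders): trace_printk() with a constant
 * format and arguments normally lands here via the binary printk path;
 * only the format pointer and the raw arguments are stored, and the
 * string is rendered when the buffer is read.
 *
 *   trace_printk("completed %d requests in %llu ns\n", nr_done, delta);
 */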
2068 __trace_array_vprintk(struct ring_buffer *buffer,
2069 unsigned long ip, const char *fmt, va_list args)
2071 struct ftrace_event_call *call = &event_print;
2072 struct ring_buffer_event *event;
2073 int len = 0, size, pc;
2074 struct print_entry *entry;
2075 unsigned long flags;
2078 if (tracing_disabled || tracing_selftest_running)
2081 /* Don't pollute graph traces with trace_vprintk internals */
2082 pause_graph_tracing();
2084 pc = preempt_count();
2085 preempt_disable_notrace();
2088 tbuffer = get_trace_buf();
2094 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2095 if (len > TRACE_BUF_SIZE)
2098 local_save_flags(flags);
2099 size = sizeof(*entry) + len + 1;
2100 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2104 entry = ring_buffer_event_data(event);
2107 memcpy(&entry->buf, tbuffer, len);
2108 entry->buf[len] = '\0';
2109 if (!call_filter_check_discard(call, entry, buffer, event)) {
2110 __buffer_unlock_commit(buffer, event);
2111 ftrace_trace_stack(buffer, flags, 6, pc);
2114 preempt_enable_notrace();
2115 unpause_graph_tracing();
2120 int trace_array_vprintk(struct trace_array *tr,
2121 unsigned long ip, const char *fmt, va_list args)
2123 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2126 int trace_array_printk(struct trace_array *tr,
2127 unsigned long ip, const char *fmt, ...)
2132 if (!(trace_flags & TRACE_ITER_PRINTK))
2136 ret = trace_array_vprintk(tr, ip, fmt, ap);
2141 int trace_array_printk_buf(struct ring_buffer *buffer,
2142 unsigned long ip, const char *fmt, ...)
2147 if (!(trace_flags & TRACE_ITER_PRINTK))
2151 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2156 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2158 return trace_array_vprintk(&global_trace, ip, fmt, args);
2160 EXPORT_SYMBOL_GPL(trace_vprintk);
2162 static void trace_iterator_increment(struct trace_iterator *iter)
2164 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2168 ring_buffer_read(buf_iter, NULL);
2171 static struct trace_entry *
2172 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2173 unsigned long *lost_events)
2175 struct ring_buffer_event *event;
2176 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2179 event = ring_buffer_iter_peek(buf_iter, ts);
2181 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2185 iter->ent_size = ring_buffer_event_length(event);
2186 return ring_buffer_event_data(event);
2192 static struct trace_entry *
2193 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2194 unsigned long *missing_events, u64 *ent_ts)
2196 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2197 struct trace_entry *ent, *next = NULL;
2198 unsigned long lost_events = 0, next_lost = 0;
2199 int cpu_file = iter->cpu_file;
2200 u64 next_ts = 0, ts;
2206 * If we are in a per_cpu trace file, don't bother iterating over
2207 * all cpus; just peek directly.
2209 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2210 if (ring_buffer_empty_cpu(buffer, cpu_file))
2212 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2214 *ent_cpu = cpu_file;
2219 for_each_tracing_cpu(cpu) {
2221 if (ring_buffer_empty_cpu(buffer, cpu))
2224 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2227 * Pick the entry with the smallest timestamp:
2229 if (ent && (!next || ts < next_ts)) {
2233 next_lost = lost_events;
2234 next_size = iter->ent_size;
2238 iter->ent_size = next_size;
2241 *ent_cpu = next_cpu;
2247 *missing_events = next_lost;
2252 /* Find the next real entry, without updating the iterator itself */
2253 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2254 int *ent_cpu, u64 *ent_ts)
2256 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2259 /* Find the next real entry, and increment the iterator to the next entry */
2260 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2262 iter->ent = __find_next_entry(iter, &iter->cpu,
2263 &iter->lost_events, &iter->ts);
2266 trace_iterator_increment(iter);
2268 return iter->ent ? iter : NULL;
2271 static void trace_consume(struct trace_iterator *iter)
2273 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2274 &iter->lost_events);
2277 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2279 struct trace_iterator *iter = m->private;
2283 WARN_ON_ONCE(iter->leftover);
2287 /* can't go backwards */
2292 ent = trace_find_next_entry_inc(iter);
2296 while (ent && iter->idx < i)
2297 ent = trace_find_next_entry_inc(iter);
2304 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2306 struct ring_buffer_event *event;
2307 struct ring_buffer_iter *buf_iter;
2308 unsigned long entries = 0;
2311 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2313 buf_iter = trace_buffer_iter(iter, cpu);
2317 ring_buffer_iter_reset(buf_iter);
2320 * We could have the case with the max latency tracers
2321 * that a reset never took place on a cpu. This is evident
2322 * by the timestamp being before the start of the buffer.
2324 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2325 if (ts >= iter->trace_buffer->time_start)
2328 ring_buffer_read(buf_iter, NULL);
2331 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2335 * The current tracer is copied to avoid a global locking
2338 static void *s_start(struct seq_file *m, loff_t *pos)
2340 struct trace_iterator *iter = m->private;
2341 struct trace_array *tr = iter->tr;
2342 int cpu_file = iter->cpu_file;
2348 * copy the tracer to avoid using a global lock all around.
2349 * iter->trace is a copy of current_trace, the pointer to the
2350 * name may be used instead of a strcmp(), as iter->trace->name
2351 * will point to the same string as current_trace->name.
2353 mutex_lock(&trace_types_lock);
2354 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2355 *iter->trace = *tr->current_trace;
2356 mutex_unlock(&trace_types_lock);
2358 #ifdef CONFIG_TRACER_MAX_TRACE
2359 if (iter->snapshot && iter->trace->use_max_tr)
2360 return ERR_PTR(-EBUSY);
2363 if (!iter->snapshot)
2364 atomic_inc(&trace_record_cmdline_disabled);
2366 if (*pos != iter->pos) {
2371 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2372 for_each_tracing_cpu(cpu)
2373 tracing_iter_reset(iter, cpu);
2375 tracing_iter_reset(iter, cpu_file);
2378 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2383 * If we overflowed the seq_file before, then we want
2384 * to just reuse the trace_seq buffer again.
2390 p = s_next(m, p, &l);
2394 trace_event_read_lock();
2395 trace_access_lock(cpu_file);
2399 static void s_stop(struct seq_file *m, void *p)
2401 struct trace_iterator *iter = m->private;
2403 #ifdef CONFIG_TRACER_MAX_TRACE
2404 if (iter->snapshot && iter->trace->use_max_tr)
2408 if (!iter->snapshot)
2409 atomic_dec(&trace_record_cmdline_disabled);
2411 trace_access_unlock(iter->cpu_file);
2412 trace_event_read_unlock();
2416 get_total_entries(struct trace_buffer *buf,
2417 unsigned long *total, unsigned long *entries)
2419 unsigned long count;
2425 for_each_tracing_cpu(cpu) {
2426 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2428 * If this buffer has skipped entries, then we hold all
2429 * entries for the trace and we need to ignore the
2430 * ones before the time stamp.
2432 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2433 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2434 /* total is the same as the entries */
2438 ring_buffer_overrun_cpu(buf->buffer, cpu);
2443 static void print_lat_help_header(struct seq_file *m)
2445 seq_puts(m, "# _------=> CPU# \n");
2446 seq_puts(m, "# / _-----=> irqs-off \n");
2447 seq_puts(m, "# | / _----=> need-resched \n");
2448 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2449 seq_puts(m, "# ||| / _--=> preempt-depth \n");
2450 seq_puts(m, "# |||| / delay \n");
2451 seq_puts(m, "# cmd pid ||||| time | caller \n");
2452 seq_puts(m, "# \\ / ||||| \\ | / \n");
2455 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2457 unsigned long total;
2458 unsigned long entries;
2460 get_total_entries(buf, &total, &entries);
2461 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2462 entries, total, num_online_cpus());
2466 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2468 print_event_info(buf, m);
2469 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
2470 seq_puts(m, "# | | | | |\n");
2473 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2475 print_event_info(buf, m);
2476 seq_puts(m, "# _-----=> irqs-off\n");
2477 seq_puts(m, "# / _----=> need-resched\n");
2478 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2479 seq_puts(m, "# || / _--=> preempt-depth\n");
2480 seq_puts(m, "# ||| / delay\n");
2481 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2482 seq_puts(m, "# | | | |||| | |\n");
2486 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2488 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2489 struct trace_buffer *buf = iter->trace_buffer;
2490 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2491 struct tracer *type = iter->trace;
2492 unsigned long entries;
2493 unsigned long total;
2494 const char *name = "preemption";
2498 get_total_entries(buf, &total, &entries);
2500 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2502 seq_puts(m, "# -----------------------------------"
2503 "---------------------------------\n");
2504 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2505 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2506 nsecs_to_usecs(data->saved_latency),
2510 #if defined(CONFIG_PREEMPT_NONE)
2512 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2514 #elif defined(CONFIG_PREEMPT)
2519 /* These are reserved for later use */
2522 seq_printf(m, " #P:%d)\n", num_online_cpus());
2526 seq_puts(m, "# -----------------\n");
2527 seq_printf(m, "# | task: %.16s-%d "
2528 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2529 data->comm, data->pid,
2530 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2531 data->policy, data->rt_priority);
2532 seq_puts(m, "# -----------------\n");
2534 if (data->critical_start) {
2535 seq_puts(m, "# => started at: ");
2536 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2537 trace_print_seq(m, &iter->seq);
2538 seq_puts(m, "\n# => ended at: ");
2539 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2540 trace_print_seq(m, &iter->seq);
2541 seq_puts(m, "\n#\n");
2547 static void test_cpu_buff_start(struct trace_iterator *iter)
2549 struct trace_seq *s = &iter->seq;
2551 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2554 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2557 if (cpumask_test_cpu(iter->cpu, iter->started))
2560 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2563 cpumask_set_cpu(iter->cpu, iter->started);
2565 /* Don't print started cpu buffer for the first entry of the trace */
2567 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2571 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2573 struct trace_seq *s = &iter->seq;
2574 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2575 struct trace_entry *entry;
2576 struct trace_event *event;
2580 test_cpu_buff_start(iter);
2582 event = ftrace_find_event(entry->type);
2584 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2585 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2586 if (!trace_print_lat_context(iter))
2589 if (!trace_print_context(iter))
2595 return event->funcs->trace(iter, sym_flags, event);
2597 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2600 return TRACE_TYPE_HANDLED;
2602 return TRACE_TYPE_PARTIAL_LINE;
2605 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2607 struct trace_seq *s = &iter->seq;
2608 struct trace_entry *entry;
2609 struct trace_event *event;
2613 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2614 if (!trace_seq_printf(s, "%d %d %llu ",
2615 entry->pid, iter->cpu, iter->ts))
2619 event = ftrace_find_event(entry->type);
2621 return event->funcs->raw(iter, 0, event);
2623 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2626 return TRACE_TYPE_HANDLED;
2628 return TRACE_TYPE_PARTIAL_LINE;
2631 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2633 struct trace_seq *s = &iter->seq;
2634 unsigned char newline = '\n';
2635 struct trace_entry *entry;
2636 struct trace_event *event;
2640 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2641 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2642 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2643 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2646 event = ftrace_find_event(entry->type);
2648 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2649 if (ret != TRACE_TYPE_HANDLED)
2653 SEQ_PUT_FIELD_RET(s, newline);
2655 return TRACE_TYPE_HANDLED;
2658 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2660 struct trace_seq *s = &iter->seq;
2661 struct trace_entry *entry;
2662 struct trace_event *event;
2666 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2667 SEQ_PUT_FIELD_RET(s, entry->pid);
2668 SEQ_PUT_FIELD_RET(s, iter->cpu);
2669 SEQ_PUT_FIELD_RET(s, iter->ts);
2672 event = ftrace_find_event(entry->type);
2673 return event ? event->funcs->binary(iter, 0, event) :
2677 int trace_empty(struct trace_iterator *iter)
2679 struct ring_buffer_iter *buf_iter;
2682 /* If we are looking at one CPU buffer, only check that one */
2683 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2684 cpu = iter->cpu_file;
2685 buf_iter = trace_buffer_iter(iter, cpu);
2687 if (!ring_buffer_iter_empty(buf_iter))
2690 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2696 for_each_tracing_cpu(cpu) {
2697 buf_iter = trace_buffer_iter(iter, cpu);
2699 if (!ring_buffer_iter_empty(buf_iter))
2702 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2710 /* Called with trace_event_read_lock() held. */
2711 enum print_line_t print_trace_line(struct trace_iterator *iter)
2713 enum print_line_t ret;
2715 if (iter->lost_events &&
2716 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2717 iter->cpu, iter->lost_events))
2718 return TRACE_TYPE_PARTIAL_LINE;
2720 if (iter->trace && iter->trace->print_line) {
2721 ret = iter->trace->print_line(iter);
2722 if (ret != TRACE_TYPE_UNHANDLED)
2726 if (iter->ent->type == TRACE_BPUTS &&
2727 trace_flags & TRACE_ITER_PRINTK &&
2728 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2729 return trace_print_bputs_msg_only(iter);
2731 if (iter->ent->type == TRACE_BPRINT &&
2732 trace_flags & TRACE_ITER_PRINTK &&
2733 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2734 return trace_print_bprintk_msg_only(iter);
2736 if (iter->ent->type == TRACE_PRINT &&
2737 trace_flags & TRACE_ITER_PRINTK &&
2738 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2739 return trace_print_printk_msg_only(iter);
2741 if (trace_flags & TRACE_ITER_BIN)
2742 return print_bin_fmt(iter);
2744 if (trace_flags & TRACE_ITER_HEX)
2745 return print_hex_fmt(iter);
2747 if (trace_flags & TRACE_ITER_RAW)
2748 return print_raw_fmt(iter);
2750 return print_trace_fmt(iter);
2753 void trace_latency_header(struct seq_file *m)
2755 struct trace_iterator *iter = m->private;
2757 /* print nothing if the buffers are empty */
2758 if (trace_empty(iter))
2761 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2762 print_trace_header(m, iter);
2764 if (!(trace_flags & TRACE_ITER_VERBOSE))
2765 print_lat_help_header(m);
2768 void trace_default_header(struct seq_file *m)
2770 struct trace_iterator *iter = m->private;
2772 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2775 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2776 /* print nothing if the buffers are empty */
2777 if (trace_empty(iter))
2779 print_trace_header(m, iter);
2780 if (!(trace_flags & TRACE_ITER_VERBOSE))
2781 print_lat_help_header(m);
2783 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2784 if (trace_flags & TRACE_ITER_IRQ_INFO)
2785 print_func_help_header_irq(iter->trace_buffer, m);
2787 print_func_help_header(iter->trace_buffer, m);
2792 static void test_ftrace_alive(struct seq_file *m)
2794 if (!ftrace_is_dead())
2796 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2797 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2800 #ifdef CONFIG_TRACER_MAX_TRACE
2801 static void show_snapshot_main_help(struct seq_file *m)
2803 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2804 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2805 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2806 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2807 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2808 seq_printf(m, "# is not a '0' or '1')\n");
2811 static void show_snapshot_percpu_help(struct seq_file *m)
2813 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2814 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2815 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2816 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2818 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2819 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2821 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2822 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2823 seq_printf(m, "# is not a '0' or '1')\n");
2826 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2828 if (iter->tr->allocated_snapshot)
2829 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2831 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2833 seq_printf(m, "# Snapshot commands:\n");
2834 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2835 show_snapshot_main_help(m);
2837 show_snapshot_percpu_help(m);
2840 /* Should never be called */
2841 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2844 static int s_show(struct seq_file *m, void *v)
2846 struct trace_iterator *iter = v;
2849 if (iter->ent == NULL) {
2851 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2853 test_ftrace_alive(m);
2855 if (iter->snapshot && trace_empty(iter))
2856 print_snapshot_help(m, iter);
2857 else if (iter->trace && iter->trace->print_header)
2858 iter->trace->print_header(m);
2860 trace_default_header(m);
2862 } else if (iter->leftover) {
2864 * If we filled the seq_file buffer earlier, we
2865 * want to just show it now.
2867 ret = trace_print_seq(m, &iter->seq);
2869 /* ret should this time be zero, but you never know */
2870 iter->leftover = ret;
2873 print_trace_line(iter);
2874 ret = trace_print_seq(m, &iter->seq);
2876 * If we overflow the seq_file buffer, then it will
2877 * ask us for this data again at start up.
2879 * ret is 0 if seq_file write succeeded.
2882 iter->leftover = ret;
2889 * Should be used after trace_array_get(), trace_types_lock
2890 * ensures that i_cdev was already initialized.
2892 static inline int tracing_get_cpu(struct inode *inode)
2894 if (inode->i_cdev) /* See trace_create_cpu_file() */
2895 return (long)inode->i_cdev - 1;
2896 return RING_BUFFER_ALL_CPUS;
2899 static const struct seq_operations tracer_seq_ops = {
2906 static struct trace_iterator *
2907 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2909 struct trace_array *tr = inode->i_private;
2910 struct trace_iterator *iter;
2913 if (tracing_disabled)
2914 return ERR_PTR(-ENODEV);
2916 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2918 return ERR_PTR(-ENOMEM);
2920 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2922 if (!iter->buffer_iter)
2926 * We make a copy of the current tracer to avoid concurrent
2927 * changes on it while we are reading.
2929 mutex_lock(&trace_types_lock);
2930 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2934 *iter->trace = *tr->current_trace;
2936 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2941 #ifdef CONFIG_TRACER_MAX_TRACE
2942 /* Currently only the top directory has a snapshot */
2943 if (tr->current_trace->print_max || snapshot)
2944 iter->trace_buffer = &tr->max_buffer;
2947 iter->trace_buffer = &tr->trace_buffer;
2948 iter->snapshot = snapshot;
2950 iter->cpu_file = tracing_get_cpu(inode);
2951 mutex_init(&iter->mutex);
2953 /* Notify the tracer early; before we stop tracing. */
2954 if (iter->trace && iter->trace->open)
2955 iter->trace->open(iter);
2957 /* Annotate start of buffers if we had overruns */
2958 if (ring_buffer_overruns(iter->trace_buffer->buffer))
2959 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2961 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2962 if (trace_clocks[tr->clock_id].in_ns)
2963 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2965 /* stop the trace while dumping if we are not opening "snapshot" */
2966 if (!iter->snapshot)
2967 tracing_stop_tr(tr);
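/*
 * Added note (not in the original source): the per-cpu iterators below are
 * set up in two phases - ring_buffer_read_prepare() for each CPU, a single
 * ring_buffer_read_prepare_sync(), then ring_buffer_read_start() - so that
 * one synchronization point covers every prepared iterator.
 */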
2969 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2970 for_each_tracing_cpu(cpu) {
2971 iter->buffer_iter[cpu] =
2972 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2974 ring_buffer_read_prepare_sync();
2975 for_each_tracing_cpu(cpu) {
2976 ring_buffer_read_start(iter->buffer_iter[cpu]);
2977 tracing_iter_reset(iter, cpu);
2980 cpu = iter->cpu_file;
2981 iter->buffer_iter[cpu] =
2982 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2983 ring_buffer_read_prepare_sync();
2984 ring_buffer_read_start(iter->buffer_iter[cpu]);
2985 tracing_iter_reset(iter, cpu);
2988 mutex_unlock(&trace_types_lock);
2993 mutex_unlock(&trace_types_lock);
2995 kfree(iter->buffer_iter);
2997 seq_release_private(inode, file);
2998 return ERR_PTR(-ENOMEM);
3001 int tracing_open_generic(struct inode *inode, struct file *filp)
3003 if (tracing_disabled)
3006 filp->private_data = inode->i_private;
3010 bool tracing_is_disabled(void)
3012 return (tracing_disabled) ? true : false;
3016 * Open and update trace_array ref count.
3017 * Must have the current trace_array passed to it.
3019 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3021 struct trace_array *tr = inode->i_private;
3023 if (tracing_disabled)
3026 if (trace_array_get(tr) < 0)
3029 filp->private_data = inode->i_private;
3034 static int tracing_release(struct inode *inode, struct file *file)
3036 struct trace_array *tr = inode->i_private;
3037 struct seq_file *m = file->private_data;
3038 struct trace_iterator *iter;
3041 if (!(file->f_mode & FMODE_READ)) {
3042 trace_array_put(tr);
3046 /* Writes do not use seq_file */
3048 mutex_lock(&trace_types_lock);
3050 for_each_tracing_cpu(cpu) {
3051 if (iter->buffer_iter[cpu])
3052 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3055 if (iter->trace && iter->trace->close)
3056 iter->trace->close(iter);
3058 if (!iter->snapshot)
3059 /* reenable tracing if it was previously enabled */
3060 tracing_start_tr(tr);
3062 __trace_array_put(tr);
3064 mutex_unlock(&trace_types_lock);
3066 mutex_destroy(&iter->mutex);
3067 free_cpumask_var(iter->started);
3069 kfree(iter->buffer_iter);
3070 seq_release_private(inode, file);
3075 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3077 struct trace_array *tr = inode->i_private;
3079 trace_array_put(tr);
3083 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3085 struct trace_array *tr = inode->i_private;
3087 trace_array_put(tr);
3089 return single_release(inode, file);
3092 static int tracing_open(struct inode *inode, struct file *file)
3094 struct trace_array *tr = inode->i_private;
3095 struct trace_iterator *iter;
3098 if (trace_array_get(tr) < 0)
3101 /* If this file was open for write, then erase contents */
3102 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3103 int cpu = tracing_get_cpu(inode);
3105 if (cpu == RING_BUFFER_ALL_CPUS)
3106 tracing_reset_online_cpus(&tr->trace_buffer);
3108 tracing_reset(&tr->trace_buffer, cpu);
3111 if (file->f_mode & FMODE_READ) {
3112 iter = __tracing_open(inode, file, false);
3114 ret = PTR_ERR(iter);
3115 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3116 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3120 trace_array_put(tr);
3126 t_next(struct seq_file *m, void *v, loff_t *pos)
3128 struct tracer *t = v;
3138 static void *t_start(struct seq_file *m, loff_t *pos)
3143 mutex_lock(&trace_types_lock);
3144 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
3150 static void t_stop(struct seq_file *m, void *p)
3152 mutex_unlock(&trace_types_lock);
3155 static int t_show(struct seq_file *m, void *v)
3157 struct tracer *t = v;
3162 seq_printf(m, "%s", t->name);
3171 static const struct seq_operations show_traces_seq_ops = {
3178 static int show_traces_open(struct inode *inode, struct file *file)
3180 if (tracing_disabled)
3183 return seq_open(file, &show_traces_seq_ops);
3187 tracing_write_stub(struct file *filp, const char __user *ubuf,
3188 size_t count, loff_t *ppos)
3193 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3197 if (file->f_mode & FMODE_READ)
3198 ret = seq_lseek(file, offset, whence);
3200 file->f_pos = ret = 0;
3205 static const struct file_operations tracing_fops = {
3206 .open = tracing_open,
3208 .write = tracing_write_stub,
3209 .llseek = tracing_lseek,
3210 .release = tracing_release,
3213 static const struct file_operations show_traces_fops = {
3214 .open = show_traces_open,
3216 .release = seq_release,
3217 .llseek = seq_lseek,
3221 * The tracer itself will not take this lock, but still we want
3222 * to provide a consistent cpumask to user-space:
3224 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3227 * Temporary storage for the character representation of the
3228 * CPU bitmask (and one more byte for the newline):
3230 static char mask_str[NR_CPUS + 1];
3233 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3234 size_t count, loff_t *ppos)
3236 struct trace_array *tr = file_inode(filp)->i_private;
3239 mutex_lock(&tracing_cpumask_update_lock);
3241 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3242 if (count - len < 2) {
3246 len += sprintf(mask_str + len, "\n");
3247 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3250 mutex_unlock(&tracing_cpumask_update_lock);
3256 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3257 size_t count, loff_t *ppos)
3259 struct trace_array *tr = file_inode(filp)->i_private;
3260 cpumask_var_t tracing_cpumask_new;
3263 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3266 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3270 mutex_lock(&tracing_cpumask_update_lock);
3272 local_irq_disable();
3273 arch_spin_lock(&ftrace_max_lock);
3274 for_each_tracing_cpu(cpu) {
3276 * Increase/decrease the disabled counter if we are
3277 * about to flip a bit in the cpumask:
3279 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3280 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3281 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3282 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3284 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3285 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3286 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3287 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3290 arch_spin_unlock(&ftrace_max_lock);
3293 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3295 mutex_unlock(&tracing_cpumask_update_lock);
3296 free_cpumask_var(tracing_cpumask_new);
3301 free_cpumask_var(tracing_cpumask_new);
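/*
 * Illustrative usage sketch (added note, not part of the original file):
 * the mask written to tracing_cpumask is parsed by cpumask_parse_user(),
 * i.e. a hexadecimal CPU bitmap. For example, limiting tracing to CPUs
 * 0 and 1:
 *
 *   echo 3 > tracing_cpumask
 */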
3306 static const struct file_operations tracing_cpumask_fops = {
3307 .open = tracing_open_generic_tr,
3308 .read = tracing_cpumask_read,
3309 .write = tracing_cpumask_write,
3310 .release = tracing_release_generic_tr,
3311 .llseek = generic_file_llseek,
3314 static int tracing_trace_options_show(struct seq_file *m, void *v)
3316 struct tracer_opt *trace_opts;
3317 struct trace_array *tr = m->private;
3321 mutex_lock(&trace_types_lock);
3322 tracer_flags = tr->current_trace->flags->val;
3323 trace_opts = tr->current_trace->flags->opts;
3325 for (i = 0; trace_options[i]; i++) {
3326 if (trace_flags & (1 << i))
3327 seq_printf(m, "%s\n", trace_options[i]);
3329 seq_printf(m, "no%s\n", trace_options[i]);
3332 for (i = 0; trace_opts[i].name; i++) {
3333 if (tracer_flags & trace_opts[i].bit)
3334 seq_printf(m, "%s\n", trace_opts[i].name);
3336 seq_printf(m, "no%s\n", trace_opts[i].name);
3338 mutex_unlock(&trace_types_lock);
3343 static int __set_tracer_option(struct trace_array *tr,
3344 struct tracer_flags *tracer_flags,
3345 struct tracer_opt *opts, int neg)
3347 struct tracer *trace = tr->current_trace;
3350 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3355 tracer_flags->val &= ~opts->bit;
3357 tracer_flags->val |= opts->bit;
3361 /* Try to assign a tracer specific option */
3362 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3364 struct tracer *trace = tr->current_trace;
3365 struct tracer_flags *tracer_flags = trace->flags;
3366 struct tracer_opt *opts = NULL;
3369 for (i = 0; tracer_flags->opts[i].name; i++) {
3370 opts = &tracer_flags->opts[i];
3372 if (strcmp(cmp, opts->name) == 0)
3373 return __set_tracer_option(tr, trace->flags, opts, neg);
3379 /* Some tracers require overwrite to stay enabled */
3380 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3382 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3388 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3390 /* do nothing if flag is already set */
3391 if (!!(trace_flags & mask) == !!enabled)
3394 /* Give the tracer a chance to approve the change */
3395 if (tr->current_trace->flag_changed)
3396 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3400 trace_flags |= mask;
3402 trace_flags &= ~mask;
3404 if (mask == TRACE_ITER_RECORD_CMD)
3405 trace_event_enable_cmd_record(enabled);
3407 if (mask == TRACE_ITER_OVERWRITE) {
3408 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3409 #ifdef CONFIG_TRACER_MAX_TRACE
3410 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3414 if (mask == TRACE_ITER_PRINTK)
3415 trace_printk_start_stop_comm(enabled);
3420 static int trace_set_options(struct trace_array *tr, char *option)
3427 cmp = strstrip(option);
3429 if (strncmp(cmp, "no", 2) == 0) {
3434 mutex_lock(&trace_types_lock);
3436 for (i = 0; trace_options[i]; i++) {
3437 if (strcmp(cmp, trace_options[i]) == 0) {
3438 ret = set_tracer_flag(tr, 1 << i, !neg);
3443 /* If no option could be set, test the specific tracer options */
3444 if (!trace_options[i])
3445 ret = set_tracer_option(tr, cmp, neg);
3447 mutex_unlock(&trace_types_lock);
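/*
 * Illustrative usage sketch (added note, not part of the original file):
 * trace_set_options() is what writes to the trace_options file end up
 * calling, so an option is toggled by name and disabled with a "no"
 * prefix, e.g. (assuming the standard "print-parent" option exists):
 *
 *   echo print-parent   > trace_options
 *   echo noprint-parent > trace_options
 */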
3453 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3454 size_t cnt, loff_t *ppos)
3456 struct seq_file *m = filp->private_data;
3457 struct trace_array *tr = m->private;
3461 if (cnt >= sizeof(buf))
3464 if (copy_from_user(&buf, ubuf, cnt))
3469 ret = trace_set_options(tr, buf);
3478 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3480 struct trace_array *tr = inode->i_private;
3483 if (tracing_disabled)
3486 if (trace_array_get(tr) < 0)
3489 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3491 trace_array_put(tr);
3496 static const struct file_operations tracing_iter_fops = {
3497 .open = tracing_trace_options_open,
3499 .llseek = seq_lseek,
3500 .release = tracing_single_release_tr,
3501 .write = tracing_trace_options_write,
3504 static const char readme_msg[] =
3505 "tracing mini-HOWTO:\n\n"
3506 "# echo 0 > tracing_on : quick way to disable tracing\n"
3507 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3508 " Important files:\n"
3509 " trace\t\t\t- The static contents of the buffer\n"
3510 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3511 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3512 " current_tracer\t- function and latency tracers\n"
3513 " available_tracers\t- list of configured tracers for current_tracer\n"
3514 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3515 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3516 " trace_clock\t\t-change the clock used to order events\n"
3517 " local: Per cpu clock but may not be synced across CPUs\n"
3518 " global: Synced across CPUs but slows tracing down.\n"
3519 " counter: Not a clock, but just an increment\n"
3520 " uptime: Jiffy counter from time of boot\n"
3521 " perf: Same clock that perf events use\n"
3522 #ifdef CONFIG_X86_64
3523 " x86-tsc: TSC cycle counter\n"
3525 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3526 " tracing_cpumask\t- Limit which CPUs to trace\n"
3527 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3528 "\t\t\t Remove sub-buffer with rmdir\n"
3529 " trace_options\t\t- Set format or modify how tracing happens\n"
3530 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3531 "\t\t\t option name\n"
3532 #ifdef CONFIG_DYNAMIC_FTRACE
3533 "\n available_filter_functions - list of functions that can be filtered on\n"
3534 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3535 "\t\t\t functions\n"
3536 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3537 "\t modules: Can select a group via module\n"
3538 "\t Format: :mod:<module-name>\n"
3539 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3540 "\t triggers: a command to perform when function is hit\n"
3541 "\t Format: <function>:<trigger>[:count]\n"
3542 "\t trigger: traceon, traceoff\n"
3543 "\t\t enable_event:<system>:<event>\n"
3544 "\t\t disable_event:<system>:<event>\n"
3545 #ifdef CONFIG_STACKTRACE
3548 #ifdef CONFIG_TRACER_SNAPSHOT
3551 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3552 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3553 "\t The first one will disable tracing every time do_fault is hit\n"
3554 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3555 "\t The first time do trap is hit and it disables tracing, the\n"
3556 "\t counter will decrement to 2. If tracing is already disabled,\n"
3557 "\t the counter will not decrement. It only decrements when the\n"
3558 "\t trigger did work\n"
3559 "\t To remove trigger without count:\n"
3560 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3561 "\t To remove trigger with a count:\n"
3562 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3563 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3564 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3565 "\t modules: Can select a group via module command :mod:\n"
3566 "\t Does not accept triggers\n"
3567 #endif /* CONFIG_DYNAMIC_FTRACE */
3568 #ifdef CONFIG_FUNCTION_TRACER
3569 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3572 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3573 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3574 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3576 #ifdef CONFIG_TRACER_SNAPSHOT
3577 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3578 "\t\t\t snapshot buffer. Read the contents for more\n"
3579 "\t\t\t information\n"
3581 #ifdef CONFIG_STACK_TRACER
3582 " stack_trace\t\t- Shows the max stack trace when active\n"
3583 " stack_max_size\t- Shows current max stack size that was traced\n"
3584 "\t\t\t Write into this file to reset the max size (trigger a\n"
3585 "\t\t\t new trace)\n"
3586 #ifdef CONFIG_DYNAMIC_FTRACE
3587 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3590 #endif /* CONFIG_STACK_TRACER */
3591 " events/\t\t- Directory containing all trace event subsystems:\n"
3592 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3593 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3594 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3596 " filter\t\t- If set, only events passing filter are traced\n"
3597 " events/<system>/<event>/\t- Directory containing control files for\n"
3599 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3600 " filter\t\t- If set, only events passing filter are traced\n"
3601 " trigger\t\t- If set, a command to perform when event is hit\n"
3602 "\t Format: <trigger>[:count][if <filter>]\n"
3603 "\t trigger: traceon, traceoff\n"
3604 "\t enable_event:<system>:<event>\n"
3605 "\t disable_event:<system>:<event>\n"
3606 #ifdef CONFIG_STACKTRACE
3609 #ifdef CONFIG_TRACER_SNAPSHOT
3612 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3613 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3614 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3615 "\t events/block/block_unplug/trigger\n"
3616 "\t The first disables tracing every time block_unplug is hit.\n"
3617 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3618 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3619 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3620 "\t Like function triggers, the counter is only decremented if it\n"
3621 "\t enabled or disabled tracing.\n"
3622 "\t To remove a trigger without a count:\n"
3623 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3624 "\t To remove a trigger with a count:\n"
3625 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3626 "\t Filters can be ignored when removing a trigger.\n"
3630 tracing_readme_read(struct file *filp, char __user *ubuf,
3631 size_t cnt, loff_t *ppos)
3633 return simple_read_from_buffer(ubuf, cnt, ppos,
3634 readme_msg, strlen(readme_msg));
3637 static const struct file_operations tracing_readme_fops = {
3638 .open = tracing_open_generic,
3639 .read = tracing_readme_read,
3640 .llseek = generic_file_llseek,
3644 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3645 size_t cnt, loff_t *ppos)
3654 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3658 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3666 for (i = 0; i < SAVED_CMDLINES; i++) {
3669 pid = map_cmdline_to_pid[i];
3670 if (pid == -1 || pid == NO_CMDLINE_MAP)
3673 trace_find_cmdline(pid, buf_comm);
3674 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3679 len = simple_read_from_buffer(ubuf, cnt, ppos,
3688 static const struct file_operations tracing_saved_cmdlines_fops = {
3689 .open = tracing_open_generic,
3690 .read = tracing_saved_cmdlines_read,
3691 .llseek = generic_file_llseek,
3695 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3696 size_t cnt, loff_t *ppos)
3698 struct trace_array *tr = filp->private_data;
3699 char buf[MAX_TRACER_SIZE+2];
3702 mutex_lock(&trace_types_lock);
3703 r = sprintf(buf, "%s\n", tr->current_trace->name);
3704 mutex_unlock(&trace_types_lock);
3706 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3709 int tracer_init(struct tracer *t, struct trace_array *tr)
3711 tracing_reset_online_cpus(&tr->trace_buffer);
3715 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3719 for_each_tracing_cpu(cpu)
3720 per_cpu_ptr(buf->data, cpu)->entries = val;
3723 #ifdef CONFIG_TRACER_MAX_TRACE
3724 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3725 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3726 struct trace_buffer *size_buf, int cpu_id)
3730 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3731 for_each_tracing_cpu(cpu) {
3732 ret = ring_buffer_resize(trace_buf->buffer,
3733 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3736 per_cpu_ptr(trace_buf->data, cpu)->entries =
3737 per_cpu_ptr(size_buf->data, cpu)->entries;
3740 ret = ring_buffer_resize(trace_buf->buffer,
3741 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3743 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3744 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3749 #endif /* CONFIG_TRACER_MAX_TRACE */
3751 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3752 unsigned long size, int cpu)
3757 * If kernel or user changes the size of the ring buffer
3758 * we use the size that was given, and we can forget about
3759 * expanding it later.
3761 ring_buffer_expanded = true;
3763 /* May be called before buffers are initialized */
3764 if (!tr->trace_buffer.buffer)
3767 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3771 #ifdef CONFIG_TRACER_MAX_TRACE
3772 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3773 !tr->current_trace->use_max_tr)
3776 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3778 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3779 &tr->trace_buffer, cpu);
3782 * AARGH! We are left with different
3783 * size max buffer!!!!
3784 * The max buffer is our "snapshot" buffer.
3785 * When a tracer needs a snapshot (one of the
3786 * latency tracers), it swaps the max buffer
3787 * with the saved snapshot. We succeeded in
3788 * updating the size of the main buffer, but failed to
3789 * update the size of the max buffer. But when we tried
3790 * to reset the main buffer to the original size, we
3791 * failed there too. This is very unlikely to
3792 * happen, but if it does, warn and kill all
3796 tracing_disabled = 1;
3801 if (cpu == RING_BUFFER_ALL_CPUS)
3802 set_buffer_entries(&tr->max_buffer, size);
3804 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3807 #endif /* CONFIG_TRACER_MAX_TRACE */
3809 if (cpu == RING_BUFFER_ALL_CPUS)
3810 set_buffer_entries(&tr->trace_buffer, size);
3812 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3817 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3818 unsigned long size, int cpu_id)
3822 mutex_lock(&trace_types_lock);
3824 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3825 /* make sure this cpu is enabled in the mask */
3826 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3832 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3837 mutex_unlock(&trace_types_lock);
3844 * tracing_update_buffers - used by tracing facility to expand ring buffers
3846 * To save memory when tracing is never used on a system that has it
3847 * configured in, the ring buffers are set to a minimum size. Once
3848 * a user starts to use the tracing facility, they need to grow
3849 * to their default size.
3851 * This function is to be called when a tracer is about to be used.
3853 int tracing_update_buffers(void)
3857 mutex_lock(&trace_types_lock);
3858 if (!ring_buffer_expanded)
3859 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3860 RING_BUFFER_ALL_CPUS);
3861 mutex_unlock(&trace_types_lock);
3866 struct trace_option_dentry;
3868 static struct trace_option_dentry *
3869 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3872 destroy_trace_option_files(struct trace_option_dentry *topts);
3874 static int tracing_set_tracer(const char *buf)
3876 static struct trace_option_dentry *topts;
3877 struct trace_array *tr = &global_trace;
3879 #ifdef CONFIG_TRACER_MAX_TRACE
3884 mutex_lock(&trace_types_lock);
3886 if (!ring_buffer_expanded) {
3887 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3888 RING_BUFFER_ALL_CPUS);
3894 for (t = trace_types; t; t = t->next) {
3895 if (strcmp(t->name, buf) == 0)
3902 if (t == tr->current_trace)
3905 trace_branch_disable();
3907 tr->current_trace->enabled = false;
3909 if (tr->current_trace->reset)
3910 tr->current_trace->reset(tr);
3912 /* Current trace needs to be nop_trace before synchronize_sched */
3913 tr->current_trace = &nop_trace;
3915 #ifdef CONFIG_TRACER_MAX_TRACE
3916 had_max_tr = tr->allocated_snapshot;
3918 if (had_max_tr && !t->use_max_tr) {
3920 * We need to make sure that the update_max_tr sees that
3921 * current_trace changed to nop_trace to keep it from
3922 * swapping the buffers after we resize it.
3923 * The update_max_tr is called with interrupts disabled,
3924 * so a synchronize_sched() is sufficient.
3926 synchronize_sched();
3930 destroy_trace_option_files(topts);
3932 topts = create_trace_option_files(tr, t);
3934 #ifdef CONFIG_TRACER_MAX_TRACE
3935 if (t->use_max_tr && !had_max_tr) {
3936 ret = alloc_snapshot(tr);
3943 ret = tracer_init(t, tr);
3948 tr->current_trace = t;
3949 tr->current_trace->enabled = true;
3950 trace_branch_enable(tr);
3952 mutex_unlock(&trace_types_lock);
3958 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3959 size_t cnt, loff_t *ppos)
3961 char buf[MAX_TRACER_SIZE+1];
3968 if (cnt > MAX_TRACER_SIZE)
3969 cnt = MAX_TRACER_SIZE;
3971 if (copy_from_user(&buf, ubuf, cnt))
3976 /* strip ending whitespace. */
3977 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3980 err = tracing_set_tracer(buf);
3990 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3991 size_t cnt, loff_t *ppos)
3993 unsigned long *ptr = filp->private_data;
3997 r = snprintf(buf, sizeof(buf), "%ld\n",
3998 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3999 if (r > sizeof(buf))
4001 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4005 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4006 size_t cnt, loff_t *ppos)
4008 unsigned long *ptr = filp->private_data;
4012 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4021 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4023 struct trace_array *tr = inode->i_private;
4024 struct trace_iterator *iter;
4027 if (tracing_disabled)
4030 if (trace_array_get(tr) < 0)
4033 mutex_lock(&trace_types_lock);
4035 /* create a buffer to store the information to pass to userspace */
4036 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4039 __trace_array_put(tr);
4044 * We make a copy of the current tracer to avoid concurrent
4045 * changes on it while we are reading.
4047 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4052 *iter->trace = *tr->current_trace;
4054 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4059 /* trace pipe does not show start of buffer */
4060 cpumask_setall(iter->started);
4062 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4063 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4065 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4066 if (trace_clocks[tr->clock_id].in_ns)
4067 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4070 iter->trace_buffer = &tr->trace_buffer;
4071 iter->cpu_file = tracing_get_cpu(inode);
4072 mutex_init(&iter->mutex);
4073 filp->private_data = iter;
4075 if (iter->trace->pipe_open)
4076 iter->trace->pipe_open(iter);
4078 nonseekable_open(inode, filp);
4080 mutex_unlock(&trace_types_lock);
4086 __trace_array_put(tr);
4087 mutex_unlock(&trace_types_lock);
4091 static int tracing_release_pipe(struct inode *inode, struct file *file)
4093 struct trace_iterator *iter = file->private_data;
4094 struct trace_array *tr = inode->i_private;
4096 mutex_lock(&trace_types_lock);
4098 if (iter->trace->pipe_close)
4099 iter->trace->pipe_close(iter);
4101 mutex_unlock(&trace_types_lock);
4103 free_cpumask_var(iter->started);
4104 mutex_destroy(&iter->mutex);
4108 trace_array_put(tr);
4114 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4116 /* Iterators are static; they should be filled or empty */
4117 if (trace_buffer_iter(iter, iter->cpu_file))
4118 return POLLIN | POLLRDNORM;
4120 if (trace_flags & TRACE_ITER_BLOCK)
4122 * Always select as readable when in blocking mode
4124 return POLLIN | POLLRDNORM;
4126 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4131 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4133 struct trace_iterator *iter = filp->private_data;
4135 return trace_poll(iter, filp, poll_table);
4139 * This is a makeshift waitqueue.
4140 * A tracer might use this callback in some rare cases:
4142 * 1) the current tracer might hold the runqueue lock when it wakes up
4143 * a reader, hence a deadlock (sched, function, and function graph tracers)
4144 * 2) the function tracers trace all functions; we don't want
4145 * the overhead of calling wake_up and friends
4146 * (and tracing them too)
4148 * Anyway, this is really very primitive wakeup.
4150 void poll_wait_pipe(struct trace_iterator *iter)
4152 set_current_state(TASK_INTERRUPTIBLE);
4153 /* sleep for 100 msecs, and try again. */
4154 schedule_timeout(HZ / 10);
4157 /* Must be called with trace_types_lock mutex held. */
4158 static int tracing_wait_pipe(struct file *filp)
4160 struct trace_iterator *iter = filp->private_data;
4162 while (trace_empty(iter)) {
4164 if ((filp->f_flags & O_NONBLOCK)) {
4168 mutex_unlock(&iter->mutex);
4170 iter->trace->wait_pipe(iter);
4172 mutex_lock(&iter->mutex);
4174 if (signal_pending(current))
4178 * We block until we read something and tracing is disabled.
4179 * We still block if tracing is disabled, but we have never
4180 * read anything. This allows a user to cat this file, and
4181 * then enable tracing. But after we have read something,
4182 * we give an EOF when tracing is again disabled.
4184 * iter->pos will be 0 if we haven't read anything.
4186 if (!tracing_is_on() && iter->pos)
4197 tracing_read_pipe(struct file *filp, char __user *ubuf,
4198 size_t cnt, loff_t *ppos)
4200 struct trace_iterator *iter = filp->private_data;
4201 struct trace_array *tr = iter->tr;
4204 /* return any leftover data */
4205 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4209 trace_seq_init(&iter->seq);
4211 /* copy the tracer to avoid using a global lock all around */
4212 mutex_lock(&trace_types_lock);
4213 if (unlikely(iter->trace->name != tr->current_trace->name))
4214 *iter->trace = *tr->current_trace;
4215 mutex_unlock(&trace_types_lock);
4218 * Avoid more than one consumer on a single file descriptor
4219 * This is just a matter of trace coherency; the ring buffer itself
4222 mutex_lock(&iter->mutex);
4223 if (iter->trace->read) {
4224 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4230 sret = tracing_wait_pipe(filp);
4234 /* stop when tracing is finished */
4235 if (trace_empty(iter)) {
4240 if (cnt >= PAGE_SIZE)
4241 cnt = PAGE_SIZE - 1;
4243 /* reset all but tr, trace, and overruns */
4244 memset(&iter->seq, 0,
4245 sizeof(struct trace_iterator) -
4246 offsetof(struct trace_iterator, seq));
4247 cpumask_clear(iter->started);
4250 trace_event_read_lock();
4251 trace_access_lock(iter->cpu_file);
4252 while (trace_find_next_entry_inc(iter) != NULL) {
4253 enum print_line_t ret;
4254 int len = iter->seq.len;
4256 ret = print_trace_line(iter);
4257 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4258 /* don't print partial lines */
4259 iter->seq.len = len;
4262 if (ret != TRACE_TYPE_NO_CONSUME)
4263 trace_consume(iter);
4265 if (iter->seq.len >= cnt)
4269 * Setting the full flag means we reached the trace_seq buffer
4270 * size and we should leave by partial output condition above.
4271 * One of the trace_seq_* functions is not used properly.
4273 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4276 trace_access_unlock(iter->cpu_file);
4277 trace_event_read_unlock();
4279 /* Now copy what we have to the user */
4280 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4281 if (iter->seq.readpos >= iter->seq.len)
4282 trace_seq_init(&iter->seq);
4285 * If there was nothing to send to user, in spite of consuming trace
4286 * entries, go back to wait for more entries.
4292 mutex_unlock(&iter->mutex);
4297 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4300 __free_page(spd->pages[idx]);
4303 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4305 .map = generic_pipe_buf_map,
4306 .unmap = generic_pipe_buf_unmap,
4307 .confirm = generic_pipe_buf_confirm,
4308 .release = generic_pipe_buf_release,
4309 .steal = generic_pipe_buf_steal,
4310 .get = generic_pipe_buf_get,
4314 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4319 /* Seq buffer is page-sized, exactly what we need. */
4321 count = iter->seq.len;
4322 ret = print_trace_line(iter);
4323 count = iter->seq.len - count;
4326 iter->seq.len -= count;
4329 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4330 iter->seq.len -= count;
4334 if (ret != TRACE_TYPE_NO_CONSUME)
4335 trace_consume(iter);
4337 if (!trace_find_next_entry_inc(iter)) {
4347 static ssize_t tracing_splice_read_pipe(struct file *filp,
4349 struct pipe_inode_info *pipe,
4353 struct page *pages_def[PIPE_DEF_BUFFERS];
4354 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4355 struct trace_iterator *iter = filp->private_data;
4356 struct splice_pipe_desc spd = {
4358 .partial = partial_def,
4359 .nr_pages = 0, /* This gets updated below. */
4360 .nr_pages_max = PIPE_DEF_BUFFERS,
4362 .ops = &tracing_pipe_buf_ops,
4363 .spd_release = tracing_spd_release_pipe,
4365 struct trace_array *tr = iter->tr;
4370 if (splice_grow_spd(pipe, &spd))
4373 /* copy the tracer to avoid using a global lock all around */
4374 mutex_lock(&trace_types_lock);
4375 if (unlikely(iter->trace->name != tr->current_trace->name))
4376 *iter->trace = *tr->current_trace;
4377 mutex_unlock(&trace_types_lock);
4379 mutex_lock(&iter->mutex);
4381 if (iter->trace->splice_read) {
4382 ret = iter->trace->splice_read(iter, filp,
4383 ppos, pipe, len, flags);
4388 ret = tracing_wait_pipe(filp);
4392 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4397 trace_event_read_lock();
4398 trace_access_lock(iter->cpu_file);
4400 /* Fill as many pages as possible. */
4401 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4402 spd.pages[i] = alloc_page(GFP_KERNEL);
4406 rem = tracing_fill_pipe_page(rem, iter);
4408 /* Copy the data into the page, so we can start over. */
4409 ret = trace_seq_to_buffer(&iter->seq,
4410 page_address(spd.pages[i]),
4413 __free_page(spd.pages[i]);
4416 spd.partial[i].offset = 0;
4417 spd.partial[i].len = iter->seq.len;
4419 trace_seq_init(&iter->seq);
4422 trace_access_unlock(iter->cpu_file);
4423 trace_event_read_unlock();
4424 mutex_unlock(&iter->mutex);
4428 ret = splice_to_pipe(pipe, &spd);
4430 splice_shrink_spd(&spd);
4434 mutex_unlock(&iter->mutex);
4439 tracing_entries_read(struct file *filp, char __user *ubuf,
4440 size_t cnt, loff_t *ppos)
4442 struct inode *inode = file_inode(filp);
4443 struct trace_array *tr = inode->i_private;
4444 int cpu = tracing_get_cpu(inode);
4449 mutex_lock(&trace_types_lock);
4451 if (cpu == RING_BUFFER_ALL_CPUS) {
4452 int cpu, buf_size_same;
4457 /* check if all cpu sizes are same */
4458 for_each_tracing_cpu(cpu) {
4459 /* fill in the size from first enabled cpu */
4461 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4462 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4468 if (buf_size_same) {
4469 if (!ring_buffer_expanded)
4470 r = sprintf(buf, "%lu (expanded: %lu)\n",
4472 trace_buf_size >> 10);
4474 r = sprintf(buf, "%lu\n", size >> 10);
4476 r = sprintf(buf, "X\n");
4478 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4480 mutex_unlock(&trace_types_lock);
4482 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4487 tracing_entries_write(struct file *filp, const char __user *ubuf,
4488 size_t cnt, loff_t *ppos)
4490 struct inode *inode = file_inode(filp);
4491 struct trace_array *tr = inode->i_private;
4495 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4499 /* must have at least 1 entry */
4503 /* value is in KB */
4505 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
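/*
 * Illustrative usage sketch (added note, not part of the original file):
 * the value written to buffer_size_kb is interpreted in KB (see the
 * "value is in KB" note above), so growing every per-cpu buffer to 4 MB
 * would be:
 *
 *   echo 4096 > buffer_size_kb
 */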
4515 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4516 size_t cnt, loff_t *ppos)
4518 struct trace_array *tr = filp->private_data;
4521 unsigned long size = 0, expanded_size = 0;
4523 mutex_lock(&trace_types_lock);
4524 for_each_tracing_cpu(cpu) {
4525 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4526 if (!ring_buffer_expanded)
4527 expanded_size += trace_buf_size >> 10;
4529 if (ring_buffer_expanded)
4530 r = sprintf(buf, "%lu\n", size);
4532 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4533 mutex_unlock(&trace_types_lock);
4535 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4539 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4540 size_t cnt, loff_t *ppos)
4543 * There is no need to read what the user has written; this function
4544 * just makes sure that there is no error when "echo" is used
4553 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4555 struct trace_array *tr = inode->i_private;
4557 /* disable tracing ? */
4558 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4559 tracer_tracing_off(tr);
4560 /* resize the ring buffer to 0 */
4561 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4563 trace_array_put(tr);
4569 tracing_mark_write(struct file *filp, const char __user *ubuf,
4570 size_t cnt, loff_t *fpos)
4572 unsigned long addr = (unsigned long)ubuf;
4573 struct trace_array *tr = filp->private_data;
4574 struct ring_buffer_event *event;
4575 struct ring_buffer *buffer;
4576 struct print_entry *entry;
4577 unsigned long irq_flags;
4578 struct page *pages[2];
4588 if (tracing_disabled)
4591 if (!(trace_flags & TRACE_ITER_MARKERS))
4594 if (cnt > TRACE_BUF_SIZE)
4595 cnt = TRACE_BUF_SIZE;
4598 * Userspace is injecting traces into the kernel trace buffer.
4599 * We want to be as non-intrusive as possible.
4600 * To do so, we do not want to allocate any special buffers
4601 * or take any locks, but instead write the userspace data
4602 * straight into the ring buffer.
4604 * First we need to pin the userspace buffer into memory,
4605 * which it most likely already is, because the process just referenced it.
4606 * But there's no guarantee that it is. By using get_user_pages_fast()
4607 * and kmap_atomic/kunmap_atomic() we can get access to the
4608 * pages directly. We then write the data directly into the
4611 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
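/*
 * Illustrative usage sketch (added note, not part of the original file):
 * this write path backs the trace_marker file, so a user-space annotation
 * is as simple as:
 *
 *   echo "hit the interesting code path" > trace_marker
 *
 * and ends up in the buffer as a print entry alongside the other events.
 */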
4613 /* check if we cross pages */
4614 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4617 offset = addr & (PAGE_SIZE - 1);
4620 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4621 if (ret < nr_pages) {
4623 put_page(pages[ret]);
4628 for (i = 0; i < nr_pages; i++)
4629 map_page[i] = kmap_atomic(pages[i]);
4631 local_save_flags(irq_flags);
4632 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4633 buffer = tr->trace_buffer.buffer;
4634 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4635 irq_flags, preempt_count());
4637 /* Ring buffer disabled, return as if not open for write */
4642 entry = ring_buffer_event_data(event);
4643 entry->ip = _THIS_IP_;
4645 if (nr_pages == 2) {
4646 len = PAGE_SIZE - offset;
4647 memcpy(&entry->buf, map_page[0] + offset, len);
4648 memcpy(&entry->buf[len], map_page[1], cnt - len);
4650 memcpy(&entry->buf, map_page[0] + offset, cnt);
4652 if (entry->buf[cnt - 1] != '\n') {
4653 entry->buf[cnt] = '\n';
4654 entry->buf[cnt + 1] = '\0';
4656 entry->buf[cnt] = '\0';
4658 __buffer_unlock_commit(buffer, event);
4665 for (i = 0; i < nr_pages; i++){
4666 kunmap_atomic(map_page[i]);
4673 static int tracing_clock_show(struct seq_file *m, void *v)
4675 struct trace_array *tr = m->private;
4678 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4680 "%s%s%s%s", i ? " " : "",
4681 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4682 i == tr->clock_id ? "]" : "");
4688 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4689 size_t cnt, loff_t *fpos)
4691 struct seq_file *m = filp->private_data;
4692 struct trace_array *tr = m->private;
4694 const char *clockstr;
4697 if (cnt >= sizeof(buf))
4700 if (copy_from_user(&buf, ubuf, cnt))
4705 clockstr = strstrip(buf);
4707 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4708 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4711 if (i == ARRAY_SIZE(trace_clocks))
4714 mutex_lock(&trace_types_lock);
4718 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4721 * New clock may not be consistent with the previous clock.
4722 * Reset the buffer so that it doesn't have incomparable timestamps.
4724 tracing_reset_online_cpus(&tr->trace_buffer);
4726 #ifdef CONFIG_TRACER_MAX_TRACE
4727 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4728 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4729 tracing_reset_online_cpus(&tr->max_buffer);
4732 mutex_unlock(&trace_types_lock);
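/*
 * Illustrative usage sketch (added note, not part of the original file):
 * reading trace_clock brackets the clock currently in use, and writing one
 * of the listed names switches to it (resetting the buffers, as noted above):
 *
 *   cat trace_clock            # e.g. "[local] global counter uptime perf"
 *   echo global > trace_clock
 */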
4739 static int tracing_clock_open(struct inode *inode, struct file *file)
4741 struct trace_array *tr = inode->i_private;
4744 if (tracing_disabled)
4747 if (trace_array_get(tr))
4750 ret = single_open(file, tracing_clock_show, inode->i_private);
4752 trace_array_put(tr);
4757 struct ftrace_buffer_info {
4758 struct trace_iterator iter;
4763 #ifdef CONFIG_TRACER_SNAPSHOT
4764 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4766 struct trace_array *tr = inode->i_private;
4767 struct trace_iterator *iter;
4771 if (trace_array_get(tr) < 0)
4774 if (file->f_mode & FMODE_READ) {
4775 iter = __tracing_open(inode, file, true);
4777 ret = PTR_ERR(iter);
4779 /* Writes still need the seq_file to hold the private data */
4781 m = kzalloc(sizeof(*m), GFP_KERNEL);
4784 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4792 iter->trace_buffer = &tr->max_buffer;
4793 iter->cpu_file = tracing_get_cpu(inode);
4795 file->private_data = m;
4799 trace_array_put(tr);
4805 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4808 struct seq_file *m = filp->private_data;
4809 struct trace_iterator *iter = m->private;
4810 struct trace_array *tr = iter->tr;
4814 ret = tracing_update_buffers();
4818 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4822 mutex_lock(&trace_types_lock);
4824 if (tr->current_trace->use_max_tr) {
4831 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4835 if (tr->allocated_snapshot)
4839 /* Only allow per-cpu swap if the ring buffer supports it */
4840 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4841 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4846 if (!tr->allocated_snapshot) {
4847 ret = alloc_snapshot(tr);
4851 local_irq_disable();
4852 /* Now, we're going to swap */
4853 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4854 update_max_tr(tr, current, smp_processor_id());
4856 update_max_tr_single(tr, current, iter->cpu_file);
4860 if (tr->allocated_snapshot) {
4861 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4862 tracing_reset_online_cpus(&tr->max_buffer);
4864 tracing_reset(&tr->max_buffer, iter->cpu_file);
4874 mutex_unlock(&trace_types_lock);
4878 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4880 struct seq_file *m = file->private_data;
4883 ret = tracing_release(inode, file);
4885 if (file->f_mode & FMODE_READ)
4888 /* If write only, the seq_file is just a stub */
4896 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4897 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4898 size_t count, loff_t *ppos);
4899 static int tracing_buffers_release(struct inode *inode, struct file *file);
4900 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4901 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4903 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4905 struct ftrace_buffer_info *info;
4908 ret = tracing_buffers_open(inode, filp);
4912 info = filp->private_data;
4914 if (info->iter.trace->use_max_tr) {
4915 tracing_buffers_release(inode, filp);
4919 info->iter.snapshot = true;
4920 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4925 #endif /* CONFIG_TRACER_SNAPSHOT */
4928 static const struct file_operations tracing_max_lat_fops = {
4929 .open = tracing_open_generic,
4930 .read = tracing_max_lat_read,
4931 .write = tracing_max_lat_write,
4932 .llseek = generic_file_llseek,
4935 static const struct file_operations set_tracer_fops = {
4936 .open = tracing_open_generic,
4937 .read = tracing_set_trace_read,
4938 .write = tracing_set_trace_write,
4939 .llseek = generic_file_llseek,
4942 static const struct file_operations tracing_pipe_fops = {
4943 .open = tracing_open_pipe,
4944 .poll = tracing_poll_pipe,
4945 .read = tracing_read_pipe,
4946 .splice_read = tracing_splice_read_pipe,
4947 .release = tracing_release_pipe,
4948 .llseek = no_llseek,
4951 static const struct file_operations tracing_entries_fops = {
4952 .open = tracing_open_generic_tr,
4953 .read = tracing_entries_read,
4954 .write = tracing_entries_write,
4955 .llseek = generic_file_llseek,
4956 .release = tracing_release_generic_tr,
4959 static const struct file_operations tracing_total_entries_fops = {
4960 .open = tracing_open_generic_tr,
4961 .read = tracing_total_entries_read,
4962 .llseek = generic_file_llseek,
4963 .release = tracing_release_generic_tr,
4966 static const struct file_operations tracing_free_buffer_fops = {
4967 .open = tracing_open_generic_tr,
4968 .write = tracing_free_buffer_write,
4969 .release = tracing_free_buffer_release,
4972 static const struct file_operations tracing_mark_fops = {
4973 .open = tracing_open_generic_tr,
4974 .write = tracing_mark_write,
4975 .llseek = generic_file_llseek,
4976 .release = tracing_release_generic_tr,
4979 static const struct file_operations trace_clock_fops = {
4980 .open = tracing_clock_open,
4982 .llseek = seq_lseek,
4983 .release = tracing_single_release_tr,
4984 .write = tracing_clock_write,
4987 #ifdef CONFIG_TRACER_SNAPSHOT
4988 static const struct file_operations snapshot_fops = {
4989 .open = tracing_snapshot_open,
4991 .write = tracing_snapshot_write,
4992 .llseek = tracing_lseek,
4993 .release = tracing_snapshot_release,
4996 static const struct file_operations snapshot_raw_fops = {
4997 .open = snapshot_raw_open,
4998 .read = tracing_buffers_read,
4999 .release = tracing_buffers_release,
5000 .splice_read = tracing_buffers_splice_read,
5001 .llseek = no_llseek,
5004 #endif /* CONFIG_TRACER_SNAPSHOT */
5006 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5008 struct trace_array *tr = inode->i_private;
5009 struct ftrace_buffer_info *info;
5012 if (tracing_disabled)
5015 if (trace_array_get(tr) < 0)
5018 info = kzalloc(sizeof(*info), GFP_KERNEL);
5020 trace_array_put(tr);
5024 mutex_lock(&trace_types_lock);
5027 info->iter.cpu_file = tracing_get_cpu(inode);
5028 info->iter.trace = tr->current_trace;
5029 info->iter.trace_buffer = &tr->trace_buffer;
5031 /* Force reading ring buffer for first read */
5032 info->read = (unsigned int)-1;
5034 filp->private_data = info;
5036 mutex_unlock(&trace_types_lock);
5038 ret = nonseekable_open(inode, filp);
5040 trace_array_put(tr);
5046 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5048 struct ftrace_buffer_info *info = filp->private_data;
5049 struct trace_iterator *iter = &info->iter;
5051 return trace_poll(iter, filp, poll_table);
5055 tracing_buffers_read(struct file *filp, char __user *ubuf,
5056 size_t count, loff_t *ppos)
5058 struct ftrace_buffer_info *info = filp->private_data;
5059 struct trace_iterator *iter = &info->iter;
5066 mutex_lock(&trace_types_lock);
5068 #ifdef CONFIG_TRACER_MAX_TRACE
5069 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5076 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5082 /* Do we have previous read data to read? */
5083 if (info->read < PAGE_SIZE)
5087 trace_access_lock(iter->cpu_file);
5088 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5092 trace_access_unlock(iter->cpu_file);
5095 if (trace_empty(iter)) {
5096 if ((filp->f_flags & O_NONBLOCK)) {
5100 mutex_unlock(&trace_types_lock);
5101 iter->trace->wait_pipe(iter);
5102 mutex_lock(&trace_types_lock);
5103 if (signal_pending(current)) {
5115 size = PAGE_SIZE - info->read;
5119 ret = copy_to_user(ubuf, info->spare + info->read, size);
5130 mutex_unlock(&trace_types_lock);
5135 static int tracing_buffers_release(struct inode *inode, struct file *file)
5137 struct ftrace_buffer_info *info = file->private_data;
5138 struct trace_iterator *iter = &info->iter;
5140 mutex_lock(&trace_types_lock);
5142 __trace_array_put(iter->tr);
5145 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5148 mutex_unlock(&trace_types_lock);
5154 struct ring_buffer *buffer;
5159 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5160 struct pipe_buffer *buf)
5162 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5167 ring_buffer_free_read_page(ref->buffer, ref->page);
5172 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5173 struct pipe_buffer *buf)
5175 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5180 /* Pipe buffer operations for a buffer. */
5181 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5183 .map = generic_pipe_buf_map,
5184 .unmap = generic_pipe_buf_unmap,
5185 .confirm = generic_pipe_buf_confirm,
5186 .release = buffer_pipe_buf_release,
5187 .steal = generic_pipe_buf_steal,
5188 .get = buffer_pipe_buf_get,
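/*
 * Lifetime sketch for the zero-copy path: every page handed to the pipe
 * carries a struct buffer_ref in pipe_buffer->private. ->get() takes an
 * extra reference, ->release() drops one, and the final put hands the
 * page back to the ring buffer via ring_buffer_free_read_page().
 */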
5192 * Callback from splice_to_pipe(): releases any pages still held in the
5193 * spd if we errored out while filling the pipe.
5195 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5197 struct buffer_ref *ref =
5198 (struct buffer_ref *)spd->partial[i].private;
5203 ring_buffer_free_read_page(ref->buffer, ref->page);
5205 spd->partial[i].private = 0;
5209 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5210 struct pipe_inode_info *pipe, size_t len,
5213 struct ftrace_buffer_info *info = file->private_data;
5214 struct trace_iterator *iter = &info->iter;
5215 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5216 struct page *pages_def[PIPE_DEF_BUFFERS];
5217 struct splice_pipe_desc spd = {
5219 .partial = partial_def,
5220 .nr_pages_max = PIPE_DEF_BUFFERS,
5222 .ops = &buffer_pipe_buf_ops,
5223 .spd_release = buffer_spd_release,
5225 struct buffer_ref *ref;
5226 int entries, size, i;
5229 mutex_lock(&trace_types_lock);
5231 #ifdef CONFIG_TRACER_MAX_TRACE
5232 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5238 if (splice_grow_spd(pipe, &spd)) {
5243 if (*ppos & (PAGE_SIZE - 1)) {
5248 if (len & (PAGE_SIZE - 1)) {
5249 if (len < PAGE_SIZE) {
5257 trace_access_lock(iter->cpu_file);
5258 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5260 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
5264 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5269 ref->buffer = iter->trace_buffer->buffer;
5270 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5276 r = ring_buffer_read_page(ref->buffer, &ref->page,
5277 len, iter->cpu_file, 1);
5279 ring_buffer_free_read_page(ref->buffer, ref->page);
5285 * zero out any leftover data; this page is going to user space.
5288 size = ring_buffer_page_len(ref->page);
5289 if (size < PAGE_SIZE)
5290 memset(ref->page + size, 0, PAGE_SIZE - size);
5292 page = virt_to_page(ref->page);
5294 spd.pages[i] = page;
5295 spd.partial[i].len = PAGE_SIZE;
5296 spd.partial[i].offset = 0;
5297 spd.partial[i].private = (unsigned long)ref;
5301 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5304 trace_access_unlock(iter->cpu_file);
5307 /* did we read anything? */
5308 if (!spd.nr_pages) {
5309 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5313 mutex_unlock(&trace_types_lock);
5314 iter->trace->wait_pipe(iter);
5315 mutex_lock(&trace_types_lock);
5316 if (signal_pending(current)) {
5323 ret = splice_to_pipe(pipe, &spd);
5324 splice_shrink_spd(&spd);
5326 mutex_unlock(&trace_types_lock);
5331 static const struct file_operations tracing_buffers_fops = {
5332 .open = tracing_buffers_open,
5333 .read = tracing_buffers_read,
5334 .poll = tracing_buffers_poll,
5335 .release = tracing_buffers_release,
5336 .splice_read = tracing_buffers_splice_read,
5337 .llseek = no_llseek,
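/*
 * Usage note: per_cpu/cpuN/trace_pipe_raw is backed by the operations
 * above and hands out raw ring-buffer pages. Reads consume events, the
 * splice offset must be page aligned, and splice lengths are handled in
 * whole pages, matching the checks in tracing_buffers_splice_read().
 */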
5341 tracing_stats_read(struct file *filp, char __user *ubuf,
5342 size_t count, loff_t *ppos)
5344 struct inode *inode = file_inode(filp);
5345 struct trace_array *tr = inode->i_private;
5346 struct trace_buffer *trace_buf = &tr->trace_buffer;
5347 int cpu = tracing_get_cpu(inode);
5348 struct trace_seq *s;
5350 unsigned long long t;
5351 unsigned long usec_rem;
5353 s = kmalloc(sizeof(*s), GFP_KERNEL);
5359 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5360 trace_seq_printf(s, "entries: %ld\n", cnt);
5362 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5363 trace_seq_printf(s, "overrun: %ld\n", cnt);
5365 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5366 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5368 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5369 trace_seq_printf(s, "bytes: %ld\n", cnt);
5371 if (trace_clocks[tr->clock_id].in_ns) {
5372 /* local or global for trace_clock */
5373 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5374 usec_rem = do_div(t, USEC_PER_SEC);
5375 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5378 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5379 usec_rem = do_div(t, USEC_PER_SEC);
5380 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5382 /* counter or tsc mode for trace_clock */
5383 trace_seq_printf(s, "oldest event ts: %llu\n",
5384 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5386 trace_seq_printf(s, "now ts: %llu\n",
5387 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5390 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5391 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5393 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5394 trace_seq_printf(s, "read events: %ld\n", cnt);
5396 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5403 static const struct file_operations tracing_stats_fops = {
5404 .open = tracing_open_generic_tr,
5405 .read = tracing_stats_read,
5406 .llseek = generic_file_llseek,
5407 .release = tracing_release_generic_tr,
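/*
 * Illustrative output of per_cpu/cpuN/stats with the default (nanosecond)
 * trace clock; the values below are made up:
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 45320
 *   oldest event ts:  1234.567890
 *   now ts:  1240.000123
 *   dropped events: 0
 *   read events: 1024
 */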
5410 #ifdef CONFIG_DYNAMIC_FTRACE
5412 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5418 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5419 size_t cnt, loff_t *ppos)
5421 static char ftrace_dyn_info_buffer[1024];
5422 static DEFINE_MUTEX(dyn_info_mutex);
5423 unsigned long *p = filp->private_data;
5424 char *buf = ftrace_dyn_info_buffer;
5425 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5428 mutex_lock(&dyn_info_mutex);
5429 r = sprintf(buf, "%ld ", *p);
5431 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5434 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5436 mutex_unlock(&dyn_info_mutex);
5441 static const struct file_operations tracing_dyn_info_fops = {
5442 .open = tracing_open_generic,
5443 .read = tracing_read_dyn_info,
5444 .llseek = generic_file_llseek,
5446 #endif /* CONFIG_DYNAMIC_FTRACE */
5448 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5450 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5456 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5458 unsigned long *count = (unsigned long *)data;
5470 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5471 struct ftrace_probe_ops *ops, void *data)
5473 long count = (long)data;
5475 seq_printf(m, "%ps:", (void *)ip);
5477 seq_printf(m, "snapshot");
5480 seq_printf(m, ":unlimited\n");
5482 seq_printf(m, ":count=%ld\n", count);
5487 static struct ftrace_probe_ops snapshot_probe_ops = {
5488 .func = ftrace_snapshot,
5489 .print = ftrace_snapshot_print,
5492 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5493 .func = ftrace_count_snapshot,
5494 .print = ftrace_snapshot_print,
5498 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5499 char *glob, char *cmd, char *param, int enable)
5501 struct ftrace_probe_ops *ops;
5502 void *count = (void *)-1;
5506 /* hash funcs only work with set_ftrace_filter */
5510 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5512 if (glob[0] == '!') {
5513 unregister_ftrace_function_probe_func(glob+1, ops);
5520 number = strsep(&param, ":");
5522 if (!strlen(number))
5526 * We use the callback data field (which is a pointer) to hold the count.
5529 ret = kstrtoul(number, 0, (unsigned long *)&count);
5534 ret = register_ftrace_function_probe(glob, ops, count);
5537 alloc_snapshot(&global_trace);
5539 return ret < 0 ? ret : 0;
5542 static struct ftrace_func_command ftrace_snapshot_cmd = {
5544 .func = ftrace_trace_snapshot_callback,
5547 static __init int register_snapshot_cmd(void)
5549 return register_ftrace_command(&ftrace_snapshot_cmd);
5552 static inline __init int register_snapshot_cmd(void) { return 0; }
5553 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
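/*
 * Sketch of how the snapshot function command registered above is used
 * (see Documentation/trace/ftrace.txt for details):
 *
 *   echo 'schedule:snapshot'   > set_ftrace_filter   # snapshot on every hit of schedule()
 *   echo 'schedule:snapshot:3' > set_ftrace_filter   # only the first three hits
 *   echo '!schedule:snapshot'  > set_ftrace_filter   # remove the probe again
 */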
5555 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5560 if (!debugfs_initialized())
5563 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5564 tr->dir = debugfs_create_dir("tracing", NULL);
5567 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5572 struct dentry *tracing_init_dentry(void)
5574 return tracing_init_dentry_tr(&global_trace);
5577 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5579 struct dentry *d_tracer;
5582 return tr->percpu_dir;
5584 d_tracer = tracing_init_dentry_tr(tr);
5588 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5590 WARN_ONCE(!tr->percpu_dir,
5591 "Could not create debugfs directory 'per_cpu'\n");
5593 return tr->percpu_dir;
5596 static struct dentry *
5597 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5598 void *data, long cpu, const struct file_operations *fops)
5600 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5602 if (ret) /* See tracing_get_cpu() */
5603 ret->d_inode->i_cdev = (void *)(cpu + 1);
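/*
 * The +1 lets tracing_get_cpu() tell a per-cpu file (non-NULL i_cdev)
 * apart from a top-level file, which decodes to RING_BUFFER_ALL_CPUS.
 */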
5608 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5610 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5611 struct dentry *d_cpu;
5612 char cpu_dir[30]; /* 30 characters should be more than enough */
5617 snprintf(cpu_dir, 30, "cpu%ld", cpu);
5618 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5620 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5624 /* per cpu trace_pipe */
5625 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5626 tr, cpu, &tracing_pipe_fops);
5629 trace_create_cpu_file("trace", 0644, d_cpu,
5630 tr, cpu, &tracing_fops);
5632 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5633 tr, cpu, &tracing_buffers_fops);
5635 trace_create_cpu_file("stats", 0444, d_cpu,
5636 tr, cpu, &tracing_stats_fops);
5638 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5639 tr, cpu, &tracing_entries_fops);
5641 #ifdef CONFIG_TRACER_SNAPSHOT
5642 trace_create_cpu_file("snapshot", 0644, d_cpu,
5643 tr, cpu, &snapshot_fops);
5645 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5646 tr, cpu, &snapshot_raw_fops);
5650 #ifdef CONFIG_FTRACE_SELFTEST
5651 /* Let selftest have access to static functions in this file */
5652 #include "trace_selftest.c"
5655 struct trace_option_dentry {
5656 struct tracer_opt *opt;
5657 struct tracer_flags *flags;
5658 struct trace_array *tr;
5659 struct dentry *entry;
5663 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5666 struct trace_option_dentry *topt = filp->private_data;
5669 if (topt->flags->val & topt->opt->bit)
5674 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5678 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5681 struct trace_option_dentry *topt = filp->private_data;
5685 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5689 if (val != 0 && val != 1)
5692 if (!!(topt->flags->val & topt->opt->bit) != val) {
5693 mutex_lock(&trace_types_lock);
5694 ret = __set_tracer_option(topt->tr, topt->flags,
5696 mutex_unlock(&trace_types_lock);
5707 static const struct file_operations trace_options_fops = {
5708 .open = tracing_open_generic,
5709 .read = trace_options_read,
5710 .write = trace_options_write,
5711 .llseek = generic_file_llseek,
5715 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5718 long index = (long)filp->private_data;
5721 if (trace_flags & (1 << index))
5726 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5730 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5733 struct trace_array *tr = &global_trace;
5734 long index = (long)filp->private_data;
5738 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5742 if (val != 0 && val != 1)
5745 mutex_lock(&trace_types_lock);
5746 ret = set_tracer_flag(tr, 1 << index, val);
5747 mutex_unlock(&trace_types_lock);
5757 static const struct file_operations trace_options_core_fops = {
5758 .open = tracing_open_generic,
5759 .read = trace_options_core_read,
5760 .write = trace_options_core_write,
5761 .llseek = generic_file_llseek,
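/*
 * Usage sketch: every flag in trace_options[] gets a file under options/
 * backed by the fops above, e.g.
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/overwrite
 *   echo 0 > /sys/kernel/debug/tracing/options/sym-offset
 *
 * Anything other than 0 or 1 is rejected.
 */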
5764 struct dentry *trace_create_file(const char *name,
5766 struct dentry *parent,
5768 const struct file_operations *fops)
5772 ret = debugfs_create_file(name, mode, parent, data, fops);
5774 pr_warning("Could not create debugfs '%s' entry\n", name);
5780 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5782 struct dentry *d_tracer;
5787 d_tracer = tracing_init_dentry_tr(tr);
5791 tr->options = debugfs_create_dir("options", d_tracer);
5793 pr_warning("Could not create debugfs directory 'options'\n");
5801 create_trace_option_file(struct trace_array *tr,
5802 struct trace_option_dentry *topt,
5803 struct tracer_flags *flags,
5804 struct tracer_opt *opt)
5806 struct dentry *t_options;
5808 t_options = trace_options_init_dentry(tr);
5812 topt->flags = flags;
5816 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5817 &trace_options_fops);
5821 static struct trace_option_dentry *
5822 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5824 struct trace_option_dentry *topts;
5825 struct tracer_flags *flags;
5826 struct tracer_opt *opts;
5832 flags = tracer->flags;
5834 if (!flags || !flags->opts)
5839 for (cnt = 0; opts[cnt].name; cnt++)
5842 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5846 for (cnt = 0; opts[cnt].name; cnt++)
5847 create_trace_option_file(tr, &topts[cnt], flags,
5854 destroy_trace_option_files(struct trace_option_dentry *topts)
5861 for (cnt = 0; topts[cnt].opt; cnt++) {
5862 if (topts[cnt].entry)
5863 debugfs_remove(topts[cnt].entry);
5869 static struct dentry *
5870 create_trace_option_core_file(struct trace_array *tr,
5871 const char *option, long index)
5873 struct dentry *t_options;
5875 t_options = trace_options_init_dentry(tr);
5879 return trace_create_file(option, 0644, t_options, (void *)index,
5880 &trace_options_core_fops);
5883 static __init void create_trace_options_dir(struct trace_array *tr)
5885 struct dentry *t_options;
5888 t_options = trace_options_init_dentry(tr);
5892 for (i = 0; trace_options[i]; i++)
5893 create_trace_option_core_file(tr, trace_options[i], i);
5897 rb_simple_read(struct file *filp, char __user *ubuf,
5898 size_t cnt, loff_t *ppos)
5900 struct trace_array *tr = filp->private_data;
5904 r = tracer_tracing_is_on(tr);
5905 r = sprintf(buf, "%d\n", r);
5907 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5911 rb_simple_write(struct file *filp, const char __user *ubuf,
5912 size_t cnt, loff_t *ppos)
5914 struct trace_array *tr = filp->private_data;
5915 struct ring_buffer *buffer = tr->trace_buffer.buffer;
5919 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5924 mutex_lock(&trace_types_lock);
5926 tracer_tracing_on(tr);
5927 if (tr->current_trace->start)
5928 tr->current_trace->start(tr);
5930 tracer_tracing_off(tr);
5931 if (tr->current_trace->stop)
5932 tr->current_trace->stop(tr);
5934 mutex_unlock(&trace_types_lock);
5942 static const struct file_operations rb_simple_fops = {
5943 .open = tracing_open_generic_tr,
5944 .read = rb_simple_read,
5945 .write = rb_simple_write,
5946 .release = tracing_release_generic_tr,
5947 .llseek = default_llseek,
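/*
 * The fops above back the "tracing_on" file. A typical session:
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on   # stop recording, tracer ->stop() runs
 *   cat /sys/kernel/debug/tracing/tracing_on        # prints 0 or 1
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on   # resume recording, tracer ->start() runs
 */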
5950 struct dentry *trace_instance_dir;
5953 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5956 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5958 enum ring_buffer_flags rb_flags;
5960 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5964 buf->buffer = ring_buffer_alloc(size, rb_flags);
5968 buf->data = alloc_percpu(struct trace_array_cpu);
5970 ring_buffer_free(buf->buffer);
5974 /* Allocate the first page for all buffers */
5975 set_buffer_entries(&tr->trace_buffer,
5976 ring_buffer_size(tr->trace_buffer.buffer, 0));
5981 static int allocate_trace_buffers(struct trace_array *tr, int size)
5985 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5989 #ifdef CONFIG_TRACER_MAX_TRACE
5990 ret = allocate_trace_buffer(tr, &tr->max_buffer,
5991 allocate_snapshot ? size : 1);
5993 ring_buffer_free(tr->trace_buffer.buffer);
5994 free_percpu(tr->trace_buffer.data);
5997 tr->allocated_snapshot = allocate_snapshot;
6000 * Only the top level trace array gets its snapshot allocated
6001 * from the kernel command line.
6003 allocate_snapshot = false;
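/*
 * Note: unless a snapshot was requested on the kernel command line, the
 * max/snapshot buffer above starts out at a minimal size and is only
 * grown later, when a snapshot is actually taken.
 */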
6008 static int new_instance_create(const char *name)
6010 struct trace_array *tr;
6013 mutex_lock(&trace_types_lock);
6016 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6017 if (tr->name && strcmp(tr->name, name) == 0)
6022 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6026 tr->name = kstrdup(name, GFP_KERNEL);
6030 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6033 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6035 raw_spin_lock_init(&tr->start_lock);
6037 tr->current_trace = &nop_trace;
6039 INIT_LIST_HEAD(&tr->systems);
6040 INIT_LIST_HEAD(&tr->events);
6042 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6045 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6049 ret = event_trace_add_tracer(tr->dir, tr);
6051 debugfs_remove_recursive(tr->dir);
6055 init_tracer_debugfs(tr, tr->dir);
6057 list_add(&tr->list, &ftrace_trace_arrays);
6059 mutex_unlock(&trace_types_lock);
6064 if (tr->trace_buffer.buffer)
6065 ring_buffer_free(tr->trace_buffer.buffer);
6066 free_cpumask_var(tr->tracing_cpumask);
6071 mutex_unlock(&trace_types_lock);
6077 static int instance_delete(const char *name)
6079 struct trace_array *tr;
6083 mutex_lock(&trace_types_lock);
6086 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6087 if (tr->name && strcmp(tr->name, name) == 0) {
6099 list_del(&tr->list);
6101 event_trace_del_tracer(tr);
6102 debugfs_remove_recursive(tr->dir);
6103 free_percpu(tr->trace_buffer.data);
6104 ring_buffer_free(tr->trace_buffer.buffer);
6112 mutex_unlock(&trace_types_lock);
6117 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6119 struct dentry *parent;
6122 /* Paranoid: Make sure the parent is the "instances" directory */
6123 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6124 if (WARN_ON_ONCE(parent != trace_instance_dir))
6128 * The inode mutex is locked, but debugfs_create_dir() will also
6129 * take the mutex. As the instances directory cannot be destroyed
6130 * or changed in any other way, it is safe to unlock it and let the
6131 * dentry try. If two users try to make the same dir at the same
6132 * time, new_instance_create() will determine the winner.
6135 mutex_unlock(&inode->i_mutex);
6137 ret = new_instance_create(dentry->d_iname);
6139 mutex_lock(&inode->i_mutex);
6144 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6146 struct dentry *parent;
6149 /* Paranoid: Make sure the parent is the "instances" directory */
6150 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6151 if (WARN_ON_ONCE(parent != trace_instance_dir))
6154 /* The caller did a dget() on dentry */
6155 mutex_unlock(&dentry->d_inode->i_mutex);
6158 * The inode mutex is locked, but debugfs_remove_recursive() will also
6159 * take the mutex. As the instances directory cannot be destroyed
6160 * or changed in any other way, it is safe to unlock it and let the
6161 * dentry try. If two users try to remove the same dir at the same
6162 * time, instance_delete() will determine the winner.
6165 mutex_unlock(&inode->i_mutex);
6167 ret = instance_delete(dentry->d_iname);
6169 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6170 mutex_lock(&dentry->d_inode->i_mutex);
6175 static const struct inode_operations instance_dir_inode_operations = {
6176 .lookup = simple_lookup,
6177 .mkdir = instance_mkdir,
6178 .rmdir = instance_rmdir,
6181 static __init void create_trace_instances(struct dentry *d_tracer)
6183 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6184 if (WARN_ON(!trace_instance_dir))
6187 /* Hijack the dir inode operations, to allow mkdir */
6188 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
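/*
 * With the mkdir/rmdir hooks above in place, new trace arrays can be
 * created from user space, e.g.:
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo   # independent buffer and event files
 *   rmdir /sys/kernel/debug/tracing/instances/foo   # tear the instance down again
 */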
6192 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6196 trace_create_file("tracing_cpumask", 0644, d_tracer,
6197 tr, &tracing_cpumask_fops);
6199 trace_create_file("trace_options", 0644, d_tracer,
6200 tr, &tracing_iter_fops);
6202 trace_create_file("trace", 0644, d_tracer,
6205 trace_create_file("trace_pipe", 0444, d_tracer,
6206 tr, &tracing_pipe_fops);
6208 trace_create_file("buffer_size_kb", 0644, d_tracer,
6209 tr, &tracing_entries_fops);
6211 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6212 tr, &tracing_total_entries_fops);
6214 trace_create_file("free_buffer", 0200, d_tracer,
6215 tr, &tracing_free_buffer_fops);
6217 trace_create_file("trace_marker", 0220, d_tracer,
6218 tr, &tracing_mark_fops);
6220 trace_create_file("trace_clock", 0644, d_tracer, tr,
6223 trace_create_file("tracing_on", 0644, d_tracer,
6224 tr, &rb_simple_fops);
6226 #ifdef CONFIG_TRACER_SNAPSHOT
6227 trace_create_file("snapshot", 0644, d_tracer,
6228 tr, &snapshot_fops);
6231 for_each_tracing_cpu(cpu)
6232 tracing_init_debugfs_percpu(tr, cpu);
6236 static __init int tracer_init_debugfs(void)
6238 struct dentry *d_tracer;
6240 trace_access_lock_init();
6242 d_tracer = tracing_init_dentry();
6246 init_tracer_debugfs(&global_trace, d_tracer);
6248 trace_create_file("available_tracers", 0444, d_tracer,
6249 &global_trace, &show_traces_fops);
6251 trace_create_file("current_tracer", 0644, d_tracer,
6252 &global_trace, &set_tracer_fops);
6254 #ifdef CONFIG_TRACER_MAX_TRACE
6255 trace_create_file("tracing_max_latency", 0644, d_tracer,
6256 &tracing_max_latency, &tracing_max_lat_fops);
6259 trace_create_file("tracing_thresh", 0644, d_tracer,
6260 &tracing_thresh, &tracing_max_lat_fops);
6262 trace_create_file("README", 0444, d_tracer,
6263 NULL, &tracing_readme_fops);
6265 trace_create_file("saved_cmdlines", 0444, d_tracer,
6266 NULL, &tracing_saved_cmdlines_fops);
6268 #ifdef CONFIG_DYNAMIC_FTRACE
6269 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6270 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6273 create_trace_instances(d_tracer);
6275 create_trace_options_dir(&global_trace);
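/*
 * Quick orientation for the top-level files created above (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/tracing/available_tracers      # list registered tracers
 *   echo nop > /sys/kernel/debug/tracing/current_tracer  # select one of them
 */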
6280 static int trace_panic_handler(struct notifier_block *this,
6281 unsigned long event, void *unused)
6283 if (ftrace_dump_on_oops)
6284 ftrace_dump(ftrace_dump_on_oops);
6288 static struct notifier_block trace_panic_notifier = {
6289 .notifier_call = trace_panic_handler,
6291 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6294 static int trace_die_handler(struct notifier_block *self,
6300 if (ftrace_dump_on_oops)
6301 ftrace_dump(ftrace_dump_on_oops);
6309 static struct notifier_block trace_die_notifier = {
6310 .notifier_call = trace_die_handler,
6315 * printk allows up to 1024 characters, but we really don't need that much here:
6316 * nothing should be printing 1000 characters anyway.
6318 #define TRACE_MAX_PRINT 1000
6321 * Define here KERN_TRACE so that we have one place to modify
6322 * it if we decide to change what log level the ftrace dump
6325 #define KERN_TRACE KERN_EMERG
6328 trace_printk_seq(struct trace_seq *s)
6330 /* Probably should print a warning here. */
6331 if (s->len >= TRACE_MAX_PRINT)
6332 s->len = TRACE_MAX_PRINT;
6334 /* should already be NUL-terminated, but we are paranoid. */
6335 s->buffer[s->len] = 0;
6337 printk(KERN_TRACE "%s", s->buffer);
6342 void trace_init_global_iter(struct trace_iterator *iter)
6344 iter->tr = &global_trace;
6345 iter->trace = iter->tr->current_trace;
6346 iter->cpu_file = RING_BUFFER_ALL_CPUS;
6347 iter->trace_buffer = &global_trace.trace_buffer;
6349 if (iter->trace && iter->trace->open)
6350 iter->trace->open(iter);
6352 /* Annotate start of buffers if we had overruns */
6353 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6354 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6356 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6357 if (trace_clocks[iter->tr->clock_id].in_ns)
6358 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6361 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6363 /* use static because iter can be a bit big for the stack */
6364 static struct trace_iterator iter;
6365 static atomic_t dump_running;
6366 unsigned int old_userobj;
6367 unsigned long flags;
6370 /* Only allow one dump user at a time. */
6371 if (atomic_inc_return(&dump_running) != 1) {
6372 atomic_dec(&dump_running);
6377 * Always turn off tracing when we dump.
6378 * We don't need to show trace output of what happens
6379 * between multiple crashes.
6381 * If the user does a sysrq-z, then they can re-enable
6382 * tracing with echo 1 > tracing_on.
6386 local_irq_save(flags);
6388 /* Simulate the iterator */
6389 trace_init_global_iter(&iter);
6391 for_each_tracing_cpu(cpu) {
6392 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6395 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6397 /* don't look at user memory in panic mode */
6398 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6400 switch (oops_dump_mode) {
6402 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6405 iter.cpu_file = raw_smp_processor_id();
6410 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6411 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6414 printk(KERN_TRACE "Dumping ftrace buffer:\n");
6416 /* Did function tracer already get disabled? */
6417 if (ftrace_is_dead()) {
6418 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6419 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6423 * We need to stop all tracing on all CPUs to read the
6424 * next buffer. This is a bit expensive, but it is not
6425 * done often. We read everything we can, and then
6426 * release the locks again.
6429 while (!trace_empty(&iter)) {
6432 printk(KERN_TRACE "---------------------------------\n");
6436 /* reset all but tr, trace, and overruns */
6437 memset(&iter.seq, 0,
6438 sizeof(struct trace_iterator) -
6439 offsetof(struct trace_iterator, seq));
6440 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6443 if (trace_find_next_entry_inc(&iter) != NULL) {
6446 ret = print_trace_line(&iter);
6447 if (ret != TRACE_TYPE_NO_CONSUME)
6448 trace_consume(&iter);
6450 touch_nmi_watchdog();
6452 trace_printk_seq(&iter.seq);
6456 printk(KERN_TRACE " (ftrace buffer empty)\n");
6458 printk(KERN_TRACE "---------------------------------\n");
6461 trace_flags |= old_userobj;
6463 for_each_tracing_cpu(cpu) {
6464 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6466 atomic_dec(&dump_running);
6467 local_irq_restore(flags);
6469 EXPORT_SYMBOL_GPL(ftrace_dump);
6471 __init static int tracer_alloc_buffers(void)
6477 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6480 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6481 goto out_free_buffer_mask;
6483 /* Only allocate trace_printk buffers if a trace_printk exists */
6484 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6485 /* Must be called before global_trace.buffer is allocated */
6486 trace_printk_init_buffers();
6488 /* To save memory, keep the ring buffer size to its minimum */
6489 if (ring_buffer_expanded)
6490 ring_buf_size = trace_buf_size;
6494 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6495 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6497 raw_spin_lock_init(&global_trace.start_lock);
6499 /* TODO: make the number of buffers hot pluggable with CPUs */
6500 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6501 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6503 goto out_free_cpumask;
6506 if (global_trace.buffer_disabled)
6509 trace_init_cmdlines();
6512 * register_tracer() might reference current_trace, so it
6513 * needs to be set before we register anything. This is
6514 * just a bootstrap of current_trace anyway.
6516 global_trace.current_trace = &nop_trace;
6518 register_tracer(&nop_trace);
6520 /* All seems OK, enable tracing */
6521 tracing_disabled = 0;
6523 atomic_notifier_chain_register(&panic_notifier_list,
6524 &trace_panic_notifier);
6526 register_die_notifier(&trace_die_notifier);
6528 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6530 INIT_LIST_HEAD(&global_trace.systems);
6531 INIT_LIST_HEAD(&global_trace.events);
6532 list_add(&global_trace.list, &ftrace_trace_arrays);
6534 while (trace_boot_options) {
6537 option = strsep(&trace_boot_options, ",");
6538 trace_set_options(&global_trace, option);
6541 register_snapshot_cmd();
6546 free_percpu(global_trace.trace_buffer.data);
6547 #ifdef CONFIG_TRACER_MAX_TRACE
6548 free_percpu(global_trace.max_buffer.data);
6550 free_cpumask_var(global_trace.tracing_cpumask);
6551 out_free_buffer_mask:
6552 free_cpumask_var(tracing_buffer_mask);
6557 __init static int clear_boot_tracer(void)
6560 * The default bootup tracer name lives in a buffer that is part of
6561 * an init section. This function is called at late_initcall time;
6562 * if the boot tracer was never found, clear it out, to prevent a
6563 * later registration from accessing the buffer that is
6564 * about to be freed.
6566 if (!default_bootup_tracer)
6569 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6570 default_bootup_tracer);
6571 default_bootup_tracer = NULL;
6576 early_initcall(tracer_alloc_buffers);
6577 fs_initcall(tracer_init_debugfs);
6578 late_initcall(clear_boot_tracer);