2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
41 #include <linux/sched/rt.h>
44 #include "trace_output.h"
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
50 bool ring_buffer_expanded;
53 * We need to change this state when a selftest is running.
54 * A selftest will look into the ring buffer to count the
55 * entries inserted during the selftest, although concurrent
56 * insertions into the ring buffer (such as trace_printk) could occur
57 * at the same time, giving false positive or negative results.
59 static bool __read_mostly tracing_selftest_running;
62 * If a tracer is running, we do not want to run SELFTEST.
64 bool __read_mostly tracing_selftest_disabled;
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
71 static struct tracer_flags dummy_tracer_flags = {
73 .opts = dummy_tracer_opt
77 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
83 * To prevent the comm cache from being overwritten when no
84 * tracing is active, only save the comm when a trace event
85 * occurred.
87 static DEFINE_PER_CPU(bool, trace_cmdline_save);
90 * Kill all tracing for good (never come back).
91 * It is initialized to 1 but will turn to zero if the initialization
92 * of the tracer is successful. But that is the only place that sets
93 * this back to zero.
95 static int tracing_disabled = 1;
97 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
99 cpumask_var_t __read_mostly tracing_buffer_mask;
102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105 * is set, then ftrace_dump is called. This will output the contents
106 * of the ftrace buffers to the console. This is very useful for
107 * capturing traces that lead to crashes and outputting them to a
108 * serial console.
110 * It is off by default, but you can enable it either by specifying
111 * "ftrace_dump_on_oops" on the kernel command line, or by setting
112 * /proc/sys/kernel/ftrace_dump_on_oops
113 * Set 1 if you want to dump buffers of all CPUs
114 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
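/*
 * Illustrative ways to enable the dump (a hedged sketch of typical usage,
 * matching the description above):
 *
 *	ftrace_dump_on_oops                              (boot: dump all CPUs)
 *	ftrace_dump_on_oops=orig_cpu                     (boot: dump only the oopsing CPU)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops    (run time: all CPUs)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops    (run time: orig CPU only)
 */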
117 enum ftrace_dump_mode ftrace_dump_on_oops;
119 /* When set, tracing will stop when a WARN*() is hit */
120 int __disable_trace_on_warning;
122 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
124 #define MAX_TRACER_SIZE 100
125 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
126 static char *default_bootup_tracer;
128 static bool allocate_snapshot;
130 static int __init set_cmdline_ftrace(char *str)
132 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
133 default_bootup_tracer = bootup_tracer_buf;
134 /* We are using ftrace early, expand it */
135 ring_buffer_expanded = true;
138 __setup("ftrace=", set_cmdline_ftrace);
140 static int __init set_ftrace_dump_on_oops(char *str)
142 if (*str++ != '=' || !*str) {
143 ftrace_dump_on_oops = DUMP_ALL;
147 if (!strcmp("orig_cpu", str)) {
148 ftrace_dump_on_oops = DUMP_ORIG;
154 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
156 static int __init stop_trace_on_warning(char *str)
158 __disable_trace_on_warning = 1;
161 __setup("traceoff_on_warning=", stop_trace_on_warning);
163 static int __init boot_alloc_snapshot(char *str)
165 allocate_snapshot = true;
166 /* We also need the main ring buffer expanded */
167 ring_buffer_expanded = true;
170 __setup("alloc_snapshot", boot_alloc_snapshot);
173 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
174 static char *trace_boot_options __initdata;
176 static int __init set_trace_boot_options(char *str)
178 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
179 trace_boot_options = trace_boot_options_buf;
182 __setup("trace_options=", set_trace_boot_options);
184 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
185 static char *trace_boot_clock __initdata;
187 static int __init set_trace_boot_clock(char *str)
189 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
190 trace_boot_clock = trace_boot_clock_buf;
193 __setup("trace_clock=", set_trace_boot_clock);
196 unsigned long long ns2usecs(cycle_t nsec)
204 * The global_trace is the descriptor that holds the tracing
205 * buffers for the live tracing. For each CPU, it contains
206 * a linked list of pages that will store trace entries. The
207 * page descriptors of those pages are used to hold the
208 * linked list: the lru item in each page descriptor links
209 * the pages of that CPU's buffer together.
211 * For each active CPU there is a data field that holds the
212 * pages for the buffer for that CPU. Each CPU has the same number
213 * of pages allocated for its buffer.
215 static struct trace_array global_trace;
217 LIST_HEAD(ftrace_trace_arrays);
219 int trace_array_get(struct trace_array *this_tr)
221 struct trace_array *tr;
224 mutex_lock(&trace_types_lock);
225 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
232 mutex_unlock(&trace_types_lock);
237 static void __trace_array_put(struct trace_array *this_tr)
239 WARN_ON(!this_tr->ref);
243 void trace_array_put(struct trace_array *this_tr)
245 mutex_lock(&trace_types_lock);
246 __trace_array_put(this_tr);
247 mutex_unlock(&trace_types_lock);
250 int filter_check_discard(struct ftrace_event_file *file, void *rec,
251 struct ring_buffer *buffer,
252 struct ring_buffer_event *event)
254 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
255 !filter_match_preds(file->filter, rec)) {
256 ring_buffer_discard_commit(buffer, event);
262 EXPORT_SYMBOL_GPL(filter_check_discard);
264 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
265 struct ring_buffer *buffer,
266 struct ring_buffer_event *event)
268 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
269 !filter_match_preds(call->filter, rec)) {
270 ring_buffer_discard_commit(buffer, event);
276 EXPORT_SYMBOL_GPL(call_filter_check_discard);
278 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
282 /* Early boot up does not have a buffer yet */
284 return trace_clock_local();
286 ts = ring_buffer_time_stamp(buf->buffer, cpu);
287 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
292 cycle_t ftrace_now(int cpu)
294 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
298 * tracing_is_enabled - Show if global_trace has been disabled
300 * Shows if the global trace has been enabled or not. It uses the
301 * mirror flag "buffer_disabled" to be used in fast paths such as for
302 * the irqsoff tracer. But it may be inaccurate due to races. If you
303 * need to know the accurate state, use tracing_is_on() which is a little
304 * slower, but accurate.
306 int tracing_is_enabled(void)
309 * For quick access (irqsoff uses this in fast path), just
310 * return the mirror variable of the state of the ring buffer.
311 * It's a little racy, but we don't really care.
314 return !global_trace.buffer_disabled;
318 * trace_buf_size is the size in bytes that is allocated
319 * for a buffer. Note, the number of bytes is always rounded
320 * to page size.
322 * This number is purposely set to a low number of 16384 entries;
323 * if a dump on oops happens, it is much appreciated not to have
324 * to wait for all that output. Anyway, this is configurable both
325 * at boot time and at run time.
327 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
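/*
 * Sanity check on the arithmetic above: 16384 entries * 88 bytes per entry
 * is 1441792 bytes per CPU.  The default can be overridden at boot with the
 * "trace_buf_size=" parameter handled by set_buf_size() below; memparse()
 * accepts the usual K/M/G suffixes, e.g. "trace_buf_size=4M" (an
 * illustrative value, not a recommendation).
 */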
329 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
331 /* trace_types holds a linked list of available tracers. */
332 static struct tracer *trace_types __read_mostly;
335 * trace_types_lock is used to protect the trace_types list.
337 DEFINE_MUTEX(trace_types_lock);
340 * serialize the access of the ring buffer
342 * The ring buffer serializes readers, but it is low level protection.
343 * The validity of the events (which are returned by ring_buffer_peek() etc.)
344 * is not protected by the ring buffer.
346 * The content of events may become garbage if we allow another process to
347 * consume these events concurrently:
348 * A) the page of the consumed events may become a normal page
349 * (not a reader page) in the ring buffer, and this page will be rewritten
350 * by the event producer.
351 * B) The page of the consumed events may become a page for splice_read,
352 * and this page will be returned to the system.
354 * These primitives allow multiple processes to access different cpu ring
355 * buffers simultaneously.
357 * These primitives don't distinguish read-only and read-consume access.
358 * Multiple read-only accesses are also serialized.
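/*
 * A minimal sketch of the reader-side pattern these primitives expect
 * (roughly what the read paths later in this file do; the consuming call
 * shown is only an example):
 *
 *	trace_access_lock(cpu);
 *	... consume events for that cpu, e.g. ring_buffer_consume() ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock exclusively,
 * blocking every per-cpu reader for the duration.
 */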
362 static DECLARE_RWSEM(all_cpu_access_lock);
363 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
365 static inline void trace_access_lock(int cpu)
367 if (cpu == RING_BUFFER_ALL_CPUS) {
368 /* gain it for accessing the whole ring buffer. */
369 down_write(&all_cpu_access_lock);
371 /* gain it for accessing a cpu ring buffer. */
373 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
374 down_read(&all_cpu_access_lock);
376 /* Secondly block other access to this @cpu ring buffer. */
377 mutex_lock(&per_cpu(cpu_access_lock, cpu));
381 static inline void trace_access_unlock(int cpu)
383 if (cpu == RING_BUFFER_ALL_CPUS) {
384 up_write(&all_cpu_access_lock);
386 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
387 up_read(&all_cpu_access_lock);
391 static inline void trace_access_lock_init(void)
395 for_each_possible_cpu(cpu)
396 mutex_init(&per_cpu(cpu_access_lock, cpu));
401 static DEFINE_MUTEX(access_lock);
403 static inline void trace_access_lock(int cpu)
406 mutex_lock(&access_lock);
409 static inline void trace_access_unlock(int cpu)
412 mutex_unlock(&access_lock);
415 static inline void trace_access_lock_init(void)
421 /* trace_flags holds trace_options default values */
422 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
423 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
424 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
425 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
427 static void tracer_tracing_on(struct trace_array *tr)
429 if (tr->trace_buffer.buffer)
430 ring_buffer_record_on(tr->trace_buffer.buffer);
432 * This flag is looked at when buffers haven't been allocated
433 * yet, or by some tracers (like irqsoff) that just want to
434 * know if the ring buffer has been disabled, but can handle
435 * races where it gets disabled while we still do a record.
436 * As the check is in the fast path of the tracers, it is more
437 * important to be fast than accurate.
439 tr->buffer_disabled = 0;
440 /* Make the flag seen by readers */
445 * tracing_on - enable tracing buffers
447 * This function enables tracing buffers that may have been
448 * disabled with tracing_off.
450 void tracing_on(void)
452 tracer_tracing_on(&global_trace);
454 EXPORT_SYMBOL_GPL(tracing_on);
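/*
 * A hedged usage sketch: a subsystem chasing a rare failure can leave
 * tracing running and freeze the buffers the moment the failure is seen,
 * preserving the events that led up to it ('saw_failure' is hypothetical):
 *
 *	if (unlikely(saw_failure))
 *		tracing_off();
 *
 * The frozen trace can then be read from the "trace" file and recording
 * resumed later with tracing_on().
 */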
457 * __trace_puts - write a constant string into the trace buffer.
458 * @ip: The address of the caller
459 * @str: The constant string to write
460 * @size: The size of the string.
462 int __trace_puts(unsigned long ip, const char *str, int size)
464 struct ring_buffer_event *event;
465 struct ring_buffer *buffer;
466 struct print_entry *entry;
467 unsigned long irq_flags;
470 if (unlikely(tracing_selftest_running || tracing_disabled))
473 alloc = sizeof(*entry) + size + 2; /* possible \n added */
475 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count());
482 entry = ring_buffer_event_data(event);
485 memcpy(&entry->buf, str, size);
487 /* Add a newline if necessary */
488 if (entry->buf[size - 1] != '\n') {
489 entry->buf[size] = '\n';
490 entry->buf[size + 1] = '\0';
492 entry->buf[size] = '\0';
494 __buffer_unlock_commit(buffer, event);
498 EXPORT_SYMBOL_GPL(__trace_puts);
501 * __trace_bputs - write the pointer to a constant string into trace buffer
502 * @ip: The address of the caller
503 * @str: The constant string to write to the buffer
505 int __trace_bputs(unsigned long ip, const char *str)
507 struct ring_buffer_event *event;
508 struct ring_buffer *buffer;
509 struct bputs_entry *entry;
510 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry);
513 if (unlikely(tracing_selftest_running || tracing_disabled))
516 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count());
523 entry = ring_buffer_event_data(event);
527 __buffer_unlock_commit(buffer, event);
531 EXPORT_SYMBOL_GPL(__trace_bputs);
533 #ifdef CONFIG_TRACER_SNAPSHOT
535 * tracing_snapshot - take a snapshot of the current buffer.
537 * This causes a swap between the snapshot buffer and the current live
538 * tracing buffer. You can use this to take snapshots of the live
539 * trace when some condition is triggered, but continue to trace.
541 * Note, make sure to allocate the snapshot with either
542 * a tracing_snapshot_alloc(), or by doing it manually
543 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
545 * If the snapshot buffer is not allocated, it will stop tracing.
546 * Basically making a permanent snapshot.
548 void tracing_snapshot(void)
550 struct trace_array *tr = &global_trace;
551 struct tracer *tracer = tr->current_trace;
555 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
556 internal_trace_puts("*** snapshot is being ignored ***\n");
560 if (!tr->allocated_snapshot) {
561 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
562 internal_trace_puts("*** stopping trace here! ***\n");
567 /* Note, snapshot can not be used when the tracer uses it */
568 if (tracer->use_max_tr) {
569 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
570 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
574 local_irq_save(flags);
575 update_max_tr(tr, current, smp_processor_id());
576 local_irq_restore(flags);
578 EXPORT_SYMBOL_GPL(tracing_snapshot);
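/*
 * A minimal sketch of a typical snapshot point (the condition is
 * hypothetical; the calls are the ones provided in this file):
 *
 *	tracing_snapshot_alloc();       // sleepable context, allocates the spare buffer
 *	...
 *	if (condition_of_interest)
 *		tracing_snapshot();     // swap the live buffer into the snapshot
 *
 * The captured data can then be read from the "snapshot" file while live
 * tracing continues in the swapped-in buffer.
 */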
580 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
581 struct trace_buffer *size_buf, int cpu_id);
582 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
584 static int alloc_snapshot(struct trace_array *tr)
588 if (!tr->allocated_snapshot) {
590 /* allocate spare buffer */
591 ret = resize_buffer_duplicate_size(&tr->max_buffer,
592 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
596 tr->allocated_snapshot = true;
602 static void free_snapshot(struct trace_array *tr)
605 * We don't free the ring buffer; instead, we resize it because
606 * the max_tr ring buffer has some state (e.g. ring->clock) and
607 * we want to preserve it.
609 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
610 set_buffer_entries(&tr->max_buffer, 1);
611 tracing_reset_online_cpus(&tr->max_buffer);
612 tr->allocated_snapshot = false;
616 * tracing_alloc_snapshot - allocate snapshot buffer.
618 * This only allocates the snapshot buffer if it isn't already
619 * allocated - it doesn't also take a snapshot.
621 * This is meant to be used in cases where the snapshot buffer needs
622 * to be set up for events that can't sleep but need to be able to
623 * trigger a snapshot.
625 int tracing_alloc_snapshot(void)
627 struct trace_array *tr = &global_trace;
630 ret = alloc_snapshot(tr);
635 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
638 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
640 * This is similar to tracing_snapshot(), but it will allocate the
641 * snapshot buffer if it isn't already allocated. Use this only
642 * where it is safe to sleep, as the allocation may sleep.
644 * This causes a swap between the snapshot buffer and the current live
645 * tracing buffer. You can use this to take snapshots of the live
646 * trace when some condition is triggered, but continue to trace.
648 void tracing_snapshot_alloc(void)
652 ret = tracing_alloc_snapshot();
658 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
660 void tracing_snapshot(void)
662 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
664 EXPORT_SYMBOL_GPL(tracing_snapshot);
665 int tracing_alloc_snapshot(void)
667 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
670 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
671 void tracing_snapshot_alloc(void)
676 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
677 #endif /* CONFIG_TRACER_SNAPSHOT */
679 static void tracer_tracing_off(struct trace_array *tr)
681 if (tr->trace_buffer.buffer)
682 ring_buffer_record_off(tr->trace_buffer.buffer);
684 * This flag is looked at when buffers haven't been allocated
685 * yet, or by some tracers (like irqsoff) that just want to
686 * know if the ring buffer has been disabled, but can handle
687 * races where it gets disabled while we still do a record.
688 * As the check is in the fast path of the tracers, it is more
689 * important to be fast than accurate.
691 tr->buffer_disabled = 1;
692 /* Make the flag seen by readers */
697 * tracing_off - turn off tracing buffers
699 * This function stops the tracing buffers from recording data.
700 * It does not disable any overhead the tracers themselves may
701 * be causing. This function simply causes all recording to
702 * the ring buffers to fail.
704 void tracing_off(void)
706 tracer_tracing_off(&global_trace);
708 EXPORT_SYMBOL_GPL(tracing_off);
710 void disable_trace_on_warning(void)
712 if (__disable_trace_on_warning)
717 * tracer_tracing_is_on - show real state of ring buffer enabled
718 * @tr : the trace array to know if ring buffer is enabled
720 * Shows real state of the ring buffer if it is enabled or not.
722 static int tracer_tracing_is_on(struct trace_array *tr)
724 if (tr->trace_buffer.buffer)
725 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
726 return !tr->buffer_disabled;
730 * tracing_is_on - show state of ring buffers enabled
732 int tracing_is_on(void)
734 return tracer_tracing_is_on(&global_trace);
736 EXPORT_SYMBOL_GPL(tracing_is_on);
738 static int __init set_buf_size(char *str)
740 unsigned long buf_size;
744 buf_size = memparse(str, &str);
745 /* nr_entries can not be zero */
748 trace_buf_size = buf_size;
751 __setup("trace_buf_size=", set_buf_size);
753 static int __init set_tracing_thresh(char *str)
755 unsigned long threshold;
760 ret = kstrtoul(str, 0, &threshold);
763 tracing_thresh = threshold * 1000;
766 __setup("tracing_thresh=", set_tracing_thresh);
768 unsigned long nsecs_to_usecs(unsigned long nsecs)
773 /* These must match the bit positions in trace_iterator_flags */
774 static const char *trace_options[] = {
807 int in_ns; /* is this clock in nanoseconds? */
809 { trace_clock_local, "local", 1 },
810 { trace_clock_global, "global", 1 },
811 { trace_clock_counter, "counter", 0 },
812 { trace_clock_jiffies, "uptime", 1 },
813 { trace_clock, "perf", 1 },
818 * trace_parser_get_init - gets the buffer for trace parser
820 int trace_parser_get_init(struct trace_parser *parser, int size)
822 memset(parser, 0, sizeof(*parser));
824 parser->buffer = kmalloc(size, GFP_KERNEL);
833 * trace_parser_put - frees the buffer for trace parser
835 void trace_parser_put(struct trace_parser *parser)
837 kfree(parser->buffer);
841 * trace_get_user - reads the user input string separated by space
842 * (matched by isspace(ch))
844 * For each string found the 'struct trace_parser' is updated,
845 * and the function returns.
847 * Returns number of bytes read.
849 * See kernel/trace/trace.h for 'struct trace_parser' details.
851 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
852 size_t cnt, loff_t *ppos)
859 trace_parser_clear(parser);
861 ret = get_user(ch, ubuf++);
869 * The parser is not finished with the last write,
870 * continue reading the user input without skipping spaces.
873 /* skip white space */
874 while (cnt && isspace(ch)) {
875 ret = get_user(ch, ubuf++);
882 /* only spaces were written */
892 /* read the non-space input */
893 while (cnt && !isspace(ch)) {
894 if (parser->idx < parser->size - 1)
895 parser->buffer[parser->idx++] = ch;
900 ret = get_user(ch, ubuf++);
907 /* We either got finished input or we have to wait for another call. */
909 parser->buffer[parser->idx] = 0;
910 parser->cont = false;
911 } else if (parser->idx < parser->size - 1) {
913 parser->buffer[parser->idx++] = ch;
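/*
 * A hedged sketch of the usual calling sequence for the parser helpers
 * above, as seen from a debugfs ->write() handler (the handler and the
 * per-word consumer are hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, 128))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		consume_one_word(parser.buffer);	// hypothetical consumer
 *
 *	trace_parser_put(&parser);
 *	return read;
 */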
926 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
934 if (s->len <= s->readpos)
937 len = s->len - s->readpos;
940 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
950 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
954 if (s->len <= s->readpos)
957 len = s->len - s->readpos;
960 memcpy(buf, s->buffer + s->readpos, cnt);
966 unsigned long __read_mostly tracing_thresh;
968 #ifdef CONFIG_TRACER_MAX_TRACE
970 * Copy the new maximum trace into the separate maximum-trace
971 * structure. (this way the maximum trace is permanently saved,
972 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
975 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
977 struct trace_buffer *trace_buf = &tr->trace_buffer;
978 struct trace_buffer *max_buf = &tr->max_buffer;
979 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
980 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
983 max_buf->time_start = data->preempt_timestamp;
985 max_data->saved_latency = tr->max_latency;
986 max_data->critical_start = data->critical_start;
987 max_data->critical_end = data->critical_end;
989 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
990 max_data->pid = tsk->pid;
992 * If tsk == current, then use current_uid(), as that does not use
993 * RCU. The irq tracer can be called out of RCU scope.
996 max_data->uid = current_uid();
998 max_data->uid = task_uid(tsk);
1000 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1001 max_data->policy = tsk->policy;
1002 max_data->rt_priority = tsk->rt_priority;
1004 /* record this task's comm */
1005 tracing_record_cmdline(tsk);
1009 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1010 * @tr: tracer
1011 * @tsk: the task with the latency
1012 * @cpu: The cpu that initiated the trace.
1014 * Flip the buffers between the @tr and the max_tr and record information
1015 * about which task was the cause of this latency.
1018 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1020 struct ring_buffer *buf;
1025 WARN_ON_ONCE(!irqs_disabled());
1027 if (!tr->allocated_snapshot) {
1028 /* Only the nop tracer should hit this when disabling */
1029 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1033 arch_spin_lock(&tr->max_lock);
1035 buf = tr->trace_buffer.buffer;
1036 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1037 tr->max_buffer.buffer = buf;
1039 __update_max_tr(tr, tsk, cpu);
1040 arch_spin_unlock(&tr->max_lock);
1044 * update_max_tr_single - only copy one trace over, and reset the rest
1046 * @tsk: task with the latency
1047 * @cpu: the cpu of the buffer to copy.
1049 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1052 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1059 WARN_ON_ONCE(!irqs_disabled());
1060 if (!tr->allocated_snapshot) {
1061 /* Only the nop tracer should hit this when disabling */
1062 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1066 arch_spin_lock(&tr->max_lock);
1068 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1070 if (ret == -EBUSY) {
1072 * We failed to swap the buffer due to a commit taking
1073 * place on this CPU. We fail to record, but we reset
1074 * the max trace buffer (no one writes directly to it)
1075 * and flag that it failed.
1077 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1078 "Failed to swap buffers due to commit in progress\n");
1081 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1083 __update_max_tr(tr, tsk, cpu);
1084 arch_spin_unlock(&tr->max_lock);
1086 #endif /* CONFIG_TRACER_MAX_TRACE */
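/*
 * A hedged sketch of how the latency tracers drive the swaps above (the
 * 'delta' bookkeeping is simplified; see trace_irqsoff.c and
 * trace_sched_wakeup.c for the real call sites):
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * so the per-CPU buffer holding the new worst-case trace is preserved in
 * max_buffer while live tracing continues in a fresh buffer.
 */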
1088 static void wait_on_pipe(struct trace_iterator *iter)
1090 /* Iterators are static, they should be filled or empty */
1091 if (trace_buffer_iter(iter, iter->cpu_file))
1094 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1097 #ifdef CONFIG_FTRACE_STARTUP_TEST
1098 static int run_tracer_selftest(struct tracer *type)
1100 struct trace_array *tr = &global_trace;
1101 struct tracer *saved_tracer = tr->current_trace;
1104 if (!type->selftest || tracing_selftest_disabled)
1108 * Run a selftest on this tracer.
1109 * Here we reset the trace buffer, and set the current
1110 * tracer to be this tracer. The tracer can then run some
1111 * internal tracing to verify that everything is in order.
1112 * If we fail, we do not register this tracer.
1114 tracing_reset_online_cpus(&tr->trace_buffer);
1116 tr->current_trace = type;
1118 #ifdef CONFIG_TRACER_MAX_TRACE
1119 if (type->use_max_tr) {
1120 /* If we expanded the buffers, make sure the max is expanded too */
1121 if (ring_buffer_expanded)
1122 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1123 RING_BUFFER_ALL_CPUS);
1124 tr->allocated_snapshot = true;
1128 /* the test is responsible for initializing and enabling */
1129 pr_info("Testing tracer %s: ", type->name);
1130 ret = type->selftest(type, tr);
1131 /* the test is responsible for resetting too */
1132 tr->current_trace = saved_tracer;
1134 printk(KERN_CONT "FAILED!\n");
1135 /* Add the warning after printing 'FAILED' */
1139 /* Only reset on passing, to avoid touching corrupted buffers */
1140 tracing_reset_online_cpus(&tr->trace_buffer);
1142 #ifdef CONFIG_TRACER_MAX_TRACE
1143 if (type->use_max_tr) {
1144 tr->allocated_snapshot = false;
1146 /* Shrink the max buffer again */
1147 if (ring_buffer_expanded)
1148 ring_buffer_resize(tr->max_buffer.buffer, 1,
1149 RING_BUFFER_ALL_CPUS);
1153 printk(KERN_CONT "PASSED\n");
1157 static inline int run_tracer_selftest(struct tracer *type)
1161 #endif /* CONFIG_FTRACE_STARTUP_TEST */
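/*
 * A minimal, hypothetical plugin to illustrate register_tracer() below
 * (the name and callbacks are made up; real tracers usually also provide
 * ->start, ->stop and option flags):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */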
1164 * register_tracer - register a tracer with the ftrace system.
1165 * @type: the plugin for the tracer
1167 * Register a new plugin tracer.
1169 int register_tracer(struct tracer *type)
1175 pr_info("Tracer must have a name\n");
1179 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1180 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1184 mutex_lock(&trace_types_lock);
1186 tracing_selftest_running = true;
1188 for (t = trace_types; t; t = t->next) {
1189 if (strcmp(type->name, t->name) == 0) {
1191 pr_info("Tracer %s already registered\n",
1198 if (!type->set_flag)
1199 type->set_flag = &dummy_set_flag;
1201 type->flags = &dummy_tracer_flags;
1203 if (!type->flags->opts)
1204 type->flags->opts = dummy_tracer_opt;
1206 ret = run_tracer_selftest(type);
1210 type->next = trace_types;
1214 tracing_selftest_running = false;
1215 mutex_unlock(&trace_types_lock);
1217 if (ret || !default_bootup_tracer)
1220 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1223 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1224 /* Do we want this tracer to start on bootup? */
1225 tracing_set_tracer(&global_trace, type->name);
1226 default_bootup_tracer = NULL;
1227 /* disable other selftests, since this will break it. */
1228 tracing_selftest_disabled = true;
1229 #ifdef CONFIG_FTRACE_STARTUP_TEST
1230 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1238 void tracing_reset(struct trace_buffer *buf, int cpu)
1240 struct ring_buffer *buffer = buf->buffer;
1245 ring_buffer_record_disable(buffer);
1247 /* Make sure all commits have finished */
1248 synchronize_sched();
1249 ring_buffer_reset_cpu(buffer, cpu);
1251 ring_buffer_record_enable(buffer);
1254 void tracing_reset_online_cpus(struct trace_buffer *buf)
1256 struct ring_buffer *buffer = buf->buffer;
1262 ring_buffer_record_disable(buffer);
1264 /* Make sure all commits have finished */
1265 synchronize_sched();
1267 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1269 for_each_online_cpu(cpu)
1270 ring_buffer_reset_cpu(buffer, cpu);
1272 ring_buffer_record_enable(buffer);
1275 /* Must have trace_types_lock held */
1276 void tracing_reset_all_online_cpus(void)
1278 struct trace_array *tr;
1280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1281 tracing_reset_online_cpus(&tr->trace_buffer);
1282 #ifdef CONFIG_TRACER_MAX_TRACE
1283 tracing_reset_online_cpus(&tr->max_buffer);
1288 #define SAVED_CMDLINES_DEFAULT 128
1289 #define NO_CMDLINE_MAP UINT_MAX
1290 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1291 struct saved_cmdlines_buffer {
1292 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1293 unsigned *map_cmdline_to_pid;
1294 unsigned cmdline_num;
1296 char *saved_cmdlines;
1298 static struct saved_cmdlines_buffer *savedcmd;
1300 /* temporarily disable recording */
1301 static atomic_t trace_record_cmdline_disabled __read_mostly;
1303 static inline char *get_saved_cmdlines(int idx)
1305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1308 static inline void set_cmdline(int idx, const char *cmdline)
1310 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1313 static int allocate_cmdlines_buffer(unsigned int val,
1314 struct saved_cmdlines_buffer *s)
1316 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1318 if (!s->map_cmdline_to_pid)
1321 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1322 if (!s->saved_cmdlines) {
1323 kfree(s->map_cmdline_to_pid);
1328 s->cmdline_num = val;
1329 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1330 sizeof(s->map_pid_to_cmdline));
1331 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1332 val * sizeof(*s->map_cmdline_to_pid));
1337 static int trace_create_savedcmd(void)
1341 savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
1345 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1355 int is_tracing_stopped(void)
1357 return global_trace.stop_count;
1361 * tracing_start - quick start of the tracer
1363 * If tracing is enabled but was stopped by tracing_stop,
1364 * this will start the tracer back up.
1366 void tracing_start(void)
1368 struct ring_buffer *buffer;
1369 unsigned long flags;
1371 if (tracing_disabled)
1374 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1375 if (--global_trace.stop_count) {
1376 if (global_trace.stop_count < 0) {
1377 /* Someone screwed up their debugging */
1379 global_trace.stop_count = 0;
1384 /* Prevent the buffers from switching */
1385 arch_spin_lock(&global_trace.max_lock);
1387 buffer = global_trace.trace_buffer.buffer;
1389 ring_buffer_record_enable(buffer);
1391 #ifdef CONFIG_TRACER_MAX_TRACE
1392 buffer = global_trace.max_buffer.buffer;
1394 ring_buffer_record_enable(buffer);
1397 arch_spin_unlock(&global_trace.max_lock);
1401 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1404 static void tracing_start_tr(struct trace_array *tr)
1406 struct ring_buffer *buffer;
1407 unsigned long flags;
1409 if (tracing_disabled)
1412 /* If global, we need to also start the max tracer */
1413 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1414 return tracing_start();
1416 raw_spin_lock_irqsave(&tr->start_lock, flags);
1418 if (--tr->stop_count) {
1419 if (tr->stop_count < 0) {
1420 /* Someone screwed up their debugging */
1427 buffer = tr->trace_buffer.buffer;
1429 ring_buffer_record_enable(buffer);
1432 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1436 * tracing_stop - quick stop of the tracer
1438 * Light weight way to stop tracing. Use in conjunction with
1441 void tracing_stop(void)
1443 struct ring_buffer *buffer;
1444 unsigned long flags;
1447 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1448 if (global_trace.stop_count++)
1451 /* Prevent the buffers from switching */
1452 arch_spin_lock(&global_trace.max_lock);
1454 buffer = global_trace.trace_buffer.buffer;
1456 ring_buffer_record_disable(buffer);
1458 #ifdef CONFIG_TRACER_MAX_TRACE
1459 buffer = global_trace.max_buffer.buffer;
1461 ring_buffer_record_disable(buffer);
1464 arch_spin_unlock(&global_trace.max_lock);
1467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1470 static void tracing_stop_tr(struct trace_array *tr)
1472 struct ring_buffer *buffer;
1473 unsigned long flags;
1475 /* If global, we need to also stop the max tracer */
1476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1477 return tracing_stop();
1479 raw_spin_lock_irqsave(&tr->start_lock, flags);
1480 if (tr->stop_count++)
1483 buffer = tr->trace_buffer.buffer;
1485 ring_buffer_record_disable(buffer);
1488 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1491 void trace_stop_cmdline_recording(void);
1493 static int trace_save_cmdline(struct task_struct *tsk)
1497 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1501 * It's not the end of the world if we don't get
1502 * the lock, but we also don't want to spin
1503 * nor do we want to disable interrupts,
1504 * so if we miss here, then better luck next time.
1506 if (!arch_spin_trylock(&trace_cmdline_lock))
1509 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1510 if (idx == NO_CMDLINE_MAP) {
1511 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1514 * Check whether the cmdline buffer at idx has a pid
1515 * mapped. We are going to overwrite that entry so we
1516 * need to clear the map_pid_to_cmdline. Otherwise we
1517 * would read the new comm for the old pid.
1519 pid = savedcmd->map_cmdline_to_pid[idx];
1520 if (pid != NO_CMDLINE_MAP)
1521 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1523 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1524 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1526 savedcmd->cmdline_idx = idx;
1529 set_cmdline(idx, tsk->comm);
1531 arch_spin_unlock(&trace_cmdline_lock);
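/*
 * A worked example of the two maps above (the numbers are illustrative):
 * if PID 1234 is saved into slot 7, then
 *
 *	savedcmd->map_pid_to_cmdline[1234] == 7
 *	savedcmd->map_cmdline_to_pid[7]    == 1234
 *	get_saved_cmdlines(7)              == the task's comm, e.g. "bash"
 *
 * When slot 7 is later recycled for another PID, the old PID's entry in
 * map_pid_to_cmdline is reset to NO_CMDLINE_MAP first, so a stale comm is
 * never reported for it.
 */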
1536 static void __trace_find_cmdline(int pid, char comm[])
1541 strcpy(comm, "<idle>");
1545 if (WARN_ON_ONCE(pid < 0)) {
1546 strcpy(comm, "<XXX>");
1550 if (pid > PID_MAX_DEFAULT) {
1551 strcpy(comm, "<...>");
1555 map = savedcmd->map_pid_to_cmdline[pid];
1556 if (map != NO_CMDLINE_MAP)
1557 strcpy(comm, get_saved_cmdlines(map));
1559 strcpy(comm, "<...>");
1562 void trace_find_cmdline(int pid, char comm[])
1565 arch_spin_lock(&trace_cmdline_lock);
1567 __trace_find_cmdline(pid, comm);
1569 arch_spin_unlock(&trace_cmdline_lock);
1573 void tracing_record_cmdline(struct task_struct *tsk)
1575 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1578 if (!__this_cpu_read(trace_cmdline_save))
1581 if (trace_save_cmdline(tsk))
1582 __this_cpu_write(trace_cmdline_save, false);
1586 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1589 struct task_struct *tsk = current;
1591 entry->preempt_count = pc & 0xff;
1592 entry->pid = (tsk) ? tsk->pid : 0;
1594 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1595 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1597 TRACE_FLAG_IRQS_NOSUPPORT |
1599 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1600 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1601 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1602 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1604 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1606 struct ring_buffer_event *
1607 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1610 unsigned long flags, int pc)
1612 struct ring_buffer_event *event;
1614 event = ring_buffer_lock_reserve(buffer, len);
1615 if (event != NULL) {
1616 struct trace_entry *ent = ring_buffer_event_data(event);
1618 tracing_generic_entry_update(ent, flags, pc);
1626 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1628 __this_cpu_write(trace_cmdline_save, true);
1629 ring_buffer_unlock_commit(buffer, event);
1633 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1634 struct ring_buffer_event *event,
1635 unsigned long flags, int pc)
1637 __buffer_unlock_commit(buffer, event);
1639 ftrace_trace_stack(buffer, flags, 6, pc);
1640 ftrace_trace_userstack(buffer, flags, pc);
1643 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1644 struct ring_buffer_event *event,
1645 unsigned long flags, int pc)
1647 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1649 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1651 static struct ring_buffer *temp_buffer;
1653 struct ring_buffer_event *
1654 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1655 struct ftrace_event_file *ftrace_file,
1656 int type, unsigned long len,
1657 unsigned long flags, int pc)
1659 struct ring_buffer_event *entry;
1661 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1662 entry = trace_buffer_lock_reserve(*current_rb,
1663 type, len, flags, pc);
1665 * If tracing is off, but we have triggers enabled
1666 * we still need to look at the event data. Use the temp_buffer
1667 * to store the trace event for the trigger to use. It's recursion
1668 * safe and will not be recorded anywhere.
1670 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1671 *current_rb = temp_buffer;
1672 entry = trace_buffer_lock_reserve(*current_rb,
1673 type, len, flags, pc);
1677 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1679 struct ring_buffer_event *
1680 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1681 int type, unsigned long len,
1682 unsigned long flags, int pc)
1684 *current_rb = global_trace.trace_buffer.buffer;
1685 return trace_buffer_lock_reserve(*current_rb,
1686 type, len, flags, pc);
1688 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1690 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1691 struct ring_buffer_event *event,
1692 unsigned long flags, int pc)
1694 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1696 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1698 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1699 struct ring_buffer_event *event,
1700 unsigned long flags, int pc,
1701 struct pt_regs *regs)
1703 __buffer_unlock_commit(buffer, event);
1705 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1706 ftrace_trace_userstack(buffer, flags, pc);
1708 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1710 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1711 struct ring_buffer_event *event)
1713 ring_buffer_discard_commit(buffer, event);
1715 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1718 trace_function(struct trace_array *tr,
1719 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1722 struct ftrace_event_call *call = &event_function;
1723 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1724 struct ring_buffer_event *event;
1725 struct ftrace_entry *entry;
1727 /* If we are reading the ring buffer, don't trace */
1728 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1731 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1735 entry = ring_buffer_event_data(event);
1737 entry->parent_ip = parent_ip;
1739 if (!call_filter_check_discard(call, entry, buffer, event))
1740 __buffer_unlock_commit(buffer, event);
1743 #ifdef CONFIG_STACKTRACE
1745 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1746 struct ftrace_stack {
1747 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1750 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1751 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1753 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1754 unsigned long flags,
1755 int skip, int pc, struct pt_regs *regs)
1757 struct ftrace_event_call *call = &event_kernel_stack;
1758 struct ring_buffer_event *event;
1759 struct stack_entry *entry;
1760 struct stack_trace trace;
1762 int size = FTRACE_STACK_ENTRIES;
1764 trace.nr_entries = 0;
1768 * Since events can happen in NMIs there's no safe way to
1769 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1770 * or NMI comes in, it will just have to use the default
1771 * FTRACE_STACK_SIZE.
1773 preempt_disable_notrace();
1775 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1777 * We don't need any atomic variables, just a barrier.
1778 * If an interrupt comes in, we don't care, because it would
1779 * have exited and put the counter back to what we want.
1780 * We just need a barrier to keep gcc from moving things
1781 * around.
1784 if (use_stack == 1) {
1785 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1786 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1789 save_stack_trace_regs(regs, &trace);
1791 save_stack_trace(&trace);
1793 if (trace.nr_entries > size)
1794 size = trace.nr_entries;
1796 /* From now on, use_stack is a boolean */
1799 size *= sizeof(unsigned long);
1801 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1802 sizeof(*entry) + size, flags, pc);
1805 entry = ring_buffer_event_data(event);
1807 memset(&entry->caller, 0, size);
1810 memcpy(&entry->caller, trace.entries,
1811 trace.nr_entries * sizeof(unsigned long));
1813 trace.max_entries = FTRACE_STACK_ENTRIES;
1814 trace.entries = entry->caller;
1816 save_stack_trace_regs(regs, &trace);
1818 save_stack_trace(&trace);
1821 entry->size = trace.nr_entries;
1823 if (!call_filter_check_discard(call, entry, buffer, event))
1824 __buffer_unlock_commit(buffer, event);
1827 /* Again, don't let gcc optimize things here */
1829 __this_cpu_dec(ftrace_stack_reserve);
1830 preempt_enable_notrace();
1834 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1835 int skip, int pc, struct pt_regs *regs)
1837 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1840 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1843 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1846 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1849 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1852 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1855 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1859 * trace_dump_stack - record a stack back trace in the trace buffer
1860 * @skip: Number of functions to skip (helper handlers)
1862 void trace_dump_stack(int skip)
1864 unsigned long flags;
1866 if (tracing_disabled || tracing_selftest_running)
1869 local_save_flags(flags);
1872 * Skip 3 more, seems to get us at the caller of
1873 * this function.
1876 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1877 flags, skip, preempt_count(), NULL);
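/*
 * A hedged usage note: code that wants to know how it reached a suspicious
 * state can simply call
 *
 *	trace_dump_stack(0);
 *
 * at the point of interest; the backtrace lands in the ring buffer instead
 * of the console, so it can be correlated with the surrounding trace
 * events.
 */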
1880 static DEFINE_PER_CPU(int, user_stack_count);
1883 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1885 struct ftrace_event_call *call = &event_user_stack;
1886 struct ring_buffer_event *event;
1887 struct userstack_entry *entry;
1888 struct stack_trace trace;
1890 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 * NMIs cannot handle page faults, even with fixups.
1895 * Saving the user stack can (and often does) fault.
1897 if (unlikely(in_nmi()))
1901 * prevent recursion, since the user stack tracing may
1902 * trigger other kernel events.
1905 if (__this_cpu_read(user_stack_count))
1908 __this_cpu_inc(user_stack_count);
1910 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1911 sizeof(*entry), flags, pc);
1913 goto out_drop_count;
1914 entry = ring_buffer_event_data(event);
1916 entry->tgid = current->tgid;
1917 memset(&entry->caller, 0, sizeof(entry->caller));
1919 trace.nr_entries = 0;
1920 trace.max_entries = FTRACE_STACK_ENTRIES;
1922 trace.entries = entry->caller;
1924 save_stack_trace_user(&trace);
1925 if (!call_filter_check_discard(call, entry, buffer, event))
1926 __buffer_unlock_commit(buffer, event);
1929 __this_cpu_dec(user_stack_count);
1935 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1937 ftrace_trace_userstack(tr, flags, preempt_count());
1941 #endif /* CONFIG_STACKTRACE */
1943 /* created for use with alloc_percpu */
1944 struct trace_buffer_struct {
1945 char buffer[TRACE_BUF_SIZE];
1948 static struct trace_buffer_struct *trace_percpu_buffer;
1949 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1950 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1951 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1954 * The buffer used is dependent on the context. There is a per cpu
1955 * buffer for normal context, softirq context, hard irq context and
1956 * for NMI context. This allows for lockless recording.
1958 * Note, if the buffers failed to be allocated, then this returns NULL
1960 static char *get_trace_buf(void)
1962 struct trace_buffer_struct *percpu_buffer;
1965 * If we have allocated per cpu buffers, then we do not
1966 * need to do any locking.
1969 percpu_buffer = trace_percpu_nmi_buffer;
1971 percpu_buffer = trace_percpu_irq_buffer;
1972 else if (in_softirq())
1973 percpu_buffer = trace_percpu_sirq_buffer;
1975 percpu_buffer = trace_percpu_buffer;
1980 return this_cpu_ptr(&percpu_buffer->buffer[0]);
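/*
 * For example (an illustrative walk-through of the selection above): a
 * trace_printk() issued from a timer softirq lands in
 * trace_percpu_sirq_buffer, while one issued from the same CPU's NMI
 * handler lands in trace_percpu_nmi_buffer, so neither can scribble over
 * a normal-context caller that was interrupted mid-format.
 */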
1983 static int alloc_percpu_trace_buffer(void)
1985 struct trace_buffer_struct *buffers;
1986 struct trace_buffer_struct *sirq_buffers;
1987 struct trace_buffer_struct *irq_buffers;
1988 struct trace_buffer_struct *nmi_buffers;
1990 buffers = alloc_percpu(struct trace_buffer_struct);
1994 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 trace_percpu_buffer = buffers;
2007 trace_percpu_sirq_buffer = sirq_buffers;
2008 trace_percpu_irq_buffer = irq_buffers;
2009 trace_percpu_nmi_buffer = nmi_buffers;
2014 free_percpu(irq_buffers);
2016 free_percpu(sirq_buffers);
2018 free_percpu(buffers);
2020 WARN(1, "Could not allocate percpu trace_printk buffer");
2024 static int buffers_allocated;
2026 void trace_printk_init_buffers(void)
2028 if (buffers_allocated)
2031 if (alloc_percpu_trace_buffer())
2034 /* trace_printk() is for debug use only. Don't use it in production. */
2036 pr_warning("\n**********************************************************\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("** **\n");
2039 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2040 pr_warning("** **\n");
2041 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2042 pr_warning("** unsafe for produciton use. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** If you see this message and you are not debugging **\n");
2045 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2048 pr_warning("**********************************************************\n");
2050 /* Expand the buffers to set size */
2051 tracing_update_buffers();
2053 buffers_allocated = 1;
2056 * trace_printk_init_buffers() can be called by modules.
2057 * If that happens, then we need to start cmdline recording
2058 * directly here. If the global_trace.buffer is already
2059 * allocated here, then this was called by module code.
2061 if (global_trace.trace_buffer.buffer)
2062 tracing_start_cmdline_record();
2065 void trace_printk_start_comm(void)
2067 /* Start tracing comms if trace printk is set */
2068 if (!buffers_allocated)
2070 tracing_start_cmdline_record();
2073 static void trace_printk_start_stop_comm(int enabled)
2075 if (!buffers_allocated)
2079 tracing_start_cmdline_record();
2081 tracing_stop_cmdline_record();
2085 * trace_vbprintk - write binary msg to tracing buffer
2088 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2090 struct ftrace_event_call *call = &event_bprint;
2091 struct ring_buffer_event *event;
2092 struct ring_buffer *buffer;
2093 struct trace_array *tr = &global_trace;
2094 struct bprint_entry *entry;
2095 unsigned long flags;
2097 int len = 0, size, pc;
2099 if (unlikely(tracing_selftest_running || tracing_disabled))
2102 /* Don't pollute graph traces with trace_vprintk internals */
2103 pause_graph_tracing();
2105 pc = preempt_count();
2106 preempt_disable_notrace();
2108 tbuffer = get_trace_buf();
2114 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2116 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2119 local_save_flags(flags);
2120 size = sizeof(*entry) + sizeof(u32) * len;
2121 buffer = tr->trace_buffer.buffer;
2122 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2126 entry = ring_buffer_event_data(event);
2130 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2131 if (!call_filter_check_discard(call, entry, buffer, event)) {
2132 __buffer_unlock_commit(buffer, event);
2133 ftrace_trace_stack(buffer, flags, 6, pc);
2137 preempt_enable_notrace();
2138 unpause_graph_tracing();
2142 EXPORT_SYMBOL_GPL(trace_vbprintk);
2145 __trace_array_vprintk(struct ring_buffer *buffer,
2146 unsigned long ip, const char *fmt, va_list args)
2148 struct ftrace_event_call *call = &event_print;
2149 struct ring_buffer_event *event;
2150 int len = 0, size, pc;
2151 struct print_entry *entry;
2152 unsigned long flags;
2155 if (tracing_disabled || tracing_selftest_running)
2158 /* Don't pollute graph traces with trace_vprintk internals */
2159 pause_graph_tracing();
2161 pc = preempt_count();
2162 preempt_disable_notrace();
2165 tbuffer = get_trace_buf();
2171 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2172 if (len > TRACE_BUF_SIZE)
2175 local_save_flags(flags);
2176 size = sizeof(*entry) + len + 1;
2177 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2181 entry = ring_buffer_event_data(event);
2184 memcpy(&entry->buf, tbuffer, len);
2185 entry->buf[len] = '\0';
2186 if (!call_filter_check_discard(call, entry, buffer, event)) {
2187 __buffer_unlock_commit(buffer, event);
2188 ftrace_trace_stack(buffer, flags, 6, pc);
2191 preempt_enable_notrace();
2192 unpause_graph_tracing();
2197 int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2203 int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2218 int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2233 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2235 return trace_array_vprintk(&global_trace, ip, fmt, args);
2237 EXPORT_SYMBOL_GPL(trace_vprintk);
2239 static void trace_iterator_increment(struct trace_iterator *iter)
2241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2245 ring_buffer_read(buf_iter, NULL);
2248 static struct trace_entry *
2249 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
2252 struct ring_buffer_event *event;
2253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2269 static struct trace_entry *
2270 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
2273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2274 struct trace_entry *ent, *next = NULL;
2275 unsigned long lost_events = 0, next_lost = 0;
2276 int cpu_file = iter->cpu_file;
2277 u64 next_ts = 0, ts;
2283 * If we are in a per_cpu trace file, don't bother iterating over
2284 * all cpus; peek at that cpu directly.
2286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2291 *ent_cpu = cpu_file;
2296 for_each_tracing_cpu(cpu) {
2298 if (ring_buffer_empty_cpu(buffer, cpu))
2301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2304 * Pick the entry with the smallest timestamp:
2306 if (ent && (!next || ts < next_ts)) {
2310 next_lost = lost_events;
2311 next_size = iter->ent_size;
2315 iter->ent_size = next_size;
2318 *ent_cpu = next_cpu;
2324 *missing_events = next_lost;
2329 /* Find the next real entry, without updating the iterator itself */
2330 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
2333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2336 /* Find the next real entry, and increment the iterator to the next entry */
2337 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
2343 trace_iterator_increment(iter);
2345 return iter->ent ? iter : NULL;
2348 static void trace_consume(struct trace_iterator *iter)
2350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2351 &iter->lost_events);
2354 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2356 struct trace_iterator *iter = m->private;
2360 WARN_ON_ONCE(iter->leftover);
2364 /* can't go backwards */
2369 ent = trace_find_next_entry_inc(iter);
2373 while (ent && iter->idx < i)
2374 ent = trace_find_next_entry_inc(iter);
2381 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2390 buf_iter = trace_buffer_iter(iter, cpu);
2394 ring_buffer_iter_reset(buf_iter);
2397 * We could have the case with the max latency tracers
2398 * that a reset never took place on a cpu. This is evident
2399 * by the timestamp being before the start of the buffer.
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2402 if (ts >= iter->trace_buffer->time_start)
2405 ring_buffer_read(buf_iter, NULL);
2408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2412 * The current tracer is copied to avoid a global locking
2413 * all around.
2415 static void *s_start(struct seq_file *m, loff_t *pos)
2417 struct trace_iterator *iter = m->private;
2418 struct trace_array *tr = iter->tr;
2419 int cpu_file = iter->cpu_file;
2425 * copy the tracer to avoid using a global lock all around.
2426 * iter->trace is a copy of current_trace, the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2430 mutex_lock(&trace_types_lock);
2431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
2433 mutex_unlock(&trace_types_lock);
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
2443 if (*pos != iter->pos) {
2448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2449 for_each_tracing_cpu(cpu)
2450 tracing_iter_reset(iter, cpu);
2452 tracing_iter_reset(iter, cpu_file);
2455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2467 p = s_next(m, p, &l);
2471 trace_event_read_lock();
2472 trace_access_lock(cpu_file);
2476 static void s_stop(struct seq_file *m, void *p)
2478 struct trace_iterator *iter = m->private;
2480 #ifdef CONFIG_TRACER_MAX_TRACE
2481 if (iter->snapshot && iter->trace->use_max_tr)
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
2488 trace_access_unlock(iter->cpu_file);
2489 trace_event_read_unlock();
2493 get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
2496 unsigned long count;
2502 for_each_tracing_cpu(cpu) {
2503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2511 /* total is the same as the entries */
2515 ring_buffer_overrun_cpu(buf->buffer, cpu);
2520 static void print_lat_help_header(struct seq_file *m)
2522 seq_puts(m, "#                  _------=> CPU#            \n");
2523 seq_puts(m, "#                 / _-----=> irqs-off        \n");
2524 seq_puts(m, "#                | / _----=> need-resched    \n");
2525 seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2526 seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2527 seq_puts(m, "#                |||| /     delay            \n");
2528 seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2529 seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2532 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2534 unsigned long total;
2535 unsigned long entries;
2537 get_total_entries(buf, &total, &entries);
2538 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2539 entries, total, num_online_cpus());
2543 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2545 print_event_info(buf, m);
2546 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2547 seq_puts(m, "#              | |       |          |         |\n");
2550 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2552 print_event_info(buf, m);
2553 seq_puts(m, "#                              _-----=> irqs-off\n");
2554 seq_puts(m, "#                             / _----=> need-resched\n");
2555 seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2556 seq_puts(m, "#                            || / _--=> preempt-depth\n");
2557 seq_puts(m, "#                            ||| /     delay\n");
2558 seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2559 seq_puts(m, "#              | |       |   ||||       |         |\n");
2563 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2565 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2566 struct trace_buffer *buf = iter->trace_buffer;
2567 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2568 struct tracer *type = iter->trace;
2569 unsigned long entries;
2570 unsigned long total;
2571 const char *name = "preemption";
2575 get_total_entries(buf, &total, &entries);
2577 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2579 seq_puts(m, "# -----------------------------------"
2580 "---------------------------------\n");
2581 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2582 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2583 nsecs_to_usecs(data->saved_latency),
2587 #if defined(CONFIG_PREEMPT_NONE)
2589 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2591 #elif defined(CONFIG_PREEMPT)
2596 /* These are reserved for later use */
2599 seq_printf(m, " #P:%d)\n", num_online_cpus());
2603 seq_puts(m, "# -----------------\n");
2604 seq_printf(m, "# | task: %.16s-%d "
2605 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2606 data->comm, data->pid,
2607 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2608 data->policy, data->rt_priority);
2609 seq_puts(m, "# -----------------\n");
2611 if (data->critical_start) {
2612 seq_puts(m, "# => started at: ");
2613 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614 trace_print_seq(m, &iter->seq);
2615 seq_puts(m, "\n# => ended at: ");
2616 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617 trace_print_seq(m, &iter->seq);
2618 seq_puts(m, "\n#\n");
2624 static void test_cpu_buff_start(struct trace_iterator *iter)
2626 struct trace_seq *s = &iter->seq;
2628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2634 if (cpumask_test_cpu(iter->cpu, iter->started))
2637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2640 cpumask_set_cpu(iter->cpu, iter->started);
2642 /* Don't print the "buffer started" banner for the first entry of the trace */
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2648 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2650 struct trace_seq *s = &iter->seq;
2651 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2652 struct trace_entry *entry;
2653 struct trace_event *event;
2657 test_cpu_buff_start(iter);
2659 event = ftrace_find_event(entry->type);
2661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2662 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2663 if (!trace_print_lat_context(iter))
2666 if (!trace_print_context(iter))
2672 return event->funcs->trace(iter, sym_flags, event);
2674 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2677 return TRACE_TYPE_HANDLED;
2679 return TRACE_TYPE_PARTIAL_LINE;
2682 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2684 struct trace_seq *s = &iter->seq;
2685 struct trace_entry *entry;
2686 struct trace_event *event;
2690 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2691 if (!trace_seq_printf(s, "%d %d %llu ",
2692 entry->pid, iter->cpu, iter->ts))
2696 event = ftrace_find_event(entry->type);
2698 return event->funcs->raw(iter, 0, event);
2700 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2703 return TRACE_TYPE_HANDLED;
2705 return TRACE_TYPE_PARTIAL_LINE;
2708 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2710 struct trace_seq *s = &iter->seq;
2711 unsigned char newline = '\n';
2712 struct trace_entry *entry;
2713 struct trace_event *event;
2717 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2718 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2719 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2720 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2723 event = ftrace_find_event(entry->type);
2725 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2726 if (ret != TRACE_TYPE_HANDLED)
2730 SEQ_PUT_FIELD_RET(s, newline);
2732 return TRACE_TYPE_HANDLED;
2735 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2737 struct trace_seq *s = &iter->seq;
2738 struct trace_entry *entry;
2739 struct trace_event *event;
2743 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2744 SEQ_PUT_FIELD_RET(s, entry->pid);
2745 SEQ_PUT_FIELD_RET(s, iter->cpu);
2746 SEQ_PUT_FIELD_RET(s, iter->ts);
2749 event = ftrace_find_event(entry->type);
2750 return event ? event->funcs->binary(iter, 0, event) :
2754 int trace_empty(struct trace_iterator *iter)
2756 struct ring_buffer_iter *buf_iter;
2759 /* If we are looking at one CPU buffer, only check that one */
2760 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2761 cpu = iter->cpu_file;
2762 buf_iter = trace_buffer_iter(iter, cpu);
2764 if (!ring_buffer_iter_empty(buf_iter))
2767 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2773 for_each_tracing_cpu(cpu) {
2774 buf_iter = trace_buffer_iter(iter, cpu);
2776 if (!ring_buffer_iter_empty(buf_iter))
2779 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2787 /* Called with trace_event_read_lock() held. */
2788 enum print_line_t print_trace_line(struct trace_iterator *iter)
2790 enum print_line_t ret;
2792 if (iter->lost_events &&
2793 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2794 iter->cpu, iter->lost_events))
2795 return TRACE_TYPE_PARTIAL_LINE;
2797 if (iter->trace && iter->trace->print_line) {
2798 ret = iter->trace->print_line(iter);
2799 if (ret != TRACE_TYPE_UNHANDLED)
2803 if (iter->ent->type == TRACE_BPUTS &&
2804 trace_flags & TRACE_ITER_PRINTK &&
2805 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2806 return trace_print_bputs_msg_only(iter);
2808 if (iter->ent->type == TRACE_BPRINT &&
2809 trace_flags & TRACE_ITER_PRINTK &&
2810 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2811 return trace_print_bprintk_msg_only(iter);
2813 if (iter->ent->type == TRACE_PRINT &&
2814 trace_flags & TRACE_ITER_PRINTK &&
2815 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2816 return trace_print_printk_msg_only(iter);
2818 if (trace_flags & TRACE_ITER_BIN)
2819 return print_bin_fmt(iter);
2821 if (trace_flags & TRACE_ITER_HEX)
2822 return print_hex_fmt(iter);
2824 if (trace_flags & TRACE_ITER_RAW)
2825 return print_raw_fmt(iter);
2827 return print_trace_fmt(iter);
2830 void trace_latency_header(struct seq_file *m)
2832 struct trace_iterator *iter = m->private;
2834 /* print nothing if the buffers are empty */
2835 if (trace_empty(iter))
2838 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2839 print_trace_header(m, iter);
2841 if (!(trace_flags & TRACE_ITER_VERBOSE))
2842 print_lat_help_header(m);
2845 void trace_default_header(struct seq_file *m)
2847 struct trace_iterator *iter = m->private;
2849 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2852 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2853 /* print nothing if the buffers are empty */
2854 if (trace_empty(iter))
2856 print_trace_header(m, iter);
2857 if (!(trace_flags & TRACE_ITER_VERBOSE))
2858 print_lat_help_header(m);
2860 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2861 if (trace_flags & TRACE_ITER_IRQ_INFO)
2862 print_func_help_header_irq(iter->trace_buffer, m);
2864 print_func_help_header(iter->trace_buffer, m);
2869 static void test_ftrace_alive(struct seq_file *m)
2871 if (!ftrace_is_dead())
2873 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2874 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2877 #ifdef CONFIG_TRACER_MAX_TRACE
2878 static void show_snapshot_main_help(struct seq_file *m)
2880 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2881 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2882 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2883 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2884 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2885 seq_printf(m, "# is not a '0' or '1')\n");
2888 static void show_snapshot_percpu_help(struct seq_file *m)
2890 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2891 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2892 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2893 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2895 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2896 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2898 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2899 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2900 seq_printf(m, "# is not a '0' or '1')\n");
2903 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2905 if (iter->tr->allocated_snapshot)
2906 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2908 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2910 seq_printf(m, "# Snapshot commands:\n");
2911 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2912 show_snapshot_main_help(m);
2914 show_snapshot_percpu_help(m);
2917 /* Should never be called */
2918 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
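/*
 * Illustrative userspace sketch, not part of the original file: it
 * exercises the snapshot interface described by the help text above.
 * The tracing directory path is an assumption (debugfs mounted at
 * /sys/kernel/debug) and it presumes CONFIG_TRACER_SNAPSHOT is enabled.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/snapshot";
	char line[512];
	FILE *f;

	/* "echo 1 > snapshot": allocate (if needed) and take a snapshot */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* read the frozen snapshot back, like "cat snapshot" */
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif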
2921 static int s_show(struct seq_file *m, void *v)
2923 struct trace_iterator *iter = v;
2926 if (iter->ent == NULL) {
2928 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2930 test_ftrace_alive(m);
2932 if (iter->snapshot && trace_empty(iter))
2933 print_snapshot_help(m, iter);
2934 else if (iter->trace && iter->trace->print_header)
2935 iter->trace->print_header(m);
2937 trace_default_header(m);
2939 } else if (iter->leftover) {
2941 * If we filled the seq_file buffer earlier, we
2942 * want to just show it now.
2944 ret = trace_print_seq(m, &iter->seq);
2946 /* ret should this time be zero, but you never know */
2947 iter->leftover = ret;
2950 print_trace_line(iter);
2951 ret = trace_print_seq(m, &iter->seq);
2953 * If we overflow the seq_file buffer, then it will
2954 * ask us for this data again at start up.
2956 * ret is 0 if seq_file write succeeded.
2959 iter->leftover = ret;
2966 * Should be used after trace_array_get(); holding trace_types_lock
2967 * ensures that i_cdev was already initialized.
2969 static inline int tracing_get_cpu(struct inode *inode)
2971 if (inode->i_cdev) /* See trace_create_cpu_file() */
2972 return (long)inode->i_cdev - 1;
2973 return RING_BUFFER_ALL_CPUS;
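/*
 * Illustrative sketch, not from the original file: tracing_get_cpu()
 * above assumes the file-creation side stashed "cpu + 1" in
 * inode->i_cdev, so that a zero value still decodes to
 * RING_BUFFER_ALL_CPUS. The helper below is hypothetical and only
 * mirrors that assumed convention.
 */
#if 0	/* example only */
static inline void tracing_set_cpu_example(struct inode *inode, long cpu)
{
	/* bias by one: 0 stays reserved for RING_BUFFER_ALL_CPUS */
	inode->i_cdev = (void *)(cpu + 1);
}
#endif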
2976 static const struct seq_operations tracer_seq_ops = {
2983 static struct trace_iterator *
2984 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2986 struct trace_array *tr = inode->i_private;
2987 struct trace_iterator *iter;
2990 if (tracing_disabled)
2991 return ERR_PTR(-ENODEV);
2993 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2995 return ERR_PTR(-ENOMEM);
2997 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2999 if (!iter->buffer_iter)
3003 * We make a copy of the current tracer to avoid concurrent
3004 * changes on it while we are reading.
3006 mutex_lock(&trace_types_lock);
3007 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3011 *iter->trace = *tr->current_trace;
3013 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3018 #ifdef CONFIG_TRACER_MAX_TRACE
3019 /* Currently only the top directory has a snapshot */
3020 if (tr->current_trace->print_max || snapshot)
3021 iter->trace_buffer = &tr->max_buffer;
3024 iter->trace_buffer = &tr->trace_buffer;
3025 iter->snapshot = snapshot;
3027 iter->cpu_file = tracing_get_cpu(inode);
3028 mutex_init(&iter->mutex);
3030 /* Notify the tracer early; before we stop tracing. */
3031 if (iter->trace && iter->trace->open)
3032 iter->trace->open(iter);
3034 /* Annotate start of buffers if we had overruns */
3035 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3036 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3038 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3039 if (trace_clocks[tr->clock_id].in_ns)
3040 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3042 /* stop the trace while dumping if we are not opening "snapshot" */
3043 if (!iter->snapshot)
3044 tracing_stop_tr(tr);
3046 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3047 for_each_tracing_cpu(cpu) {
3048 iter->buffer_iter[cpu] =
3049 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3051 ring_buffer_read_prepare_sync();
3052 for_each_tracing_cpu(cpu) {
3053 ring_buffer_read_start(iter->buffer_iter[cpu]);
3054 tracing_iter_reset(iter, cpu);
3057 cpu = iter->cpu_file;
3058 iter->buffer_iter[cpu] =
3059 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3060 ring_buffer_read_prepare_sync();
3061 ring_buffer_read_start(iter->buffer_iter[cpu]);
3062 tracing_iter_reset(iter, cpu);
3065 mutex_unlock(&trace_types_lock);
3070 mutex_unlock(&trace_types_lock);
3072 kfree(iter->buffer_iter);
3074 seq_release_private(inode, file);
3075 return ERR_PTR(-ENOMEM);
3078 int tracing_open_generic(struct inode *inode, struct file *filp)
3080 if (tracing_disabled)
3083 filp->private_data = inode->i_private;
3087 bool tracing_is_disabled(void)
3089 return tracing_disabled ? true : false;
3093 * Open and update trace_array ref count.
3094 * Must have the current trace_array passed to it.
3096 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3098 struct trace_array *tr = inode->i_private;
3100 if (tracing_disabled)
3103 if (trace_array_get(tr) < 0)
3106 filp->private_data = inode->i_private;
3111 static int tracing_release(struct inode *inode, struct file *file)
3113 struct trace_array *tr = inode->i_private;
3114 struct seq_file *m = file->private_data;
3115 struct trace_iterator *iter;
3118 if (!(file->f_mode & FMODE_READ)) {
3119 trace_array_put(tr);
3123 /* Writes do not use seq_file */
3125 mutex_lock(&trace_types_lock);
3127 for_each_tracing_cpu(cpu) {
3128 if (iter->buffer_iter[cpu])
3129 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3132 if (iter->trace && iter->trace->close)
3133 iter->trace->close(iter);
3135 if (!iter->snapshot)
3136 /* reenable tracing if it was previously enabled */
3137 tracing_start_tr(tr);
3139 __trace_array_put(tr);
3141 mutex_unlock(&trace_types_lock);
3143 mutex_destroy(&iter->mutex);
3144 free_cpumask_var(iter->started);
3146 kfree(iter->buffer_iter);
3147 seq_release_private(inode, file);
3152 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3154 struct trace_array *tr = inode->i_private;
3156 trace_array_put(tr);
3160 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3162 struct trace_array *tr = inode->i_private;
3164 trace_array_put(tr);
3166 return single_release(inode, file);
3169 static int tracing_open(struct inode *inode, struct file *file)
3171 struct trace_array *tr = inode->i_private;
3172 struct trace_iterator *iter;
3175 if (trace_array_get(tr) < 0)
3178 /* If this file was open for write, then erase contents */
3179 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3180 int cpu = tracing_get_cpu(inode);
3182 if (cpu == RING_BUFFER_ALL_CPUS)
3183 tracing_reset_online_cpus(&tr->trace_buffer);
3185 tracing_reset(&tr->trace_buffer, cpu);
3188 if (file->f_mode & FMODE_READ) {
3189 iter = __tracing_open(inode, file, false);
3191 ret = PTR_ERR(iter);
3192 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3193 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3197 trace_array_put(tr);
3203 * Some tracers are not suitable for instance buffers.
3204 * A tracer is always available for the global array (toplevel)
3205 * or if it explicitly states that it is.
3208 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3210 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
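/*
 * Illustrative sketch, not from the original file: a tracer that wants
 * to be selectable inside "instances/" sub-buffers is assumed to set
 * .allow_instances when it registers; otherwise trace_ok_for_array()
 * only offers it on the top level (global) trace array. The tracer
 * below is hypothetical.
 */
#if 0	/* example only */
static struct tracer example_tracer = {
	.name		 = "example",
	.allow_instances = true,
};
#endif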
3213 /* Find the next tracer that this trace array may use */
3214 static struct tracer *
3215 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3217 while (t && !trace_ok_for_array(t, tr))
3224 t_next(struct seq_file *m, void *v, loff_t *pos)
3226 struct trace_array *tr = m->private;
3227 struct tracer *t = v;
3232 t = get_tracer_for_array(tr, t->next);
3237 static void *t_start(struct seq_file *m, loff_t *pos)
3239 struct trace_array *tr = m->private;
3243 mutex_lock(&trace_types_lock);
3245 t = get_tracer_for_array(tr, trace_types);
3246 for (; t && l < *pos; t = t_next(m, t, &l))
3252 static void t_stop(struct seq_file *m, void *p)
3254 mutex_unlock(&trace_types_lock);
3257 static int t_show(struct seq_file *m, void *v)
3259 struct tracer *t = v;
3264 seq_printf(m, "%s", t->name);
3273 static const struct seq_operations show_traces_seq_ops = {
3280 static int show_traces_open(struct inode *inode, struct file *file)
3282 struct trace_array *tr = inode->i_private;
3286 if (tracing_disabled)
3289 ret = seq_open(file, &show_traces_seq_ops);
3293 m = file->private_data;
3300 tracing_write_stub(struct file *filp, const char __user *ubuf,
3301 size_t count, loff_t *ppos)
3306 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3310 if (file->f_mode & FMODE_READ)
3311 ret = seq_lseek(file, offset, whence);
3313 file->f_pos = ret = 0;
3318 static const struct file_operations tracing_fops = {
3319 .open = tracing_open,
3321 .write = tracing_write_stub,
3322 .llseek = tracing_lseek,
3323 .release = tracing_release,
3326 static const struct file_operations show_traces_fops = {
3327 .open = show_traces_open,
3329 .release = seq_release,
3330 .llseek = seq_lseek,
3334 * The tracer itself will not take this lock, but still we want
3335 * to provide a consistent cpumask to user-space:
3337 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3340 * Temporary storage for the character representation of the
3341 * CPU bitmask (and one more byte for the newline):
3343 static char mask_str[NR_CPUS + 1];
3346 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3347 size_t count, loff_t *ppos)
3349 struct trace_array *tr = file_inode(filp)->i_private;
3352 mutex_lock(&tracing_cpumask_update_lock);
3354 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3355 if (count - len < 2) {
3359 len += sprintf(mask_str + len, "\n");
3360 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3363 mutex_unlock(&tracing_cpumask_update_lock);
3369 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3370 size_t count, loff_t *ppos)
3372 struct trace_array *tr = file_inode(filp)->i_private;
3373 cpumask_var_t tracing_cpumask_new;
3376 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3379 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3383 mutex_lock(&tracing_cpumask_update_lock);
3385 local_irq_disable();
3386 arch_spin_lock(&tr->max_lock);
3387 for_each_tracing_cpu(cpu) {
3389 * Increase/decrease the disabled counter if we are
3390 * about to flip a bit in the cpumask:
3392 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3393 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3394 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3395 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3397 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3398 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3399 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3400 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3403 arch_spin_unlock(&tr->max_lock);
3406 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3408 mutex_unlock(&tracing_cpumask_update_lock);
3409 free_cpumask_var(tracing_cpumask_new);
3414 free_cpumask_var(tracing_cpumask_new);
3419 static const struct file_operations tracing_cpumask_fops = {
3420 .open = tracing_open_generic_tr,
3421 .read = tracing_cpumask_read,
3422 .write = tracing_cpumask_write,
3423 .release = tracing_release_generic_tr,
3424 .llseek = generic_file_llseek,
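/*
 * Illustrative userspace sketch, not part of the original file:
 * restricting tracing to CPUs 0 and 1 by writing a hex mask to
 * tracing_cpumask, which is parsed by tracing_cpumask_write() above.
 * The path is an assumption (debugfs mounted at /sys/kernel/debug).
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/tracing_cpumask", "w");

	if (!f)
		return 1;
	/* bit 0 = CPU0, bit 1 = CPU1, so the mask is 0x3 */
	fputs("3\n", f);
	fclose(f);
	return 0;
}
#endif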
3427 static int tracing_trace_options_show(struct seq_file *m, void *v)
3429 struct tracer_opt *trace_opts;
3430 struct trace_array *tr = m->private;
3434 mutex_lock(&trace_types_lock);
3435 tracer_flags = tr->current_trace->flags->val;
3436 trace_opts = tr->current_trace->flags->opts;
3438 for (i = 0; trace_options[i]; i++) {
3439 if (trace_flags & (1 << i))
3440 seq_printf(m, "%s\n", trace_options[i]);
3442 seq_printf(m, "no%s\n", trace_options[i]);
3445 for (i = 0; trace_opts[i].name; i++) {
3446 if (tracer_flags & trace_opts[i].bit)
3447 seq_printf(m, "%s\n", trace_opts[i].name);
3449 seq_printf(m, "no%s\n", trace_opts[i].name);
3451 mutex_unlock(&trace_types_lock);
3456 static int __set_tracer_option(struct trace_array *tr,
3457 struct tracer_flags *tracer_flags,
3458 struct tracer_opt *opts, int neg)
3460 struct tracer *trace = tr->current_trace;
3463 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3468 tracer_flags->val &= ~opts->bit;
3470 tracer_flags->val |= opts->bit;
3474 /* Try to assign a tracer specific option */
3475 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3477 struct tracer *trace = tr->current_trace;
3478 struct tracer_flags *tracer_flags = trace->flags;
3479 struct tracer_opt *opts = NULL;
3482 for (i = 0; tracer_flags->opts[i].name; i++) {
3483 opts = &tracer_flags->opts[i];
3485 if (strcmp(cmp, opts->name) == 0)
3486 return __set_tracer_option(tr, trace->flags, opts, neg);
3492 /* Some tracers require overwrite to stay enabled */
3493 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3495 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3501 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3503 /* do nothing if flag is already set */
3504 if (!!(trace_flags & mask) == !!enabled)
3507 /* Give the tracer a chance to approve the change */
3508 if (tr->current_trace->flag_changed)
3509 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3513 trace_flags |= mask;
3515 trace_flags &= ~mask;
3517 if (mask == TRACE_ITER_RECORD_CMD)
3518 trace_event_enable_cmd_record(enabled);
3520 if (mask == TRACE_ITER_OVERWRITE) {
3521 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3522 #ifdef CONFIG_TRACER_MAX_TRACE
3523 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3527 if (mask == TRACE_ITER_PRINTK)
3528 trace_printk_start_stop_comm(enabled);
3533 static int trace_set_options(struct trace_array *tr, char *option)
3540 cmp = strstrip(option);
3542 if (strncmp(cmp, "no", 2) == 0) {
3547 mutex_lock(&trace_types_lock);
3549 for (i = 0; trace_options[i]; i++) {
3550 if (strcmp(cmp, trace_options[i]) == 0) {
3551 ret = set_tracer_flag(tr, 1 << i, !neg);
3556 /* If no option could be set, test the specific tracer options */
3557 if (!trace_options[i])
3558 ret = set_tracer_option(tr, cmp, neg);
3560 mutex_unlock(&trace_types_lock);
3566 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3567 size_t cnt, loff_t *ppos)
3569 struct seq_file *m = filp->private_data;
3570 struct trace_array *tr = m->private;
3574 if (cnt >= sizeof(buf))
3577 if (copy_from_user(&buf, ubuf, cnt))
3582 ret = trace_set_options(tr, buf);
3591 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3593 struct trace_array *tr = inode->i_private;
3596 if (tracing_disabled)
3599 if (trace_array_get(tr) < 0)
3602 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3604 trace_array_put(tr);
3609 static const struct file_operations tracing_iter_fops = {
3610 .open = tracing_trace_options_open,
3612 .llseek = seq_lseek,
3613 .release = tracing_single_release_tr,
3614 .write = tracing_trace_options_write,
3617 static const char readme_msg[] =
3618 "tracing mini-HOWTO:\n\n"
3619 "# echo 0 > tracing_on : quick way to disable tracing\n"
3620 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3621 " Important files:\n"
3622 " trace\t\t\t- The static contents of the buffer\n"
3623 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3624 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3625 " current_tracer\t- function and latency tracers\n"
3626 " available_tracers\t- list of configured tracers for current_tracer\n"
3627 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3628 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3629 " trace_clock\t\t-change the clock used to order events\n"
3630 " local: Per cpu clock but may not be synced across CPUs\n"
3631 " global: Synced across CPUs but slows tracing down.\n"
3632 " counter: Not a clock, but just an increment\n"
3633 " uptime: Jiffy counter from time of boot\n"
3634 " perf: Same clock that perf events use\n"
3635 #ifdef CONFIG_X86_64
3636 " x86-tsc: TSC cycle counter\n"
3638 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3639 " tracing_cpumask\t- Limit which CPUs to trace\n"
3640 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3641 "\t\t\t Remove sub-buffer with rmdir\n"
3642 " trace_options\t\t- Set format or modify how tracing happens\n"
3643 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3644 "\t\t\t option name\n"
3645 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3646 #ifdef CONFIG_DYNAMIC_FTRACE
3647 "\n available_filter_functions - list of functions that can be filtered on\n"
3648 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3649 "\t\t\t functions\n"
3650 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3651 "\t modules: Can select a group via module\n"
3652 "\t Format: :mod:<module-name>\n"
3653 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3654 "\t triggers: a command to perform when function is hit\n"
3655 "\t Format: <function>:<trigger>[:count]\n"
3656 "\t trigger: traceon, traceoff\n"
3657 "\t\t enable_event:<system>:<event>\n"
3658 "\t\t disable_event:<system>:<event>\n"
3659 #ifdef CONFIG_STACKTRACE
3662 #ifdef CONFIG_TRACER_SNAPSHOT
3667 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3668 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3669 "\t The first one will disable tracing every time do_fault is hit\n"
3670 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3671 "\t The first time do trap is hit and it disables tracing, the\n"
3672 "\t counter will decrement to 2. If tracing is already disabled,\n"
3673 "\t the counter will not decrement. It only decrements when the\n"
3674 "\t trigger did work\n"
3675 "\t To remove trigger without count:\n"
3676 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3677 "\t To remove trigger with a count:\n"
3678 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3679 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3680 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3681 "\t modules: Can select a group via module command :mod:\n"
3682 "\t Does not accept triggers\n"
3683 #endif /* CONFIG_DYNAMIC_FTRACE */
3684 #ifdef CONFIG_FUNCTION_TRACER
3685 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3688 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3689 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3690 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3692 #ifdef CONFIG_TRACER_SNAPSHOT
3693 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3694 "\t\t\t snapshot buffer. Read the contents for more\n"
3695 "\t\t\t information\n"
3697 #ifdef CONFIG_STACK_TRACER
3698 " stack_trace\t\t- Shows the max stack trace when active\n"
3699 " stack_max_size\t- Shows current max stack size that was traced\n"
3700 "\t\t\t Write into this file to reset the max size (trigger a\n"
3701 "\t\t\t new trace)\n"
3702 #ifdef CONFIG_DYNAMIC_FTRACE
3703 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706 #endif /* CONFIG_STACK_TRACER */
3707 " events/\t\t- Directory containing all trace event subsystems:\n"
3708 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3709 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3712 " filter\t\t- If set, only events passing filter are traced\n"
3713 " events/<system>/<event>/\t- Directory containing control files for\n"
3715 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3716 " filter\t\t- If set, only events passing filter are traced\n"
3717 " trigger\t\t- If set, a command to perform when event is hit\n"
3718 "\t Format: <trigger>[:count][if <filter>]\n"
3719 "\t trigger: traceon, traceoff\n"
3720 "\t enable_event:<system>:<event>\n"
3721 "\t disable_event:<system>:<event>\n"
3722 #ifdef CONFIG_STACKTRACE
3725 #ifdef CONFIG_TRACER_SNAPSHOT
3728 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3729 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3730 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3731 "\t events/block/block_unplug/trigger\n"
3732 "\t The first disables tracing every time block_unplug is hit.\n"
3733 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3734 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3735 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3736 "\t Like function triggers, the counter is only decremented if it\n"
3737 "\t enabled or disabled tracing.\n"
3738 "\t To remove a trigger without a count:\n"
3739 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3740 "\t To remove a trigger with a count:\n"
3741 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3742 "\t Filters can be ignored when removing a trigger.\n"
3746 tracing_readme_read(struct file *filp, char __user *ubuf,
3747 size_t cnt, loff_t *ppos)
3749 return simple_read_from_buffer(ubuf, cnt, ppos,
3750 readme_msg, strlen(readme_msg));
3753 static const struct file_operations tracing_readme_fops = {
3754 .open = tracing_open_generic,
3755 .read = tracing_readme_read,
3756 .llseek = generic_file_llseek,
3759 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3761 unsigned int *ptr = v;
3763 if (*pos || m->count)
3768 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3770 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3779 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3785 arch_spin_lock(&trace_cmdline_lock);
3787 v = &savedcmd->map_cmdline_to_pid[0];
3789 v = saved_cmdlines_next(m, v, &l);
3797 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3799 arch_spin_unlock(&trace_cmdline_lock);
3803 static int saved_cmdlines_show(struct seq_file *m, void *v)
3805 char buf[TASK_COMM_LEN];
3806 unsigned int *pid = v;
3808 __trace_find_cmdline(*pid, buf);
3809 seq_printf(m, "%d %s\n", *pid, buf);
3813 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3814 .start = saved_cmdlines_start,
3815 .next = saved_cmdlines_next,
3816 .stop = saved_cmdlines_stop,
3817 .show = saved_cmdlines_show,
3820 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3822 if (tracing_disabled)
3825 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3828 static const struct file_operations tracing_saved_cmdlines_fops = {
3829 .open = tracing_saved_cmdlines_open,
3831 .llseek = seq_lseek,
3832 .release = seq_release,
3836 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3837 size_t cnt, loff_t *ppos)
3842 arch_spin_lock(&trace_cmdline_lock);
3843 r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
3844 arch_spin_unlock(&trace_cmdline_lock);
3846 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3851 kfree(s->saved_cmdlines);
3852 kfree(s->map_cmdline_to_pid);
3856 static int tracing_resize_saved_cmdlines(unsigned int val)
3858 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3860 s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
3864 if (allocate_cmdlines_buffer(val, s) < 0) {
3869 arch_spin_lock(&trace_cmdline_lock);
3870 savedcmd_temp = savedcmd;
3872 arch_spin_unlock(&trace_cmdline_lock);
3873 free_saved_cmdlines_buffer(savedcmd_temp);
3879 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3880 size_t cnt, loff_t *ppos)
3885 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3889 /* must have at least 1 entry and no more than PID_MAX_DEFAULT entries */
3890 if (!val || val > PID_MAX_DEFAULT)
3893 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3902 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3903 .open = tracing_open_generic,
3904 .read = tracing_saved_cmdlines_size_read,
3905 .write = tracing_saved_cmdlines_size_write,
3909 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3910 size_t cnt, loff_t *ppos)
3912 struct trace_array *tr = filp->private_data;
3913 char buf[MAX_TRACER_SIZE+2];
3916 mutex_lock(&trace_types_lock);
3917 r = sprintf(buf, "%s\n", tr->current_trace->name);
3918 mutex_unlock(&trace_types_lock);
3920 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3923 int tracer_init(struct tracer *t, struct trace_array *tr)
3925 tracing_reset_online_cpus(&tr->trace_buffer);
3929 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3933 for_each_tracing_cpu(cpu)
3934 per_cpu_ptr(buf->data, cpu)->entries = val;
3937 #ifdef CONFIG_TRACER_MAX_TRACE
3938 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3939 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3940 struct trace_buffer *size_buf, int cpu_id)
3944 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3945 for_each_tracing_cpu(cpu) {
3946 ret = ring_buffer_resize(trace_buf->buffer,
3947 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3950 per_cpu_ptr(trace_buf->data, cpu)->entries =
3951 per_cpu_ptr(size_buf->data, cpu)->entries;
3954 ret = ring_buffer_resize(trace_buf->buffer,
3955 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3957 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3958 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3963 #endif /* CONFIG_TRACER_MAX_TRACE */
3965 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3966 unsigned long size, int cpu)
3971 * If the kernel or the user changes the size of the ring buffer,
3972 * we use the size that was given, and we can forget about
3973 * expanding it later.
3975 ring_buffer_expanded = true;
3977 /* May be called before buffers are initialized */
3978 if (!tr->trace_buffer.buffer)
3981 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3985 #ifdef CONFIG_TRACER_MAX_TRACE
3986 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3987 !tr->current_trace->use_max_tr)
3990 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3992 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3993 &tr->trace_buffer, cpu);
3996 * AARGH! We are left with a max buffer of a
3997 * different size!!!!
3998 * The max buffer is our "snapshot" buffer.
3999 * When a tracer needs a snapshot (one of the
4000 * latency tracers), it swaps the max buffer
4001 * with the saved snapshot. We succeeded in
4002 * updating the size of the main buffer, but failed to
4003 * update the size of the max buffer. But when we tried
4004 * to reset the main buffer to the original size, we
4005 * failed there too. This is very unlikely to
4006 * happen, but if it does, warn and kill all
4010 tracing_disabled = 1;
4015 if (cpu == RING_BUFFER_ALL_CPUS)
4016 set_buffer_entries(&tr->max_buffer, size);
4018 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4021 #endif /* CONFIG_TRACER_MAX_TRACE */
4023 if (cpu == RING_BUFFER_ALL_CPUS)
4024 set_buffer_entries(&tr->trace_buffer, size);
4026 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4031 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4032 unsigned long size, int cpu_id)
4036 mutex_lock(&trace_types_lock);
4038 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4039 /* make sure this cpu is enabled in the mask */
4040 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4046 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4051 mutex_unlock(&trace_types_lock);
4058 * tracing_update_buffers - used by tracing facility to expand ring buffers
4060 * To save memory on systems where tracing is configured in but never
4061 * used, the ring buffers are set to a minimum size. Once a user starts
4062 * to use the tracing facility, the buffers need to grow
4063 * to their default size.
4065 * This function is to be called when a tracer is about to be used.
4067 int tracing_update_buffers(void)
4071 mutex_lock(&trace_types_lock);
4072 if (!ring_buffer_expanded)
4073 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4074 RING_BUFFER_ALL_CPUS);
4075 mutex_unlock(&trace_types_lock);
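/*
 * Illustrative sketch, not from the original file: a hypothetical
 * caller that is about to start producing trace data is expected to
 * expand the ring buffers first, roughly like this.
 */
#if 0	/* example only */
static int example_enable_path(void)
{
	int ret;

	/* grow the per-cpu buffers to their default size if still minimal */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* ... then enable the tracer or trace event ... */
	return 0;
}
#endif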
4080 struct trace_option_dentry;
4082 static struct trace_option_dentry *
4083 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4086 destroy_trace_option_files(struct trace_option_dentry *topts);
4089 * Used to clear out the tracer before deletion of an instance.
4090 * Must have trace_types_lock held.
4092 static void tracing_set_nop(struct trace_array *tr)
4094 if (tr->current_trace == &nop_trace)
4097 tr->current_trace->enabled--;
4099 if (tr->current_trace->reset)
4100 tr->current_trace->reset(tr);
4102 tr->current_trace = &nop_trace;
4105 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4107 static struct trace_option_dentry *topts;
4109 #ifdef CONFIG_TRACER_MAX_TRACE
4114 mutex_lock(&trace_types_lock);
4116 if (!ring_buffer_expanded) {
4117 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4118 RING_BUFFER_ALL_CPUS);
4124 for (t = trace_types; t; t = t->next) {
4125 if (strcmp(t->name, buf) == 0)
4132 if (t == tr->current_trace)
4135 /* Some tracers are only allowed for the top level buffer */
4136 if (!trace_ok_for_array(t, tr)) {
4141 trace_branch_disable();
4143 tr->current_trace->enabled--;
4145 if (tr->current_trace->reset)
4146 tr->current_trace->reset(tr);
4148 /* Current trace needs to be nop_trace before synchronize_sched */
4149 tr->current_trace = &nop_trace;
4151 #ifdef CONFIG_TRACER_MAX_TRACE
4152 had_max_tr = tr->allocated_snapshot;
4154 if (had_max_tr && !t->use_max_tr) {
4156 * We need to make sure that update_max_tr sees that
4157 * current_trace changed to nop_trace to keep it from
4158 * swapping the buffers after we resize it.
4159 * update_max_tr is called with interrupts disabled,
4160 * so a synchronize_sched() is sufficient.
4162 synchronize_sched();
4166 /* Currently, only the top instance has options */
4167 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4168 destroy_trace_option_files(topts);
4169 topts = create_trace_option_files(tr, t);
4172 #ifdef CONFIG_TRACER_MAX_TRACE
4173 if (t->use_max_tr && !had_max_tr) {
4174 ret = alloc_snapshot(tr);
4181 ret = tracer_init(t, tr);
4186 tr->current_trace = t;
4187 tr->current_trace->enabled++;
4188 trace_branch_enable(tr);
4190 mutex_unlock(&trace_types_lock);
4196 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4197 size_t cnt, loff_t *ppos)
4199 struct trace_array *tr = filp->private_data;
4200 char buf[MAX_TRACER_SIZE+1];
4207 if (cnt > MAX_TRACER_SIZE)
4208 cnt = MAX_TRACER_SIZE;
4210 if (copy_from_user(&buf, ubuf, cnt))
4215 /* strip ending whitespace. */
4216 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4219 err = tracing_set_tracer(tr, buf);
4229 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4230 size_t cnt, loff_t *ppos)
4232 unsigned long *ptr = filp->private_data;
4236 r = snprintf(buf, sizeof(buf), "%ld\n",
4237 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4238 if (r > sizeof(buf))
4240 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4244 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4245 size_t cnt, loff_t *ppos)
4247 unsigned long *ptr = filp->private_data;
4251 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4260 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4262 struct trace_array *tr = inode->i_private;
4263 struct trace_iterator *iter;
4266 if (tracing_disabled)
4269 if (trace_array_get(tr) < 0)
4272 mutex_lock(&trace_types_lock);
4274 /* create a buffer to store the information to pass to userspace */
4275 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4278 __trace_array_put(tr);
4283 * We make a copy of the current tracer to avoid concurrent
4284 * changes on it while we are reading.
4286 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4291 *iter->trace = *tr->current_trace;
4293 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4298 /* trace pipe does not show start of buffer */
4299 cpumask_setall(iter->started);
4301 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4302 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4304 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4305 if (trace_clocks[tr->clock_id].in_ns)
4306 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4309 iter->trace_buffer = &tr->trace_buffer;
4310 iter->cpu_file = tracing_get_cpu(inode);
4311 mutex_init(&iter->mutex);
4312 filp->private_data = iter;
4314 if (iter->trace->pipe_open)
4315 iter->trace->pipe_open(iter);
4317 nonseekable_open(inode, filp);
4319 mutex_unlock(&trace_types_lock);
4325 __trace_array_put(tr);
4326 mutex_unlock(&trace_types_lock);
4330 static int tracing_release_pipe(struct inode *inode, struct file *file)
4332 struct trace_iterator *iter = file->private_data;
4333 struct trace_array *tr = inode->i_private;
4335 mutex_lock(&trace_types_lock);
4337 if (iter->trace->pipe_close)
4338 iter->trace->pipe_close(iter);
4340 mutex_unlock(&trace_types_lock);
4342 free_cpumask_var(iter->started);
4343 mutex_destroy(&iter->mutex);
4347 trace_array_put(tr);
4353 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4355 /* Iterators are static, they should be filled or empty */
4356 if (trace_buffer_iter(iter, iter->cpu_file))
4357 return POLLIN | POLLRDNORM;
4359 if (trace_flags & TRACE_ITER_BLOCK)
4361 * Always select as readable when in blocking mode
4363 return POLLIN | POLLRDNORM;
4365 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4370 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4372 struct trace_iterator *iter = filp->private_data;
4374 return trace_poll(iter, filp, poll_table);
4377 /* Must be called with trace_types_lock mutex held. */
4378 static int tracing_wait_pipe(struct file *filp)
4380 struct trace_iterator *iter = filp->private_data;
4382 while (trace_empty(iter)) {
4384 if ((filp->f_flags & O_NONBLOCK)) {
4389 * We block until we read something and tracing is disabled.
4390 * We still block if tracing is disabled, but we have never
4391 * read anything. This allows a user to cat this file, and
4392 * then enable tracing. But after we have read something,
4393 * we give an EOF when tracing is again disabled.
4395 * iter->pos will be 0 if we haven't read anything.
4397 if (!tracing_is_on() && iter->pos)
4400 mutex_unlock(&iter->mutex);
4404 mutex_lock(&iter->mutex);
4406 if (signal_pending(current))
4417 tracing_read_pipe(struct file *filp, char __user *ubuf,
4418 size_t cnt, loff_t *ppos)
4420 struct trace_iterator *iter = filp->private_data;
4421 struct trace_array *tr = iter->tr;
4424 /* return any leftover data */
4425 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4429 trace_seq_init(&iter->seq);
4431 /* copy the tracer to avoid using a global lock all around */
4432 mutex_lock(&trace_types_lock);
4433 if (unlikely(iter->trace->name != tr->current_trace->name))
4434 *iter->trace = *tr->current_trace;
4435 mutex_unlock(&trace_types_lock);
4438 * Avoid more than one consumer on a single file descriptor
4439 * This is just a matter of traces coherency, the ring buffer itself
4442 mutex_lock(&iter->mutex);
4443 if (iter->trace->read) {
4444 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4450 sret = tracing_wait_pipe(filp);
4454 /* stop when tracing is finished */
4455 if (trace_empty(iter)) {
4460 if (cnt >= PAGE_SIZE)
4461 cnt = PAGE_SIZE - 1;
4463 /* reset all but tr, trace, and overruns */
4464 memset(&iter->seq, 0,
4465 sizeof(struct trace_iterator) -
4466 offsetof(struct trace_iterator, seq));
4467 cpumask_clear(iter->started);
4470 trace_event_read_lock();
4471 trace_access_lock(iter->cpu_file);
4472 while (trace_find_next_entry_inc(iter) != NULL) {
4473 enum print_line_t ret;
4474 int len = iter->seq.len;
4476 ret = print_trace_line(iter);
4477 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4478 /* don't print partial lines */
4479 iter->seq.len = len;
4482 if (ret != TRACE_TYPE_NO_CONSUME)
4483 trace_consume(iter);
4485 if (iter->seq.len >= cnt)
4489 * Setting the full flag means we reached the trace_seq buffer
4490 * size and we should have left via the partial-output condition above.
4491 * If we get here, one of the trace_seq_* functions is not being used properly.
4493 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4496 trace_access_unlock(iter->cpu_file);
4497 trace_event_read_unlock();
4499 /* Now copy what we have to the user */
4500 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4501 if (iter->seq.readpos >= iter->seq.len)
4502 trace_seq_init(&iter->seq);
4505 * If there was nothing to send to the user, despite consuming
4506 * trace entries, go back to wait for more entries.
4512 mutex_unlock(&iter->mutex);
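/*
 * Illustrative userspace sketch, not part of the original file: a
 * consuming reader of trace_pipe, relying on the blocking semantics of
 * tracing_wait_pipe()/tracing_read_pipe() above. The path is an
 * assumption (debugfs mounted at /sys/kernel/debug).
 */
#if 0	/* example only */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return 1;
	/* read() blocks until entries arrive; each read consumes them */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif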
4517 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4520 __free_page(spd->pages[idx]);
4523 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4525 .confirm = generic_pipe_buf_confirm,
4526 .release = generic_pipe_buf_release,
4527 .steal = generic_pipe_buf_steal,
4528 .get = generic_pipe_buf_get,
4532 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4537 /* Seq buffer is page-sized, exactly what we need. */
4539 count = iter->seq.len;
4540 ret = print_trace_line(iter);
4541 count = iter->seq.len - count;
4544 iter->seq.len -= count;
4547 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4548 iter->seq.len -= count;
4552 if (ret != TRACE_TYPE_NO_CONSUME)
4553 trace_consume(iter);
4555 if (!trace_find_next_entry_inc(iter)) {
4565 static ssize_t tracing_splice_read_pipe(struct file *filp,
4567 struct pipe_inode_info *pipe,
4571 struct page *pages_def[PIPE_DEF_BUFFERS];
4572 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4573 struct trace_iterator *iter = filp->private_data;
4574 struct splice_pipe_desc spd = {
4576 .partial = partial_def,
4577 .nr_pages = 0, /* This gets updated below. */
4578 .nr_pages_max = PIPE_DEF_BUFFERS,
4580 .ops = &tracing_pipe_buf_ops,
4581 .spd_release = tracing_spd_release_pipe,
4583 struct trace_array *tr = iter->tr;
4588 if (splice_grow_spd(pipe, &spd))
4591 /* copy the tracer to avoid using a global lock all around */
4592 mutex_lock(&trace_types_lock);
4593 if (unlikely(iter->trace->name != tr->current_trace->name))
4594 *iter->trace = *tr->current_trace;
4595 mutex_unlock(&trace_types_lock);
4597 mutex_lock(&iter->mutex);
4599 if (iter->trace->splice_read) {
4600 ret = iter->trace->splice_read(iter, filp,
4601 ppos, pipe, len, flags);
4606 ret = tracing_wait_pipe(filp);
4610 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4615 trace_event_read_lock();
4616 trace_access_lock(iter->cpu_file);
4618 /* Fill as many pages as possible. */
4619 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4620 spd.pages[i] = alloc_page(GFP_KERNEL);
4624 rem = tracing_fill_pipe_page(rem, iter);
4626 /* Copy the data into the page, so we can start over. */
4627 ret = trace_seq_to_buffer(&iter->seq,
4628 page_address(spd.pages[i]),
4631 __free_page(spd.pages[i]);
4634 spd.partial[i].offset = 0;
4635 spd.partial[i].len = iter->seq.len;
4637 trace_seq_init(&iter->seq);
4640 trace_access_unlock(iter->cpu_file);
4641 trace_event_read_unlock();
4642 mutex_unlock(&iter->mutex);
4646 ret = splice_to_pipe(pipe, &spd);
4648 splice_shrink_spd(&spd);
4652 mutex_unlock(&iter->mutex);
4657 tracing_entries_read(struct file *filp, char __user *ubuf,
4658 size_t cnt, loff_t *ppos)
4660 struct inode *inode = file_inode(filp);
4661 struct trace_array *tr = inode->i_private;
4662 int cpu = tracing_get_cpu(inode);
4667 mutex_lock(&trace_types_lock);
4669 if (cpu == RING_BUFFER_ALL_CPUS) {
4670 int cpu, buf_size_same;
4675 /* check if all cpu sizes are the same */
4676 for_each_tracing_cpu(cpu) {
4677 /* fill in the size from first enabled cpu */
4679 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4680 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4686 if (buf_size_same) {
4687 if (!ring_buffer_expanded)
4688 r = sprintf(buf, "%lu (expanded: %lu)\n",
4690 trace_buf_size >> 10);
4692 r = sprintf(buf, "%lu\n", size >> 10);
4694 r = sprintf(buf, "X\n");
4696 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4698 mutex_unlock(&trace_types_lock);
4700 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4705 tracing_entries_write(struct file *filp, const char __user *ubuf,
4706 size_t cnt, loff_t *ppos)
4708 struct inode *inode = file_inode(filp);
4709 struct trace_array *tr = inode->i_private;
4713 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4717 /* must have at least 1 entry */
4721 /* value is in KB */
4723 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
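/*
 * Illustrative userspace sketch, not part of the original file:
 * resizing the per-cpu buffers through buffer_size_kb (the value is in
 * KB, per tracing_entries_write() above) and reading the size back.
 * The path and the 1408 KB figure are arbitrary assumptions.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/buffer_size_kb";
	char line[64];
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1408\n", f);	/* 1408 KB per cpu */
	fclose(f);

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("buffer_size_kb: %s", line);
	fclose(f);
	return 0;
}
#endif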
4733 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4734 size_t cnt, loff_t *ppos)
4736 struct trace_array *tr = filp->private_data;
4739 unsigned long size = 0, expanded_size = 0;
4741 mutex_lock(&trace_types_lock);
4742 for_each_tracing_cpu(cpu) {
4743 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4744 if (!ring_buffer_expanded)
4745 expanded_size += trace_buf_size >> 10;
4747 if (ring_buffer_expanded)
4748 r = sprintf(buf, "%lu\n", size);
4750 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4751 mutex_unlock(&trace_types_lock);
4753 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4757 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4758 size_t cnt, loff_t *ppos)
4761 * There is no need to read what the user has written; this function
4762 * just makes sure that there is no error when "echo" is used
4771 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4773 struct trace_array *tr = inode->i_private;
4775 /* disable tracing ? */
4776 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4777 tracer_tracing_off(tr);
4778 /* resize the ring buffer to 0 */
4779 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4781 trace_array_put(tr);
4787 tracing_mark_write(struct file *filp, const char __user *ubuf,
4788 size_t cnt, loff_t *fpos)
4790 unsigned long addr = (unsigned long)ubuf;
4791 struct trace_array *tr = filp->private_data;
4792 struct ring_buffer_event *event;
4793 struct ring_buffer *buffer;
4794 struct print_entry *entry;
4795 unsigned long irq_flags;
4796 struct page *pages[2];
4806 if (tracing_disabled)
4809 if (!(trace_flags & TRACE_ITER_MARKERS))
4812 if (cnt > TRACE_BUF_SIZE)
4813 cnt = TRACE_BUF_SIZE;
4816 * Userspace is injecting traces into the kernel trace buffer.
4817 * We want to be as non-intrusive as possible.
4818 * To do so, we do not want to allocate any special buffers
4819 * or take any locks, but instead write the userspace data
4820 * straight into the ring buffer.
4822 * First we need to pin the userspace buffer into memory,
4823 * which most likely it already is, because userspace just referenced it.
4824 * But there's no guarantee that it is. By using get_user_pages_fast()
4825 * and kmap_atomic/kunmap_atomic() we can get access to the
4826 * pages directly. We then write the data directly into the
4829 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4831 /* check if we cross pages */
4832 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4835 offset = addr & (PAGE_SIZE - 1);
4838 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4839 if (ret < nr_pages) {
4841 put_page(pages[ret]);
4846 for (i = 0; i < nr_pages; i++)
4847 map_page[i] = kmap_atomic(pages[i]);
4849 local_save_flags(irq_flags);
4850 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4851 buffer = tr->trace_buffer.buffer;
4852 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4853 irq_flags, preempt_count());
4855 /* Ring buffer disabled, return as if not open for write */
4860 entry = ring_buffer_event_data(event);
4861 entry->ip = _THIS_IP_;
4863 if (nr_pages == 2) {
4864 len = PAGE_SIZE - offset;
4865 memcpy(&entry->buf, map_page[0] + offset, len);
4866 memcpy(&entry->buf[len], map_page[1], cnt - len);
4868 memcpy(&entry->buf, map_page[0] + offset, cnt);
4870 if (entry->buf[cnt - 1] != '\n') {
4871 entry->buf[cnt] = '\n';
4872 entry->buf[cnt + 1] = '\0';
4874 entry->buf[cnt] = '\0';
4876 __buffer_unlock_commit(buffer, event);
4883 for (i = 0; i < nr_pages; i++){
4884 kunmap_atomic(map_page[i]);
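/*
 * Illustrative userspace sketch, not part of the original file:
 * injecting a marker with a single write(), which lands in
 * tracing_mark_write() above as one TRACE_PRINT entry. The path is an
 * assumption (debugfs mounted at /sys/kernel/debug).
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	const char *msg = "hello from userspace\n";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	/* one write() becomes one entry in the ring buffer */
	if (write(fd, msg, strlen(msg)) < 0)
		return 1;
	close(fd);
	return 0;
}
#endif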
4891 static int tracing_clock_show(struct seq_file *m, void *v)
4893 struct trace_array *tr = m->private;
4896 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4898 "%s%s%s%s", i ? " " : "",
4899 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4900 i == tr->clock_id ? "]" : "");
4906 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4910 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4911 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4914 if (i == ARRAY_SIZE(trace_clocks))
4917 mutex_lock(&trace_types_lock);
4921 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4924 * New clock may not be consistent with the previous clock.
4925 * Reset the buffer so that it doesn't have incomparable timestamps.
4927 tracing_reset_online_cpus(&tr->trace_buffer);
4929 #ifdef CONFIG_TRACER_MAX_TRACE
4930 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4931 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4932 tracing_reset_online_cpus(&tr->max_buffer);
4935 mutex_unlock(&trace_types_lock);
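/*
 * Illustrative userspace sketch, not part of the original file:
 * selecting the "global" trace clock, handled by tracing_set_clock()
 * above (note that switching clocks resets the buffers). The path is
 * an assumption (debugfs mounted at /sys/kernel/debug).
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_clock", "w");

	if (!f)
		return 1;
	fputs("global\n", f);
	fclose(f);
	return 0;
}
#endif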
4940 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4941 size_t cnt, loff_t *fpos)
4943 struct seq_file *m = filp->private_data;
4944 struct trace_array *tr = m->private;
4946 const char *clockstr;
4949 if (cnt >= sizeof(buf))
4952 if (copy_from_user(&buf, ubuf, cnt))
4957 clockstr = strstrip(buf);
4959 ret = tracing_set_clock(tr, clockstr);
4968 static int tracing_clock_open(struct inode *inode, struct file *file)
4970 struct trace_array *tr = inode->i_private;
4973 if (tracing_disabled)
4976 if (trace_array_get(tr))
4979 ret = single_open(file, tracing_clock_show, inode->i_private);
4981 trace_array_put(tr);
4986 struct ftrace_buffer_info {
4987 struct trace_iterator iter;
4992 #ifdef CONFIG_TRACER_SNAPSHOT
4993 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4995 struct trace_array *tr = inode->i_private;
4996 struct trace_iterator *iter;
5000 if (trace_array_get(tr) < 0)
5003 if (file->f_mode & FMODE_READ) {
5004 iter = __tracing_open(inode, file, true);
5006 ret = PTR_ERR(iter);
5008 /* Writes still need the seq_file to hold the private data */
5010 m = kzalloc(sizeof(*m), GFP_KERNEL);
5013 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5021 iter->trace_buffer = &tr->max_buffer;
5022 iter->cpu_file = tracing_get_cpu(inode);
5024 file->private_data = m;
5028 trace_array_put(tr);
5034 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5037 struct seq_file *m = filp->private_data;
5038 struct trace_iterator *iter = m->private;
5039 struct trace_array *tr = iter->tr;
5043 ret = tracing_update_buffers();
5047 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5051 mutex_lock(&trace_types_lock);
5053 if (tr->current_trace->use_max_tr) {
5060 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5064 if (tr->allocated_snapshot)
5068 /* Only allow per-cpu swap if the ring buffer supports it */
5069 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5070 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5075 if (!tr->allocated_snapshot) {
5076 ret = alloc_snapshot(tr);
5080 local_irq_disable();
5081 /* Now, we're going to swap */
5082 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5083 update_max_tr(tr, current, smp_processor_id());
5085 update_max_tr_single(tr, current, iter->cpu_file);
5089 if (tr->allocated_snapshot) {
5090 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5091 tracing_reset_online_cpus(&tr->max_buffer);
5093 tracing_reset(&tr->max_buffer, iter->cpu_file);
5103 mutex_unlock(&trace_types_lock);
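/*
 * Summary of the value handling above (descriptive comment, added for
 * clarity): writing "0" frees the snapshot buffer if it was allocated,
 * writing "1" allocates it on demand and swaps it with the live buffer
 * (per-cpu only when opened through a per_cpu/cpuN/snapshot file and the
 * ring buffer supports per-cpu swap), and any other value clears the
 * snapshot buffer without swapping. For example:
 *
 *	# echo 1 > snapshot		(take a snapshot)
 *	# cat snapshot			(read it back)
 *	# echo 0 > snapshot		(free the snapshot buffer)
 */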
5107 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5109 struct seq_file *m = file->private_data;
5112 ret = tracing_release(inode, file);
5114 if (file->f_mode & FMODE_READ)
5117 /* If write only, the seq_file is just a stub */
5125 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5126 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5127 size_t count, loff_t *ppos);
5128 static int tracing_buffers_release(struct inode *inode, struct file *file);
5129 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5130 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5132 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5134 struct ftrace_buffer_info *info;
5137 ret = tracing_buffers_open(inode, filp);
5141 info = filp->private_data;
5143 if (info->iter.trace->use_max_tr) {
5144 tracing_buffers_release(inode, filp);
5148 info->iter.snapshot = true;
5149 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5154 #endif /* CONFIG_TRACER_SNAPSHOT */
5157 static const struct file_operations tracing_max_lat_fops = {
5158 .open = tracing_open_generic,
5159 .read = tracing_max_lat_read,
5160 .write = tracing_max_lat_write,
5161 .llseek = generic_file_llseek,
5164 static const struct file_operations set_tracer_fops = {
5165 .open = tracing_open_generic,
5166 .read = tracing_set_trace_read,
5167 .write = tracing_set_trace_write,
5168 .llseek = generic_file_llseek,
5171 static const struct file_operations tracing_pipe_fops = {
5172 .open = tracing_open_pipe,
5173 .poll = tracing_poll_pipe,
5174 .read = tracing_read_pipe,
5175 .splice_read = tracing_splice_read_pipe,
5176 .release = tracing_release_pipe,
5177 .llseek = no_llseek,
5180 static const struct file_operations tracing_entries_fops = {
5181 .open = tracing_open_generic_tr,
5182 .read = tracing_entries_read,
5183 .write = tracing_entries_write,
5184 .llseek = generic_file_llseek,
5185 .release = tracing_release_generic_tr,
5188 static const struct file_operations tracing_total_entries_fops = {
5189 .open = tracing_open_generic_tr,
5190 .read = tracing_total_entries_read,
5191 .llseek = generic_file_llseek,
5192 .release = tracing_release_generic_tr,
5195 static const struct file_operations tracing_free_buffer_fops = {
5196 .open = tracing_open_generic_tr,
5197 .write = tracing_free_buffer_write,
5198 .release = tracing_free_buffer_release,
5201 static const struct file_operations tracing_mark_fops = {
5202 .open = tracing_open_generic_tr,
5203 .write = tracing_mark_write,
5204 .llseek = generic_file_llseek,
5205 .release = tracing_release_generic_tr,
5208 static const struct file_operations trace_clock_fops = {
5209 .open = tracing_clock_open,
5211 .llseek = seq_lseek,
5212 .release = tracing_single_release_tr,
5213 .write = tracing_clock_write,
5216 #ifdef CONFIG_TRACER_SNAPSHOT
5217 static const struct file_operations snapshot_fops = {
5218 .open = tracing_snapshot_open,
5220 .write = tracing_snapshot_write,
5221 .llseek = tracing_lseek,
5222 .release = tracing_snapshot_release,
5225 static const struct file_operations snapshot_raw_fops = {
5226 .open = snapshot_raw_open,
5227 .read = tracing_buffers_read,
5228 .release = tracing_buffers_release,
5229 .splice_read = tracing_buffers_splice_read,
5230 .llseek = no_llseek,
5233 #endif /* CONFIG_TRACER_SNAPSHOT */
5235 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5237 struct trace_array *tr = inode->i_private;
5238 struct ftrace_buffer_info *info;
5241 if (tracing_disabled)
5244 if (trace_array_get(tr) < 0)
5247 info = kzalloc(sizeof(*info), GFP_KERNEL);
5249 trace_array_put(tr);
5253 mutex_lock(&trace_types_lock);
5256 info->iter.cpu_file = tracing_get_cpu(inode);
5257 info->iter.trace = tr->current_trace;
5258 info->iter.trace_buffer = &tr->trace_buffer;
5260 /* Force reading ring buffer for first read */
5261 info->read = (unsigned int)-1;
5263 filp->private_data = info;
5265 mutex_unlock(&trace_types_lock);
5267 ret = nonseekable_open(inode, filp);
5269 trace_array_put(tr);
5275 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5277 struct ftrace_buffer_info *info = filp->private_data;
5278 struct trace_iterator *iter = &info->iter;
5280 return trace_poll(iter, filp, poll_table);
5284 tracing_buffers_read(struct file *filp, char __user *ubuf,
5285 size_t count, loff_t *ppos)
5287 struct ftrace_buffer_info *info = filp->private_data;
5288 struct trace_iterator *iter = &info->iter;
5295 mutex_lock(&trace_types_lock);
5297 #ifdef CONFIG_TRACER_MAX_TRACE
5298 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5305 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5311 /* Do we have previous read data to read? */
5312 if (info->read < PAGE_SIZE)
5316 trace_access_lock(iter->cpu_file);
5317 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5321 trace_access_unlock(iter->cpu_file);
5324 if (trace_empty(iter)) {
5325 if ((filp->f_flags & O_NONBLOCK)) {
5329 mutex_unlock(&trace_types_lock);
5331 mutex_lock(&trace_types_lock);
5332 if (signal_pending(current)) {
5344 size = PAGE_SIZE - info->read;
5348 ret = copy_to_user(ubuf, info->spare + info->read, size);
5359 mutex_unlock(&trace_types_lock);
5364 static int tracing_buffers_release(struct inode *inode, struct file *file)
5366 struct ftrace_buffer_info *info = file->private_data;
5367 struct trace_iterator *iter = &info->iter;
5369 mutex_lock(&trace_types_lock);
5371 __trace_array_put(iter->tr);
5374 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5377 mutex_unlock(&trace_types_lock);
5383 struct ring_buffer *buffer;
5388 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5389 struct pipe_buffer *buf)
5391 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5396 ring_buffer_free_read_page(ref->buffer, ref->page);
5401 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5402 struct pipe_buffer *buf)
5404 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5409 /* Pipe buffer operations for a buffer. */
5410 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5412 .confirm = generic_pipe_buf_confirm,
5413 .release = buffer_pipe_buf_release,
5414 .steal = generic_pipe_buf_steal,
5415 .get = buffer_pipe_buf_get,
5419 * Callback from splice_to_pipe(), if we need to release some pages
5420 * at the end of the spd in case we errored out while filling the pipe.
5422 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5424 struct buffer_ref *ref =
5425 (struct buffer_ref *)spd->partial[i].private;
5430 ring_buffer_free_read_page(ref->buffer, ref->page);
5432 spd->partial[i].private = 0;
5436 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5437 struct pipe_inode_info *pipe, size_t len,
5440 struct ftrace_buffer_info *info = file->private_data;
5441 struct trace_iterator *iter = &info->iter;
5442 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5443 struct page *pages_def[PIPE_DEF_BUFFERS];
5444 struct splice_pipe_desc spd = {
5446 .partial = partial_def,
5447 .nr_pages_max = PIPE_DEF_BUFFERS,
5449 .ops = &buffer_pipe_buf_ops,
5450 .spd_release = buffer_spd_release,
5452 struct buffer_ref *ref;
5453 int entries, size, i;
5456 mutex_lock(&trace_types_lock);
5458 #ifdef CONFIG_TRACER_MAX_TRACE
5459 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5465 if (splice_grow_spd(pipe, &spd)) {
5470 if (*ppos & (PAGE_SIZE - 1)) {
5475 if (len & (PAGE_SIZE - 1)) {
5476 if (len < PAGE_SIZE) {
5484 trace_access_lock(iter->cpu_file);
5485 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5487 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5491 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5496 ref->buffer = iter->trace_buffer->buffer;
5497 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5503 r = ring_buffer_read_page(ref->buffer, &ref->page,
5504 len, iter->cpu_file, 1);
5506 ring_buffer_free_read_page(ref->buffer, ref->page);
5512 * zero out any left over data, this is going to user space.
5515 size = ring_buffer_page_len(ref->page);
5516 if (size < PAGE_SIZE)
5517 memset(ref->page + size, 0, PAGE_SIZE - size);
5519 page = virt_to_page(ref->page);
5521 spd.pages[i] = page;
5522 spd.partial[i].len = PAGE_SIZE;
5523 spd.partial[i].offset = 0;
5524 spd.partial[i].private = (unsigned long)ref;
5528 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5531 trace_access_unlock(iter->cpu_file);
5534 /* did we read anything? */
5535 if (!spd.nr_pages) {
5536 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5540 mutex_unlock(&trace_types_lock);
5542 mutex_lock(&trace_types_lock);
5543 if (signal_pending(current)) {
5550 ret = splice_to_pipe(pipe, &spd);
5551 splice_shrink_spd(&spd);
5553 mutex_unlock(&trace_types_lock);
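/*
 * Usage sketch (illustrative, not part of the original file): the
 * per_cpu/cpuN/trace_pipe_raw files hand out whole ring buffer pages,
 * so a reader can splice() them into a pipe or file without copying,
 * roughly:
 *
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	splice(fd, NULL, pipe_wr_fd, NULL, PAGE_SIZE, SPLICE_F_NONBLOCK);
 *
 * As the checks above suggest, the file offset must be page aligned and
 * the requested length must cover at least one full page.
 */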
5558 static const struct file_operations tracing_buffers_fops = {
5559 .open = tracing_buffers_open,
5560 .read = tracing_buffers_read,
5561 .poll = tracing_buffers_poll,
5562 .release = tracing_buffers_release,
5563 .splice_read = tracing_buffers_splice_read,
5564 .llseek = no_llseek,
5568 tracing_stats_read(struct file *filp, char __user *ubuf,
5569 size_t count, loff_t *ppos)
5571 struct inode *inode = file_inode(filp);
5572 struct trace_array *tr = inode->i_private;
5573 struct trace_buffer *trace_buf = &tr->trace_buffer;
5574 int cpu = tracing_get_cpu(inode);
5575 struct trace_seq *s;
5577 unsigned long long t;
5578 unsigned long usec_rem;
5580 s = kmalloc(sizeof(*s), GFP_KERNEL);
5586 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5587 trace_seq_printf(s, "entries: %ld\n", cnt);
5589 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5590 trace_seq_printf(s, "overrun: %ld\n", cnt);
5592 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5593 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5595 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5596 trace_seq_printf(s, "bytes: %ld\n", cnt);
5598 if (trace_clocks[tr->clock_id].in_ns) {
5599 /* local or global for trace_clock */
5600 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5601 usec_rem = do_div(t, USEC_PER_SEC);
5602 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5605 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5606 usec_rem = do_div(t, USEC_PER_SEC);
5607 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5609 /* counter or tsc mode for trace_clock */
5610 trace_seq_printf(s, "oldest event ts: %llu\n",
5611 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5613 trace_seq_printf(s, "now ts: %llu\n",
5614 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5617 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5618 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5620 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5621 trace_seq_printf(s, "read events: %ld\n", cnt);
5623 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
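/*
 * Descriptive note (added): the per_cpu/cpuN/stats file assembled above
 * reports, one item per line, the entry count, overruns, commit overruns,
 * bytes consumed, the oldest event and current timestamps (in seconds
 * when the clock counts in nanoseconds, raw otherwise), dropped events
 * and read events for that CPU's ring buffer.
 */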
5630 static const struct file_operations tracing_stats_fops = {
5631 .open = tracing_open_generic_tr,
5632 .read = tracing_stats_read,
5633 .llseek = generic_file_llseek,
5634 .release = tracing_release_generic_tr,
5637 #ifdef CONFIG_DYNAMIC_FTRACE
5639 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5645 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5646 size_t cnt, loff_t *ppos)
5648 static char ftrace_dyn_info_buffer[1024];
5649 static DEFINE_MUTEX(dyn_info_mutex);
5650 unsigned long *p = filp->private_data;
5651 char *buf = ftrace_dyn_info_buffer;
5652 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5655 mutex_lock(&dyn_info_mutex);
5656 r = sprintf(buf, "%ld ", *p);
5658 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5661 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5663 mutex_unlock(&dyn_info_mutex);
5668 static const struct file_operations tracing_dyn_info_fops = {
5669 .open = tracing_open_generic,
5670 .read = tracing_read_dyn_info,
5671 .llseek = generic_file_llseek,
5673 #endif /* CONFIG_DYNAMIC_FTRACE */
5675 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5677 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5683 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5685 unsigned long *count = (long *)data;
5697 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5698 struct ftrace_probe_ops *ops, void *data)
5700 long count = (long)data;
5702 seq_printf(m, "%ps:", (void *)ip);
5704 seq_printf(m, "snapshot");
5707 seq_printf(m, ":unlimited\n");
5709 seq_printf(m, ":count=%ld\n", count);
5714 static struct ftrace_probe_ops snapshot_probe_ops = {
5715 .func = ftrace_snapshot,
5716 .print = ftrace_snapshot_print,
5719 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5720 .func = ftrace_count_snapshot,
5721 .print = ftrace_snapshot_print,
5725 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5726 char *glob, char *cmd, char *param, int enable)
5728 struct ftrace_probe_ops *ops;
5729 void *count = (void *)-1;
5733 /* hash funcs only work with set_ftrace_filter */
5737 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5739 if (glob[0] == '!') {
5740 unregister_ftrace_function_probe_func(glob+1, ops);
5747 number = strsep(&param, ":");
5749 if (!strlen(number))
5753 * We use the callback data field (which is a pointer) as our counter.
5756 ret = kstrtoul(number, 0, (unsigned long *)&count);
5761 ret = register_ftrace_function_probe(glob, ops, count);
5764 alloc_snapshot(&global_trace);
5766 return ret < 0 ? ret : 0;
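/*
 * Usage sketch (illustrative): this callback backs the "snapshot"
 * command of set_ftrace_filter, e.g.
 *
 *	# echo '<function>:snapshot' > set_ftrace_filter
 *	# echo '<function>:snapshot:3' > set_ftrace_filter
 *	# echo '!<function>:snapshot' > set_ftrace_filter
 *
 * where the optional count limits how many snapshots are taken and the
 * leading '!' removes the probe again, matching the glob handling above.
 */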
5769 static struct ftrace_func_command ftrace_snapshot_cmd = {
5771 .func = ftrace_trace_snapshot_callback,
5774 static __init int register_snapshot_cmd(void)
5776 return register_ftrace_command(&ftrace_snapshot_cmd);
5779 static inline __init int register_snapshot_cmd(void) { return 0; }
5780 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5782 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5787 if (!debugfs_initialized())
5790 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5791 tr->dir = debugfs_create_dir("tracing", NULL);
5794 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5799 struct dentry *tracing_init_dentry(void)
5801 return tracing_init_dentry_tr(&global_trace);
5804 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5806 struct dentry *d_tracer;
5809 return tr->percpu_dir;
5811 d_tracer = tracing_init_dentry_tr(tr);
5815 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5817 WARN_ONCE(!tr->percpu_dir,
5818 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5820 return tr->percpu_dir;
5823 static struct dentry *
5824 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5825 void *data, long cpu, const struct file_operations *fops)
5827 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5829 if (ret) /* See tracing_get_cpu() */
5830 ret->d_inode->i_cdev = (void *)(cpu + 1);
5835 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5837 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5838 struct dentry *d_cpu;
5839 char cpu_dir[30]; /* 30 characters should be more than enough */
5844 snprintf(cpu_dir, 30, "cpu%ld", cpu);
5845 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5847 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5851 /* per cpu trace_pipe */
5852 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5853 tr, cpu, &tracing_pipe_fops);
5856 trace_create_cpu_file("trace", 0644, d_cpu,
5857 tr, cpu, &tracing_fops);
5859 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5860 tr, cpu, &tracing_buffers_fops);
5862 trace_create_cpu_file("stats", 0444, d_cpu,
5863 tr, cpu, &tracing_stats_fops);
5865 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5866 tr, cpu, &tracing_entries_fops);
5868 #ifdef CONFIG_TRACER_SNAPSHOT
5869 trace_create_cpu_file("snapshot", 0644, d_cpu,
5870 tr, cpu, &snapshot_fops);
5872 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5873 tr, cpu, &snapshot_raw_fops);
5877 #ifdef CONFIG_FTRACE_SELFTEST
5878 /* Let selftest have access to static functions in this file */
5879 #include "trace_selftest.c"
5882 struct trace_option_dentry {
5883 struct tracer_opt *opt;
5884 struct tracer_flags *flags;
5885 struct trace_array *tr;
5886 struct dentry *entry;
5890 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5893 struct trace_option_dentry *topt = filp->private_data;
5896 if (topt->flags->val & topt->opt->bit)
5901 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5905 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5908 struct trace_option_dentry *topt = filp->private_data;
5912 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5916 if (val != 0 && val != 1)
5919 if (!!(topt->flags->val & topt->opt->bit) != val) {
5920 mutex_lock(&trace_types_lock);
5921 ret = __set_tracer_option(topt->tr, topt->flags,
5923 mutex_unlock(&trace_types_lock);
5934 static const struct file_operations trace_options_fops = {
5935 .open = tracing_open_generic,
5936 .read = trace_options_read,
5937 .write = trace_options_write,
5938 .llseek = generic_file_llseek,
5942 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5945 long index = (long)filp->private_data;
5948 if (trace_flags & (1 << index))
5953 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5957 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5960 struct trace_array *tr = &global_trace;
5961 long index = (long)filp->private_data;
5965 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5969 if (val != 0 && val != 1)
5972 mutex_lock(&trace_types_lock);
5973 ret = set_tracer_flag(tr, 1 << index, val);
5974 mutex_unlock(&trace_types_lock);
5984 static const struct file_operations trace_options_core_fops = {
5985 .open = tracing_open_generic,
5986 .read = trace_options_core_read,
5987 .write = trace_options_core_write,
5988 .llseek = generic_file_llseek,
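/*
 * Usage note (illustrative): each core trace option is exposed as a
 * boolean file under options/, accepting only "0" or "1" as enforced
 * above, e.g.
 *
 *	# echo 1 > options/sym-offset
 *	# echo 0 > options/sym-offset
 */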
5991 struct dentry *trace_create_file(const char *name,
5993 struct dentry *parent,
5995 const struct file_operations *fops)
5999 ret = debugfs_create_file(name, mode, parent, data, fops);
6001 pr_warning("Could not create debugfs '%s' entry\n", name);
6007 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6009 struct dentry *d_tracer;
6014 d_tracer = tracing_init_dentry_tr(tr);
6018 tr->options = debugfs_create_dir("options", d_tracer);
6020 pr_warning("Could not create debugfs directory 'options'\n");
6028 create_trace_option_file(struct trace_array *tr,
6029 struct trace_option_dentry *topt,
6030 struct tracer_flags *flags,
6031 struct tracer_opt *opt)
6033 struct dentry *t_options;
6035 t_options = trace_options_init_dentry(tr);
6039 topt->flags = flags;
6043 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6044 &trace_options_fops);
6048 static struct trace_option_dentry *
6049 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6051 struct trace_option_dentry *topts;
6052 struct tracer_flags *flags;
6053 struct tracer_opt *opts;
6059 flags = tracer->flags;
6061 if (!flags || !flags->opts)
6066 for (cnt = 0; opts[cnt].name; cnt++)
6069 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6073 for (cnt = 0; opts[cnt].name; cnt++)
6074 create_trace_option_file(tr, &topts[cnt], flags,
6081 destroy_trace_option_files(struct trace_option_dentry *topts)
6088 for (cnt = 0; topts[cnt].opt; cnt++) {
6089 if (topts[cnt].entry)
6090 debugfs_remove(topts[cnt].entry);
6096 static struct dentry *
6097 create_trace_option_core_file(struct trace_array *tr,
6098 const char *option, long index)
6100 struct dentry *t_options;
6102 t_options = trace_options_init_dentry(tr);
6106 return trace_create_file(option, 0644, t_options, (void *)index,
6107 &trace_options_core_fops);
6110 static __init void create_trace_options_dir(struct trace_array *tr)
6112 struct dentry *t_options;
6115 t_options = trace_options_init_dentry(tr);
6119 for (i = 0; trace_options[i]; i++)
6120 create_trace_option_core_file(tr, trace_options[i], i);
6124 rb_simple_read(struct file *filp, char __user *ubuf,
6125 size_t cnt, loff_t *ppos)
6127 struct trace_array *tr = filp->private_data;
6131 r = tracer_tracing_is_on(tr);
6132 r = sprintf(buf, "%d\n", r);
6134 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6138 rb_simple_write(struct file *filp, const char __user *ubuf,
6139 size_t cnt, loff_t *ppos)
6141 struct trace_array *tr = filp->private_data;
6142 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6146 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6151 mutex_lock(&trace_types_lock);
6153 tracer_tracing_on(tr);
6154 if (tr->current_trace->start)
6155 tr->current_trace->start(tr);
6157 tracer_tracing_off(tr);
6158 if (tr->current_trace->stop)
6159 tr->current_trace->stop(tr);
6161 mutex_unlock(&trace_types_lock);
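/*
 * Usage note (illustrative): the tracing_on file backed by these helpers
 * enables or disables recording into the ring buffer without changing
 * the current tracer, and also invokes the tracer's start()/stop()
 * callbacks as seen above:
 *
 *	# echo 0 > tracing_on		(stop recording)
 *	# echo 1 > tracing_on		(resume recording)
 */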
6169 static const struct file_operations rb_simple_fops = {
6170 .open = tracing_open_generic_tr,
6171 .read = rb_simple_read,
6172 .write = rb_simple_write,
6173 .release = tracing_release_generic_tr,
6174 .llseek = default_llseek,
6177 struct dentry *trace_instance_dir;
6180 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6183 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6185 enum ring_buffer_flags rb_flags;
6187 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6191 buf->buffer = ring_buffer_alloc(size, rb_flags);
6195 buf->data = alloc_percpu(struct trace_array_cpu);
6197 ring_buffer_free(buf->buffer);
6201 /* Allocate the first page for all buffers */
6202 set_buffer_entries(&tr->trace_buffer,
6203 ring_buffer_size(tr->trace_buffer.buffer, 0));
6208 static int allocate_trace_buffers(struct trace_array *tr, int size)
6212 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6216 #ifdef CONFIG_TRACER_MAX_TRACE
6217 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6218 allocate_snapshot ? size : 1);
6220 ring_buffer_free(tr->trace_buffer.buffer);
6221 free_percpu(tr->trace_buffer.data);
6224 tr->allocated_snapshot = allocate_snapshot;
6227 * Only the top level trace array gets its snapshot allocated
6228 * from the kernel command line.
6230 allocate_snapshot = false;
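/*
 * Note (added; the parameter name is an assumption based on the usual
 * boot-time setup for this flag): allocate_snapshot is expected to be set
 * from the kernel command line ("alloc_snapshot" in mainline). As the
 * comment above says, only the top level trace array honors it, so it is
 * cleared here and later instances start without a snapshot buffer.
 */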
6235 static int new_instance_create(const char *name)
6237 struct trace_array *tr;
6240 mutex_lock(&trace_types_lock);
6243 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6244 if (tr->name && strcmp(tr->name, name) == 0)
6249 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6253 tr->name = kstrdup(name, GFP_KERNEL);
6257 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6260 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6262 raw_spin_lock_init(&tr->start_lock);
6264 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6266 tr->current_trace = &nop_trace;
6268 INIT_LIST_HEAD(&tr->systems);
6269 INIT_LIST_HEAD(&tr->events);
6271 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6274 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6278 ret = event_trace_add_tracer(tr->dir, tr);
6280 debugfs_remove_recursive(tr->dir);
6284 init_tracer_debugfs(tr, tr->dir);
6286 list_add(&tr->list, &ftrace_trace_arrays);
6288 mutex_unlock(&trace_types_lock);
6293 if (tr->trace_buffer.buffer)
6294 ring_buffer_free(tr->trace_buffer.buffer);
6295 free_cpumask_var(tr->tracing_cpumask);
6300 mutex_unlock(&trace_types_lock);
6306 static int instance_delete(const char *name)
6308 struct trace_array *tr;
6312 mutex_lock(&trace_types_lock);
6315 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6316 if (tr->name && strcmp(tr->name, name) == 0) {
6328 list_del(&tr->list);
6330 tracing_set_nop(tr);
6331 event_trace_del_tracer(tr);
6332 ftrace_destroy_function_files(tr);
6333 debugfs_remove_recursive(tr->dir);
6334 free_percpu(tr->trace_buffer.data);
6335 ring_buffer_free(tr->trace_buffer.buffer);
6343 mutex_unlock(&trace_types_lock);
6348 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6350 struct dentry *parent;
6353 /* Paranoid: Make sure the parent is the "instances" directory */
6354 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6355 if (WARN_ON_ONCE(parent != trace_instance_dir))
6359 * The inode mutex is locked, but debugfs_create_dir() will also
6360 * take the mutex. As the instances directory can not be destroyed
6361 * or changed in any other way, it is safe to unlock it, and
6362 * let the dentry try. If two users try to make the same dir at
6363 * the same time, then new_instance_create() will determine the winner.
6366 mutex_unlock(&inode->i_mutex);
6368 ret = new_instance_create(dentry->d_iname);
6370 mutex_lock(&inode->i_mutex);
6375 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6377 struct dentry *parent;
6380 /* Paranoid: Make sure the parent is the "instances" directory */
6381 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6382 if (WARN_ON_ONCE(parent != trace_instance_dir))
6385 /* The caller did a dget() on dentry */
6386 mutex_unlock(&dentry->d_inode->i_mutex);
6389 * The inode mutex is locked, but debugfs_create_dir() will also
6390 * take the mutex. As the instances directory can not be destroyed
6391 * or changed in any other way, it is safe to unlock it, and
6392 * let the dentry try. If two users try to remove the same dir at
6393 * the same time, then instance_delete() will determine the winner.
6396 mutex_unlock(&inode->i_mutex);
6398 ret = instance_delete(dentry->d_iname);
6400 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6401 mutex_lock(&dentry->d_inode->i_mutex);
6406 static const struct inode_operations instance_dir_inode_operations = {
6407 .lookup = simple_lookup,
6408 .mkdir = instance_mkdir,
6409 .rmdir = instance_rmdir,
6412 static __init void create_trace_instances(struct dentry *d_tracer)
6414 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6415 if (WARN_ON(!trace_instance_dir))
6418 /* Hijack the dir inode operations, to allow mkdir */
6419 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
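/*
 * Usage note (illustrative): with the mkdir/rmdir hooks installed above,
 * user space can create and remove independent trace instances, each
 * with its own buffers and events:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 */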
6423 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6427 trace_create_file("available_tracers", 0444, d_tracer,
6428 tr, &show_traces_fops);
6430 trace_create_file("current_tracer", 0644, d_tracer,
6431 tr, &set_tracer_fops);
6433 trace_create_file("tracing_cpumask", 0644, d_tracer,
6434 tr, &tracing_cpumask_fops);
6436 trace_create_file("trace_options", 0644, d_tracer,
6437 tr, &tracing_iter_fops);
6439 trace_create_file("trace", 0644, d_tracer,
6442 trace_create_file("trace_pipe", 0444, d_tracer,
6443 tr, &tracing_pipe_fops);
6445 trace_create_file("buffer_size_kb", 0644, d_tracer,
6446 tr, &tracing_entries_fops);
6448 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6449 tr, &tracing_total_entries_fops);
6451 trace_create_file("free_buffer", 0200, d_tracer,
6452 tr, &tracing_free_buffer_fops);
6454 trace_create_file("trace_marker", 0220, d_tracer,
6455 tr, &tracing_mark_fops);
6457 trace_create_file("trace_clock", 0644, d_tracer, tr,
6460 trace_create_file("tracing_on", 0644, d_tracer,
6461 tr, &rb_simple_fops);
6463 #ifdef CONFIG_TRACER_MAX_TRACE
6464 trace_create_file("tracing_max_latency", 0644, d_tracer,
6465 &tr->max_latency, &tracing_max_lat_fops);
6468 if (ftrace_create_function_files(tr, d_tracer))
6469 WARN(1, "Could not allocate function filter files");
6471 #ifdef CONFIG_TRACER_SNAPSHOT
6472 trace_create_file("snapshot", 0644, d_tracer,
6473 tr, &snapshot_fops);
6476 for_each_tracing_cpu(cpu)
6477 tracing_init_debugfs_percpu(tr, cpu);
6481 static __init int tracer_init_debugfs(void)
6483 struct dentry *d_tracer;
6485 trace_access_lock_init();
6487 d_tracer = tracing_init_dentry();
6491 init_tracer_debugfs(&global_trace, d_tracer);
6493 trace_create_file("tracing_thresh", 0644, d_tracer,
6494 &tracing_thresh, &tracing_max_lat_fops);
6496 trace_create_file("README", 0444, d_tracer,
6497 NULL, &tracing_readme_fops);
6499 trace_create_file("saved_cmdlines", 0444, d_tracer,
6500 NULL, &tracing_saved_cmdlines_fops);
6502 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6503 NULL, &tracing_saved_cmdlines_size_fops);
6505 #ifdef CONFIG_DYNAMIC_FTRACE
6506 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6507 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6510 create_trace_instances(d_tracer);
6512 create_trace_options_dir(&global_trace);
6517 static int trace_panic_handler(struct notifier_block *this,
6518 unsigned long event, void *unused)
6520 if (ftrace_dump_on_oops)
6521 ftrace_dump(ftrace_dump_on_oops);
6525 static struct notifier_block trace_panic_notifier = {
6526 .notifier_call = trace_panic_handler,
6528 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6531 static int trace_die_handler(struct notifier_block *self,
6537 if (ftrace_dump_on_oops)
6538 ftrace_dump(ftrace_dump_on_oops);
6546 static struct notifier_block trace_die_notifier = {
6547 .notifier_call = trace_die_handler,
6552 * printk() is limited to a maximum of 1024 bytes; we really don't need it that big.
6553 * Nothing should be printing 1000 characters anyway.
6555 #define TRACE_MAX_PRINT 1000
6558 * Define here KERN_TRACE so that we have one place to modify
6559 * it if we decide to change what log level the ftrace dump should use.
6562 #define KERN_TRACE KERN_EMERG
6565 trace_printk_seq(struct trace_seq *s)
6567 /* Probably should print a warning here. */
6568 if (s->len >= TRACE_MAX_PRINT)
6569 s->len = TRACE_MAX_PRINT;
6571 /* should already be NUL terminated, but we are paranoid. */
6572 s->buffer[s->len] = 0;
6574 printk(KERN_TRACE "%s", s->buffer);
6579 void trace_init_global_iter(struct trace_iterator *iter)
6581 iter->tr = &global_trace;
6582 iter->trace = iter->tr->current_trace;
6583 iter->cpu_file = RING_BUFFER_ALL_CPUS;
6584 iter->trace_buffer = &global_trace.trace_buffer;
6586 if (iter->trace && iter->trace->open)
6587 iter->trace->open(iter);
6589 /* Annotate start of buffers if we had overruns */
6590 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6591 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6593 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6594 if (trace_clocks[iter->tr->clock_id].in_ns)
6595 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6598 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6600 /* use static because iter can be a bit big for the stack */
6601 static struct trace_iterator iter;
6602 static atomic_t dump_running;
6603 unsigned int old_userobj;
6604 unsigned long flags;
6607 /* Only allow one dump user at a time. */
6608 if (atomic_inc_return(&dump_running) != 1) {
6609 atomic_dec(&dump_running);
6614 * Always turn off tracing when we dump.
6615 * We don't need to show trace output of what happens
6616 * between multiple crashes.
6618 * If the user does a sysrq-z, then they can re-enable
6619 * tracing with echo 1 > tracing_on.
6623 local_irq_save(flags);
6625 /* Simulate the iterator */
6626 trace_init_global_iter(&iter);
6628 for_each_tracing_cpu(cpu) {
6629 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6632 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6634 /* don't look at user memory in panic mode */
6635 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6637 switch (oops_dump_mode) {
6639 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6642 iter.cpu_file = raw_smp_processor_id();
6647 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6648 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6651 printk(KERN_TRACE "Dumping ftrace buffer:\n");
6653 /* Did function tracer already get disabled? */
6654 if (ftrace_is_dead()) {
6655 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6656 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6660 * We need to stop all tracing on all CPUs to read
6661 * the next buffer. This is a bit expensive, but it is
6662 * not done often. We fill all that we can read,
6663 * and then release the locks again.
6666 while (!trace_empty(&iter)) {
6669 printk(KERN_TRACE "---------------------------------\n");
6673 /* reset all but tr, trace, and overruns */
6674 memset(&iter.seq, 0,
6675 sizeof(struct trace_iterator) -
6676 offsetof(struct trace_iterator, seq));
6677 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6680 if (trace_find_next_entry_inc(&iter) != NULL) {
6683 ret = print_trace_line(&iter);
6684 if (ret != TRACE_TYPE_NO_CONSUME)
6685 trace_consume(&iter);
6687 touch_nmi_watchdog();
6689 trace_printk_seq(&iter.seq);
6693 printk(KERN_TRACE " (ftrace buffer empty)\n");
6695 printk(KERN_TRACE "---------------------------------\n");
6698 trace_flags |= old_userobj;
6700 for_each_tracing_cpu(cpu) {
6701 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6703 atomic_dec(&dump_running);
6704 local_irq_restore(flags);
6706 EXPORT_SYMBOL_GPL(ftrace_dump);
6708 __init static int tracer_alloc_buffers(void)
6714 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6717 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6718 goto out_free_buffer_mask;
6720 /* Only allocate trace_printk buffers if a trace_printk exists */
6721 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6722 /* Must be called before global_trace.buffer is allocated */
6723 trace_printk_init_buffers();
6725 /* To save memory, keep the ring buffer size to its minimum */
6726 if (ring_buffer_expanded)
6727 ring_buf_size = trace_buf_size;
6731 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6732 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6734 raw_spin_lock_init(&global_trace.start_lock);
6736 /* Used for event triggers */
6737 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6739 goto out_free_cpumask;
6741 if (trace_create_savedcmd() < 0)
6742 goto out_free_temp_buffer;
6744 /* TODO: make the number of buffers hot pluggable with CPUs */
6745 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6746 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6748 goto out_free_savedcmd;
6751 if (global_trace.buffer_disabled)
6754 if (trace_boot_clock) {
6755 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6757 pr_warning("Trace clock %s not defined, going back to default\n",
6762 * register_tracer() might reference current_trace, so it
6763 * needs to be set before we register anything. This is
6764 * just a bootstrap of current_trace anyway.
6766 global_trace.current_trace = &nop_trace;
6768 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6770 ftrace_init_global_array_ops(&global_trace);
6772 register_tracer(&nop_trace);
6774 /* All seems OK, enable tracing */
6775 tracing_disabled = 0;
6777 atomic_notifier_chain_register(&panic_notifier_list,
6778 &trace_panic_notifier);
6780 register_die_notifier(&trace_die_notifier);
6782 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6784 INIT_LIST_HEAD(&global_trace.systems);
6785 INIT_LIST_HEAD(&global_trace.events);
6786 list_add(&global_trace.list, &ftrace_trace_arrays);
6788 while (trace_boot_options) {
6791 option = strsep(&trace_boot_options, ",");
6792 trace_set_options(&global_trace, option);
6795 register_snapshot_cmd();
6800 free_saved_cmdlines_buffer(savedcmd);
6801 out_free_temp_buffer:
6802 ring_buffer_free(temp_buffer);
6804 free_cpumask_var(global_trace.tracing_cpumask);
6805 out_free_buffer_mask:
6806 free_cpumask_var(tracing_buffer_mask);
6811 __init static int clear_boot_tracer(void)
6814 * The default bootup tracer name lives in a buffer placed in an init section.
6815 * This function is called from a late initcall. If we did not
6816 * find the boot tracer, then clear it out, to prevent
6817 * later registration from accessing the buffer after it
6818 * has been freed.
6820 if (!default_bootup_tracer)
6823 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6824 default_bootup_tracer);
6825 default_bootup_tracer = NULL;
6830 early_initcall(tracer_alloc_buffers);
6831 fs_initcall(tracer_init_debugfs);
6832 late_initcall(clear_boot_tracer);