tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, and is set back to zero only when the
 * initialization of the tracer succeeds; that is the only place
 * that clears it.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly     tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump the buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

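/* Convert nanoseconds to microseconds, rounding to the nearest microsecond */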
unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

LIST_HEAD(ftrace_trace_arrays);

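/*
 * trace_array_get - take a reference on a trace array
 *
 * Look up @this_tr on the list of trace arrays and, if it is still
 * there, bump its reference count. Returns 0 on success or -ENODEV
 * if the array is no longer on the list.
 */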
int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

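/* trace_array_put - drop a reference taken with trace_array_get() */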
void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}

int filter_current_check_discard(struct ring_buffer *buffer,
                                 struct ftrace_event_call *call, void *rec,
                                 struct ring_buffer_event *event)
{
        return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

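/* Return the current, normalized timestamp of @buf for @cpu */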
cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
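
/*
 * Typical usage of the primitives above (illustrative sketch, not a
 * definitive API contract): a reader pairs the calls around any access
 * to a per-cpu buffer, e.g.
 *
 *      trace_access_lock(cpu);
 *      ... read or consume events from the @cpu ring buffer ...
 *      trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead locks out all per-cpu readers.
 */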

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
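 *
 * Returns the passed-in @size on success, or 0 if the ring buffer
 * event could not be reserved.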
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
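 *
 * Returns 1 on success, or 0 if the ring buffer event could not be
 * reserved.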
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->str                      = str;

        __buffer_unlock_commit(buffer, event);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, the snapshot cannot be used while the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

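/*
 * Make sure the snapshot (max) buffer is allocated, sized to match
 * the main trace buffer.
 */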
static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        if (WARN_ON(ret < 0))
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

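/*
 * Parse the "trace_buf_size=" boot parameter. Note that memparse()
 * accepts the usual K/M/G suffixes, so e.g. "trace_buf_size=1M" on the
 * kernel command line requests a one-megabyte buffer.
 */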
static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries cannot be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
        { trace_clock_jiffies,  "uptime",       1 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
};
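
/*
 * The active clock can be switched at run time by writing one of the
 * names above to the trace_clock file in the tracing debugfs directory
 * (e.g. echo global > /sys/kernel/debug/tracing/trace_clock).
 */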

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr(), so
 * it needs to be defined outside of the CONFIG_TRACER_MAX_TRACE
 * section.
 */
static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly     tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;

        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since running this tracer will break them */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
        return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
        tracing_disabled = 1;
        ftrace_stop();
        tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
                }
                goto out;
        }

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

        ftrace_start();
 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        tr->stop_count = 0;
                }
                goto out;
        }

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        ftrace_stop();
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;

        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
#endif

        arch_spin_unlock(&ftrace_max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        /* If global, we need to also stop the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_stop();

        raw_spin_lock_irqsave(&tr->start_lock, flags);
        if (tr->stop_count++)
                goto out;

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        arch_spin_unlock(&trace_cmdline_lock);
}

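/*
 * trace_find_cmdline - copy the comm saved for @pid into @comm
 *
 * Falls back to "<idle>" for pid 0 and to "<...>" when no comm has
 * been saved for @pid.
 */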
void trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strcpy(comm, saved_cmdlines[map]);
        else
                strcpy(comm, "<...>");

        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
                return;

        if (!__this_cpu_read(trace_cmdline_save))
                return;

        __this_cpu_write(trace_cmdline_save, false);

        trace_save_cmdline(tsk);
}

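/*
 * tracing_generic_entry_update - fill in the fields common to all entries
 *
 * Records the pid, the preempt count and the irq/softirq/need-resched
 * state of the current context into @entry.
 */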
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

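/*
 * Reserve space for an event on @buffer and initialize the common
 * trace_entry fields. Returns NULL if the event could not be reserved
 * (e.g. recording is disabled).
 */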
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL) {
                struct trace_entry *ent = ring_buffer_event_data(event);

                tracing_generic_entry_update(ent, flags, pc);
                ent->type = type;
        }

        return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_cmdline_save, true);
        ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             unsigned long flags, int pc)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          struct ftrace_event_file *ftrace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
{
        *current_rb = ftrace_file->tr->trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc)
{
        *current_rb = global_trace.trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs)
{
        __buffer_unlock_commit(buffer, event);

        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event)
{
        ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ftrace_event_call *call = &event_function;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                                          flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        if (!filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, ip, parent_ip, flags, pc);
}

1629 #ifdef CONFIG_STACKTRACE
1630
1631 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1632 struct ftrace_stack {
1633         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1634 };
1635
1636 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1637 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1638
1639 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1640                                  unsigned long flags,
1641                                  int skip, int pc, struct pt_regs *regs)
1642 {
1643         struct ftrace_event_call *call = &event_kernel_stack;
1644         struct ring_buffer_event *event;
1645         struct stack_entry *entry;
1646         struct stack_trace trace;
1647         int use_stack;
1648         int size = FTRACE_STACK_ENTRIES;
1649
1650         trace.nr_entries        = 0;
1651         trace.skip              = skip;
1652
1653         /*
1654          * Since events can happen in NMIs there's no safe way to
1655          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1656          * or NMI comes in, it will just have to use the default
1657          * FTRACE_STACK_SIZE.
1658          */
1659         preempt_disable_notrace();
1660
1661         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1662         /*
1663          * We don't need any atomic variables, just a barrier.
1664          * If an interrupt comes in, we don't care, because it would
1665          * have exited and put the counter back to what we want.
1666          * We just need a barrier to keep gcc from moving things
1667          * around.
1668          */
1669         barrier();
1670         if (use_stack == 1) {
1671                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1672                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1673
1674                 if (regs)
1675                         save_stack_trace_regs(regs, &trace);
1676                 else
1677                         save_stack_trace(&trace);
1678
1679                 if (trace.nr_entries > size)
1680                         size = trace.nr_entries;
1681         } else
1682                 /* From now on, use_stack is a boolean */
1683                 use_stack = 0;
1684
1685         size *= sizeof(unsigned long);
1686
1687         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1688                                           sizeof(*entry) + size, flags, pc);
1689         if (!event)
1690                 goto out;
1691         entry = ring_buffer_event_data(event);
1692
1693         memset(&entry->caller, 0, size);
1694
1695         if (use_stack)
1696                 memcpy(&entry->caller, trace.entries,
1697                        trace.nr_entries * sizeof(unsigned long));
1698         else {
1699                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1700                 trace.entries           = entry->caller;
1701                 if (regs)
1702                         save_stack_trace_regs(regs, &trace);
1703                 else
1704                         save_stack_trace(&trace);
1705         }
1706
1707         entry->size = trace.nr_entries;
1708
1709         if (!filter_check_discard(call, entry, buffer, event))
1710                 __buffer_unlock_commit(buffer, event);
1711
1712  out:
1713         /* Again, don't let gcc optimize things here */
1714         barrier();
1715         __this_cpu_dec(ftrace_stack_reserve);
1716         preempt_enable_notrace();
1717
1718 }
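/*
 * Editor's sketch (not part of this file): the reserve/dec pattern above
 * generalizes to any per-cpu scratch area that must tolerate nesting from
 * irq/NMI context. All names below (example_*) are hypothetical.
 */
#if 0
struct example_scratch {
	char buf[256];
};
static DEFINE_PER_CPU(struct example_scratch, example_scratch);
static DEFINE_PER_CPU(int, example_nest);

/* Returns the scratch buffer, or NULL if we nested inside another user */
static char *example_reserve_scratch(void)
{
	preempt_disable_notrace();
	/* First user on this CPU owns the scratch area */
	if (__this_cpu_inc_return(example_nest) == 1)
		return this_cpu_ptr(&example_scratch)->buf;
	return NULL;	/* nested (irq/NMI): caller must use a fallback */
}

/* Must be called even when reserve returned NULL, to undo the inc */
static void example_release_scratch(void)
{
	barrier();	/* keep the compiler from reordering the dec */
	__this_cpu_dec(example_nest);
	preempt_enable_notrace();
}
#endif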
1719
1720 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1721                              int skip, int pc, struct pt_regs *regs)
1722 {
1723         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1724                 return;
1725
1726         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1727 }
1728
1729 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1730                         int skip, int pc)
1731 {
1732         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1733                 return;
1734
1735         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1736 }
1737
1738 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1739                    int pc)
1740 {
1741         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1742 }
1743
1744 /**
1745  * trace_dump_stack - record a stack back trace in the trace buffer
1746  * @skip: Number of functions to skip (helper handlers)
1747  */
1748 void trace_dump_stack(int skip)
1749 {
1750         unsigned long flags;
1751
1752         if (tracing_disabled || tracing_selftest_running)
1753                 return;
1754
1755         local_save_flags(flags);
1756
1757         /*
1758          * Skip 3 more, seems to get us at the caller of
1759          * this function.
1760          */
1761         skip += 3;
1762         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1763                              flags, skip, preempt_count(), NULL);
1764 }
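/*
 * Editor's usage sketch: any kernel code can drop a backtrace into the
 * trace buffer with the helper above; example_fn below is hypothetical.
 */
#if 0
static void example_fn(void)
{
	/* records example_fn's caller; the +3 above hides the internals */
	trace_dump_stack(0);
}
#endif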
1765
1766 static DEFINE_PER_CPU(int, user_stack_count);
1767
1768 void
1769 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1770 {
1771         struct ftrace_event_call *call = &event_user_stack;
1772         struct ring_buffer_event *event;
1773         struct userstack_entry *entry;
1774         struct stack_trace trace;
1775
1776         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1777                 return;
1778
1779         /*
1780          * NMIs cannot handle page faults, even with fixups.
1781          * Saving the user stack can (and often does) fault.
1782          */
1783         if (unlikely(in_nmi()))
1784                 return;
1785
1786         /*
1787          * prevent recursion, since the user stack tracing may
1788          * trigger other kernel events.
1789          */
1790         preempt_disable();
1791         if (__this_cpu_read(user_stack_count))
1792                 goto out;
1793
1794         __this_cpu_inc(user_stack_count);
1795
1796         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1797                                           sizeof(*entry), flags, pc);
1798         if (!event)
1799                 goto out_drop_count;
1800         entry   = ring_buffer_event_data(event);
1801
1802         entry->tgid             = current->tgid;
1803         memset(&entry->caller, 0, sizeof(entry->caller));
1804
1805         trace.nr_entries        = 0;
1806         trace.max_entries       = FTRACE_STACK_ENTRIES;
1807         trace.skip              = 0;
1808         trace.entries           = entry->caller;
1809
1810         save_stack_trace_user(&trace);
1811         if (!filter_check_discard(call, entry, buffer, event))
1812                 __buffer_unlock_commit(buffer, event);
1813
1814  out_drop_count:
1815         __this_cpu_dec(user_stack_count);
1816  out:
1817         preempt_enable();
1818 }
1819
1820 #ifdef UNUSED
1821 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1822 {
1823         ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1824 }
1825 #endif /* UNUSED */
1826
1827 #endif /* CONFIG_STACKTRACE */
1828
1829 /* created for use with alloc_percpu */
1830 struct trace_buffer_struct {
1831         char buffer[TRACE_BUF_SIZE];
1832 };
1833
1834 static struct trace_buffer_struct *trace_percpu_buffer;
1835 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1836 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1837 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1838
1839 /*
1840  * The buffer used depends on the context. There is a per-cpu
1841  * buffer for normal context, softirq context, hard irq context and
1842  * for NMI context. This allows for lockless recording.
1843  *
1844  * Note, if the buffers failed to be allocated, then this returns NULL
1845  */
1846 static char *get_trace_buf(void)
1847 {
1848         struct trace_buffer_struct *percpu_buffer;
1849
1850         /*
1851          * If we have allocated per cpu buffers, then we do not
1852          * need to do any locking.
1853          */
1854         if (in_nmi())
1855                 percpu_buffer = trace_percpu_nmi_buffer;
1856         else if (in_irq())
1857                 percpu_buffer = trace_percpu_irq_buffer;
1858         else if (in_softirq())
1859                 percpu_buffer = trace_percpu_sirq_buffer;
1860         else
1861                 percpu_buffer = trace_percpu_buffer;
1862
1863         if (!percpu_buffer)
1864                 return NULL;
1865
1866         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1867 }
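/*
 * Editor's sketch: the same context split works for any lockless per-cpu
 * data, not just printk buffers. A hypothetical event counter:
 */
#if 0
static DEFINE_PER_CPU(unsigned long, ex_count_task);
static DEFINE_PER_CPU(unsigned long, ex_count_sirq);
static DEFINE_PER_CPU(unsigned long, ex_count_irq);
static DEFINE_PER_CPU(unsigned long, ex_count_nmi);

static void example_count_event(void)
{
	/*
	 * Each context gets its own slot, so an NMI interrupting a hard
	 * irq never steps on the irq context's data: no locks needed.
	 */
	if (in_nmi())
		__this_cpu_inc(ex_count_nmi);
	else if (in_irq())
		__this_cpu_inc(ex_count_irq);
	else if (in_softirq())
		__this_cpu_inc(ex_count_sirq);
	else
		__this_cpu_inc(ex_count_task);
}
#endif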
1868
1869 static int alloc_percpu_trace_buffer(void)
1870 {
1871         struct trace_buffer_struct *buffers;
1872         struct trace_buffer_struct *sirq_buffers;
1873         struct trace_buffer_struct *irq_buffers;
1874         struct trace_buffer_struct *nmi_buffers;
1875
1876         buffers = alloc_percpu(struct trace_buffer_struct);
1877         if (!buffers)
1878                 goto err_warn;
1879
1880         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1881         if (!sirq_buffers)
1882                 goto err_sirq;
1883
1884         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1885         if (!irq_buffers)
1886                 goto err_irq;
1887
1888         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1889         if (!nmi_buffers)
1890                 goto err_nmi;
1891
1892         trace_percpu_buffer = buffers;
1893         trace_percpu_sirq_buffer = sirq_buffers;
1894         trace_percpu_irq_buffer = irq_buffers;
1895         trace_percpu_nmi_buffer = nmi_buffers;
1896
1897         return 0;
1898
1899  err_nmi:
1900         free_percpu(irq_buffers);
1901  err_irq:
1902         free_percpu(sirq_buffers);
1903  err_sirq:
1904         free_percpu(buffers);
1905  err_warn:
1906         WARN(1, "Could not allocate percpu trace_printk buffer");
1907         return -ENOMEM;
1908 }
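/*
 * Editor's note: the error path above is the standard kernel goto-unwind
 * idiom: each label frees exactly what was allocated before the failing
 * step, in reverse order. Minimal shape (hypothetical example):
 */
#if 0
static int example_alloc_pair(void **pa, void **pb)
{
	void *a, *b;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		goto err;
	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		goto err_b;
	*pa = a;
	*pb = b;
	return 0;

 err_b:
	kfree(a);
 err:
	return -ENOMEM;
}
#endif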
1909
1910 static int buffers_allocated;
1911
1912 void trace_printk_init_buffers(void)
1913 {
1914         if (buffers_allocated)
1915                 return;
1916
1917         if (alloc_percpu_trace_buffer())
1918                 return;
1919
1920         pr_info("ftrace: Allocated trace_printk buffers\n");
1921
1922         /* Expand the buffers to their configured size */
1923         tracing_update_buffers();
1924
1925         buffers_allocated = 1;
1926
1927         /*
1928          * trace_printk_init_buffers() can be called by modules.
1929          * If that happens, then we need to start cmdline recording
1930          * directly here. If the global_trace.buffer is already
1931          * allocated here, then this was called by module code.
1932          */
1933         if (global_trace.trace_buffer.buffer)
1934                 tracing_start_cmdline_record();
1935 }
1936
1937 void trace_printk_start_comm(void)
1938 {
1939         /* Start tracing comms if trace printk is set */
1940         if (!buffers_allocated)
1941                 return;
1942         tracing_start_cmdline_record();
1943 }
1944
1945 static void trace_printk_start_stop_comm(int enabled)
1946 {
1947         if (!buffers_allocated)
1948                 return;
1949
1950         if (enabled)
1951                 tracing_start_cmdline_record();
1952         else
1953                 tracing_stop_cmdline_record();
1954 }
1955
1956 /**
1957  * trace_vbprintk - write binary msg to tracing buffer
1958  * @ip: caller address; @fmt and @args: the binary printf format/arguments
1959  */
1960 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1961 {
1962         struct ftrace_event_call *call = &event_bprint;
1963         struct ring_buffer_event *event;
1964         struct ring_buffer *buffer;
1965         struct trace_array *tr = &global_trace;
1966         struct bprint_entry *entry;
1967         unsigned long flags;
1968         char *tbuffer;
1969         int len = 0, size, pc;
1970
1971         if (unlikely(tracing_selftest_running || tracing_disabled))
1972                 return 0;
1973
1974         /* Don't pollute graph traces with trace_vprintk internals */
1975         pause_graph_tracing();
1976
1977         pc = preempt_count();
1978         preempt_disable_notrace();
1979
1980         tbuffer = get_trace_buf();
1981         if (!tbuffer) {
1982                 len = 0;
1983                 goto out;
1984         }
1985
1986         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1987
1988         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1989                 goto out;
1990
1991         local_save_flags(flags);
1992         size = sizeof(*entry) + sizeof(u32) * len;
1993         buffer = tr->trace_buffer.buffer;
1994         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1995                                           flags, pc);
1996         if (!event)
1997                 goto out;
1998         entry = ring_buffer_event_data(event);
1999         entry->ip                       = ip;
2000         entry->fmt                      = fmt;
2001
2002         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2003         if (!filter_check_discard(call, entry, buffer, event)) {
2004                 __buffer_unlock_commit(buffer, event);
2005                 ftrace_trace_stack(buffer, flags, 6, pc);
2006         }
2007
2008 out:
2009         preempt_enable_notrace();
2010         unpause_graph_tracing();
2011
2012         return len;
2013 }
2014 EXPORT_SYMBOL_GPL(trace_vbprintk);
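/*
 * Editor's usage sketch: trace_vbprintk() is the va_list backend; a
 * varargs front end (as trace_printk() provides) looks like this.
 * example_bprintk is hypothetical.
 */
#if 0
static int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	/* fmt must outlive the event: the entry stores only the pointer */
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
#endif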
2015
2016 static int
2017 __trace_array_vprintk(struct ring_buffer *buffer,
2018                       unsigned long ip, const char *fmt, va_list args)
2019 {
2020         struct ftrace_event_call *call = &event_print;
2021         struct ring_buffer_event *event;
2022         int len = 0, size, pc;
2023         struct print_entry *entry;
2024         unsigned long flags;
2025         char *tbuffer;
2026
2027         if (tracing_disabled || tracing_selftest_running)
2028                 return 0;
2029
2030         /* Don't pollute graph traces with trace_vprintk internals */
2031         pause_graph_tracing();
2032
2033         pc = preempt_count();
2034         preempt_disable_notrace();
2035
2036
2037         tbuffer = get_trace_buf();
2038         if (!tbuffer) {
2039                 len = 0;
2040                 goto out;
2041         }
2042
2043         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2044         if (len > TRACE_BUF_SIZE)
2045                 goto out;
2046
2047         local_save_flags(flags);
2048         size = sizeof(*entry) + len + 1;
2049         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2050                                           flags, pc);
2051         if (!event)
2052                 goto out;
2053         entry = ring_buffer_event_data(event);
2054         entry->ip = ip;
2055
2056         memcpy(&entry->buf, tbuffer, len);
2057         entry->buf[len] = '\0';
2058         if (!filter_check_discard(call, entry, buffer, event)) {
2059                 __buffer_unlock_commit(buffer, event);
2060                 ftrace_trace_stack(buffer, flags, 6, pc);
2061         }
2062  out:
2063         preempt_enable_notrace();
2064         unpause_graph_tracing();
2065
2066         return len;
2067 }
2068
2069 int trace_array_vprintk(struct trace_array *tr,
2070                         unsigned long ip, const char *fmt, va_list args)
2071 {
2072         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2073 }
2074
2075 int trace_array_printk(struct trace_array *tr,
2076                        unsigned long ip, const char *fmt, ...)
2077 {
2078         int ret;
2079         va_list ap;
2080
2081         if (!(trace_flags & TRACE_ITER_PRINTK))
2082                 return 0;
2083
2084         va_start(ap, fmt);
2085         ret = trace_array_vprintk(tr, ip, fmt, ap);
2086         va_end(ap);
2087         return ret;
2088 }
2089
2090 int trace_array_printk_buf(struct ring_buffer *buffer,
2091                            unsigned long ip, const char *fmt, ...)
2092 {
2093         int ret;
2094         va_list ap;
2095
2096         if (!(trace_flags & TRACE_ITER_PRINTK))
2097                 return 0;
2098
2099         va_start(ap, fmt);
2100         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2101         va_end(ap);
2102         return ret;
2103 }
2104
2105 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2106 {
2107         return trace_array_vprintk(&global_trace, ip, fmt, args);
2108 }
2109 EXPORT_SYMBOL_GPL(trace_vprintk);
2110
2111 static void trace_iterator_increment(struct trace_iterator *iter)
2112 {
2113         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2114
2115         iter->idx++;
2116         if (buf_iter)
2117                 ring_buffer_read(buf_iter, NULL);
2118 }
2119
2120 static struct trace_entry *
2121 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2122                 unsigned long *lost_events)
2123 {
2124         struct ring_buffer_event *event;
2125         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2126
2127         if (buf_iter)
2128                 event = ring_buffer_iter_peek(buf_iter, ts);
2129         else
2130                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2131                                          lost_events);
2132
2133         if (event) {
2134                 iter->ent_size = ring_buffer_event_length(event);
2135                 return ring_buffer_event_data(event);
2136         }
2137         iter->ent_size = 0;
2138         return NULL;
2139 }
2140
2141 static struct trace_entry *
2142 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2143                   unsigned long *missing_events, u64 *ent_ts)
2144 {
2145         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2146         struct trace_entry *ent, *next = NULL;
2147         unsigned long lost_events = 0, next_lost = 0;
2148         int cpu_file = iter->cpu_file;
2149         u64 next_ts = 0, ts;
2150         int next_cpu = -1;
2151         int next_size = 0;
2152         int cpu;
2153
2154         /*
2155          * If we are in a per_cpu trace file, don't bother iterating over
2156          * all cpus; peek at that cpu directly.
2157          */
2158         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2159                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2160                         return NULL;
2161                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2162                 if (ent_cpu)
2163                         *ent_cpu = cpu_file;
2164
2165                 return ent;
2166         }
2167
2168         for_each_tracing_cpu(cpu) {
2169
2170                 if (ring_buffer_empty_cpu(buffer, cpu))
2171                         continue;
2172
2173                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2174
2175                 /*
2176                  * Pick the entry with the smallest timestamp:
2177                  */
2178                 if (ent && (!next || ts < next_ts)) {
2179                         next = ent;
2180                         next_cpu = cpu;
2181                         next_ts = ts;
2182                         next_lost = lost_events;
2183                         next_size = iter->ent_size;
2184                 }
2185         }
2186
2187         iter->ent_size = next_size;
2188
2189         if (ent_cpu)
2190                 *ent_cpu = next_cpu;
2191
2192         if (ent_ts)
2193                 *ent_ts = next_ts;
2194
2195         if (missing_events)
2196                 *missing_events = next_lost;
2197
2198         return next;
2199 }
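/*
 * Editor's note: __find_next_entry() is a k-way merge. Each per-cpu
 * buffer is already ordered by timestamp, so the globally next event
 * is the smallest head timestamp among the non-empty cpus. Example:
 * with heads cpu0: ts=100, cpu1: ts=90 and cpu2 empty, the next entry
 * comes from cpu1, and only cpu1's iterator is advanced afterwards.
 */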
2200
2201 /* Find the next real entry, without updating the iterator itself */
2202 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2203                                           int *ent_cpu, u64 *ent_ts)
2204 {
2205         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2206 }
2207
2208 /* Find the next real entry, and increment the iterator to the next entry */
2209 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2210 {
2211         iter->ent = __find_next_entry(iter, &iter->cpu,
2212                                       &iter->lost_events, &iter->ts);
2213
2214         if (iter->ent)
2215                 trace_iterator_increment(iter);
2216
2217         return iter->ent ? iter : NULL;
2218 }
2219
2220 static void trace_consume(struct trace_iterator *iter)
2221 {
2222         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2223                             &iter->lost_events);
2224 }
2225
2226 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2227 {
2228         struct trace_iterator *iter = m->private;
2229         int i = (int)*pos;
2230         void *ent;
2231
2232         WARN_ON_ONCE(iter->leftover);
2233
2234         (*pos)++;
2235
2236         /* can't go backwards */
2237         if (iter->idx > i)
2238                 return NULL;
2239
2240         if (iter->idx < 0)
2241                 ent = trace_find_next_entry_inc(iter);
2242         else
2243                 ent = iter;
2244
2245         while (ent && iter->idx < i)
2246                 ent = trace_find_next_entry_inc(iter);
2247
2248         iter->pos = *pos;
2249
2250         return ent;
2251 }
2252
2253 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2254 {
2255         struct ring_buffer_event *event;
2256         struct ring_buffer_iter *buf_iter;
2257         unsigned long entries = 0;
2258         u64 ts;
2259
2260         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2261
2262         buf_iter = trace_buffer_iter(iter, cpu);
2263         if (!buf_iter)
2264                 return;
2265
2266         ring_buffer_iter_reset(buf_iter);
2267
2268         /*
2269          * With the max latency tracers, a reset may never have taken
2270          * place on a cpu. This shows up as timestamps that precede
2271          * the start of the buffer.
2272          */
2273         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2274                 if (ts >= iter->trace_buffer->time_start)
2275                         break;
2276                 entries++;
2277                 ring_buffer_read(buf_iter, NULL);
2278         }
2279
2280         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2281 }
2282
2283 /*
2284  * The current tracer is copied to avoid taking a global lock
2285  * all around.
2286  */
2287 static void *s_start(struct seq_file *m, loff_t *pos)
2288 {
2289         struct trace_iterator *iter = m->private;
2290         struct trace_array *tr = iter->tr;
2291         int cpu_file = iter->cpu_file;
2292         void *p = NULL;
2293         loff_t l = 0;
2294         int cpu;
2295
2296         /*
2297          * copy the tracer to avoid using a global lock all around.
2298          * iter->trace is a copy of current_trace; the name pointer
2299          * can be compared instead of doing a strcmp(), as iter->trace->name
2300          * will point to the same string as current_trace->name.
2301          */
2302         mutex_lock(&trace_types_lock);
2303         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2304                 *iter->trace = *tr->current_trace;
2305         mutex_unlock(&trace_types_lock);
2306
2307 #ifdef CONFIG_TRACER_MAX_TRACE
2308         if (iter->snapshot && iter->trace->use_max_tr)
2309                 return ERR_PTR(-EBUSY);
2310 #endif
2311
2312         if (!iter->snapshot)
2313                 atomic_inc(&trace_record_cmdline_disabled);
2314
2315         if (*pos != iter->pos) {
2316                 iter->ent = NULL;
2317                 iter->cpu = 0;
2318                 iter->idx = -1;
2319
2320                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2321                         for_each_tracing_cpu(cpu)
2322                                 tracing_iter_reset(iter, cpu);
2323                 } else
2324                         tracing_iter_reset(iter, cpu_file);
2325
2326                 iter->leftover = 0;
2327                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2328                         ;
2329
2330         } else {
2331                 /*
2332                  * If we overflowed the seq_file before, then we want
2333                  * to just reuse the trace_seq buffer again.
2334                  */
2335                 if (iter->leftover)
2336                         p = iter;
2337                 else {
2338                         l = *pos - 1;
2339                         p = s_next(m, p, &l);
2340                 }
2341         }
2342
2343         trace_event_read_lock();
2344         trace_access_lock(cpu_file);
2345         return p;
2346 }
2347
2348 static void s_stop(struct seq_file *m, void *p)
2349 {
2350         struct trace_iterator *iter = m->private;
2351
2352 #ifdef CONFIG_TRACER_MAX_TRACE
2353         if (iter->snapshot && iter->trace->use_max_tr)
2354                 return;
2355 #endif
2356
2357         if (!iter->snapshot)
2358                 atomic_dec(&trace_record_cmdline_disabled);
2359
2360         trace_access_unlock(iter->cpu_file);
2361         trace_event_read_unlock();
2362 }
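/*
 * Editor's note: seq_file drives the callbacks above as
 *
 *	s_start(pos) -> s_show -> s_next -> s_show -> ... -> s_stop
 *
 * and may restart at the same *pos when the user's read buffer fills,
 * which is why s_start() must be able to re-seek to an arbitrary
 * position and why s_show() stashes overflow in iter->leftover.
 */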
2363
2364 static void
2365 get_total_entries(struct trace_buffer *buf,
2366                   unsigned long *total, unsigned long *entries)
2367 {
2368         unsigned long count;
2369         int cpu;
2370
2371         *total = 0;
2372         *entries = 0;
2373
2374         for_each_tracing_cpu(cpu) {
2375                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2376                 /*
2377                  * If this buffer has skipped entries, then it holds all
2378                  * entries of the trace, and we need to ignore the ones
2379                  * recorded before the start time stamp.
2380                  */
2381                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2382                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2383                         /* total is the same as the entries */
2384                         *total += count;
2385                 } else
2386                         *total += count +
2387                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2388                 *entries += count;
2389         }
2390 }
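/*
 * Editor's worked example: a cpu holding 1000 entries with 200 overruns
 * contributes entries += 1000 and total += 1200. If it instead has 50
 * skipped_entries (the latency-tracer case above), it contributes
 * entries += 950 and total += 950: the skipped events still sit in the
 * buffer, and overruns are deliberately not added in that case.
 */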
2391
2392 static void print_lat_help_header(struct seq_file *m)
2393 {
2394         seq_puts(m, "#                  _------=> CPU#            \n");
2395         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2396         seq_puts(m, "#                | / _----=> need-resched    \n");
2397         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2398         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2399         seq_puts(m, "#                |||| /     delay             \n");
2400         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2401         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2402 }
2403
2404 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2405 {
2406         unsigned long total;
2407         unsigned long entries;
2408
2409         get_total_entries(buf, &total, &entries);
2410         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2411                    entries, total, num_online_cpus());
2412         seq_puts(m, "#\n");
2413 }
2414
2415 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2416 {
2417         print_event_info(buf, m);
2418         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2419         seq_puts(m, "#              | |       |          |         |\n");
2420 }
2421
2422 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2423 {
2424         print_event_info(buf, m);
2425         seq_puts(m, "#                              _-----=> irqs-off\n");
2426         seq_puts(m, "#                             / _----=> need-resched\n");
2427         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2428         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2429         seq_puts(m, "#                            ||| /     delay\n");
2430         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2431         seq_puts(m, "#              | |       |   ||||       |         |\n");
2432 }
2433
2434 void
2435 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2436 {
2437         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2438         struct trace_buffer *buf = iter->trace_buffer;
2439         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2440         struct tracer *type = iter->trace;
2441         unsigned long entries;
2442         unsigned long total;
2443         const char *name;
2444 
2445         name = type->name;
2446
2447         get_total_entries(buf, &total, &entries);
2448
2449         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2450                    name, UTS_RELEASE);
2451         seq_puts(m, "# -----------------------------------"
2452                  "---------------------------------\n");
2453         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2454                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2455                    nsecs_to_usecs(data->saved_latency),
2456                    entries,
2457                    total,
2458                    buf->cpu,
2459 #if defined(CONFIG_PREEMPT_NONE)
2460                    "server",
2461 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2462                    "desktop",
2463 #elif defined(CONFIG_PREEMPT)
2464                    "preempt",
2465 #else
2466                    "unknown",
2467 #endif
2468                    /* These are reserved for later use */
2469                    0, 0, 0, 0);
2470 #ifdef CONFIG_SMP
2471         seq_printf(m, " #P:%d)\n", num_online_cpus());
2472 #else
2473         seq_puts(m, ")\n");
2474 #endif
2475         seq_puts(m, "#    -----------------\n");
2476         seq_printf(m, "#    | task: %.16s-%d "
2477                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2478                    data->comm, data->pid,
2479                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2480                    data->policy, data->rt_priority);
2481         seq_puts(m, "#    -----------------\n");
2482
2483         if (data->critical_start) {
2484                 seq_puts(m, "#  => started at: ");
2485                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2486                 trace_print_seq(m, &iter->seq);
2487                 seq_puts(m, "\n#  => ended at:   ");
2488                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2489                 trace_print_seq(m, &iter->seq);
2490                 seq_puts(m, "\n#\n");
2491         }
2492
2493         seq_puts(m, "#\n");
2494 }
2495
2496 static void test_cpu_buff_start(struct trace_iterator *iter)
2497 {
2498         struct trace_seq *s = &iter->seq;
2499
2500         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2501                 return;
2502
2503         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2504                 return;
2505
2506         if (cpumask_test_cpu(iter->cpu, iter->started))
2507                 return;
2508
2509         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2510                 return;
2511
2512         cpumask_set_cpu(iter->cpu, iter->started);
2513
2514         /* Don't print started cpu buffer for the first entry of the trace */
2515         if (iter->idx > 1)
2516                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2517                                 iter->cpu);
2518 }
2519
2520 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2521 {
2522         struct trace_seq *s = &iter->seq;
2523         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2524         struct trace_entry *entry;
2525         struct trace_event *event;
2526
2527         entry = iter->ent;
2528
2529         test_cpu_buff_start(iter);
2530
2531         event = ftrace_find_event(entry->type);
2532
2533         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2534                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2535                         if (!trace_print_lat_context(iter))
2536                                 goto partial;
2537                 } else {
2538                         if (!trace_print_context(iter))
2539                                 goto partial;
2540                 }
2541         }
2542
2543         if (event)
2544                 return event->funcs->trace(iter, sym_flags, event);
2545
2546         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2547                 goto partial;
2548
2549         return TRACE_TYPE_HANDLED;
2550 partial:
2551         return TRACE_TYPE_PARTIAL_LINE;
2552 }
2553
2554 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2555 {
2556         struct trace_seq *s = &iter->seq;
2557         struct trace_entry *entry;
2558         struct trace_event *event;
2559
2560         entry = iter->ent;
2561
2562         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2563                 if (!trace_seq_printf(s, "%d %d %llu ",
2564                                       entry->pid, iter->cpu, iter->ts))
2565                         goto partial;
2566         }
2567
2568         event = ftrace_find_event(entry->type);
2569         if (event)
2570                 return event->funcs->raw(iter, 0, event);
2571
2572         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2573                 goto partial;
2574
2575         return TRACE_TYPE_HANDLED;
2576 partial:
2577         return TRACE_TYPE_PARTIAL_LINE;
2578 }
2579
2580 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2581 {
2582         struct trace_seq *s = &iter->seq;
2583         unsigned char newline = '\n';
2584         struct trace_entry *entry;
2585         struct trace_event *event;
2586
2587         entry = iter->ent;
2588
2589         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2590                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2591                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2592                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2593         }
2594
2595         event = ftrace_find_event(entry->type);
2596         if (event) {
2597                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2598                 if (ret != TRACE_TYPE_HANDLED)
2599                         return ret;
2600         }
2601
2602         SEQ_PUT_FIELD_RET(s, newline);
2603
2604         return TRACE_TYPE_HANDLED;
2605 }
2606
2607 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2608 {
2609         struct trace_seq *s = &iter->seq;
2610         struct trace_entry *entry;
2611         struct trace_event *event;
2612
2613         entry = iter->ent;
2614
2615         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2616                 SEQ_PUT_FIELD_RET(s, entry->pid);
2617                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2618                 SEQ_PUT_FIELD_RET(s, iter->ts);
2619         }
2620
2621         event = ftrace_find_event(entry->type);
2622         return event ? event->funcs->binary(iter, 0, event) :
2623                 TRACE_TYPE_HANDLED;
2624 }
2625
2626 int trace_empty(struct trace_iterator *iter)
2627 {
2628         struct ring_buffer_iter *buf_iter;
2629         int cpu;
2630
2631         /* If we are looking at one CPU buffer, only check that one */
2632         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2633                 cpu = iter->cpu_file;
2634                 buf_iter = trace_buffer_iter(iter, cpu);
2635                 if (buf_iter) {
2636                         if (!ring_buffer_iter_empty(buf_iter))
2637                                 return 0;
2638                 } else {
2639                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2640                                 return 0;
2641                 }
2642                 return 1;
2643         }
2644
2645         for_each_tracing_cpu(cpu) {
2646                 buf_iter = trace_buffer_iter(iter, cpu);
2647                 if (buf_iter) {
2648                         if (!ring_buffer_iter_empty(buf_iter))
2649                                 return 0;
2650                 } else {
2651                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2652                                 return 0;
2653                 }
2654         }
2655
2656         return 1;
2657 }
2658
2659 /*  Called with trace_event_read_lock() held. */
2660 enum print_line_t print_trace_line(struct trace_iterator *iter)
2661 {
2662         enum print_line_t ret;
2663
2664         if (iter->lost_events &&
2665             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2666                                  iter->cpu, iter->lost_events))
2667                 return TRACE_TYPE_PARTIAL_LINE;
2668
2669         if (iter->trace && iter->trace->print_line) {
2670                 ret = iter->trace->print_line(iter);
2671                 if (ret != TRACE_TYPE_UNHANDLED)
2672                         return ret;
2673         }
2674
2675         if (iter->ent->type == TRACE_BPUTS &&
2676                         trace_flags & TRACE_ITER_PRINTK &&
2677                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2678                 return trace_print_bputs_msg_only(iter);
2679
2680         if (iter->ent->type == TRACE_BPRINT &&
2681                         trace_flags & TRACE_ITER_PRINTK &&
2682                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2683                 return trace_print_bprintk_msg_only(iter);
2684
2685         if (iter->ent->type == TRACE_PRINT &&
2686                         trace_flags & TRACE_ITER_PRINTK &&
2687                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2688                 return trace_print_printk_msg_only(iter);
2689
2690         if (trace_flags & TRACE_ITER_BIN)
2691                 return print_bin_fmt(iter);
2692
2693         if (trace_flags & TRACE_ITER_HEX)
2694                 return print_hex_fmt(iter);
2695
2696         if (trace_flags & TRACE_ITER_RAW)
2697                 return print_raw_fmt(iter);
2698
2699         return print_trace_fmt(iter);
2700 }
2701
2702 void trace_latency_header(struct seq_file *m)
2703 {
2704         struct trace_iterator *iter = m->private;
2705
2706         /* print nothing if the buffers are empty */
2707         if (trace_empty(iter))
2708                 return;
2709
2710         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2711                 print_trace_header(m, iter);
2712
2713         if (!(trace_flags & TRACE_ITER_VERBOSE))
2714                 print_lat_help_header(m);
2715 }
2716
2717 void trace_default_header(struct seq_file *m)
2718 {
2719         struct trace_iterator *iter = m->private;
2720
2721         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2722                 return;
2723
2724         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2725                 /* print nothing if the buffers are empty */
2726                 if (trace_empty(iter))
2727                         return;
2728                 print_trace_header(m, iter);
2729                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2730                         print_lat_help_header(m);
2731         } else {
2732                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2733                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2734                                 print_func_help_header_irq(iter->trace_buffer, m);
2735                         else
2736                                 print_func_help_header(iter->trace_buffer, m);
2737                 }
2738         }
2739 }
2740
2741 static void test_ftrace_alive(struct seq_file *m)
2742 {
2743         if (!ftrace_is_dead())
2744                 return;
2745         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2746         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2747 }
2748
2749 #ifdef CONFIG_TRACER_MAX_TRACE
2750 static void show_snapshot_main_help(struct seq_file *m)
2751 {
2752         seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2753         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2754         seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2755         seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2756         seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2757         seq_printf(m, "#                       is not a '0' or '1')\n");
2758 }
2759
2760 static void show_snapshot_percpu_help(struct seq_file *m)
2761 {
2762         seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2763 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2764         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2765         seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2766 #else
2767         seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2768         seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2769 #endif
2770         seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2771         seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2772         seq_printf(m, "#                       is not a '0' or '1')\n");
2773 }
2774
2775 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2776 {
2777         if (iter->tr->allocated_snapshot)
2778                 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2779         else
2780                 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2781
2782         seq_printf(m, "# Snapshot commands:\n");
2783         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2784                 show_snapshot_main_help(m);
2785         else
2786                 show_snapshot_percpu_help(m);
2787 }
2788 #else
2789 /* Should never be called */
2790 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2791 #endif
2792
2793 static int s_show(struct seq_file *m, void *v)
2794 {
2795         struct trace_iterator *iter = v;
2796         int ret;
2797
2798         if (iter->ent == NULL) {
2799                 if (iter->tr) {
2800                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2801                         seq_puts(m, "#\n");
2802                         test_ftrace_alive(m);
2803                 }
2804                 if (iter->snapshot && trace_empty(iter))
2805                         print_snapshot_help(m, iter);
2806                 else if (iter->trace && iter->trace->print_header)
2807                         iter->trace->print_header(m);
2808                 else
2809                         trace_default_header(m);
2810
2811         } else if (iter->leftover) {
2812                 /*
2813                  * If we filled the seq_file buffer earlier, we
2814                  * want to just show it now.
2815                  */
2816                 ret = trace_print_seq(m, &iter->seq);
2817
2818                 /* ret should this time be zero, but you never know */
2819                 iter->leftover = ret;
2820
2821         } else {
2822                 print_trace_line(iter);
2823                 ret = trace_print_seq(m, &iter->seq);
2824                 /*
2825                  * If we overflow the seq_file buffer, then it will
2826                  * ask us for this data again at start up.
2827                  * Use that instead.
2828                  *  ret is 0 if seq_file write succeeded.
2829                  *        -1 otherwise.
2830                  */
2831                 iter->leftover = ret;
2832         }
2833
2834         return 0;
2835 }
2836
2837 /*
2838  * Should be used after trace_array_get(), trace_types_lock
2839  * ensures that i_cdev was already initialized.
2840  */
2841 static inline int tracing_get_cpu(struct inode *inode)
2842 {
2843         if (inode->i_cdev) /* See trace_create_cpu_file() */
2844                 return (long)inode->i_cdev - 1;
2845         return RING_BUFFER_ALL_CPUS;
2846 }
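/*
 * Editor's note: the matching encode side (trace_create_cpu_file(),
 * defined elsewhere in this file) is assumed to stash "cpu + 1" in
 * inode->i_cdev when a per-cpu file is created, so that:
 *
 *	i_cdev == NULL                -> RING_BUFFER_ALL_CPUS
 *	i_cdev == (void *)(long)(c+1) -> cpu c
 *
 * The +1 bias keeps cpu 0 distinguishable from "no cpu stored".
 */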
2847
2848 static const struct seq_operations tracer_seq_ops = {
2849         .start          = s_start,
2850         .next           = s_next,
2851         .stop           = s_stop,
2852         .show           = s_show,
2853 };
2854
2855 static struct trace_iterator *
2856 __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2857                struct inode *inode, struct file *file, bool snapshot)
2858 {
2859         struct trace_iterator *iter;
2860         int cpu;
2861
2862         if (tracing_disabled)
2863                 return ERR_PTR(-ENODEV);
2864
2865         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2866         if (!iter)
2867                 return ERR_PTR(-ENOMEM);
2868
2869         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2870                                     GFP_KERNEL);
2871         if (!iter->buffer_iter)
2872                 goto release;
2873
2874         /*
2875          * We make a copy of the current tracer to avoid concurrent
2876          * changes on it while we are reading.
2877          */
2878         mutex_lock(&trace_types_lock);
2879         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2880         if (!iter->trace)
2881                 goto fail;
2882
2883         *iter->trace = *tr->current_trace;
2884
2885         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2886                 goto fail;
2887
2888         iter->tr = tr;
2889
2890 #ifdef CONFIG_TRACER_MAX_TRACE
2891         /* Currently only the top directory has a snapshot */
2892         if (tr->current_trace->print_max || snapshot)
2893                 iter->trace_buffer = &tr->max_buffer;
2894         else
2895 #endif
2896                 iter->trace_buffer = &tr->trace_buffer;
2897         iter->snapshot = snapshot;
2898         iter->pos = -1;
2899         mutex_init(&iter->mutex);
2900         iter->cpu_file = tc->cpu;
2901
2902         /* Notify the tracer early, before we stop tracing. */
2903         if (iter->trace && iter->trace->open)
2904                 iter->trace->open(iter);
2905
2906         /* Annotate start of buffers if we had overruns */
2907         if (ring_buffer_overruns(iter->trace_buffer->buffer))
2908                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2909
2910         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2911         if (trace_clocks[tr->clock_id].in_ns)
2912                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2913
2914         /* stop the trace while dumping if we are not opening "snapshot" */
2915         if (!iter->snapshot)
2916                 tracing_stop_tr(tr);
2917
2918         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2919                 for_each_tracing_cpu(cpu) {
2920                         iter->buffer_iter[cpu] =
2921                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2922                 }
2923                 ring_buffer_read_prepare_sync();
2924                 for_each_tracing_cpu(cpu) {
2925                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2926                         tracing_iter_reset(iter, cpu);
2927                 }
2928         } else {
2929                 cpu = iter->cpu_file;
2930                 iter->buffer_iter[cpu] =
2931                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2932                 ring_buffer_read_prepare_sync();
2933                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2934                 tracing_iter_reset(iter, cpu);
2935         }
2936
2937         mutex_unlock(&trace_types_lock);
2938
2939         return iter;
2940
2941  fail:
2942         mutex_unlock(&trace_types_lock);
2943         kfree(iter->trace);
2944         kfree(iter->buffer_iter);
2945 release:
2946         seq_release_private(inode, file);
2947         return ERR_PTR(-ENOMEM);
2948 }
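/*
 * Editor's note on the prepare/sync/start split above: every iterator
 * is first prepared (allocated, with recording on that cpu's buffer
 * paused), then a single ring_buffer_read_prepare_sync() pays the
 * expensive synchronization once for all cpus, and only then is each
 * iterator started. Doing the sync per cpu would multiply that cost
 * by the number of cpus.
 */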
2949
2950 int tracing_open_generic(struct inode *inode, struct file *filp)
2951 {
2952         if (tracing_disabled)
2953                 return -ENODEV;
2954
2955         filp->private_data = inode->i_private;
2956         return 0;
2957 }
2958
2959 /*
2960  * Open and update trace_array ref count.
2961  * Must have the current trace_array passed to it.
2962  */
2963 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2964 {
2965         struct trace_array *tr = inode->i_private;
2966
2967         if (tracing_disabled)
2968                 return -ENODEV;
2969
2970         if (trace_array_get(tr) < 0)
2971                 return -ENODEV;
2972
2973         filp->private_data = inode->i_private;
2974
2975         return 0;
2976 }
2977
2978 static int tracing_release(struct inode *inode, struct file *file)
2979 {
2980         struct seq_file *m = file->private_data;
2981         struct trace_iterator *iter;
2982         struct trace_array *tr;
2983         int cpu;
2984
2985         /* Writes do not use seq_file, need to grab tr from inode */
2986         if (!(file->f_mode & FMODE_READ)) {
2987                 struct trace_cpu *tc = inode->i_private;
2988
2989                 trace_array_put(tc->tr);
2990                 return 0;
2991         }
2992
2993         iter = m->private;
2994         tr = iter->tr;
2995
2996         mutex_lock(&trace_types_lock);
2997
2998         for_each_tracing_cpu(cpu) {
2999                 if (iter->buffer_iter[cpu])
3000                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3001         }
3002
3003         if (iter->trace && iter->trace->close)
3004                 iter->trace->close(iter);
3005
3006         if (!iter->snapshot)
3007                 /* reenable tracing if it was previously enabled */
3008                 tracing_start_tr(tr);
3009
3010         __trace_array_put(tr);
3011
3012         mutex_unlock(&trace_types_lock);
3013
3014         mutex_destroy(&iter->mutex);
3015         free_cpumask_var(iter->started);
3016         kfree(iter->trace);
3017         kfree(iter->buffer_iter);
3018         seq_release_private(inode, file);
3019
3020         return 0;
3021 }
3022
3023 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3024 {
3025         struct trace_array *tr = inode->i_private;
3026
3027         trace_array_put(tr);
3028         return 0;
3029 }
3030
3031 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3032 {
3033         struct trace_array *tr = inode->i_private;
3034
3035         trace_array_put(tr);
3036
3037         return single_release(inode, file);
3038 }
3039
3040 static int tracing_open(struct inode *inode, struct file *file)
3041 {
3042         struct trace_cpu *tc = inode->i_private;
3043         struct trace_array *tr = tc->tr;
3044         struct trace_iterator *iter;
3045         int ret = 0;
3046
3047         if (trace_array_get(tr) < 0)
3048                 return -ENODEV;
3049
3050         /* If this file was open for write, then erase contents */
3051         if ((file->f_mode & FMODE_WRITE) &&
3052             (file->f_flags & O_TRUNC)) {
3053                 if (tc->cpu == RING_BUFFER_ALL_CPUS)
3054                         tracing_reset_online_cpus(&tr->trace_buffer);
3055                 else
3056                         tracing_reset(&tr->trace_buffer, tc->cpu);
3057         }
3058
3059         if (file->f_mode & FMODE_READ) {
3060                 iter = __tracing_open(tr, tc, inode, file, false);
3061                 if (IS_ERR(iter))
3062                         ret = PTR_ERR(iter);
3063                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3064                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3065         }
3066
3067         if (ret < 0)
3068                 trace_array_put(tr);
3069
3070         return ret;
3071 }
3072
3073 static void *
3074 t_next(struct seq_file *m, void *v, loff_t *pos)
3075 {
3076         struct tracer *t = v;
3077
3078         (*pos)++;
3079
3080         if (t)
3081                 t = t->next;
3082
3083         return t;
3084 }
3085
3086 static void *t_start(struct seq_file *m, loff_t *pos)
3087 {
3088         struct tracer *t;
3089         loff_t l = 0;
3090
3091         mutex_lock(&trace_types_lock);
3092         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
3093                 ;
3094
3095         return t;
3096 }
3097
3098 static void t_stop(struct seq_file *m, void *p)
3099 {
3100         mutex_unlock(&trace_types_lock);
3101 }
3102
3103 static int t_show(struct seq_file *m, void *v)
3104 {
3105         struct tracer *t = v;
3106
3107         if (!t)
3108                 return 0;
3109
3110         seq_printf(m, "%s", t->name);
3111         if (t->next)
3112                 seq_putc(m, ' ');
3113         else
3114                 seq_putc(m, '\n');
3115
3116         return 0;
3117 }
3118
3119 static const struct seq_operations show_traces_seq_ops = {
3120         .start          = t_start,
3121         .next           = t_next,
3122         .stop           = t_stop,
3123         .show           = t_show,
3124 };
3125
3126 static int show_traces_open(struct inode *inode, struct file *file)
3127 {
3128         if (tracing_disabled)
3129                 return -ENODEV;
3130
3131         return seq_open(file, &show_traces_seq_ops);
3132 }
3133
3134 static ssize_t
3135 tracing_write_stub(struct file *filp, const char __user *ubuf,
3136                    size_t count, loff_t *ppos)
3137 {
3138         return count;
3139 }
3140
3141 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3142 {
3143         if (file->f_mode & FMODE_READ)
3144                 return seq_lseek(file, offset, origin);
3145         else
3146                 return 0;
3147 }
3148
3149 static const struct file_operations tracing_fops = {
3150         .open           = tracing_open,
3151         .read           = seq_read,
3152         .write          = tracing_write_stub,
3153         .llseek         = tracing_seek,
3154         .release        = tracing_release,
3155 };
3156
3157 static const struct file_operations show_traces_fops = {
3158         .open           = show_traces_open,
3159         .read           = seq_read,
3160         .release        = seq_release,
3161         .llseek         = seq_lseek,
3162 };
3163
3164 /*
3165  * Only trace on a CPU if the bitmask is set:
3166  */
3167 static cpumask_var_t tracing_cpumask;
3168
3169 /*
3170  * The tracer itself will not take this lock, but still we want
3171  * to provide a consistent cpumask to user-space:
3172  */
3173 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3174
3175 /*
3176  * Temporary storage for the character representation of the
3177  * CPU bitmask (and one more byte for the newline):
3178  */
3179 static char mask_str[NR_CPUS + 1];
3180
3181 static ssize_t
3182 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3183                      size_t count, loff_t *ppos)
3184 {
3185         int len;
3186
3187         mutex_lock(&tracing_cpumask_update_lock);
3188
3189         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
3190         if (count - len < 2) {
3191                 count = -EINVAL;
3192                 goto out_err;
3193         }
3194         len += sprintf(mask_str + len, "\n");
3195         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3196
3197 out_err:
3198         mutex_unlock(&tracing_cpumask_update_lock);
3199
3200         return count;
3201 }
3202
3203 static ssize_t
3204 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3205                       size_t count, loff_t *ppos)
3206 {
3207         struct trace_array *tr = filp->private_data;
3208         cpumask_var_t tracing_cpumask_new;
3209         int err, cpu;
3210
3211         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3212                 return -ENOMEM;
3213
3214         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3215         if (err)
3216                 goto err_unlock;
3217
3218         mutex_lock(&tracing_cpumask_update_lock);
3219
3220         local_irq_disable();
3221         arch_spin_lock(&ftrace_max_lock);
3222         for_each_tracing_cpu(cpu) {
3223                 /*
3224                  * Increase/decrease the disabled counter if we are
3225                  * about to flip a bit in the cpumask:
3226                  */
3227                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3228                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3229                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3230                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3231                 }
3232                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3233                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3234                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3235                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3236                 }
3237         }
3238         arch_spin_unlock(&ftrace_max_lock);
3239         local_irq_enable();
3240
3241         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
3242
3243         mutex_unlock(&tracing_cpumask_update_lock);
3244         free_cpumask_var(tracing_cpumask_new);
3245
3246         return count;
3247
3248 err_unlock:
3249         free_cpumask_var(tracing_cpumask_new);
3250
3251         return err;
3252 }
3253
3254 static const struct file_operations tracing_cpumask_fops = {
3255         .open           = tracing_open_generic,
3256         .read           = tracing_cpumask_read,
3257         .write          = tracing_cpumask_write,
3258         .llseek         = generic_file_llseek,
3259 };
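/*
 * Editor's usage example: from user space, restricting tracing to
 * cpus 0-1 is a single write of the hex mask:
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * tracing_cpumask_write() above then bumps the disabled counter and
 * stops ring-buffer recording on every cpu whose bit was cleared, and
 * re-enables the ones newly set.
 */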
3260
3261 static int tracing_trace_options_show(struct seq_file *m, void *v)
3262 {
3263         struct tracer_opt *trace_opts;
3264         struct trace_array *tr = m->private;
3265         u32 tracer_flags;
3266         int i;
3267
3268         mutex_lock(&trace_types_lock);
3269         tracer_flags = tr->current_trace->flags->val;
3270         trace_opts = tr->current_trace->flags->opts;
3271
3272         for (i = 0; trace_options[i]; i++) {
3273                 if (trace_flags & (1 << i))
3274                         seq_printf(m, "%s\n", trace_options[i]);
3275                 else
3276                         seq_printf(m, "no%s\n", trace_options[i]);
3277         }
3278
3279         for (i = 0; trace_opts[i].name; i++) {
3280                 if (tracer_flags & trace_opts[i].bit)
3281                         seq_printf(m, "%s\n", trace_opts[i].name);
3282                 else
3283                         seq_printf(m, "no%s\n", trace_opts[i].name);
3284         }
3285         mutex_unlock(&trace_types_lock);
3286
3287         return 0;
3288 }
3289
3290 static int __set_tracer_option(struct tracer *trace,
3291                                struct tracer_flags *tracer_flags,
3292                                struct tracer_opt *opts, int neg)
3293 {
3294         int ret;
3295
3296         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3297         if (ret)
3298                 return ret;
3299
3300         if (neg)
3301                 tracer_flags->val &= ~opts->bit;
3302         else
3303                 tracer_flags->val |= opts->bit;
3304         return 0;
3305 }
3306
3307 /* Try to assign a tracer specific option */
3308 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3309 {
3310         struct tracer_flags *tracer_flags = trace->flags;
3311         struct tracer_opt *opts = NULL;
3312         int i;
3313
3314         for (i = 0; tracer_flags->opts[i].name; i++) {
3315                 opts = &tracer_flags->opts[i];
3316
3317                 if (strcmp(cmp, opts->name) == 0)
		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(trace, tracer_flags,
						   opts, neg);
3320         }
3321
3322         return -EINVAL;
3323 }
3324
3325 /* Some tracers require overwrite to stay enabled */
3326 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3327 {
3328         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3329                 return -1;
3330
3331         return 0;
3332 }
3333
3334 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3335 {
3336         /* do nothing if flag is already set */
3337         if (!!(trace_flags & mask) == !!enabled)
3338                 return 0;
3339
3340         /* Give the tracer a chance to approve the change */
3341         if (tr->current_trace->flag_changed)
3342                 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
3343                         return -EINVAL;
3344
3345         if (enabled)
3346                 trace_flags |= mask;
3347         else
3348                 trace_flags &= ~mask;
3349
3350         if (mask == TRACE_ITER_RECORD_CMD)
3351                 trace_event_enable_cmd_record(enabled);
3352
3353         if (mask == TRACE_ITER_OVERWRITE) {
3354                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3355 #ifdef CONFIG_TRACER_MAX_TRACE
3356                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3357 #endif
3358         }
3359
3360         if (mask == TRACE_ITER_PRINTK)
3361                 trace_printk_start_stop_comm(enabled);
3362
3363         return 0;
3364 }
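
/*
 * Sketch of the veto path (illustrative only): a latency tracer that
 * registers trace_keep_overwrite() as its ->flag_changed callback makes
 * set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0) fail with -EINVAL while
 * the tracer is enabled, so e.g.
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/overwrite
 *
 * is rejected instead of silently breaking the tracer.
 */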
3365
3366 static int trace_set_options(struct trace_array *tr, char *option)
3367 {
3368         char *cmp;
3369         int neg = 0;
3370         int ret = -ENODEV;
3371         int i;
3372
3373         cmp = strstrip(option);
3374
3375         if (strncmp(cmp, "no", 2) == 0) {
3376                 neg = 1;
3377                 cmp += 2;
3378         }
3379
3380         mutex_lock(&trace_types_lock);
3381
3382         for (i = 0; trace_options[i]; i++) {
3383                 if (strcmp(cmp, trace_options[i]) == 0) {
3384                         ret = set_tracer_flag(tr, 1 << i, !neg);
3385                         break;
3386                 }
3387         }
3388
3389         /* If no option could be set, test the specific tracer options */
3390         if (!trace_options[i])
3391                 ret = set_tracer_option(tr->current_trace, cmp, neg);
3392
3393         mutex_unlock(&trace_types_lock);
3394
3395         return ret;
3396 }
3397
3398 static ssize_t
3399 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3400                         size_t cnt, loff_t *ppos)
3401 {
3402         struct seq_file *m = filp->private_data;
3403         struct trace_array *tr = m->private;
3404         char buf[64];
3405         int ret;
3406
3407         if (cnt >= sizeof(buf))
3408                 return -EINVAL;
3409
3410         if (copy_from_user(&buf, ubuf, cnt))
3411                 return -EFAULT;
3412
3413         buf[cnt] = 0;
3414
3415         ret = trace_set_options(tr, buf);
3416         if (ret < 0)
3417                 return ret;
3418
3419         *ppos += cnt;
3420
3421         return cnt;
3422 }
3423
3424 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3425 {
3426         struct trace_array *tr = inode->i_private;
3427         int ret;
3428
3429         if (tracing_disabled)
3430                 return -ENODEV;
3431
3432         if (trace_array_get(tr) < 0)
3433                 return -ENODEV;
3434
3435         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3436         if (ret < 0)
3437                 trace_array_put(tr);
3438
3439         return ret;
3440 }
3441
3442 static const struct file_operations tracing_iter_fops = {
3443         .open           = tracing_trace_options_open,
3444         .read           = seq_read,
3445         .llseek         = seq_lseek,
3446         .release        = tracing_single_release_tr,
3447         .write          = tracing_trace_options_write,
3448 };
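
/*
 * Illustrative usage (not from the original sources): trace_set_options()
 * takes one option name per write, with a "no" prefix clearing it:
 *
 *	# echo print-parent > /sys/kernel/debug/tracing/trace_options
 *	# echo nosym-offset > /sys/kernel/debug/tracing/trace_options
 */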
3449
3450 static const char readme_msg[] =
3451         "tracing mini-HOWTO:\n\n"
3452         "# echo 0 > tracing_on : quick way to disable tracing\n"
3453         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3454         " Important files:\n"
3455         "  trace\t\t\t- The static contents of the buffer\n"
3456         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3457         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3458         "  current_tracer\t- function and latency tracers\n"
3459         "  available_tracers\t- list of configured tracers for current_tracer\n"
3460         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3461         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
3463         "       local:   Per cpu clock but may not be synced across CPUs\n"
3464         "      global:   Synced across CPUs but slows tracing down.\n"
3465         "     counter:   Not a clock, but just an increment\n"
3466         "      uptime:   Jiffy counter from time of boot\n"
3467         "        perf:   Same clock that perf events use\n"
3468 #ifdef CONFIG_X86_64
3469         "     x86-tsc:   TSC cycle counter\n"
3470 #endif
	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3472         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3473         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3474         "\t\t\t  Remove sub-buffer with rmdir\n"
3475         "  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by prefixing 'no' to the option name\n"
3477 #ifdef CONFIG_DYNAMIC_FTRACE
3478         "\n  available_filter_functions - list of functions that can be filtered on\n"
3479         "  set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3480         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3481         "            modules: Can select a group via module\n"
3482         "             Format: :mod:<module-name>\n"
3483         "             example: echo :mod:ext3 > set_ftrace_filter\n"
3484         "            triggers: a command to perform when function is hit\n"
3485         "              Format: <function>:<trigger>[:count]\n"
3486         "             trigger: traceon, traceoff\n"
3487         "                      enable_event:<system>:<event>\n"
3488         "                      disable_event:<system>:<event>\n"
3489 #ifdef CONFIG_STACKTRACE
3490         "                      stacktrace\n"
3491 #endif
3492 #ifdef CONFIG_TRACER_SNAPSHOT
3493         "                      snapshot\n"
3494 #endif
3495         "             example: echo do_fault:traceoff > set_ftrace_filter\n"
3496         "                      echo do_trap:traceoff:3 > set_ftrace_filter\n"
3497         "             The first one will disable tracing every time do_fault is hit\n"
3498         "             The second will disable tracing at most 3 times when do_trap is hit\n"
	"               The first time do_trap is hit and it disables tracing, the counter\n"
	"               will decrement to 2. If tracing is already disabled, the counter\n"
	"               will not decrement. It only decrements when the trigger actually fires\n"
3502         "             To remove trigger without count:\n"
	"               echo '!<function>:<trigger>' > set_ftrace_filter\n"
3504         "             To remove trigger with a count:\n"
	"               echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3506         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3507         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3508         "            modules: Can select a group via module command :mod:\n"
3509         "            Does not accept triggers\n"
3510 #endif /* CONFIG_DYNAMIC_FTRACE */
3511 #ifdef CONFIG_FUNCTION_TRACER
3512         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3513 #endif
3514 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3515         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3516         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3517 #endif
3518 #ifdef CONFIG_TRACER_SNAPSHOT
3519         "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3520         "\t\t\t  Read the contents for more information\n"
3521 #endif
3522 #ifdef CONFIG_STACKTRACE
3523         "  stack_trace\t\t- Shows the max stack trace when active\n"
3524         "  stack_max_size\t- Shows current max stack size that was traced\n"
3525         "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
3526 #ifdef CONFIG_DYNAMIC_FTRACE
3527         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3528 #endif
3529 #endif /* CONFIG_STACKTRACE */
3530 ;
3531
3532 static ssize_t
3533 tracing_readme_read(struct file *filp, char __user *ubuf,
3534                        size_t cnt, loff_t *ppos)
3535 {
3536         return simple_read_from_buffer(ubuf, cnt, ppos,
3537                                         readme_msg, strlen(readme_msg));
3538 }
3539
3540 static const struct file_operations tracing_readme_fops = {
3541         .open           = tracing_open_generic,
3542         .read           = tracing_readme_read,
3543         .llseek         = generic_file_llseek,
3544 };
3545
3546 static ssize_t
3547 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3548                                 size_t cnt, loff_t *ppos)
3549 {
3550         char *buf_comm;
3551         char *file_buf;
3552         char *buf;
3553         int len = 0;
3554         int pid;
3555         int i;
3556
	file_buf = kmalloc(SAVED_CMDLINES * (16 + TASK_COMM_LEN), GFP_KERNEL);
3558         if (!file_buf)
3559                 return -ENOMEM;
3560
3561         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3562         if (!buf_comm) {
3563                 kfree(file_buf);
3564                 return -ENOMEM;
3565         }
3566
3567         buf = file_buf;
3568
3569         for (i = 0; i < SAVED_CMDLINES; i++) {
3570                 int r;
3571
3572                 pid = map_cmdline_to_pid[i];
3573                 if (pid == -1 || pid == NO_CMDLINE_MAP)
3574                         continue;
3575
3576                 trace_find_cmdline(pid, buf_comm);
3577                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3578                 buf += r;
3579                 len += r;
3580         }
3581
3582         len = simple_read_from_buffer(ubuf, cnt, ppos,
3583                                       file_buf, len);
3584
3585         kfree(file_buf);
3586         kfree(buf_comm);
3587
3588         return len;
3589 }
3590
3591 static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_read,
	.llseek		= generic_file_llseek,
3595 };
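
/*
 * Illustrative output (pids and comms below are hypothetical): each
 * line of saved_cmdlines is "<pid> <comm>", produced by the sprintf()
 * above:
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	1234 bash
 */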
3596
3597 static ssize_t
3598 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3599                        size_t cnt, loff_t *ppos)
3600 {
3601         struct trace_array *tr = filp->private_data;
3602         char buf[MAX_TRACER_SIZE+2];
3603         int r;
3604
3605         mutex_lock(&trace_types_lock);
3606         r = sprintf(buf, "%s\n", tr->current_trace->name);
3607         mutex_unlock(&trace_types_lock);
3608
3609         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3610 }
3611
3612 int tracer_init(struct tracer *t, struct trace_array *tr)
3613 {
3614         tracing_reset_online_cpus(&tr->trace_buffer);
3615         return t->init(tr);
3616 }
3617
3618 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3619 {
3620         int cpu;
3621
3622         for_each_tracing_cpu(cpu)
3623                 per_cpu_ptr(buf->data, cpu)->entries = val;
3624 }
3625
3626 #ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's per-cpu entries to match @size_buf's entries */
3628 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3629                                         struct trace_buffer *size_buf, int cpu_id)
3630 {
3631         int cpu, ret = 0;
3632
3633         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3634                 for_each_tracing_cpu(cpu) {
3635                         ret = ring_buffer_resize(trace_buf->buffer,
3636                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3637                         if (ret < 0)
3638                                 break;
3639                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3640                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3641                 }
3642         } else {
3643                 ret = ring_buffer_resize(trace_buf->buffer,
3644                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3645                 if (ret == 0)
3646                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3647                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3648         }
3649
3650         return ret;
3651 }
3652 #endif /* CONFIG_TRACER_MAX_TRACE */
3653
3654 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3655                                         unsigned long size, int cpu)
3656 {
3657         int ret;
3658
3659         /*
3660          * If kernel or user changes the size of the ring buffer
3661          * we use the size that was given, and we can forget about
3662          * expanding it later.
3663          */
3664         ring_buffer_expanded = true;
3665
3666         /* May be called before buffers are initialized */
3667         if (!tr->trace_buffer.buffer)
3668                 return 0;
3669
3670         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3671         if (ret < 0)
3672                 return ret;
3673
3674 #ifdef CONFIG_TRACER_MAX_TRACE
3675         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3676             !tr->current_trace->use_max_tr)
3677                 goto out;
3678
3679         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3680         if (ret < 0) {
3681                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3682                                                      &tr->trace_buffer, cpu);
3683                 if (r < 0) {
			/*
			 * AARGH! We are left with a different sized
			 * max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * Then, when we tried to reset the main buffer
			 * back to its original size, we failed there
			 * too. This is very unlikely to happen, but if
			 * it does, warn and kill all tracing.
			 */
3698                         WARN_ON(1);
3699                         tracing_disabled = 1;
3700                 }
3701                 return ret;
3702         }
3703
3704         if (cpu == RING_BUFFER_ALL_CPUS)
3705                 set_buffer_entries(&tr->max_buffer, size);
3706         else
3707                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3708
3709  out:
3710 #endif /* CONFIG_TRACER_MAX_TRACE */
3711
3712         if (cpu == RING_BUFFER_ALL_CPUS)
3713                 set_buffer_entries(&tr->trace_buffer, size);
3714         else
3715                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3716
3717         return ret;
3718 }
3719
3720 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3721                                           unsigned long size, int cpu_id)
3722 {
3723         int ret = size;
3724
3725         mutex_lock(&trace_types_lock);
3726
3727         if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
3729                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3730                         ret = -EINVAL;
3731                         goto out;
3732                 }
3733         }
3734
3735         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3736         if (ret < 0)
3737                 ret = -ENOMEM;
3738
3739 out:
3740         mutex_unlock(&trace_types_lock);
3741
3742         return ret;
3743 }
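
/*
 * Illustrative call (not present in the original sources): a caller
 * wanting 1 MiB per cpu on every cpu passes the size in bytes along
 * with RING_BUFFER_ALL_CPUS:
 *
 *	ret = tracing_resize_ring_buffer(tr, 1024 << 10, RING_BUFFER_ALL_CPUS);
 */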
3744
3745
3746 /**
3747  * tracing_update_buffers - used by tracing facility to expand ring buffers
3748  *
 * To save memory when tracing is configured in but never used, the
 * ring buffers are initially set to a minimum size. Once a user
 * starts to use the tracing facility, they need to grow to their
 * default size.
3753  *
3754  * This function is to be called when a tracer is about to be used.
3755  */
3756 int tracing_update_buffers(void)
3757 {
3758         int ret = 0;
3759
3760         mutex_lock(&trace_types_lock);
3761         if (!ring_buffer_expanded)
3762                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3763                                                 RING_BUFFER_ALL_CPUS);
3764         mutex_unlock(&trace_types_lock);
3765
3766         return ret;
3767 }
3768
3769 struct trace_option_dentry;
3770
3771 static struct trace_option_dentry *
3772 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3773
3774 static void
3775 destroy_trace_option_files(struct trace_option_dentry *topts);
3776
3777 static int tracing_set_tracer(const char *buf)
3778 {
3779         static struct trace_option_dentry *topts;
3780         struct trace_array *tr = &global_trace;
3781         struct tracer *t;
3782 #ifdef CONFIG_TRACER_MAX_TRACE
3783         bool had_max_tr;
3784 #endif
3785         int ret = 0;
3786
3787         mutex_lock(&trace_types_lock);
3788
3789         if (!ring_buffer_expanded) {
3790                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3791                                                 RING_BUFFER_ALL_CPUS);
3792                 if (ret < 0)
3793                         goto out;
3794                 ret = 0;
3795         }
3796
3797         for (t = trace_types; t; t = t->next) {
3798                 if (strcmp(t->name, buf) == 0)
3799                         break;
3800         }
3801         if (!t) {
3802                 ret = -EINVAL;
3803                 goto out;
3804         }
3805         if (t == tr->current_trace)
3806                 goto out;
3807
3808         trace_branch_disable();
3809
3810         tr->current_trace->enabled = false;
3811
3812         if (tr->current_trace->reset)
3813                 tr->current_trace->reset(tr);
3814
3815         /* Current trace needs to be nop_trace before synchronize_sched */
3816         tr->current_trace = &nop_trace;
3817
3818 #ifdef CONFIG_TRACER_MAX_TRACE
3819         had_max_tr = tr->allocated_snapshot;
3820
3821         if (had_max_tr && !t->use_max_tr) {
3822                 /*
3823                  * We need to make sure that the update_max_tr sees that
3824                  * current_trace changed to nop_trace to keep it from
3825                  * swapping the buffers after we resize it.
		 * update_max_tr() is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
3828                  */
3829                 synchronize_sched();
3830                 free_snapshot(tr);
3831         }
3832 #endif
3833         destroy_trace_option_files(topts);
3834
3835         topts = create_trace_option_files(tr, t);
3836
3837 #ifdef CONFIG_TRACER_MAX_TRACE
3838         if (t->use_max_tr && !had_max_tr) {
3839                 ret = alloc_snapshot(tr);
3840                 if (ret < 0)
3841                         goto out;
3842         }
3843 #endif
3844
3845         if (t->init) {
3846                 ret = tracer_init(t, tr);
3847                 if (ret)
3848                         goto out;
3849         }
3850
3851         tr->current_trace = t;
3852         tr->current_trace->enabled = true;
3853         trace_branch_enable(tr);
3854  out:
3855         mutex_unlock(&trace_types_lock);
3856
3857         return ret;
3858 }
3859
3860 static ssize_t
3861 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3862                         size_t cnt, loff_t *ppos)
3863 {
3864         char buf[MAX_TRACER_SIZE+1];
3865         int i;
3866         size_t ret;
3867         int err;
3868
3869         ret = cnt;
3870
3871         if (cnt > MAX_TRACER_SIZE)
3872                 cnt = MAX_TRACER_SIZE;
3873
3874         if (copy_from_user(&buf, ubuf, cnt))
3875                 return -EFAULT;
3876
3877         buf[cnt] = 0;
3878
3879         /* strip ending whitespace. */
3880         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3881                 buf[i] = 0;
3882
3883         err = tracing_set_tracer(buf);
3884         if (err)
3885                 return err;
3886
3887         *ppos += ret;
3888
3889         return ret;
3890 }
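
/*
 * Illustrative usage (assumes the function tracer is configured in):
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *	# echo nop > /sys/kernel/debug/tracing/current_tracer
 *
 * The trailing newline from echo is stripped by the whitespace loop
 * above before tracing_set_tracer() looks the name up in trace_types.
 */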
3891
3892 static ssize_t
3893 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3894                      size_t cnt, loff_t *ppos)
3895 {
3896         unsigned long *ptr = filp->private_data;
3897         char buf[64];
3898         int r;
3899
3900         r = snprintf(buf, sizeof(buf), "%ld\n",
3901                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r >= sizeof(buf))
		r = sizeof(buf) - 1;
3904         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3905 }
3906
3907 static ssize_t
3908 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3909                       size_t cnt, loff_t *ppos)
3910 {
3911         unsigned long *ptr = filp->private_data;
3912         unsigned long val;
3913         int ret;
3914
3915         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3916         if (ret)
3917                 return ret;
3918
3919         *ptr = val * 1000;
3920
3921         return cnt;
3922 }
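
/*
 * Note the unit conversion (a sketch of the expected usage, not from
 * the original docs): reads convert the stored nanoseconds to
 * microseconds via nsecs_to_usecs(), and writes multiply by 1000, so
 * the file is in microseconds on both sides:
 *
 *	# echo 50 > tracing_max_latency		(stores 50000 ns)
 *	# cat tracing_max_latency		(prints 50)
 */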
3923
3924 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3925 {
3926         struct trace_array *tr = inode->i_private;
3927         struct trace_iterator *iter;
3928         int ret = 0;
3929
3930         if (tracing_disabled)
3931                 return -ENODEV;
3932
3933         if (trace_array_get(tr) < 0)
3934                 return -ENODEV;
3935
3936         mutex_lock(&trace_types_lock);
3937
3938         /* create a buffer to store the information to pass to userspace */
3939         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3940         if (!iter) {
3941                 ret = -ENOMEM;
3942                 __trace_array_put(tr);
3943                 goto out;
3944         }
3945
3946         /*
3947          * We make a copy of the current tracer to avoid concurrent
3948          * changes on it while we are reading.
3949          */
3950         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3951         if (!iter->trace) {
3952                 ret = -ENOMEM;
3953                 goto fail;
3954         }
3955         *iter->trace = *tr->current_trace;
3956
3957         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3958                 ret = -ENOMEM;
3959                 goto fail;
3960         }
3961
3962         /* trace pipe does not show start of buffer */
3963         cpumask_setall(iter->started);
3964
3965         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3966                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3967
3968         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3969         if (trace_clocks[tr->clock_id].in_ns)
3970                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3971
3972         iter->tr = tr;
3973         iter->trace_buffer = &tr->trace_buffer;
3974         iter->cpu_file = tracing_get_cpu(inode);
3975         mutex_init(&iter->mutex);
3976         filp->private_data = iter;
3977
3978         if (iter->trace->pipe_open)
3979                 iter->trace->pipe_open(iter);
3980
3981         nonseekable_open(inode, filp);
3982 out:
3983         mutex_unlock(&trace_types_lock);
3984         return ret;
3985
3986 fail:
3987         kfree(iter->trace);
3988         kfree(iter);
3989         __trace_array_put(tr);
3990         mutex_unlock(&trace_types_lock);
3991         return ret;
3992 }
3993
3994 static int tracing_release_pipe(struct inode *inode, struct file *file)
3995 {
3996         struct trace_iterator *iter = file->private_data;
3997         struct trace_array *tr = inode->i_private;
3998
3999         mutex_lock(&trace_types_lock);
4000
4001         if (iter->trace->pipe_close)
4002                 iter->trace->pipe_close(iter);
4003
4004         mutex_unlock(&trace_types_lock);
4005
4006         free_cpumask_var(iter->started);
4007         mutex_destroy(&iter->mutex);
4008         kfree(iter->trace);
4009         kfree(iter);
4010
4011         trace_array_put(tr);
4012
4013         return 0;
4014 }
4015
4016 static unsigned int
4017 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4018 {
4019         /* Iterators are static, they should be filled or empty */
4020         if (trace_buffer_iter(iter, iter->cpu_file))
4021                 return POLLIN | POLLRDNORM;
4022
4023         if (trace_flags & TRACE_ITER_BLOCK)
4024                 /*
4025                  * Always select as readable when in blocking mode
4026                  */
4027                 return POLLIN | POLLRDNORM;
4028         else
4029                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4030                                              filp, poll_table);
4031 }
4032
4033 static unsigned int
4034 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4035 {
4036         struct trace_iterator *iter = filp->private_data;
4037
4038         return trace_poll(iter, filp, poll_table);
4039 }
4040
/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Either way, this is a very primitive wakeup.
 */
4053 void poll_wait_pipe(struct trace_iterator *iter)
4054 {
4055         set_current_state(TASK_INTERRUPTIBLE);
4056         /* sleep for 100 msecs, and try again. */
4057         schedule_timeout(HZ / 10);
4058 }
4059
4060 /* Must be called with trace_types_lock mutex held. */
4061 static int tracing_wait_pipe(struct file *filp)
4062 {
4063         struct trace_iterator *iter = filp->private_data;
4064
4065         while (trace_empty(iter)) {
4066
4067                 if ((filp->f_flags & O_NONBLOCK)) {
4068                         return -EAGAIN;
4069                 }
4070
4071                 mutex_unlock(&iter->mutex);
4072
4073                 iter->trace->wait_pipe(iter);
4074
4075                 mutex_lock(&iter->mutex);
4076
4077                 if (signal_pending(current))
4078                         return -EINTR;
4079
		/*
		 * We only return EOF once tracing has been disabled
		 * *and* something has been read. If tracing is disabled
		 * but nothing has been read yet, we keep blocking. This
		 * allows a user to cat this file, and then enable
		 * tracing. But after we have read something, we give an
		 * EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
4089                 if (!tracing_is_on() && iter->pos)
4090                         break;
4091         }
4092
4093         return 1;
4094 }
4095
4096 /*
4097  * Consumer reader.
4098  */
4099 static ssize_t
4100 tracing_read_pipe(struct file *filp, char __user *ubuf,
4101                   size_t cnt, loff_t *ppos)
4102 {
4103         struct trace_iterator *iter = filp->private_data;
4104         struct trace_array *tr = iter->tr;
4105         ssize_t sret;
4106
4107         /* return any leftover data */
4108         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4109         if (sret != -EBUSY)
4110                 return sret;
4111
4112         trace_seq_init(&iter->seq);
4113
4114         /* copy the tracer to avoid using a global lock all around */
4115         mutex_lock(&trace_types_lock);
4116         if (unlikely(iter->trace->name != tr->current_trace->name))
4117                 *iter->trace = *tr->current_trace;
4118         mutex_unlock(&trace_types_lock);
4119
4120         /*
4121          * Avoid more than one consumer on a single file descriptor
4122          * This is just a matter of traces coherency, the ring buffer itself
4123          * is protected.
4124          */
4125         mutex_lock(&iter->mutex);
4126         if (iter->trace->read) {
4127                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4128                 if (sret)
4129                         goto out;
4130         }
4131
4132 waitagain:
4133         sret = tracing_wait_pipe(filp);
4134         if (sret <= 0)
4135                 goto out;
4136
4137         /* stop when tracing is finished */
4138         if (trace_empty(iter)) {
4139                 sret = 0;
4140                 goto out;
4141         }
4142
4143         if (cnt >= PAGE_SIZE)
4144                 cnt = PAGE_SIZE - 1;
4145
4146         /* reset all but tr, trace, and overruns */
4147         memset(&iter->seq, 0,
4148                sizeof(struct trace_iterator) -
4149                offsetof(struct trace_iterator, seq));
4150         cpumask_clear(iter->started);
4151         iter->pos = -1;
4152
4153         trace_event_read_lock();
4154         trace_access_lock(iter->cpu_file);
4155         while (trace_find_next_entry_inc(iter) != NULL) {
4156                 enum print_line_t ret;
4157                 int len = iter->seq.len;
4158
4159                 ret = print_trace_line(iter);
4160                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4161                         /* don't print partial lines */
4162                         iter->seq.len = len;
4163                         break;
4164                 }
4165                 if (ret != TRACE_TYPE_NO_CONSUME)
4166                         trace_consume(iter);
4167
4168                 if (iter->seq.len >= cnt)
4169                         break;
4170
4171                 /*
4172                  * Setting the full flag means we reached the trace_seq buffer
4173                  * size and we should leave by partial output condition above.
4174                  * One of the trace_seq_* functions is not used properly.
4175                  */
4176                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4177                           iter->ent->type);
4178         }
4179         trace_access_unlock(iter->cpu_file);
4180         trace_event_read_unlock();
4181
4182         /* Now copy what we have to the user */
4183         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4184         if (iter->seq.readpos >= iter->seq.len)
4185                 trace_seq_init(&iter->seq);
4186
4187         /*
4188          * If there was nothing to send to user, in spite of consuming trace
4189          * entries, go back to wait for more entries.
4190          */
4191         if (sret == -EBUSY)
4192                 goto waitagain;
4193
4194 out:
4195         mutex_unlock(&iter->mutex);
4196
4197         return sret;
4198 }
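
/*
 * Illustrative usage (not from the original sources): trace_pipe is a
 * consuming, blocking read, so it is typically driven from a shell as
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * which blocks in tracing_wait_pipe() until entries arrive and removes
 * them from the ring buffer as they are printed.
 */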
4199
4200 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4201                                      struct pipe_buffer *buf)
4202 {
4203         __free_page(buf->page);
4204 }
4205
4206 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4207                                      unsigned int idx)
4208 {
4209         __free_page(spd->pages[idx]);
4210 }
4211
4212 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4213         .can_merge              = 0,
4214         .map                    = generic_pipe_buf_map,
4215         .unmap                  = generic_pipe_buf_unmap,
4216         .confirm                = generic_pipe_buf_confirm,
4217         .release                = tracing_pipe_buf_release,
4218         .steal                  = generic_pipe_buf_steal,
4219         .get                    = generic_pipe_buf_get,
4220 };
4221
4222 static size_t
4223 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4224 {
4225         size_t count;
4226         int ret;
4227
4228         /* Seq buffer is page-sized, exactly what we need. */
4229         for (;;) {
4230                 count = iter->seq.len;
4231                 ret = print_trace_line(iter);
4232                 count = iter->seq.len - count;
4233                 if (rem < count) {
4234                         rem = 0;
4235                         iter->seq.len -= count;
4236                         break;
4237                 }
4238                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4239                         iter->seq.len -= count;
4240                         break;
4241                 }
4242
4243                 if (ret != TRACE_TYPE_NO_CONSUME)
4244                         trace_consume(iter);
4245                 rem -= count;
4246                 if (!trace_find_next_entry_inc(iter))   {
4247                         rem = 0;
4248                         iter->ent = NULL;
4249                         break;
4250                 }
4251         }
4252
4253         return rem;
4254 }
4255
4256 static ssize_t tracing_splice_read_pipe(struct file *filp,
4257                                         loff_t *ppos,
4258                                         struct pipe_inode_info *pipe,
4259                                         size_t len,
4260                                         unsigned int flags)
4261 {
4262         struct page *pages_def[PIPE_DEF_BUFFERS];
4263         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4264         struct trace_iterator *iter = filp->private_data;
4265         struct splice_pipe_desc spd = {
4266                 .pages          = pages_def,
4267                 .partial        = partial_def,
4268                 .nr_pages       = 0, /* This gets updated below. */
4269                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4270                 .flags          = flags,
4271                 .ops            = &tracing_pipe_buf_ops,
4272                 .spd_release    = tracing_spd_release_pipe,
4273         };
4274         struct trace_array *tr = iter->tr;
4275         ssize_t ret;
4276         size_t rem;
4277         unsigned int i;
4278
4279         if (splice_grow_spd(pipe, &spd))
4280                 return -ENOMEM;
4281
4282         /* copy the tracer to avoid using a global lock all around */
4283         mutex_lock(&trace_types_lock);
4284         if (unlikely(iter->trace->name != tr->current_trace->name))
4285                 *iter->trace = *tr->current_trace;
4286         mutex_unlock(&trace_types_lock);
4287
4288         mutex_lock(&iter->mutex);
4289
4290         if (iter->trace->splice_read) {
4291                 ret = iter->trace->splice_read(iter, filp,
4292                                                ppos, pipe, len, flags);
4293                 if (ret)
4294                         goto out_err;
4295         }
4296
4297         ret = tracing_wait_pipe(filp);
4298         if (ret <= 0)
4299                 goto out_err;
4300
4301         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4302                 ret = -EFAULT;
4303                 goto out_err;
4304         }
4305
4306         trace_event_read_lock();
4307         trace_access_lock(iter->cpu_file);
4308
4309         /* Fill as many pages as possible. */
4310         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4311                 spd.pages[i] = alloc_page(GFP_KERNEL);
4312                 if (!spd.pages[i])
4313                         break;
4314
4315                 rem = tracing_fill_pipe_page(rem, iter);
4316
4317                 /* Copy the data into the page, so we can start over. */
4318                 ret = trace_seq_to_buffer(&iter->seq,
4319                                           page_address(spd.pages[i]),
4320                                           iter->seq.len);
4321                 if (ret < 0) {
4322                         __free_page(spd.pages[i]);
4323                         break;
4324                 }
4325                 spd.partial[i].offset = 0;
4326                 spd.partial[i].len = iter->seq.len;
4327
4328                 trace_seq_init(&iter->seq);
4329         }
4330
4331         trace_access_unlock(iter->cpu_file);
4332         trace_event_read_unlock();
4333         mutex_unlock(&iter->mutex);
4334
4335         spd.nr_pages = i;
4336
4337         ret = splice_to_pipe(pipe, &spd);
4338 out:
4339         splice_shrink_spd(&spd);
4340         return ret;
4341
4342 out_err:
4343         mutex_unlock(&iter->mutex);
4344         goto out;
4345 }
4346
4347 static ssize_t
4348 tracing_entries_read(struct file *filp, char __user *ubuf,
4349                      size_t cnt, loff_t *ppos)
4350 {
4351         struct inode *inode = file_inode(filp);
4352         struct trace_array *tr = inode->i_private;
4353         int cpu = tracing_get_cpu(inode);
4354         char buf[64];
4355         int r = 0;
4356         ssize_t ret;
4357
4358         mutex_lock(&trace_types_lock);
4359
4360         if (cpu == RING_BUFFER_ALL_CPUS) {
4361                 int cpu, buf_size_same;
4362                 unsigned long size;
4363
4364                 size = 0;
4365                 buf_size_same = 1;
4366                 /* check if all cpu sizes are same */
4367                 for_each_tracing_cpu(cpu) {
4368                         /* fill in the size from first enabled cpu */
4369                         if (size == 0)
4370                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4371                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4372                                 buf_size_same = 0;
4373                                 break;
4374                         }
4375                 }
4376
4377                 if (buf_size_same) {
4378                         if (!ring_buffer_expanded)
4379                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4380                                             size >> 10,
4381                                             trace_buf_size >> 10);
4382                         else
4383                                 r = sprintf(buf, "%lu\n", size >> 10);
4384                 } else
4385                         r = sprintf(buf, "X\n");
4386         } else
4387                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4388
4389         mutex_unlock(&trace_types_lock);
4390
4391         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4392         return ret;
4393 }
4394
4395 static ssize_t
4396 tracing_entries_write(struct file *filp, const char __user *ubuf,
4397                       size_t cnt, loff_t *ppos)
4398 {
4399         struct inode *inode = file_inode(filp);
4400         struct trace_array *tr = inode->i_private;
4401         unsigned long val;
4402         int ret;
4403
4404         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4405         if (ret)
4406                 return ret;
4407
4408         /* must have at least 1 entry */
4409         if (!val)
4410                 return -EINVAL;
4411
4412         /* value is in KB */
4413         val <<= 10;
4414         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4415         if (ret < 0)
4416                 return ret;
4417
4418         *ppos += cnt;
4419
4420         return cnt;
4421 }
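
/*
 * Illustrative usage (assumes the usual tracing directory layout): the
 * value is taken in KB and shifted into bytes above, so
 *
 *	# echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * resizes every per-cpu buffer to 2 MiB, while writing a per-cpu file
 * (e.g. per_cpu/cpu1/buffer_size_kb) resizes only that cpu.
 */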
4422
4423 static ssize_t
4424 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4425                                 size_t cnt, loff_t *ppos)
4426 {
4427         struct trace_array *tr = filp->private_data;
4428         char buf[64];
4429         int r, cpu;
4430         unsigned long size = 0, expanded_size = 0;
4431
4432         mutex_lock(&trace_types_lock);
4433         for_each_tracing_cpu(cpu) {
4434                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4435                 if (!ring_buffer_expanded)
4436                         expanded_size += trace_buf_size >> 10;
4437         }
4438         if (ring_buffer_expanded)
4439                 r = sprintf(buf, "%lu\n", size);
4440         else
4441                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4442         mutex_unlock(&trace_types_lock);
4443
4444         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4445 }
4446
4447 static ssize_t
4448 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4449                           size_t cnt, loff_t *ppos)
4450 {
4451         /*
4452          * There is no need to read what the user has written, this function
4453          * is just to make sure that there is no error when "echo" is used
4454          */
4455
4456         *ppos += cnt;
4457
4458         return cnt;
4459 }
4460
4461 static int
4462 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4463 {
4464         struct trace_array *tr = inode->i_private;
4465
4466         /* disable tracing ? */
4467         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4468                 tracer_tracing_off(tr);
4469         /* resize the ring buffer to 0 */
4470         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4471
4472         trace_array_put(tr);
4473
4474         return 0;
4475 }
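
/*
 * Illustrative usage (not from the original sources): any write is
 * accepted, and the actual work happens on release, so
 *
 *	# echo > /sys/kernel/debug/tracing/free_buffer
 *
 * shrinks the ring buffer to zero when the file is closed (and turns
 * tracing off first if TRACE_ITER_STOP_ON_FREE is set).
 */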
4476
4477 static ssize_t
4478 tracing_mark_write(struct file *filp, const char __user *ubuf,
4479                                         size_t cnt, loff_t *fpos)
4480 {
4481         unsigned long addr = (unsigned long)ubuf;
4482         struct trace_array *tr = filp->private_data;
4483         struct ring_buffer_event *event;
4484         struct ring_buffer *buffer;
4485         struct print_entry *entry;
4486         unsigned long irq_flags;
4487         struct page *pages[2];
4488         void *map_page[2];
4489         int nr_pages = 1;
4490         ssize_t written;
4491         int offset;
4492         int size;
4493         int len;
4494         int ret;
4495         int i;
4496
4497         if (tracing_disabled)
4498                 return -EINVAL;
4499
4500         if (!(trace_flags & TRACE_ITER_MARKERS))
4501                 return -EINVAL;
4502
4503         if (cnt > TRACE_BUF_SIZE)
4504                 cnt = TRACE_BUF_SIZE;
4505
	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, since the caller just
	 * referenced it. But there's no guarantee that it is. By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can
	 * get access to the pages directly. We then write the data
	 * directly into the ring buffer.
	 */
4520         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4521
4522         /* check if we cross pages */
4523         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4524                 nr_pages = 2;
4525
4526         offset = addr & (PAGE_SIZE - 1);
4527         addr &= PAGE_MASK;
4528
4529         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4530         if (ret < nr_pages) {
4531                 while (--ret >= 0)
4532                         put_page(pages[ret]);
4533                 written = -EFAULT;
4534                 goto out;
4535         }
4536
4537         for (i = 0; i < nr_pages; i++)
4538                 map_page[i] = kmap_atomic(pages[i]);
4539
4540         local_save_flags(irq_flags);
4541         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4542         buffer = tr->trace_buffer.buffer;
4543         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4544                                           irq_flags, preempt_count());
4545         if (!event) {
4546                 /* Ring buffer disabled, return as if not open for write */
4547                 written = -EBADF;
4548                 goto out_unlock;
4549         }
4550
4551         entry = ring_buffer_event_data(event);
4552         entry->ip = _THIS_IP_;
4553
4554         if (nr_pages == 2) {
4555                 len = PAGE_SIZE - offset;
4556                 memcpy(&entry->buf, map_page[0] + offset, len);
4557                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4558         } else
4559                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4560
4561         if (entry->buf[cnt - 1] != '\n') {
4562                 entry->buf[cnt] = '\n';
4563                 entry->buf[cnt + 1] = '\0';
4564         } else
4565                 entry->buf[cnt] = '\0';
4566
4567         __buffer_unlock_commit(buffer, event);
4568
4569         written = cnt;
4570
4571         *fpos += written;
4572
4573  out_unlock:
	for (i = 0; i < nr_pages; i++) {
4575                 kunmap_atomic(map_page[i]);
4576                 put_page(pages[i]);
4577         }
4578  out:
4579         return written;
4580 }
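
/*
 * Illustrative usage (not from the original sources): user strings show
 * up as print entries in the trace, newline-terminated by the code
 * above if the writer did not supply one:
 *
 *	# echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 */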
4581
4582 static int tracing_clock_show(struct seq_file *m, void *v)
4583 {
4584         struct trace_array *tr = m->private;
4585         int i;
4586
4587         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4588                 seq_printf(m,
4589                         "%s%s%s%s", i ? " " : "",
4590                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4591                         i == tr->clock_id ? "]" : "");
4592         seq_putc(m, '\n');
4593
4594         return 0;
4595 }
4596
4597 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4598                                    size_t cnt, loff_t *fpos)
4599 {
4600         struct seq_file *m = filp->private_data;
4601         struct trace_array *tr = m->private;
4602         char buf[64];
4603         const char *clockstr;
4604         int i;
4605
4606         if (cnt >= sizeof(buf))
4607                 return -EINVAL;
4608
4609         if (copy_from_user(&buf, ubuf, cnt))
4610                 return -EFAULT;
4611
4612         buf[cnt] = 0;
4613
4614         clockstr = strstrip(buf);
4615
4616         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4617                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4618                         break;
4619         }
4620         if (i == ARRAY_SIZE(trace_clocks))
4621                 return -EINVAL;
4622
4623         mutex_lock(&trace_types_lock);
4624
4625         tr->clock_id = i;
4626
4627         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4628
4629         /*
4630          * New clock may not be consistent with the previous clock.
4631          * Reset the buffer so that it doesn't have incomparable timestamps.
4632          */
4633         tracing_reset_online_cpus(&tr->trace_buffer);
4634
4635 #ifdef CONFIG_TRACER_MAX_TRACE
4636         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4637                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4638         tracing_reset_online_cpus(&tr->max_buffer);
4639 #endif
4640
4641         mutex_unlock(&trace_types_lock);
4642
4643         *fpos += cnt;
4644
4645         return cnt;
4646 }
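
/*
 * Illustrative usage (the clock list depends on the build): the current
 * selection is shown in brackets, and writing a listed name switches
 * the clock and resets the buffers as above:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */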
4647
4648 static int tracing_clock_open(struct inode *inode, struct file *file)
4649 {
4650         struct trace_array *tr = inode->i_private;
4651         int ret;
4652
4653         if (tracing_disabled)
4654                 return -ENODEV;
4655
4656         if (trace_array_get(tr))
4657                 return -ENODEV;
4658
4659         ret = single_open(file, tracing_clock_show, inode->i_private);
4660         if (ret < 0)
4661                 trace_array_put(tr);
4662
4663         return ret;
4664 }
4665
4666 struct ftrace_buffer_info {
4667         struct trace_iterator   iter;
4668         void                    *spare;
4669         unsigned int            read;
4670 };
4671
4672 #ifdef CONFIG_TRACER_SNAPSHOT
4673 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4674 {
4675         struct trace_cpu *tc = inode->i_private;
4676         struct trace_array *tr = tc->tr;
4677         struct trace_iterator *iter;
4678         struct seq_file *m;
4679         int ret = 0;
4680
4681         if (trace_array_get(tr) < 0)
4682                 return -ENODEV;
4683
4684         if (file->f_mode & FMODE_READ) {
4685                 iter = __tracing_open(tr, tc, inode, file, true);
4686                 if (IS_ERR(iter))
4687                         ret = PTR_ERR(iter);
4688         } else {
4689                 /* Writes still need the seq_file to hold the private data */
4690                 ret = -ENOMEM;
4691                 m = kzalloc(sizeof(*m), GFP_KERNEL);
4692                 if (!m)
4693                         goto out;
4694                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4695                 if (!iter) {
4696                         kfree(m);
4697                         goto out;
4698                 }
4699                 ret = 0;
4700
4701                 iter->tr = tr;
4702                 iter->trace_buffer = &tc->tr->max_buffer;
4703                 iter->cpu_file = tc->cpu;
4704                 m->private = iter;
4705                 file->private_data = m;
4706         }
4707 out:
4708         if (ret < 0)
4709                 trace_array_put(tr);
4710
4711         return ret;
4712 }
4713
4714 static ssize_t
4715 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4716                        loff_t *ppos)
4717 {
4718         struct seq_file *m = filp->private_data;
4719         struct trace_iterator *iter = m->private;
4720         struct trace_array *tr = iter->tr;
4721         unsigned long val;
4722         int ret;
4723
4724         ret = tracing_update_buffers();
4725         if (ret < 0)
4726                 return ret;
4727
4728         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4729         if (ret)
4730                 return ret;
4731
4732         mutex_lock(&trace_types_lock);
4733
4734         if (tr->current_trace->use_max_tr) {
4735                 ret = -EBUSY;
4736                 goto out;
4737         }
4738
4739         switch (val) {
4740         case 0:
4741                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4742                         ret = -EINVAL;
4743                         break;
4744                 }
4745                 if (tr->allocated_snapshot)
4746                         free_snapshot(tr);
4747                 break;
4748         case 1:
4749 /* Only allow per-cpu swap if the ring buffer supports it */
4750 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4751                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4752                         ret = -EINVAL;
4753                         break;
4754                 }
4755 #endif
4756                 if (!tr->allocated_snapshot) {
4757                         ret = alloc_snapshot(tr);
4758                         if (ret < 0)
4759                                 break;
4760                 }
4761                 local_irq_disable();
4762                 /* Now, we're going to swap */
4763                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4764                         update_max_tr(tr, current, smp_processor_id());
4765                 else
4766                         update_max_tr_single(tr, current, iter->cpu_file);
4767                 local_irq_enable();
4768                 break;
4769         default:
4770                 if (tr->allocated_snapshot) {
4771                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4772                                 tracing_reset_online_cpus(&tr->max_buffer);
4773                         else
4774                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
4775                 }
4776                 break;
4777         }
4778
4779         if (ret >= 0) {
4780                 *ppos += cnt;
4781                 ret = cnt;
4782         }
4783 out:
4784         mutex_unlock(&trace_types_lock);
4785         return ret;
4786 }
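
/*
 * Illustrative usage (semantics as implemented in the switch above):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/snapshot	(allocate + swap)
 *	# echo 0 > /sys/kernel/debug/tracing/snapshot	(free the snapshot)
 *	# echo 2 > /sys/kernel/debug/tracing/snapshot	(clear, keep allocated)
 */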
4787
4788 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4789 {
4790         struct seq_file *m = file->private_data;
4791         int ret;
4792
4793         ret = tracing_release(inode, file);
4794
4795         if (file->f_mode & FMODE_READ)
4796                 return ret;
4797
4798         /* If write only, the seq_file is just a stub */
4799         if (m)
4800                 kfree(m->private);
4801         kfree(m);
4802
4803         return 0;
4804 }
4805
4806 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4807 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4808                                     size_t count, loff_t *ppos);
4809 static int tracing_buffers_release(struct inode *inode, struct file *file);
4810 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4811                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4812
4813 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4814 {
4815         struct ftrace_buffer_info *info;
4816         int ret;
4817
4818         ret = tracing_buffers_open(inode, filp);
4819         if (ret < 0)
4820                 return ret;
4821
4822         info = filp->private_data;
4823
4824         if (info->iter.trace->use_max_tr) {
4825                 tracing_buffers_release(inode, filp);
4826                 return -EBUSY;
4827         }
4828
4829         info->iter.snapshot = true;
4830         info->iter.trace_buffer = &info->iter.tr->max_buffer;
4831
4832         return ret;
4833 }
4834
4835 #endif /* CONFIG_TRACER_SNAPSHOT */
4836
4837
4838 static const struct file_operations tracing_max_lat_fops = {
4839         .open           = tracing_open_generic,
4840         .read           = tracing_max_lat_read,
4841         .write          = tracing_max_lat_write,
4842         .llseek         = generic_file_llseek,
4843 };
4844
4845 static const struct file_operations set_tracer_fops = {
4846         .open           = tracing_open_generic,
4847         .read           = tracing_set_trace_read,
4848         .write          = tracing_set_trace_write,
4849         .llseek         = generic_file_llseek,
4850 };
4851
4852 static const struct file_operations tracing_pipe_fops = {
4853         .open           = tracing_open_pipe,
4854         .poll           = tracing_poll_pipe,
4855         .read           = tracing_read_pipe,
4856         .splice_read    = tracing_splice_read_pipe,
4857         .release        = tracing_release_pipe,
4858         .llseek         = no_llseek,
4859 };
4860
4861 static const struct file_operations tracing_entries_fops = {
4862         .open           = tracing_open_generic_tr,
4863         .read           = tracing_entries_read,
4864         .write          = tracing_entries_write,
4865         .llseek         = generic_file_llseek,
4866         .release        = tracing_release_generic_tr,
4867 };
4868
4869 static const struct file_operations tracing_total_entries_fops = {
4870         .open           = tracing_open_generic_tr,
4871         .read           = tracing_total_entries_read,
4872         .llseek         = generic_file_llseek,
4873         .release        = tracing_release_generic_tr,
4874 };
4875
4876 static const struct file_operations tracing_free_buffer_fops = {
4877         .open           = tracing_open_generic_tr,
4878         .write          = tracing_free_buffer_write,
4879         .release        = tracing_free_buffer_release,
4880 };
4881
4882 static const struct file_operations tracing_mark_fops = {
4883         .open           = tracing_open_generic_tr,
4884         .write          = tracing_mark_write,
4885         .llseek         = generic_file_llseek,
4886         .release        = tracing_release_generic_tr,
4887 };
4888
4889 static const struct file_operations trace_clock_fops = {
4890         .open           = tracing_clock_open,
4891         .read           = seq_read,
4892         .llseek         = seq_lseek,
4893         .release        = tracing_single_release_tr,
4894         .write          = tracing_clock_write,
4895 };
4896
4897 #ifdef CONFIG_TRACER_SNAPSHOT
4898 static const struct file_operations snapshot_fops = {
4899         .open           = tracing_snapshot_open,
4900         .read           = seq_read,
4901         .write          = tracing_snapshot_write,
4902         .llseek         = tracing_seek,
4903         .release        = tracing_snapshot_release,
4904 };
4905
4906 static const struct file_operations snapshot_raw_fops = {
4907         .open           = snapshot_raw_open,
4908         .read           = tracing_buffers_read,
4909         .release        = tracing_buffers_release,
4910         .splice_read    = tracing_buffers_splice_read,
4911         .llseek         = no_llseek,
4912 };
4913
4914 #endif /* CONFIG_TRACER_SNAPSHOT */
4915
4916 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4917 {
4918         struct trace_array *tr = inode->i_private;
4919         struct ftrace_buffer_info *info;
4920         int ret;
4921
4922         if (tracing_disabled)
4923                 return -ENODEV;
4924
4925         if (trace_array_get(tr) < 0)
4926                 return -ENODEV;
4927
4928         info = kzalloc(sizeof(*info), GFP_KERNEL);
4929         if (!info) {
4930                 trace_array_put(tr);
4931                 return -ENOMEM;
4932         }
4933
4934         mutex_lock(&trace_types_lock);
4935
4936         info->iter.tr           = tr;
4937         info->iter.cpu_file     = tracing_get_cpu(inode);
4938         info->iter.trace        = tr->current_trace;
4939         info->iter.trace_buffer = &tr->trace_buffer;
4940         info->spare             = NULL;
4941         /* Force reading ring buffer for first read */
4942         info->read              = (unsigned int)-1;
4943
4944         filp->private_data = info;
4945
4946         mutex_unlock(&trace_types_lock);
4947
4948         ret = nonseekable_open(inode, filp);
4949         if (ret < 0)
4950                 trace_array_put(tr);
4951
4952         return ret;
4953 }
4954
4955 static unsigned int
4956 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4957 {
4958         struct ftrace_buffer_info *info = filp->private_data;
4959         struct trace_iterator *iter = &info->iter;
4960
4961         return trace_poll(iter, filp, poll_table);
4962 }
4963
4964 static ssize_t
4965 tracing_buffers_read(struct file *filp, char __user *ubuf,
4966                      size_t count, loff_t *ppos)
4967 {
4968         struct ftrace_buffer_info *info = filp->private_data;
4969         struct trace_iterator *iter = &info->iter;
4970         ssize_t ret;
4971         ssize_t size;
4972
4973         if (!count)
4974                 return 0;
4975
4976         mutex_lock(&trace_types_lock);
4977
4978 #ifdef CONFIG_TRACER_MAX_TRACE
4979         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4980                 size = -EBUSY;
4981                 goto out_unlock;
4982         }
4983 #endif
4984
4985         if (!info->spare)
4986                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
4987                                                           iter->cpu_file);
4988         size = -ENOMEM;
4989         if (!info->spare)
4990                 goto out_unlock;
4991
4992         /* Do we have previous read data to read? */
4993         if (info->read < PAGE_SIZE)
4994                 goto read;
4995
4996  again:
4997         trace_access_lock(iter->cpu_file);
4998         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
4999                                     &info->spare,
5000                                     count,
5001                                     iter->cpu_file, 0);
5002         trace_access_unlock(iter->cpu_file);
5003
5004         if (ret < 0) {
5005                 if (trace_empty(iter)) {
5006                         if ((filp->f_flags & O_NONBLOCK)) {
5007                                 size = -EAGAIN;
5008                                 goto out_unlock;
5009                         }
5010                         mutex_unlock(&trace_types_lock);
5011                         iter->trace->wait_pipe(iter);
5012                         mutex_lock(&trace_types_lock);
5013                         if (signal_pending(current)) {
5014                                 size = -EINTR;
5015                                 goto out_unlock;
5016                         }
5017                         goto again;
5018                 }
5019                 size = 0;
5020                 goto out_unlock;
5021         }
5022
5023         info->read = 0;
5024  read:
5025         size = PAGE_SIZE - info->read;
5026         if (size > count)
5027                 size = count;
5028
5029         ret = copy_to_user(ubuf, info->spare + info->read, size);
5030         if (ret == size) {
5031                 size = -EFAULT;
5032                 goto out_unlock;
5033         }
5034         size -= ret;
5035
5036         *ppos += size;
5037         info->read += size;
5038
5039  out_unlock:
5040         mutex_unlock(&trace_types_lock);
5041
5042         return size;
5043 }
5044
5045 static int tracing_buffers_release(struct inode *inode, struct file *file)
5046 {
5047         struct ftrace_buffer_info *info = file->private_data;
5048         struct trace_iterator *iter = &info->iter;
5049
5050         mutex_lock(&trace_types_lock);
5051
5052         __trace_array_put(iter->tr);
5053
5054         if (info->spare)
5055                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5056         kfree(info);
5057
5058         mutex_unlock(&trace_types_lock);
5059
5060         return 0;
5061 }
5062
5063 struct buffer_ref {
5064         struct ring_buffer      *buffer;
5065         void                    *page;
5066         int                     ref;
5067 };
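/*
 * Lifecycle of a buffer_ref, as implemented by the helpers below: each
 * ref is created with ->ref = 1 in tracing_buffers_splice_read() and
 * attached to a pipe buffer through ->private.  buffer_pipe_buf_get()
 * takes another reference when the pipe buffer is duplicated (e.g. by
 * tee(2)); buffer_pipe_buf_release() and buffer_spd_release() drop one.
 * The final put returns the page to the ring buffer and frees the ref.
 */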
5068
5069 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5070                                     struct pipe_buffer *buf)
5071 {
5072         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5073
5074         if (--ref->ref)
5075                 return;
5076
5077         ring_buffer_free_read_page(ref->buffer, ref->page);
5078         kfree(ref);
5079         buf->private = 0;
5080 }
5081
5082 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5083                                 struct pipe_buffer *buf)
5084 {
5085         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5086
5087         ref->ref++;
5088 }
5089
5090 /* Pipe buffer operations for a buffer. */
5091 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5092         .can_merge              = 0,
5093         .map                    = generic_pipe_buf_map,
5094         .unmap                  = generic_pipe_buf_unmap,
5095         .confirm                = generic_pipe_buf_confirm,
5096         .release                = buffer_pipe_buf_release,
5097         .steal                  = generic_pipe_buf_steal,
5098         .get                    = buffer_pipe_buf_get,
5099 };
5100
5101 /*
5102  * Callback from splice_to_pipe(): release the pages left in the spd
5103  * if we errored out while filling the pipe.
5104  */
5105 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5106 {
5107         struct buffer_ref *ref =
5108                 (struct buffer_ref *)spd->partial[i].private;
5109
5110         if (--ref->ref)
5111                 return;
5112
5113         ring_buffer_free_read_page(ref->buffer, ref->page);
5114         kfree(ref);
5115         spd->partial[i].private = 0;
5116 }
5117
5118 static ssize_t
5119 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5120                             struct pipe_inode_info *pipe, size_t len,
5121                             unsigned int flags)
5122 {
5123         struct ftrace_buffer_info *info = file->private_data;
5124         struct trace_iterator *iter = &info->iter;
5125         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5126         struct page *pages_def[PIPE_DEF_BUFFERS];
5127         struct splice_pipe_desc spd = {
5128                 .pages          = pages_def,
5129                 .partial        = partial_def,
5130                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5131                 .flags          = flags,
5132                 .ops            = &buffer_pipe_buf_ops,
5133                 .spd_release    = buffer_spd_release,
5134         };
5135         struct buffer_ref *ref;
5136         int entries, size, i;
5137         ssize_t ret;
5138
5139         mutex_lock(&trace_types_lock);
5140
5141 #ifdef CONFIG_TRACER_MAX_TRACE
5142         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5143                 ret = -EBUSY;
5144                 goto out;
5145         }
5146 #endif
5147
5148         if (splice_grow_spd(pipe, &spd)) {
5149                 ret = -ENOMEM;
5150                 goto out;
5151         }
5152
5153         if (*ppos & (PAGE_SIZE - 1)) {
5154                 ret = -EINVAL;
5155                 goto out;
5156         }
5157
5158         if (len & (PAGE_SIZE - 1)) {
5159                 if (len < PAGE_SIZE) {
5160                         ret = -EINVAL;
5161                         goto out;
5162                 }
5163                 len &= PAGE_MASK;
5164         }
5165
5166  again:
5167         trace_access_lock(iter->cpu_file);
5168         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5169
5170         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
5171                 struct page *page;
5172                 int r;
5173
5174                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5175                 if (!ref)
5176                         break;
5177
5178                 ref->ref = 1;
5179                 ref->buffer = iter->trace_buffer->buffer;
5180                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5181                 if (!ref->page) {
5182                         kfree(ref);
5183                         break;
5184                 }
5185
5186                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5187                                           len, iter->cpu_file, 1);
5188                 if (r < 0) {
5189                         ring_buffer_free_read_page(ref->buffer, ref->page);
5190                         kfree(ref);
5191                         break;
5192                 }
5193
5194                 /*
5195                  * Zero out any leftover data; this page is going
5196                  * to user land.
5197                  */
5198                 size = ring_buffer_page_len(ref->page);
5199                 if (size < PAGE_SIZE)
5200                         memset(ref->page + size, 0, PAGE_SIZE - size);
5201
5202                 page = virt_to_page(ref->page);
5203
5204                 spd.pages[i] = page;
5205                 spd.partial[i].len = PAGE_SIZE;
5206                 spd.partial[i].offset = 0;
5207                 spd.partial[i].private = (unsigned long)ref;
5208                 spd.nr_pages++;
5209                 *ppos += PAGE_SIZE;
5210
5211                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5212         }
5213
5214         trace_access_unlock(iter->cpu_file);
5215         spd.nr_pages = i;
5216
5217         /* did we read anything? */
5218         if (!spd.nr_pages) {
5219                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5220                         ret = -EAGAIN;
5221                         goto out;
5222                 }
5223                 mutex_unlock(&trace_types_lock);
5224                 iter->trace->wait_pipe(iter);
5225                 mutex_lock(&trace_types_lock);
5226                 if (signal_pending(current)) {
5227                         ret = -EINTR;
5228                         goto out;
5229                 }
5230                 goto again;
5231         }
5232
5233         ret = splice_to_pipe(pipe, &spd);
5234         splice_shrink_spd(&spd);
5235 out:
5236         mutex_unlock(&trace_types_lock);
5237
5238         return ret;
5239 }
5240
5241 static const struct file_operations tracing_buffers_fops = {
5242         .open           = tracing_buffers_open,
5243         .read           = tracing_buffers_read,
5244         .poll           = tracing_buffers_poll,
5245         .release        = tracing_buffers_release,
5246         .splice_read    = tracing_buffers_splice_read,
5247         .llseek         = no_llseek,
5248 };
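/*
 * These fops back the per-cpu trace_pipe_raw files.  Illustrative usage
 * from userspace (a sketch, assuming debugfs is mounted at the usual
 * place and PAGE_SIZE is 4096): reads return whole ring-buffer pages,
 * so page-sized reads are the natural unit:
 *
 *   dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *      of=/tmp/cpu0.raw bs=4096
 *
 * The binary pages can then be decoded by a tool that understands the
 * ring buffer page format, such as trace-cmd.
 */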
5249
5250 static ssize_t
5251 tracing_stats_read(struct file *filp, char __user *ubuf,
5252                    size_t count, loff_t *ppos)
5253 {
5254         struct inode *inode = file_inode(filp);
5255         struct trace_array *tr = inode->i_private;
5256         struct trace_buffer *trace_buf = &tr->trace_buffer;
5257         int cpu = tracing_get_cpu(inode);
5258         struct trace_seq *s;
5259         unsigned long cnt;
5260         unsigned long long t;
5261         unsigned long usec_rem;
5262
5263         s = kmalloc(sizeof(*s), GFP_KERNEL);
5264         if (!s)
5265                 return -ENOMEM;
5266
5267         trace_seq_init(s);
5268
5269         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5270         trace_seq_printf(s, "entries: %ld\n", cnt);
5271
5272         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5273         trace_seq_printf(s, "overrun: %ld\n", cnt);
5274
5275         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5276         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5277
5278         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5279         trace_seq_printf(s, "bytes: %ld\n", cnt);
5280
5281         if (trace_clocks[tr->clock_id].in_ns) {
5282                 /* local or global for trace_clock */
5283                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5284                 usec_rem = do_div(t, USEC_PER_SEC);
5285                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5286                                                                 t, usec_rem);
5287
5288                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5289                 usec_rem = do_div(t, USEC_PER_SEC);
5290                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5291         } else {
5292                 /* counter or tsc mode for trace_clock */
5293                 trace_seq_printf(s, "oldest event ts: %llu\n",
5294                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5295
5296                 trace_seq_printf(s, "now ts: %llu\n",
5297                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5298         }
5299
5300         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5301         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5302
5303         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5304         trace_seq_printf(s, "read events: %ld\n", cnt);
5305
5306         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5307
5308         kfree(s);
5309
5310         return count;
5311 }
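/*
 * Example output of the per-cpu "stats" file generated above.  The
 * numbers are purely illustrative; the two "ts" lines use the
 * seconds.microseconds form only when the selected trace clock counts
 * in nanoseconds:
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 6724
 *   oldest event ts:  2636.720995
 *   now ts:  2650.499653
 *   dropped events: 0
 *   read events: 42
 */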
5312
5313 static const struct file_operations tracing_stats_fops = {
5314         .open           = tracing_open_generic_tr,
5315         .read           = tracing_stats_read,
5316         .llseek         = generic_file_llseek,
5317         .release        = tracing_release_generic_tr,
5318 };
5319
5320 #ifdef CONFIG_DYNAMIC_FTRACE
5321
5322 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5323 {
5324         return 0;
5325 }
5326
5327 static ssize_t
5328 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5329                   size_t cnt, loff_t *ppos)
5330 {
5331         static char ftrace_dyn_info_buffer[1024];
5332         static DEFINE_MUTEX(dyn_info_mutex);
5333         unsigned long *p = filp->private_data;
5334         char *buf = ftrace_dyn_info_buffer;
5335         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5336         int r;
5337
5338         mutex_lock(&dyn_info_mutex);
5339         r = sprintf(buf, "%ld ", *p);
5340
5341         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5342         buf[r++] = '\n';
5343
5344         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5345
5346         mutex_unlock(&dyn_info_mutex);
5347
5348         return r;
5349 }
5350
5351 static const struct file_operations tracing_dyn_info_fops = {
5352         .open           = tracing_open_generic,
5353         .read           = tracing_read_dyn_info,
5354         .llseek         = generic_file_llseek,
5355 };
5356 #endif /* CONFIG_DYNAMIC_FTRACE */
5357
5358 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5359 static void
5360 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5361 {
5362         tracing_snapshot();
5363 }
5364
5365 static void
5366 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5367 {
5368         unsigned long *count = (unsigned long *)data;
5369
5370         if (!*count)
5371                 return;
5372
5373         if (*count != -1)
5374                 (*count)--;
5375
5376         tracing_snapshot();
5377 }
5378
5379 static int
5380 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5381                       struct ftrace_probe_ops *ops, void *data)
5382 {
5383         long count = (long)data;
5384
5385         seq_printf(m, "%ps:", (void *)ip);
5386
5387         seq_puts(m, "snapshot");
5388
5389         if (count == -1)
5390                 seq_puts(m, ":unlimited\n");
5391         else
5392                 seq_printf(m, ":count=%ld\n", count);
5393
5394         return 0;
5395 }
5396
5397 static struct ftrace_probe_ops snapshot_probe_ops = {
5398         .func                   = ftrace_snapshot,
5399         .print                  = ftrace_snapshot_print,
5400 };
5401
5402 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5403         .func                   = ftrace_count_snapshot,
5404         .print                  = ftrace_snapshot_print,
5405 };
5406
5407 static int
5408 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5409                                char *glob, char *cmd, char *param, int enable)
5410 {
5411         struct ftrace_probe_ops *ops;
5412         void *count = (void *)-1;
5413         char *number;
5414         int ret;
5415
5416         /* hash funcs only work with set_ftrace_filter */
5417         if (!enable)
5418                 return -EINVAL;
5419
5420         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5421
5422         if (glob[0] == '!') {
5423                 unregister_ftrace_function_probe_func(glob+1, ops);
5424                 return 0;
5425         }
5426
5427         if (!param)
5428                 goto out_reg;
5429
5430         number = strsep(&param, ":");
5431
5432         if (!strlen(number))
5433                 goto out_reg;
5434
5435         /*
5436          * We use the callback data field (which is a pointer)
5437          * as our counter.
5438          */
5439         ret = kstrtoul(number, 0, (unsigned long *)&count);
5440         if (ret)
5441                 return ret;
5442
5443  out_reg:
5444         ret = register_ftrace_function_probe(glob, ops, count);
5445
5446         if (ret >= 0)
5447                 alloc_snapshot(&global_trace);
5448
5449         return ret < 0 ? ret : 0;
5450 }
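/*
 * Illustrative usage of the "snapshot" function command parsed above,
 * written to set_ftrace_filter (the function name is only an example):
 *
 *   echo 'schedule:snapshot' > set_ftrace_filter    # snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter  # only the first 5 hits
 *   echo '!schedule:snapshot' > set_ftrace_filter   # remove the probe
 */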
5451
5452 static struct ftrace_func_command ftrace_snapshot_cmd = {
5453         .name                   = "snapshot",
5454         .func                   = ftrace_trace_snapshot_callback,
5455 };
5456
5457 static int register_snapshot_cmd(void)
5458 {
5459         return register_ftrace_command(&ftrace_snapshot_cmd);
5460 }
5461 #else
5462 static inline int register_snapshot_cmd(void) { return 0; }
5463 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5464
5465 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5466 {
5467         if (tr->dir)
5468                 return tr->dir;
5469
5470         if (!debugfs_initialized())
5471                 return NULL;
5472
5473         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5474                 tr->dir = debugfs_create_dir("tracing", NULL);
5475
5476         if (!tr->dir)
5477                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5478
5479         return tr->dir;
5480 }
5481
5482 struct dentry *tracing_init_dentry(void)
5483 {
5484         return tracing_init_dentry_tr(&global_trace);
5485 }
5486
5487 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5488 {
5489         struct dentry *d_tracer;
5490
5491         if (tr->percpu_dir)
5492                 return tr->percpu_dir;
5493
5494         d_tracer = tracing_init_dentry_tr(tr);
5495         if (!d_tracer)
5496                 return NULL;
5497
5498         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5499
5500         WARN_ONCE(!tr->percpu_dir,
5501                   "Could not create debugfs directory 'per_cpu'\n");
5502
5503         return tr->percpu_dir;
5504 }
5505
5506 static struct dentry *
5507 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5508                       void *data, long cpu, const struct file_operations *fops)
5509 {
5510         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5511
5512         if (ret) /* See tracing_get_cpu() */
5513                 ret->d_inode->i_cdev = (void *)(cpu + 1);
5514         return ret;
5515 }
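/*
 * Sketch of the decode side for reference; the real helper lives in
 * trace.h.  tracing_get_cpu() is assumed to undo the "cpu + 1" encoding
 * above, treating a NULL i_cdev as "all CPUs":
 *
 *   if (inode->i_cdev)
 *           return (long)inode->i_cdev - 1;
 *   return RING_BUFFER_ALL_CPUS;
 */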
5516
5517 static void
5518 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5519 {
5520         struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
5521         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5522         struct dentry *d_cpu;
5523         char cpu_dir[30]; /* 30 characters should be more than enough */
5524
5525         if (!d_percpu)
5526                 return;
5527
5528         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5529         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5530         if (!d_cpu) {
5531                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5532                 return;
5533         }
5534
5535         /* per cpu trace_pipe */
5536         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5537                                 tr, cpu, &tracing_pipe_fops);
5538
5539         /* per cpu trace */
5540         trace_create_cpu_file("trace", 0644, d_cpu,
5541                                 &data->trace_cpu, cpu, &tracing_fops);
5542
5543         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5544                                 tr, cpu, &tracing_buffers_fops);
5545
5546         trace_create_cpu_file("stats", 0444, d_cpu,
5547                                 tr, cpu, &tracing_stats_fops);
5548
5549         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5550                                 tr, cpu, &tracing_entries_fops);
5551
5552 #ifdef CONFIG_TRACER_SNAPSHOT
5553         trace_create_cpu_file("snapshot", 0644, d_cpu,
5554                                 &data->trace_cpu, cpu, &snapshot_fops);
5555
5556         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5557                                 tr, cpu, &snapshot_raw_fops);
5558 #endif
5559 }
5560
5561 #ifdef CONFIG_FTRACE_SELFTEST
5562 /* Let selftest have access to static functions in this file */
5563 #include "trace_selftest.c"
5564 #endif
5565
5566 struct trace_option_dentry {
5567         struct tracer_opt               *opt;
5568         struct tracer_flags             *flags;
5569         struct trace_array              *tr;
5570         struct dentry                   *entry;
5571 };
5572
5573 static ssize_t
5574 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5575                         loff_t *ppos)
5576 {
5577         struct trace_option_dentry *topt = filp->private_data;
5578         char *buf;
5579
5580         if (topt->flags->val & topt->opt->bit)
5581                 buf = "1\n";
5582         else
5583                 buf = "0\n";
5584
5585         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5586 }
5587
5588 static ssize_t
5589 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5590                          loff_t *ppos)
5591 {
5592         struct trace_option_dentry *topt = filp->private_data;
5593         unsigned long val;
5594         int ret;
5595
5596         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5597         if (ret)
5598                 return ret;
5599
5600         if (val != 0 && val != 1)
5601                 return -EINVAL;
5602
5603         if (!!(topt->flags->val & topt->opt->bit) != val) {
5604                 mutex_lock(&trace_types_lock);
5605                 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
5606                                           topt->opt, !val);
5607                 mutex_unlock(&trace_types_lock);
5608                 if (ret)
5609                         return ret;
5610         }
5611
5612         *ppos += cnt;
5613
5614         return cnt;
5615 }
5616
5617
5618 static const struct file_operations trace_options_fops = {
5619         .open = tracing_open_generic,
5620         .read = trace_options_read,
5621         .write = trace_options_write,
5622         .llseek = generic_file_llseek,
5623 };
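/*
 * Illustrative usage of the per-tracer option files served by the fops
 * above.  The available names depend on the current tracer; the path
 * assumes the usual debugfs mount point:
 *
 *   cat /sys/kernel/debug/tracing/options/<option>    # prints "0" or "1"
 *   echo 1 > /sys/kernel/debug/tracing/options/<option>
 *
 * Any value other than 0 or 1 is rejected with -EINVAL.
 */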
5624
5625 static ssize_t
5626 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5627                         loff_t *ppos)
5628 {
5629         long index = (long)filp->private_data;
5630         char *buf;
5631
5632         if (trace_flags & (1 << index))
5633                 buf = "1\n";
5634         else
5635                 buf = "0\n";
5636
5637         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5638 }
5639
5640 static ssize_t
5641 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5642                          loff_t *ppos)
5643 {
5644         struct trace_array *tr = &global_trace;
5645         long index = (long)filp->private_data;
5646         unsigned long val;
5647         int ret;
5648
5649         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5650         if (ret)
5651                 return ret;
5652
5653         if (val != 0 && val != 1)
5654                 return -EINVAL;
5655
5656         mutex_lock(&trace_types_lock);
5657         ret = set_tracer_flag(tr, 1 << index, val);
5658         mutex_unlock(&trace_types_lock);
5659
5660         if (ret < 0)
5661                 return ret;
5662
5663         *ppos += cnt;
5664
5665         return cnt;
5666 }
5667
5668 static const struct file_operations trace_options_core_fops = {
5669         .open = tracing_open_generic,
5670         .read = trace_options_core_read,
5671         .write = trace_options_core_write,
5672         .llseek = generic_file_llseek,
5673 };
5674
5675 struct dentry *trace_create_file(const char *name,
5676                                  umode_t mode,
5677                                  struct dentry *parent,
5678                                  void *data,
5679                                  const struct file_operations *fops)
5680 {
5681         struct dentry *ret;
5682
5683         ret = debugfs_create_file(name, mode, parent, data, fops);
5684         if (!ret)
5685                 pr_warning("Could not create debugfs '%s' entry\n", name);
5686
5687         return ret;
5688 }
5689
5690
5691 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5692 {
5693         struct dentry *d_tracer;
5694
5695         if (tr->options)
5696                 return tr->options;
5697
5698         d_tracer = tracing_init_dentry_tr(tr);
5699         if (!d_tracer)
5700                 return NULL;
5701
5702         tr->options = debugfs_create_dir("options", d_tracer);
5703         if (!tr->options) {
5704                 pr_warning("Could not create debugfs directory 'options'\n");
5705                 return NULL;
5706         }
5707
5708         return tr->options;
5709 }
5710
5711 static void
5712 create_trace_option_file(struct trace_array *tr,
5713                          struct trace_option_dentry *topt,
5714                          struct tracer_flags *flags,
5715                          struct tracer_opt *opt)
5716 {
5717         struct dentry *t_options;
5718
5719         t_options = trace_options_init_dentry(tr);
5720         if (!t_options)
5721                 return;
5722
5723         topt->flags = flags;
5724         topt->opt = opt;
5725         topt->tr = tr;
5726
5727         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5728                                     &trace_options_fops);
5729
5730 }
5731
5732 static struct trace_option_dentry *
5733 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5734 {
5735         struct trace_option_dentry *topts;
5736         struct tracer_flags *flags;
5737         struct tracer_opt *opts;
5738         int cnt;
5739
5740         if (!tracer)
5741                 return NULL;
5742
5743         flags = tracer->flags;
5744
5745         if (!flags || !flags->opts)
5746                 return NULL;
5747
5748         opts = flags->opts;
5749
5750         for (cnt = 0; opts[cnt].name; cnt++)
5751                 ;
5752
5753         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5754         if (!topts)
5755                 return NULL;
5756
5757         for (cnt = 0; opts[cnt].name; cnt++)
5758                 create_trace_option_file(tr, &topts[cnt], flags,
5759                                          &opts[cnt]);
5760
5761         return topts;
5762 }
5763
5764 static void
5765 destroy_trace_option_files(struct trace_option_dentry *topts)
5766 {
5767         int cnt;
5768
5769         if (!topts)
5770                 return;
5771
5772         for (cnt = 0; topts[cnt].opt; cnt++) {
5773                 if (topts[cnt].entry)
5774                         debugfs_remove(topts[cnt].entry);
5775         }
5776
5777         kfree(topts);
5778 }
5779
5780 static struct dentry *
5781 create_trace_option_core_file(struct trace_array *tr,
5782                               const char *option, long index)
5783 {
5784         struct dentry *t_options;
5785
5786         t_options = trace_options_init_dentry(tr);
5787         if (!t_options)
5788                 return NULL;
5789
5790         return trace_create_file(option, 0644, t_options, (void *)index,
5791                                     &trace_options_core_fops);
5792 }
5793
5794 static __init void create_trace_options_dir(struct trace_array *tr)
5795 {
5796         struct dentry *t_options;
5797         int i;
5798
5799         t_options = trace_options_init_dentry(tr);
5800         if (!t_options)
5801                 return;
5802
5803         for (i = 0; trace_options[i]; i++)
5804                 create_trace_option_core_file(tr, trace_options[i], i);
5805 }
5806
5807 static ssize_t
5808 rb_simple_read(struct file *filp, char __user *ubuf,
5809                size_t cnt, loff_t *ppos)
5810 {
5811         struct trace_array *tr = filp->private_data;
5812         char buf[64];
5813         int r;
5814
5815         r = tracer_tracing_is_on(tr);
5816         r = sprintf(buf, "%d\n", r);
5817
5818         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5819 }
5820
5821 static ssize_t
5822 rb_simple_write(struct file *filp, const char __user *ubuf,
5823                 size_t cnt, loff_t *ppos)
5824 {
5825         struct trace_array *tr = filp->private_data;
5826         struct ring_buffer *buffer = tr->trace_buffer.buffer;
5827         unsigned long val;
5828         int ret;
5829
5830         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5831         if (ret)
5832                 return ret;
5833
5834         if (buffer) {
5835                 mutex_lock(&trace_types_lock);
5836                 if (val) {
5837                         tracer_tracing_on(tr);
5838                         if (tr->current_trace->start)
5839                                 tr->current_trace->start(tr);
5840                 } else {
5841                         tracer_tracing_off(tr);
5842                         if (tr->current_trace->stop)
5843                                 tr->current_trace->stop(tr);
5844                 }
5845                 mutex_unlock(&trace_types_lock);
5846         }
5847
5848         (*ppos)++;
5849
5850         return cnt;
5851 }
5852
5853 static const struct file_operations rb_simple_fops = {
5854         .open           = tracing_open_generic_tr,
5855         .read           = rb_simple_read,
5856         .write          = rb_simple_write,
5857         .release        = tracing_release_generic_tr,
5858         .llseek         = default_llseek,
5859 };
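/*
 * rb_simple_fops backs the "tracing_on" file created in
 * init_tracer_debugfs() below.  Illustrative usage:
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on   # stop recording
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on   # resume recording
 *   cat /sys/kernel/debug/tracing/tracing_on        # read current state
 */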
5860
5861 struct dentry *trace_instance_dir;
5862
5863 static void
5864 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5865
5866 static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5867 {
5868         int cpu;
5869
5870         for_each_tracing_cpu(cpu) {
5871                 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5872                 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5873                 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5874         }
5875 }
5876
5877 static int
5878 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5879 {
5880         enum ring_buffer_flags rb_flags;
5881
5882         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5883
5884         buf->buffer = ring_buffer_alloc(size, rb_flags);
5885         if (!buf->buffer)
5886                 return -ENOMEM;
5887
5888         buf->data = alloc_percpu(struct trace_array_cpu);
5889         if (!buf->data) {
5890                 ring_buffer_free(buf->buffer);
5891                 return -ENOMEM;
5892         }
5893
5894         init_trace_buffers(tr, buf);
5895
5896         /* Allocate the first page for all buffers */
5897         set_buffer_entries(buf,
5898                            ring_buffer_size(buf->buffer, 0));
5899
5900         return 0;
5901 }
5902
5903 static int allocate_trace_buffers(struct trace_array *tr, int size)
5904 {
5905         int ret;
5906
5907         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5908         if (ret)
5909                 return ret;
5910
5911 #ifdef CONFIG_TRACER_MAX_TRACE
5912         ret = allocate_trace_buffer(tr, &tr->max_buffer,
5913                                     allocate_snapshot ? size : 1);
5914         if (WARN_ON(ret)) {
5915                 ring_buffer_free(tr->trace_buffer.buffer);
5916                 free_percpu(tr->trace_buffer.data);
5917                 return -ENOMEM;
5918         }
5919         tr->allocated_snapshot = allocate_snapshot;
5920
5921         /*
5922          * Only the top level trace array gets its snapshot allocated
5923          * from the kernel command line.
5924          */
5925         allocate_snapshot = false;
5926 #endif
5927         return 0;
5928 }
5929
5930 static int new_instance_create(const char *name)
5931 {
5932         struct trace_array *tr;
5933         int ret;
5934
5935         mutex_lock(&trace_types_lock);
5936
5937         ret = -EEXIST;
5938         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5939                 if (tr->name && strcmp(tr->name, name) == 0)
5940                         goto out_unlock;
5941         }
5942
5943         ret = -ENOMEM;
5944         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5945         if (!tr)
5946                 goto out_unlock;
5947
5948         tr->name = kstrdup(name, GFP_KERNEL);
5949         if (!tr->name)
5950                 goto out_free_tr;
5951
5952         raw_spin_lock_init(&tr->start_lock);
5953
5954         tr->current_trace = &nop_trace;
5955
5956         INIT_LIST_HEAD(&tr->systems);
5957         INIT_LIST_HEAD(&tr->events);
5958
5959         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
5960                 goto out_free_tr;
5961
5962         /* Holder for file callbacks */
5963         tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5964         tr->trace_cpu.tr = tr;
5965
5966         tr->dir = debugfs_create_dir(name, trace_instance_dir);
5967         if (!tr->dir)
5968                 goto out_free_tr;
5969
5970         ret = event_trace_add_tracer(tr->dir, tr);
5971         if (ret) {
5972                 debugfs_remove_recursive(tr->dir);
5973                 goto out_free_tr;
5974         }
5975
5976         init_tracer_debugfs(tr, tr->dir);
5977
5978         list_add(&tr->list, &ftrace_trace_arrays);
5979
5980         mutex_unlock(&trace_types_lock);
5981
5982         return 0;
5983
5984  out_free_tr:
5985         if (tr->trace_buffer.buffer)
5986                 ring_buffer_free(tr->trace_buffer.buffer);
5987         kfree(tr->name);
5988         kfree(tr);
5989
5990  out_unlock:
5991         mutex_unlock(&trace_types_lock);
5992
5993         return ret;
5994
5995 }
5996
5997 static int instance_delete(const char *name)
5998 {
5999         struct trace_array *tr;
6000         int found = 0;
6001         int ret;
6002
6003         mutex_lock(&trace_types_lock);
6004
6005         ret = -ENODEV;
6006         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6007                 if (tr->name && strcmp(tr->name, name) == 0) {
6008                         found = 1;
6009                         break;
6010                 }
6011         }
6012         if (!found)
6013                 goto out_unlock;
6014
6015         ret = -EBUSY;
6016         if (tr->ref)
6017                 goto out_unlock;
6018
6019         list_del(&tr->list);
6020
6021         event_trace_del_tracer(tr);
6022         debugfs_remove_recursive(tr->dir);
6023         free_percpu(tr->trace_buffer.data);
6024         ring_buffer_free(tr->trace_buffer.buffer);
6025
6026         kfree(tr->name);
6027         kfree(tr);
6028
6029         ret = 0;
6030
6031  out_unlock:
6032         mutex_unlock(&trace_types_lock);
6033
6034         return ret;
6035 }
6036
6037 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6038 {
6039         struct dentry *parent;
6040         int ret;
6041
6042         /* Paranoid: Make sure the parent is the "instances" directory */
6043         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6044         if (WARN_ON_ONCE(parent != trace_instance_dir))
6045                 return -ENOENT;
6046
6047         /*
6048          * The inode mutex is locked, but debugfs_create_dir() will also
6049          * take the mutex. As the instances directory can not be destroyed
6050          * or changed in any other way, it is safe to unlock it, and
6051          * let the dentry try. If two users try to make the same dir at
6052          * the same time, then the new_instance_create() will determine the
6053          * winner.
6054          */
6055         mutex_unlock(&inode->i_mutex);
6056
6057         ret = new_instance_create(dentry->d_iname);
6058
6059         mutex_lock(&inode->i_mutex);
6060
6061         return ret;
6062 }
6063
6064 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6065 {
6066         struct dentry *parent;
6067         int ret;
6068
6069         /* Paranoid: Make sure the parent is the "instances" directory */
6070         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6071         if (WARN_ON_ONCE(parent != trace_instance_dir))
6072                 return -ENOENT;
6073
6074         /* The caller did a dget() on dentry */
6075         mutex_unlock(&dentry->d_inode->i_mutex);
6076
6077         /*
6078          * The inode mutex is locked, but debugfs_remove_recursive() will
6079          * also take the mutex. As the instances directory cannot be
6080          * destroyed or changed in any other way, it is safe to unlock it,
6081          * and let the removal proceed. If two users try to remove the same
6082          * dir at the same time, then instance_delete() will determine the
6083          * winner.
6084          */
6085         mutex_unlock(&inode->i_mutex);
6086
6087         ret = instance_delete(dentry->d_iname);
6088
6089         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6090         mutex_lock(&dentry->d_inode->i_mutex);
6091
6092         return ret;
6093 }
6094
6095 static const struct inode_operations instance_dir_inode_operations = {
6096         .lookup         = simple_lookup,
6097         .mkdir          = instance_mkdir,
6098         .rmdir          = instance_rmdir,
6099 };
6100
6101 static __init void create_trace_instances(struct dentry *d_tracer)
6102 {
6103         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6104         if (WARN_ON(!trace_instance_dir))
6105                 return;
6106
6107         /* Hijack the dir inode operations, to allow mkdir */
6108         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6109 }
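/*
 * With the mkdir/rmdir hooks installed above, trace instances are
 * managed with ordinary directory operations ("foo" is an example
 * name, and the paths assume the usual debugfs mount point):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo   # new_instance_create()
 *   rmdir /sys/kernel/debug/tracing/instances/foo   # instance_delete()
 *
 * The rmdir fails with -EBUSY while the instance still holds references.
 */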
6110
6111 static void
6112 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6113 {
6114         int cpu;
6115
6116         trace_create_file("trace_options", 0644, d_tracer,
6117                           tr, &tracing_iter_fops);
6118
6119         trace_create_file("trace", 0644, d_tracer,
6120                         (void *)&tr->trace_cpu, &tracing_fops);
6121
6122         trace_create_file("trace_pipe", 0444, d_tracer,
6123                           tr, &tracing_pipe_fops);
6124
6125         trace_create_file("buffer_size_kb", 0644, d_tracer,
6126                           tr, &tracing_entries_fops);
6127
6128         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6129                           tr, &tracing_total_entries_fops);
6130
6131         trace_create_file("free_buffer", 0644, d_tracer,
6132                           tr, &tracing_free_buffer_fops);
6133
6134         trace_create_file("trace_marker", 0220, d_tracer,
6135                           tr, &tracing_mark_fops);
6136
6137         trace_create_file("trace_clock", 0644, d_tracer, tr,
6138                           &trace_clock_fops);
6139
6140         trace_create_file("tracing_on", 0644, d_tracer,
6141                             tr, &rb_simple_fops);
6142
6143 #ifdef CONFIG_TRACER_SNAPSHOT
6144         trace_create_file("snapshot", 0644, d_tracer,
6145                           (void *)&tr->trace_cpu, &snapshot_fops);
6146 #endif
6147
6148         for_each_tracing_cpu(cpu)
6149                 tracing_init_debugfs_percpu(tr, cpu);
6150
6151 }
6152
6153 static __init int tracer_init_debugfs(void)
6154 {
6155         struct dentry *d_tracer;
6156
6157         trace_access_lock_init();
6158
6159         d_tracer = tracing_init_dentry();
6160         if (!d_tracer)
6161                 return 0;
6162
6163         init_tracer_debugfs(&global_trace, d_tracer);
6164
6165         trace_create_file("tracing_cpumask", 0644, d_tracer,
6166                         &global_trace, &tracing_cpumask_fops);
6167
6168         trace_create_file("available_tracers", 0444, d_tracer,
6169                         &global_trace, &show_traces_fops);
6170
6171         trace_create_file("current_tracer", 0644, d_tracer,
6172                         &global_trace, &set_tracer_fops);
6173
6174 #ifdef CONFIG_TRACER_MAX_TRACE
6175         trace_create_file("tracing_max_latency", 0644, d_tracer,
6176                         &tracing_max_latency, &tracing_max_lat_fops);
6177 #endif
6178
6179         trace_create_file("tracing_thresh", 0644, d_tracer,
6180                         &tracing_thresh, &tracing_max_lat_fops);
6181
6182         trace_create_file("README", 0444, d_tracer,
6183                         NULL, &tracing_readme_fops);
6184
6185         trace_create_file("saved_cmdlines", 0444, d_tracer,
6186                         NULL, &tracing_saved_cmdlines_fops);
6187
6188 #ifdef CONFIG_DYNAMIC_FTRACE
6189         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6190                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6191 #endif
6192
6193         create_trace_instances(d_tracer);
6194
6195         create_trace_options_dir(&global_trace);
6196
6197         return 0;
6198 }
6199
6200 static int trace_panic_handler(struct notifier_block *this,
6201                                unsigned long event, void *unused)
6202 {
6203         if (ftrace_dump_on_oops)
6204                 ftrace_dump(ftrace_dump_on_oops);
6205         return NOTIFY_OK;
6206 }
6207
6208 static struct notifier_block trace_panic_notifier = {
6209         .notifier_call  = trace_panic_handler,
6210         .next           = NULL,
6211         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6212 };
6213
6214 static int trace_die_handler(struct notifier_block *self,
6215                              unsigned long val,
6216                              void *data)
6217 {
6218         switch (val) {
6219         case DIE_OOPS:
6220                 if (ftrace_dump_on_oops)
6221                         ftrace_dump(ftrace_dump_on_oops);
6222                 break;
6223         default:
6224                 break;
6225         }
6226         return NOTIFY_OK;
6227 }
6228
6229 static struct notifier_block trace_die_notifier = {
6230         .notifier_call = trace_die_handler,
6231         .priority = 200
6232 };
6233
6234 /*
6235  * printk() is limited to 1024 bytes per call; we really don't need
6236  * even that much.  Nothing should be printing 1000 characters anyway.
6237  */
6238 #define TRACE_MAX_PRINT         1000
6239
6240 /*
6241  * Define here KERN_TRACE so that we have one place to modify
6242  * it if we decide to change what log level the ftrace dump
6243  * should be at.
6244  */
6245 #define KERN_TRACE              KERN_EMERG
6246
6247 void
6248 trace_printk_seq(struct trace_seq *s)
6249 {
6250         /* Probably should print a warning here. */
6251         if (s->len >= TRACE_MAX_PRINT)
6252                 s->len = TRACE_MAX_PRINT;
6253
6254         /* Should already be nul-terminated, but we are paranoid. */
6255         s->buffer[s->len] = 0;
6256
6257         printk(KERN_TRACE "%s", s->buffer);
6258
6259         trace_seq_init(s);
6260 }
6261
6262 void trace_init_global_iter(struct trace_iterator *iter)
6263 {
6264         iter->tr = &global_trace;
6265         iter->trace = iter->tr->current_trace;
6266         iter->cpu_file = RING_BUFFER_ALL_CPUS;
6267         iter->trace_buffer = &global_trace.trace_buffer;
6268 }
6269
6270 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6271 {
6272         /* use static because iter can be a bit big for the stack */
6273         static struct trace_iterator iter;
6274         static atomic_t dump_running;
6275         unsigned int old_userobj;
6276         unsigned long flags;
6277         int cnt = 0, cpu;
6278
6279         /* Only allow one dump user at a time. */
6280         if (atomic_inc_return(&dump_running) != 1) {
6281                 atomic_dec(&dump_running);
6282                 return;
6283         }
6284
6285         /*
6286          * Always turn off tracing when we dump.
6287          * We don't need to show trace output of what happens
6288          * between multiple crashes.
6289          *
6290          * If the user does a sysrq-z, then they can re-enable
6291          * tracing with echo 1 > tracing_on.
6292          */
6293         tracing_off();
6294
6295         local_irq_save(flags);
6296
6297         /* Simulate the iterator */
6298         trace_init_global_iter(&iter);
6299
6300         for_each_tracing_cpu(cpu) {
6301                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6302         }
6303
6304         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6305
6306         /* don't look at user memory in panic mode */
6307         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6308
6309         switch (oops_dump_mode) {
6310         case DUMP_ALL:
6311                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6312                 break;
6313         case DUMP_ORIG:
6314                 iter.cpu_file = raw_smp_processor_id();
6315                 break;
6316         case DUMP_NONE:
6317                 goto out_enable;
6318         default:
6319                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6320                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6321         }
6322
6323         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6324
6325         /* Did function tracer already get disabled? */
6326         if (ftrace_is_dead()) {
6327                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6328                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6329         }
6330
6331         /*
6332          * We need to stop all tracing on all CPUs to read
6333          * the next buffer. This is a bit expensive, but is
6334          * not done often. We fill all we can read, and then
6335          * release the locks again.
6336          */
6337
6338         while (!trace_empty(&iter)) {
6339
6340                 if (!cnt)
6341                         printk(KERN_TRACE "---------------------------------\n");
6342
6343                 cnt++;
6344
6345                 /* reset all but tr, trace, and overruns */
6346                 memset(&iter.seq, 0,
6347                        sizeof(struct trace_iterator) -
6348                        offsetof(struct trace_iterator, seq));
6349                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6350                 iter.pos = -1;
6351
6352                 if (trace_find_next_entry_inc(&iter) != NULL) {
6353                         int ret;
6354
6355                         ret = print_trace_line(&iter);
6356                         if (ret != TRACE_TYPE_NO_CONSUME)
6357                                 trace_consume(&iter);
6358                 }
6359                 touch_nmi_watchdog();
6360
6361                 trace_printk_seq(&iter.seq);
6362         }
6363
6364         if (!cnt)
6365                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6366         else
6367                 printk(KERN_TRACE "---------------------------------\n");
6368
6369  out_enable:
6370         trace_flags |= old_userobj;
6371
6372         for_each_tracing_cpu(cpu) {
6373                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6374         }
6375         atomic_dec(&dump_running);
6376         local_irq_restore(flags);
6377 }
6378 EXPORT_SYMBOL_GPL(ftrace_dump);
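/*
 * ftrace_dump() is normally reached through the panic/die handlers
 * defined above, or via sysrq-z.  A minimal sketch of calling it
 * directly from kernel code:
 *
 *   ftrace_dump(DUMP_ALL);    - dump the buffers of every CPU
 *   ftrace_dump(DUMP_ORIG);   - dump only the CPU that triggered the dump
 */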
6379
6380 __init static int tracer_alloc_buffers(void)
6381 {
6382         int ring_buf_size;
6383         int ret = -ENOMEM;
6384
6385
6386         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6387                 goto out;
6388
6389         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
6390                 goto out_free_buffer_mask;
6391
6392         /* Only allocate trace_printk buffers if a trace_printk exists */
6393         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6394                 /* Must be called before global_trace.buffer is allocated */
6395                 trace_printk_init_buffers();
6396
6397         /* To save memory, keep the ring buffer size to its minimum */
6398         if (ring_buffer_expanded)
6399                 ring_buf_size = trace_buf_size;
6400         else
6401                 ring_buf_size = 1;
6402
6403         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6404         cpumask_copy(tracing_cpumask, cpu_all_mask);
6405
6406         raw_spin_lock_init(&global_trace.start_lock);
6407
6408         /* TODO: make the number of buffers hot pluggable with CPUs */
6409         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6410                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6411                 WARN_ON(1);
6412                 goto out_free_cpumask;
6413         }
6414
6415         if (global_trace.buffer_disabled)
6416                 tracing_off();
6417
6418         trace_init_cmdlines();
6419
6420         /*
6421          * register_tracer() might reference current_trace, so it
6422          * needs to be set before we register anything. This is
6423          * just a bootstrap of current_trace anyway.
6424          */
6425         global_trace.current_trace = &nop_trace;
6426
6427         register_tracer(&nop_trace);
6428
6429         /* All seems OK, enable tracing */
6430         tracing_disabled = 0;
6431
6432         atomic_notifier_chain_register(&panic_notifier_list,
6433                                        &trace_panic_notifier);
6434
6435         register_die_notifier(&trace_die_notifier);
6436
6437         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6438
6439         /* Holder for file callbacks */
6440         global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6441         global_trace.trace_cpu.tr = &global_trace;
6442
6443         INIT_LIST_HEAD(&global_trace.systems);
6444         INIT_LIST_HEAD(&global_trace.events);
6445         list_add(&global_trace.list, &ftrace_trace_arrays);
6446
6447         while (trace_boot_options) {
6448                 char *option;
6449
6450                 option = strsep(&trace_boot_options, ",");
6451                 trace_set_options(&global_trace, option);
6452         }
6453
6454         register_snapshot_cmd();
6455
6456         return 0;
6457
6458 out_free_cpumask:
6459         free_percpu(global_trace.trace_buffer.data);
6460 #ifdef CONFIG_TRACER_MAX_TRACE
6461         free_percpu(global_trace.max_buffer.data);
6462 #endif
6463         free_cpumask_var(tracing_cpumask);
6464 out_free_buffer_mask:
6465         free_cpumask_var(tracing_buffer_mask);
6466 out:
6467         return ret;
6468 }
6469
6470 __init static int clear_boot_tracer(void)
6471 {
6472         /*
6473          * The default bootup tracer name lives in an init section.
6474          * This function is called at late_initcall time. If we did not
6475          * find the boot tracer, then clear it out, to prevent
6476          * later registration from accessing the buffer that is
6477          * about to be freed.
6478          */
6479         if (!default_bootup_tracer)
6480                 return 0;
6481
6482         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6483                default_bootup_tracer);
6484         default_bootup_tracer = NULL;
6485
6486         return 0;
6487 }
6488
6489 early_initcall(tracer_alloc_buffers);
6490 fs_initcall(tracer_init_debugfs);
6491 late_initcall(clear_boot_tracer);