Merge tag 'v3.10.7' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / kernel / trace / trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 bool ring_buffer_expanded;
51
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will lurk into the ring-buffer to count the
55  * entries inserted during the selftest although some concurrent
56  * insertions into the ring-buffer such as trace_printk could occurred
57  * at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
68         { }
69 };
70
71 static struct tracer_flags dummy_tracer_flags = {
72         .val = 0,
73         .opts = dummy_tracer_opt
74 };
75
76 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
77 {
78         return 0;
79 }
80
81 /*
82  * To prevent the comm cache from being overwritten when no
83  * tracing is active, only save the comm when a trace event
84  * occurred.
85  */
86 static DEFINE_PER_CPU(bool, trace_cmdline_save);
87
88 /*
89  * Kill all tracing for good (never come back).
90  * It is initialized to 1 but will turn to zero if the initialization
91  * of the tracer is successful. But that is the only place that sets
92  * this back to zero.
93  */
94 static int tracing_disabled = 1;
95
96 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
97
98 cpumask_var_t __read_mostly     tracing_buffer_mask;
99
100 /*
101  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102  *
103  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104  * is set, then ftrace_dump is called. This will output the contents
105  * of the ftrace buffers to the console.  This is very useful for
106  * capturing traces that lead to crashes and outputing it to a
107  * serial console.
108  *
109  * It is default off, but you can enable it with either specifying
110  * "ftrace_dump_on_oops" in the kernel command line, or setting
111  * /proc/sys/kernel/ftrace_dump_on_oops
112  * Set 1 if you want to dump buffers of all CPUs
113  * Set 2 if you want to dump the buffer of the CPU that triggered oops
114  */
115
116 enum ftrace_dump_mode ftrace_dump_on_oops;
117
118 static int tracing_set_tracer(const char *buf);
119
120 #define MAX_TRACER_SIZE         100
121 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
122 static char *default_bootup_tracer;
123
124 static bool allocate_snapshot;
125
126 static int __init set_cmdline_ftrace(char *str)
127 {
128         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
129         default_bootup_tracer = bootup_tracer_buf;
130         /* We are using ftrace early, expand it */
131         ring_buffer_expanded = true;
132         return 1;
133 }
134 __setup("ftrace=", set_cmdline_ftrace);
135
136 static int __init set_ftrace_dump_on_oops(char *str)
137 {
138         if (*str++ != '=' || !*str) {
139                 ftrace_dump_on_oops = DUMP_ALL;
140                 return 1;
141         }
142
143         if (!strcmp("orig_cpu", str)) {
144                 ftrace_dump_on_oops = DUMP_ORIG;
145                 return 1;
146         }
147
148         return 0;
149 }
150 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
151
152 static int __init boot_alloc_snapshot(char *str)
153 {
154         allocate_snapshot = true;
155         /* We also need the main ring buffer expanded */
156         ring_buffer_expanded = true;
157         return 1;
158 }
159 __setup("alloc_snapshot", boot_alloc_snapshot);
160
161
162 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
163 static char *trace_boot_options __initdata;
164
165 static int __init set_trace_boot_options(char *str)
166 {
167         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
168         trace_boot_options = trace_boot_options_buf;
169         return 0;
170 }
171 __setup("trace_options=", set_trace_boot_options);
172
173 unsigned long long ns2usecs(cycle_t nsec)
174 {
175         nsec += 500;
176         do_div(nsec, 1000);
177         return nsec;
178 }
179
180 /*
181  * The global_trace is the descriptor that holds the tracing
182  * buffers for the live tracing. For each CPU, it contains
183  * a link list of pages that will store trace entries. The
184  * page descriptor of the pages in the memory is used to hold
185  * the link list by linking the lru item in the page descriptor
186  * to each of the pages in the buffer per CPU.
187  *
188  * For each active CPU there is a data field that holds the
189  * pages for the buffer for that CPU. Each CPU has the same number
190  * of pages allocated for its buffer.
191  */
192 static struct trace_array       global_trace;
193
194 LIST_HEAD(ftrace_trace_arrays);
195
196 int trace_array_get(struct trace_array *this_tr)
197 {
198         struct trace_array *tr;
199         int ret = -ENODEV;
200
201         mutex_lock(&trace_types_lock);
202         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
203                 if (tr == this_tr) {
204                         tr->ref++;
205                         ret = 0;
206                         break;
207                 }
208         }
209         mutex_unlock(&trace_types_lock);
210
211         return ret;
212 }
213
214 static void __trace_array_put(struct trace_array *this_tr)
215 {
216         WARN_ON(!this_tr->ref);
217         this_tr->ref--;
218 }
219
220 void trace_array_put(struct trace_array *this_tr)
221 {
222         mutex_lock(&trace_types_lock);
223         __trace_array_put(this_tr);
224         mutex_unlock(&trace_types_lock);
225 }
226
227 int filter_current_check_discard(struct ring_buffer *buffer,
228                                  struct ftrace_event_call *call, void *rec,
229                                  struct ring_buffer_event *event)
230 {
231         return filter_check_discard(call, rec, buffer, event);
232 }
233 EXPORT_SYMBOL_GPL(filter_current_check_discard);
234
235 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
236 {
237         u64 ts;
238
239         /* Early boot up does not have a buffer yet */
240         if (!buf->buffer)
241                 return trace_clock_local();
242
243         ts = ring_buffer_time_stamp(buf->buffer, cpu);
244         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
245
246         return ts;
247 }
248
249 cycle_t ftrace_now(int cpu)
250 {
251         return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
252 }
253
254 /**
255  * tracing_is_enabled - Show if global_trace has been disabled
256  *
257  * Shows if the global trace has been enabled or not. It uses the
258  * mirror flag "buffer_disabled" to be used in fast paths such as for
259  * the irqsoff tracer. But it may be inaccurate due to races. If you
260  * need to know the accurate state, use tracing_is_on() which is a little
261  * slower, but accurate.
262  */
263 int tracing_is_enabled(void)
264 {
265         /*
266          * For quick access (irqsoff uses this in fast path), just
267          * return the mirror variable of the state of the ring buffer.
268          * It's a little racy, but we don't really care.
269          */
270         smp_rmb();
271         return !global_trace.buffer_disabled;
272 }
273
274 /*
275  * trace_buf_size is the size in bytes that is allocated
276  * for a buffer. Note, the number of bytes is always rounded
277  * to page size.
278  *
279  * This number is purposely set to a low number of 16384.
280  * If the dump on oops happens, it will be much appreciated
281  * to not have to wait for all that output. Anyway this can be
282  * boot time and run time configurable.
283  */
284 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
285
286 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
287
288 /* trace_types holds a link list of available tracers. */
289 static struct tracer            *trace_types __read_mostly;
290
291 /*
292  * trace_types_lock is used to protect the trace_types list.
293  */
294 DEFINE_MUTEX(trace_types_lock);
295
296 /*
297  * serialize the access of the ring buffer
298  *
299  * ring buffer serializes readers, but it is low level protection.
300  * The validity of the events (which returns by ring_buffer_peek() ..etc)
301  * are not protected by ring buffer.
302  *
303  * The content of events may become garbage if we allow other process consumes
304  * these events concurrently:
305  *   A) the page of the consumed events may become a normal page
306  *      (not reader page) in ring buffer, and this page will be rewrited
307  *      by events producer.
308  *   B) The page of the consumed events may become a page for splice_read,
309  *      and this page will be returned to system.
310  *
311  * These primitives allow multi process access to different cpu ring buffer
312  * concurrently.
313  *
314  * These primitives don't distinguish read-only and read-consume access.
315  * Multi read-only access are also serialized.
316  */
317
318 #ifdef CONFIG_SMP
319 static DECLARE_RWSEM(all_cpu_access_lock);
320 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
321
322 static inline void trace_access_lock(int cpu)
323 {
324         if (cpu == RING_BUFFER_ALL_CPUS) {
325                 /* gain it for accessing the whole ring buffer. */
326                 down_write(&all_cpu_access_lock);
327         } else {
328                 /* gain it for accessing a cpu ring buffer. */
329
330                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
331                 down_read(&all_cpu_access_lock);
332
333                 /* Secondly block other access to this @cpu ring buffer. */
334                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
335         }
336 }
337
338 static inline void trace_access_unlock(int cpu)
339 {
340         if (cpu == RING_BUFFER_ALL_CPUS) {
341                 up_write(&all_cpu_access_lock);
342         } else {
343                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
344                 up_read(&all_cpu_access_lock);
345         }
346 }
347
348 static inline void trace_access_lock_init(void)
349 {
350         int cpu;
351
352         for_each_possible_cpu(cpu)
353                 mutex_init(&per_cpu(cpu_access_lock, cpu));
354 }
355
356 #else
357
358 static DEFINE_MUTEX(access_lock);
359
360 static inline void trace_access_lock(int cpu)
361 {
362         (void)cpu;
363         mutex_lock(&access_lock);
364 }
365
366 static inline void trace_access_unlock(int cpu)
367 {
368         (void)cpu;
369         mutex_unlock(&access_lock);
370 }
371
372 static inline void trace_access_lock_init(void)
373 {
374 }
375
376 #endif
377
378 /* trace_flags holds trace_options default values */
379 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
380         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
381         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
382         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
383
384 void tracer_tracing_on(struct trace_array *tr)
385 {
386         if (tr->trace_buffer.buffer)
387                 ring_buffer_record_on(tr->trace_buffer.buffer);
388         /*
389          * This flag is looked at when buffers haven't been allocated
390          * yet, or by some tracers (like irqsoff), that just want to
391          * know if the ring buffer has been disabled, but it can handle
392          * races of where it gets disabled but we still do a record.
393          * As the check is in the fast path of the tracers, it is more
394          * important to be fast than accurate.
395          */
396         tr->buffer_disabled = 0;
397         /* Make the flag seen by readers */
398         smp_wmb();
399 }
400
401 /**
402  * tracing_on - enable tracing buffers
403  *
404  * This function enables tracing buffers that may have been
405  * disabled with tracing_off.
406  */
407 void tracing_on(void)
408 {
409         tracer_tracing_on(&global_trace);
410 }
411 EXPORT_SYMBOL_GPL(tracing_on);
412
413 /**
414  * __trace_puts - write a constant string into the trace buffer.
415  * @ip:    The address of the caller
416  * @str:   The constant string to write
417  * @size:  The size of the string.
418  */
419 int __trace_puts(unsigned long ip, const char *str, int size)
420 {
421         struct ring_buffer_event *event;
422         struct ring_buffer *buffer;
423         struct print_entry *entry;
424         unsigned long irq_flags;
425         int alloc;
426
427         alloc = sizeof(*entry) + size + 2; /* possible \n added */
428
429         local_save_flags(irq_flags);
430         buffer = global_trace.trace_buffer.buffer;
431         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
432                                           irq_flags, preempt_count());
433         if (!event)
434                 return 0;
435
436         entry = ring_buffer_event_data(event);
437         entry->ip = ip;
438
439         memcpy(&entry->buf, str, size);
440
441         /* Add a newline if necessary */
442         if (entry->buf[size - 1] != '\n') {
443                 entry->buf[size] = '\n';
444                 entry->buf[size + 1] = '\0';
445         } else
446                 entry->buf[size] = '\0';
447
448         __buffer_unlock_commit(buffer, event);
449
450         return size;
451 }
452 EXPORT_SYMBOL_GPL(__trace_puts);
453
454 /**
455  * __trace_bputs - write the pointer to a constant string into trace buffer
456  * @ip:    The address of the caller
457  * @str:   The constant string to write to the buffer to
458  */
459 int __trace_bputs(unsigned long ip, const char *str)
460 {
461         struct ring_buffer_event *event;
462         struct ring_buffer *buffer;
463         struct bputs_entry *entry;
464         unsigned long irq_flags;
465         int size = sizeof(struct bputs_entry);
466
467         local_save_flags(irq_flags);
468         buffer = global_trace.trace_buffer.buffer;
469         event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
470                                           irq_flags, preempt_count());
471         if (!event)
472                 return 0;
473
474         entry = ring_buffer_event_data(event);
475         entry->ip                       = ip;
476         entry->str                      = str;
477
478         __buffer_unlock_commit(buffer, event);
479
480         return 1;
481 }
482 EXPORT_SYMBOL_GPL(__trace_bputs);
483
484 #ifdef CONFIG_TRACER_SNAPSHOT
485 /**
486  * trace_snapshot - take a snapshot of the current buffer.
487  *
488  * This causes a swap between the snapshot buffer and the current live
489  * tracing buffer. You can use this to take snapshots of the live
490  * trace when some condition is triggered, but continue to trace.
491  *
492  * Note, make sure to allocate the snapshot with either
493  * a tracing_snapshot_alloc(), or by doing it manually
494  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
495  *
496  * If the snapshot buffer is not allocated, it will stop tracing.
497  * Basically making a permanent snapshot.
498  */
499 void tracing_snapshot(void)
500 {
501         struct trace_array *tr = &global_trace;
502         struct tracer *tracer = tr->current_trace;
503         unsigned long flags;
504
505         if (in_nmi()) {
506                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
507                 internal_trace_puts("*** snapshot is being ignored        ***\n");
508                 return;
509         }
510
511         if (!tr->allocated_snapshot) {
512                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
513                 internal_trace_puts("*** stopping trace here!   ***\n");
514                 tracing_off();
515                 return;
516         }
517
518         /* Note, snapshot can not be used when the tracer uses it */
519         if (tracer->use_max_tr) {
520                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
521                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
522                 return;
523         }
524
525         local_irq_save(flags);
526         update_max_tr(tr, current, smp_processor_id());
527         local_irq_restore(flags);
528 }
529 EXPORT_SYMBOL_GPL(tracing_snapshot);
530
531 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
532                                         struct trace_buffer *size_buf, int cpu_id);
533 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
534
535 static int alloc_snapshot(struct trace_array *tr)
536 {
537         int ret;
538
539         if (!tr->allocated_snapshot) {
540
541                 /* allocate spare buffer */
542                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
543                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
544                 if (ret < 0)
545                         return ret;
546
547                 tr->allocated_snapshot = true;
548         }
549
550         return 0;
551 }
552
553 void free_snapshot(struct trace_array *tr)
554 {
555         /*
556          * We don't free the ring buffer. instead, resize it because
557          * The max_tr ring buffer has some state (e.g. ring->clock) and
558          * we want preserve it.
559          */
560         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
561         set_buffer_entries(&tr->max_buffer, 1);
562         tracing_reset_online_cpus(&tr->max_buffer);
563         tr->allocated_snapshot = false;
564 }
565
566 /**
567  * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
568  *
569  * This is similar to trace_snapshot(), but it will allocate the
570  * snapshot buffer if it isn't already allocated. Use this only
571  * where it is safe to sleep, as the allocation may sleep.
572  *
573  * This causes a swap between the snapshot buffer and the current live
574  * tracing buffer. You can use this to take snapshots of the live
575  * trace when some condition is triggered, but continue to trace.
576  */
577 void tracing_snapshot_alloc(void)
578 {
579         struct trace_array *tr = &global_trace;
580         int ret;
581
582         ret = alloc_snapshot(tr);
583         if (WARN_ON(ret < 0))
584                 return;
585
586         tracing_snapshot();
587 }
588 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
589 #else
590 void tracing_snapshot(void)
591 {
592         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
593 }
594 EXPORT_SYMBOL_GPL(tracing_snapshot);
595 void tracing_snapshot_alloc(void)
596 {
597         /* Give warning */
598         tracing_snapshot();
599 }
600 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
601 #endif /* CONFIG_TRACER_SNAPSHOT */
602
603 void tracer_tracing_off(struct trace_array *tr)
604 {
605         if (tr->trace_buffer.buffer)
606                 ring_buffer_record_off(tr->trace_buffer.buffer);
607         /*
608          * This flag is looked at when buffers haven't been allocated
609          * yet, or by some tracers (like irqsoff), that just want to
610          * know if the ring buffer has been disabled, but it can handle
611          * races of where it gets disabled but we still do a record.
612          * As the check is in the fast path of the tracers, it is more
613          * important to be fast than accurate.
614          */
615         tr->buffer_disabled = 1;
616         /* Make the flag seen by readers */
617         smp_wmb();
618 }
619
620 /**
621  * tracing_off - turn off tracing buffers
622  *
623  * This function stops the tracing buffers from recording data.
624  * It does not disable any overhead the tracers themselves may
625  * be causing. This function simply causes all recording to
626  * the ring buffers to fail.
627  */
628 void tracing_off(void)
629 {
630         tracer_tracing_off(&global_trace);
631 }
632 EXPORT_SYMBOL_GPL(tracing_off);
633
634 /**
635  * tracer_tracing_is_on - show real state of ring buffer enabled
636  * @tr : the trace array to know if ring buffer is enabled
637  *
638  * Shows real state of the ring buffer if it is enabled or not.
639  */
640 int tracer_tracing_is_on(struct trace_array *tr)
641 {
642         if (tr->trace_buffer.buffer)
643                 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
644         return !tr->buffer_disabled;
645 }
646
647 /**
648  * tracing_is_on - show state of ring buffers enabled
649  */
650 int tracing_is_on(void)
651 {
652         return tracer_tracing_is_on(&global_trace);
653 }
654 EXPORT_SYMBOL_GPL(tracing_is_on);
655
656 static int __init set_buf_size(char *str)
657 {
658         unsigned long buf_size;
659
660         if (!str)
661                 return 0;
662         buf_size = memparse(str, &str);
663         /* nr_entries can not be zero */
664         if (buf_size == 0)
665                 return 0;
666         trace_buf_size = buf_size;
667         return 1;
668 }
669 __setup("trace_buf_size=", set_buf_size);
670
671 static int __init set_tracing_thresh(char *str)
672 {
673         unsigned long threshold;
674         int ret;
675
676         if (!str)
677                 return 0;
678         ret = kstrtoul(str, 0, &threshold);
679         if (ret < 0)
680                 return 0;
681         tracing_thresh = threshold * 1000;
682         return 1;
683 }
684 __setup("tracing_thresh=", set_tracing_thresh);
685
686 unsigned long nsecs_to_usecs(unsigned long nsecs)
687 {
688         return nsecs / 1000;
689 }
690
691 /* These must match the bit postions in trace_iterator_flags */
692 static const char *trace_options[] = {
693         "print-parent",
694         "sym-offset",
695         "sym-addr",
696         "verbose",
697         "raw",
698         "hex",
699         "bin",
700         "block",
701         "stacktrace",
702         "trace_printk",
703         "ftrace_preempt",
704         "branch",
705         "annotate",
706         "userstacktrace",
707         "sym-userobj",
708         "printk-msg-only",
709         "context-info",
710         "latency-format",
711         "sleep-time",
712         "graph-time",
713         "record-cmd",
714         "overwrite",
715         "disable_on_free",
716         "irq-info",
717         "markers",
718         "function-trace",
719         NULL
720 };
721
722 static struct {
723         u64 (*func)(void);
724         const char *name;
725         int in_ns;              /* is this clock in nanoseconds? */
726 } trace_clocks[] = {
727         { trace_clock_local,    "local",        1 },
728         { trace_clock_global,   "global",       1 },
729         { trace_clock_counter,  "counter",      0 },
730         { trace_clock_jiffies,  "uptime",       1 },
731         { trace_clock,          "perf",         1 },
732         ARCH_TRACE_CLOCKS
733 };
734
735 /*
736  * trace_parser_get_init - gets the buffer for trace parser
737  */
738 int trace_parser_get_init(struct trace_parser *parser, int size)
739 {
740         memset(parser, 0, sizeof(*parser));
741
742         parser->buffer = kmalloc(size, GFP_KERNEL);
743         if (!parser->buffer)
744                 return 1;
745
746         parser->size = size;
747         return 0;
748 }
749
750 /*
751  * trace_parser_put - frees the buffer for trace parser
752  */
753 void trace_parser_put(struct trace_parser *parser)
754 {
755         kfree(parser->buffer);
756 }
757
758 /*
759  * trace_get_user - reads the user input string separated by  space
760  * (matched by isspace(ch))
761  *
762  * For each string found the 'struct trace_parser' is updated,
763  * and the function returns.
764  *
765  * Returns number of bytes read.
766  *
767  * See kernel/trace/trace.h for 'struct trace_parser' details.
768  */
769 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
770         size_t cnt, loff_t *ppos)
771 {
772         char ch;
773         size_t read = 0;
774         ssize_t ret;
775
776         if (!*ppos)
777                 trace_parser_clear(parser);
778
779         ret = get_user(ch, ubuf++);
780         if (ret)
781                 goto out;
782
783         read++;
784         cnt--;
785
786         /*
787          * The parser is not finished with the last write,
788          * continue reading the user input without skipping spaces.
789          */
790         if (!parser->cont) {
791                 /* skip white space */
792                 while (cnt && isspace(ch)) {
793                         ret = get_user(ch, ubuf++);
794                         if (ret)
795                                 goto out;
796                         read++;
797                         cnt--;
798                 }
799
800                 /* only spaces were written */
801                 if (isspace(ch)) {
802                         *ppos += read;
803                         ret = read;
804                         goto out;
805                 }
806
807                 parser->idx = 0;
808         }
809
810         /* read the non-space input */
811         while (cnt && !isspace(ch)) {
812                 if (parser->idx < parser->size - 1)
813                         parser->buffer[parser->idx++] = ch;
814                 else {
815                         ret = -EINVAL;
816                         goto out;
817                 }
818                 ret = get_user(ch, ubuf++);
819                 if (ret)
820                         goto out;
821                 read++;
822                 cnt--;
823         }
824
825         /* We either got finished input or we have to wait for another call. */
826         if (isspace(ch)) {
827                 parser->buffer[parser->idx] = 0;
828                 parser->cont = false;
829         } else {
830                 parser->cont = true;
831                 parser->buffer[parser->idx++] = ch;
832         }
833
834         *ppos += read;
835         ret = read;
836
837 out:
838         return ret;
839 }
840
841 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
842 {
843         int len;
844         int ret;
845
846         if (!cnt)
847                 return 0;
848
849         if (s->len <= s->readpos)
850                 return -EBUSY;
851
852         len = s->len - s->readpos;
853         if (cnt > len)
854                 cnt = len;
855         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
856         if (ret == cnt)
857                 return -EFAULT;
858
859         cnt -= ret;
860
861         s->readpos += cnt;
862         return cnt;
863 }
864
865 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
866 {
867         int len;
868
869         if (s->len <= s->readpos)
870                 return -EBUSY;
871
872         len = s->len - s->readpos;
873         if (cnt > len)
874                 cnt = len;
875         memcpy(buf, s->buffer + s->readpos, cnt);
876
877         s->readpos += cnt;
878         return cnt;
879 }
880
881 /*
882  * ftrace_max_lock is used to protect the swapping of buffers
883  * when taking a max snapshot. The buffers themselves are
884  * protected by per_cpu spinlocks. But the action of the swap
885  * needs its own lock.
886  *
887  * This is defined as a arch_spinlock_t in order to help
888  * with performance when lockdep debugging is enabled.
889  *
890  * It is also used in other places outside the update_max_tr
891  * so it needs to be defined outside of the
892  * CONFIG_TRACER_MAX_TRACE.
893  */
894 static arch_spinlock_t ftrace_max_lock =
895         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
896
897 unsigned long __read_mostly     tracing_thresh;
898
899 #ifdef CONFIG_TRACER_MAX_TRACE
900 unsigned long __read_mostly     tracing_max_latency;
901
902 /*
903  * Copy the new maximum trace into the separate maximum-trace
904  * structure. (this way the maximum trace is permanently saved,
905  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
906  */
907 static void
908 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
909 {
910         struct trace_buffer *trace_buf = &tr->trace_buffer;
911         struct trace_buffer *max_buf = &tr->max_buffer;
912         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
913         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
914
915         max_buf->cpu = cpu;
916         max_buf->time_start = data->preempt_timestamp;
917
918         max_data->saved_latency = tracing_max_latency;
919         max_data->critical_start = data->critical_start;
920         max_data->critical_end = data->critical_end;
921
922         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
923         max_data->pid = tsk->pid;
924         /*
925          * If tsk == current, then use current_uid(), as that does not use
926          * RCU. The irq tracer can be called out of RCU scope.
927          */
928         if (tsk == current)
929                 max_data->uid = current_uid();
930         else
931                 max_data->uid = task_uid(tsk);
932
933         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
934         max_data->policy = tsk->policy;
935         max_data->rt_priority = tsk->rt_priority;
936
937         /* record this tasks comm */
938         tracing_record_cmdline(tsk);
939 }
940
941 /**
942  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
943  * @tr: tracer
944  * @tsk: the task with the latency
945  * @cpu: The cpu that initiated the trace.
946  *
947  * Flip the buffers between the @tr and the max_tr and record information
948  * about which task was the cause of this latency.
949  */
950 void
951 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
952 {
953         struct ring_buffer *buf;
954
955         if (tr->stop_count)
956                 return;
957
958         WARN_ON_ONCE(!irqs_disabled());
959
960         if (!tr->allocated_snapshot) {
961                 /* Only the nop tracer should hit this when disabling */
962                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
963                 return;
964         }
965
966         arch_spin_lock(&ftrace_max_lock);
967
968         buf = tr->trace_buffer.buffer;
969         tr->trace_buffer.buffer = tr->max_buffer.buffer;
970         tr->max_buffer.buffer = buf;
971
972         __update_max_tr(tr, tsk, cpu);
973         arch_spin_unlock(&ftrace_max_lock);
974 }
975
976 /**
977  * update_max_tr_single - only copy one trace over, and reset the rest
978  * @tr - tracer
979  * @tsk - task with the latency
980  * @cpu - the cpu of the buffer to copy.
981  *
982  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
983  */
984 void
985 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
986 {
987         int ret;
988
989         if (tr->stop_count)
990                 return;
991
992         WARN_ON_ONCE(!irqs_disabled());
993         if (!tr->allocated_snapshot) {
994                 /* Only the nop tracer should hit this when disabling */
995                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
996                 return;
997         }
998
999         arch_spin_lock(&ftrace_max_lock);
1000
1001         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1002
1003         if (ret == -EBUSY) {
1004                 /*
1005                  * We failed to swap the buffer due to a commit taking
1006                  * place on this CPU. We fail to record, but we reset
1007                  * the max trace buffer (no one writes directly to it)
1008                  * and flag that it failed.
1009                  */
1010                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1011                         "Failed to swap buffers due to commit in progress\n");
1012         }
1013
1014         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1015
1016         __update_max_tr(tr, tsk, cpu);
1017         arch_spin_unlock(&ftrace_max_lock);
1018 }
1019 #endif /* CONFIG_TRACER_MAX_TRACE */
1020
1021 static void default_wait_pipe(struct trace_iterator *iter)
1022 {
1023         /* Iterators are static, they should be filled or empty */
1024         if (trace_buffer_iter(iter, iter->cpu_file))
1025                 return;
1026
1027         ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1028 }
1029
1030 #ifdef CONFIG_FTRACE_STARTUP_TEST
1031 static int run_tracer_selftest(struct tracer *type)
1032 {
1033         struct trace_array *tr = &global_trace;
1034         struct tracer *saved_tracer = tr->current_trace;
1035         int ret;
1036
1037         if (!type->selftest || tracing_selftest_disabled)
1038                 return 0;
1039
1040         /*
1041          * Run a selftest on this tracer.
1042          * Here we reset the trace buffer, and set the current
1043          * tracer to be this tracer. The tracer can then run some
1044          * internal tracing to verify that everything is in order.
1045          * If we fail, we do not register this tracer.
1046          */
1047         tracing_reset_online_cpus(&tr->trace_buffer);
1048
1049         tr->current_trace = type;
1050
1051 #ifdef CONFIG_TRACER_MAX_TRACE
1052         if (type->use_max_tr) {
1053                 /* If we expanded the buffers, make sure the max is expanded too */
1054                 if (ring_buffer_expanded)
1055                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1056                                            RING_BUFFER_ALL_CPUS);
1057                 tr->allocated_snapshot = true;
1058         }
1059 #endif
1060
1061         /* the test is responsible for initializing and enabling */
1062         pr_info("Testing tracer %s: ", type->name);
1063         ret = type->selftest(type, tr);
1064         /* the test is responsible for resetting too */
1065         tr->current_trace = saved_tracer;
1066         if (ret) {
1067                 printk(KERN_CONT "FAILED!\n");
1068                 /* Add the warning after printing 'FAILED' */
1069                 WARN_ON(1);
1070                 return -1;
1071         }
1072         /* Only reset on passing, to avoid touching corrupted buffers */
1073         tracing_reset_online_cpus(&tr->trace_buffer);
1074
1075 #ifdef CONFIG_TRACER_MAX_TRACE
1076         if (type->use_max_tr) {
1077                 tr->allocated_snapshot = false;
1078
1079                 /* Shrink the max buffer again */
1080                 if (ring_buffer_expanded)
1081                         ring_buffer_resize(tr->max_buffer.buffer, 1,
1082                                            RING_BUFFER_ALL_CPUS);
1083         }
1084 #endif
1085
1086         printk(KERN_CONT "PASSED\n");
1087         return 0;
1088 }
1089 #else
1090 static inline int run_tracer_selftest(struct tracer *type)
1091 {
1092         return 0;
1093 }
1094 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1095
1096 /**
1097  * register_tracer - register a tracer with the ftrace system.
1098  * @type - the plugin for the tracer
1099  *
1100  * Register a new plugin tracer.
1101  */
1102 int register_tracer(struct tracer *type)
1103 {
1104         struct tracer *t;
1105         int ret = 0;
1106
1107         if (!type->name) {
1108                 pr_info("Tracer must have a name\n");
1109                 return -1;
1110         }
1111
1112         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1113                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1114                 return -1;
1115         }
1116
1117         mutex_lock(&trace_types_lock);
1118
1119         tracing_selftest_running = true;
1120
1121         for (t = trace_types; t; t = t->next) {
1122                 if (strcmp(type->name, t->name) == 0) {
1123                         /* already found */
1124                         pr_info("Tracer %s already registered\n",
1125                                 type->name);
1126                         ret = -1;
1127                         goto out;
1128                 }
1129         }
1130
1131         if (!type->set_flag)
1132                 type->set_flag = &dummy_set_flag;
1133         if (!type->flags)
1134                 type->flags = &dummy_tracer_flags;
1135         else
1136                 if (!type->flags->opts)
1137                         type->flags->opts = dummy_tracer_opt;
1138         if (!type->wait_pipe)
1139                 type->wait_pipe = default_wait_pipe;
1140
1141         ret = run_tracer_selftest(type);
1142         if (ret < 0)
1143                 goto out;
1144
1145         type->next = trace_types;
1146         trace_types = type;
1147
1148  out:
1149         tracing_selftest_running = false;
1150         mutex_unlock(&trace_types_lock);
1151
1152         if (ret || !default_bootup_tracer)
1153                 goto out_unlock;
1154
1155         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1156                 goto out_unlock;
1157
1158         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1159         /* Do we want this tracer to start on bootup? */
1160         tracing_set_tracer(type->name);
1161         default_bootup_tracer = NULL;
1162         /* disable other selftests, since this will break it. */
1163         tracing_selftest_disabled = true;
1164 #ifdef CONFIG_FTRACE_STARTUP_TEST
1165         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1166                type->name);
1167 #endif
1168
1169  out_unlock:
1170         return ret;
1171 }
1172
1173 void tracing_reset(struct trace_buffer *buf, int cpu)
1174 {
1175         struct ring_buffer *buffer = buf->buffer;
1176
1177         if (!buffer)
1178                 return;
1179
1180         ring_buffer_record_disable(buffer);
1181
1182         /* Make sure all commits have finished */
1183         synchronize_sched();
1184         ring_buffer_reset_cpu(buffer, cpu);
1185
1186         ring_buffer_record_enable(buffer);
1187 }
1188
1189 void tracing_reset_online_cpus(struct trace_buffer *buf)
1190 {
1191         struct ring_buffer *buffer = buf->buffer;
1192         int cpu;
1193
1194         if (!buffer)
1195                 return;
1196
1197         ring_buffer_record_disable(buffer);
1198
1199         /* Make sure all commits have finished */
1200         synchronize_sched();
1201
1202         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1203
1204         for_each_online_cpu(cpu)
1205                 ring_buffer_reset_cpu(buffer, cpu);
1206
1207         ring_buffer_record_enable(buffer);
1208 }
1209
1210 /* Must have trace_types_lock held */
1211 void tracing_reset_all_online_cpus(void)
1212 {
1213         struct trace_array *tr;
1214
1215         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1216                 tracing_reset_online_cpus(&tr->trace_buffer);
1217 #ifdef CONFIG_TRACER_MAX_TRACE
1218                 tracing_reset_online_cpus(&tr->max_buffer);
1219 #endif
1220         }
1221 }
1222
1223 #define SAVED_CMDLINES 128
1224 #define NO_CMDLINE_MAP UINT_MAX
1225 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1226 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1227 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1228 static int cmdline_idx;
1229 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1230
1231 /* temporary disable recording */
1232 static atomic_t trace_record_cmdline_disabled __read_mostly;
1233
1234 static void trace_init_cmdlines(void)
1235 {
1236         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1237         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
1238         cmdline_idx = 0;
1239 }
1240
1241 int is_tracing_stopped(void)
1242 {
1243         return global_trace.stop_count;
1244 }
1245
1246 /**
1247  * ftrace_off_permanent - disable all ftrace code permanently
1248  *
1249  * This should only be called when a serious anomally has
1250  * been detected.  This will turn off the function tracing,
1251  * ring buffers, and other tracing utilites. It takes no
1252  * locks and can be called from any context.
1253  */
1254 void ftrace_off_permanent(void)
1255 {
1256         tracing_disabled = 1;
1257         ftrace_stop();
1258         tracing_off_permanent();
1259 }
1260
1261 /**
1262  * tracing_start - quick start of the tracer
1263  *
1264  * If tracing is enabled but was stopped by tracing_stop,
1265  * this will start the tracer back up.
1266  */
1267 void tracing_start(void)
1268 {
1269         struct ring_buffer *buffer;
1270         unsigned long flags;
1271
1272         if (tracing_disabled)
1273                 return;
1274
1275         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1276         if (--global_trace.stop_count) {
1277                 if (global_trace.stop_count < 0) {
1278                         /* Someone screwed up their debugging */
1279                         WARN_ON_ONCE(1);
1280                         global_trace.stop_count = 0;
1281                 }
1282                 goto out;
1283         }
1284
1285         /* Prevent the buffers from switching */
1286         arch_spin_lock(&ftrace_max_lock);
1287
1288         buffer = global_trace.trace_buffer.buffer;
1289         if (buffer)
1290                 ring_buffer_record_enable(buffer);
1291
1292 #ifdef CONFIG_TRACER_MAX_TRACE
1293         buffer = global_trace.max_buffer.buffer;
1294         if (buffer)
1295                 ring_buffer_record_enable(buffer);
1296 #endif
1297
1298         arch_spin_unlock(&ftrace_max_lock);
1299
1300         ftrace_start();
1301  out:
1302         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1303 }
1304
1305 static void tracing_start_tr(struct trace_array *tr)
1306 {
1307         struct ring_buffer *buffer;
1308         unsigned long flags;
1309
1310         if (tracing_disabled)
1311                 return;
1312
1313         /* If global, we need to also start the max tracer */
1314         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1315                 return tracing_start();
1316
1317         raw_spin_lock_irqsave(&tr->start_lock, flags);
1318
1319         if (--tr->stop_count) {
1320                 if (tr->stop_count < 0) {
1321                         /* Someone screwed up their debugging */
1322                         WARN_ON_ONCE(1);
1323                         tr->stop_count = 0;
1324                 }
1325                 goto out;
1326         }
1327
1328         buffer = tr->trace_buffer.buffer;
1329         if (buffer)
1330                 ring_buffer_record_enable(buffer);
1331
1332  out:
1333         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1334 }
1335
1336 /**
1337  * tracing_stop - quick stop of the tracer
1338  *
1339  * Light weight way to stop tracing. Use in conjunction with
1340  * tracing_start.
1341  */
1342 void tracing_stop(void)
1343 {
1344         struct ring_buffer *buffer;
1345         unsigned long flags;
1346
1347         ftrace_stop();
1348         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1349         if (global_trace.stop_count++)
1350                 goto out;
1351
1352         /* Prevent the buffers from switching */
1353         arch_spin_lock(&ftrace_max_lock);
1354
1355         buffer = global_trace.trace_buffer.buffer;
1356         if (buffer)
1357                 ring_buffer_record_disable(buffer);
1358
1359 #ifdef CONFIG_TRACER_MAX_TRACE
1360         buffer = global_trace.max_buffer.buffer;
1361         if (buffer)
1362                 ring_buffer_record_disable(buffer);
1363 #endif
1364
1365         arch_spin_unlock(&ftrace_max_lock);
1366
1367  out:
1368         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1369 }
1370
1371 static void tracing_stop_tr(struct trace_array *tr)
1372 {
1373         struct ring_buffer *buffer;
1374         unsigned long flags;
1375
1376         /* If global, we need to also stop the max tracer */
1377         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1378                 return tracing_stop();
1379
1380         raw_spin_lock_irqsave(&tr->start_lock, flags);
1381         if (tr->stop_count++)
1382                 goto out;
1383
1384         buffer = tr->trace_buffer.buffer;
1385         if (buffer)
1386                 ring_buffer_record_disable(buffer);
1387
1388  out:
1389         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1390 }
1391
1392 void trace_stop_cmdline_recording(void);
1393
1394 static void trace_save_cmdline(struct task_struct *tsk)
1395 {
1396         unsigned pid, idx;
1397
1398         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1399                 return;
1400
1401         /*
1402          * It's not the end of the world if we don't get
1403          * the lock, but we also don't want to spin
1404          * nor do we want to disable interrupts,
1405          * so if we miss here, then better luck next time.
1406          */
1407         if (!arch_spin_trylock(&trace_cmdline_lock))
1408                 return;
1409
1410         idx = map_pid_to_cmdline[tsk->pid];
1411         if (idx == NO_CMDLINE_MAP) {
1412                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1413
1414                 /*
1415                  * Check whether the cmdline buffer at idx has a pid
1416                  * mapped. We are going to overwrite that entry so we
1417                  * need to clear the map_pid_to_cmdline. Otherwise we
1418                  * would read the new comm for the old pid.
1419                  */
1420                 pid = map_cmdline_to_pid[idx];
1421                 if (pid != NO_CMDLINE_MAP)
1422                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1423
1424                 map_cmdline_to_pid[idx] = tsk->pid;
1425                 map_pid_to_cmdline[tsk->pid] = idx;
1426
1427                 cmdline_idx = idx;
1428         }
1429
1430         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1431
1432         arch_spin_unlock(&trace_cmdline_lock);
1433 }
1434
1435 void trace_find_cmdline(int pid, char comm[])
1436 {
1437         unsigned map;
1438
1439         if (!pid) {
1440                 strcpy(comm, "<idle>");
1441                 return;
1442         }
1443
1444         if (WARN_ON_ONCE(pid < 0)) {
1445                 strcpy(comm, "<XXX>");
1446                 return;
1447         }
1448
1449         if (pid > PID_MAX_DEFAULT) {
1450                 strcpy(comm, "<...>");
1451                 return;
1452         }
1453
1454         preempt_disable();
1455         arch_spin_lock(&trace_cmdline_lock);
1456         map = map_pid_to_cmdline[pid];
1457         if (map != NO_CMDLINE_MAP)
1458                 strcpy(comm, saved_cmdlines[map]);
1459         else
1460                 strcpy(comm, "<...>");
1461
1462         arch_spin_unlock(&trace_cmdline_lock);
1463         preempt_enable();
1464 }
1465
1466 void tracing_record_cmdline(struct task_struct *tsk)
1467 {
1468         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1469                 return;
1470
1471         if (!__this_cpu_read(trace_cmdline_save))
1472                 return;
1473
1474         __this_cpu_write(trace_cmdline_save, false);
1475
1476         trace_save_cmdline(tsk);
1477 }
1478
1479 void
1480 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1481                              int pc)
1482 {
1483         struct task_struct *tsk = current;
1484
1485         entry->preempt_count            = pc & 0xff;
1486         entry->pid                      = (tsk) ? tsk->pid : 0;
1487         entry->flags =
1488 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1489                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1490 #else
1491                 TRACE_FLAG_IRQS_NOSUPPORT |
1492 #endif
1493                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1494                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1495                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1496 }
1497 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1498
1499 struct ring_buffer_event *
1500 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1501                           int type,
1502                           unsigned long len,
1503                           unsigned long flags, int pc)
1504 {
1505         struct ring_buffer_event *event;
1506
1507         event = ring_buffer_lock_reserve(buffer, len);
1508         if (event != NULL) {
1509                 struct trace_entry *ent = ring_buffer_event_data(event);
1510
1511                 tracing_generic_entry_update(ent, flags, pc);
1512                 ent->type = type;
1513         }
1514
1515         return event;
1516 }
1517
1518 void
1519 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1520 {
1521         __this_cpu_write(trace_cmdline_save, true);
1522         ring_buffer_unlock_commit(buffer, event);
1523 }
1524
1525 static inline void
1526 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1527                              struct ring_buffer_event *event,
1528                              unsigned long flags, int pc)
1529 {
1530         __buffer_unlock_commit(buffer, event);
1531
1532         ftrace_trace_stack(buffer, flags, 6, pc);
1533         ftrace_trace_userstack(buffer, flags, pc);
1534 }
1535
1536 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1537                                 struct ring_buffer_event *event,
1538                                 unsigned long flags, int pc)
1539 {
1540         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1541 }
1542 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1543
1544 struct ring_buffer_event *
1545 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1546                           struct ftrace_event_file *ftrace_file,
1547                           int type, unsigned long len,
1548                           unsigned long flags, int pc)
1549 {
1550         *current_rb = ftrace_file->tr->trace_buffer.buffer;
1551         return trace_buffer_lock_reserve(*current_rb,
1552                                          type, len, flags, pc);
1553 }
1554 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1555
1556 struct ring_buffer_event *
1557 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1558                                   int type, unsigned long len,
1559                                   unsigned long flags, int pc)
1560 {
1561         *current_rb = global_trace.trace_buffer.buffer;
1562         return trace_buffer_lock_reserve(*current_rb,
1563                                          type, len, flags, pc);
1564 }
1565 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1566
1567 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1568                                         struct ring_buffer_event *event,
1569                                         unsigned long flags, int pc)
1570 {
1571         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1572 }
1573 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1574
1575 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1576                                      struct ring_buffer_event *event,
1577                                      unsigned long flags, int pc,
1578                                      struct pt_regs *regs)
1579 {
1580         __buffer_unlock_commit(buffer, event);
1581
1582         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1583         ftrace_trace_userstack(buffer, flags, pc);
1584 }
1585 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1586
1587 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1588                                          struct ring_buffer_event *event)
1589 {
1590         ring_buffer_discard_commit(buffer, event);
1591 }
1592 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1593
1594 void
1595 trace_function(struct trace_array *tr,
1596                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1597                int pc)
1598 {
1599         struct ftrace_event_call *call = &event_function;
1600         struct ring_buffer *buffer = tr->trace_buffer.buffer;
1601         struct ring_buffer_event *event;
1602         struct ftrace_entry *entry;
1603
1604         /* If we are reading the ring buffer, don't trace */
1605         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1606                 return;
1607
1608         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1609                                           flags, pc);
1610         if (!event)
1611                 return;
1612         entry   = ring_buffer_event_data(event);
1613         entry->ip                       = ip;
1614         entry->parent_ip                = parent_ip;
1615
1616         if (!filter_check_discard(call, entry, buffer, event))
1617                 __buffer_unlock_commit(buffer, event);
1618 }
1619
1620 void
1621 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1622        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1623        int pc)
1624 {
1625         if (likely(!atomic_read(&data->disabled)))
1626                 trace_function(tr, ip, parent_ip, flags, pc);
1627 }
1628
1629 #ifdef CONFIG_STACKTRACE
1630
1631 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1632 struct ftrace_stack {
1633         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1634 };
1635
1636 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1637 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1638
1639 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1640                                  unsigned long flags,
1641                                  int skip, int pc, struct pt_regs *regs)
1642 {
1643         struct ftrace_event_call *call = &event_kernel_stack;
1644         struct ring_buffer_event *event;
1645         struct stack_entry *entry;
1646         struct stack_trace trace;
1647         int use_stack;
1648         int size = FTRACE_STACK_ENTRIES;
1649
1650         trace.nr_entries        = 0;
1651         trace.skip              = skip;
1652
1653         /*
1654          * Since events can happen in NMIs there's no safe way to
1655          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1656          * or NMI comes in, it will just have to use the default
1657          * FTRACE_STACK_SIZE.
1658          */
1659         preempt_disable_notrace();
1660
1661         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1662         /*
1663          * We don't need any atomic variables, just a barrier.
1664          * If an interrupt comes in, we don't care, because it would
1665          * have exited and put the counter back to what we want.
1666          * We just need a barrier to keep gcc from moving things
1667          * around.
1668          */
1669         barrier();
1670         if (use_stack == 1) {
1671                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1672                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1673
1674                 if (regs)
1675                         save_stack_trace_regs(regs, &trace);
1676                 else
1677                         save_stack_trace(&trace);
1678
1679                 if (trace.nr_entries > size)
1680                         size = trace.nr_entries;
1681         } else
1682                 /* From now on, use_stack is a boolean */
1683                 use_stack = 0;
1684
1685         size *= sizeof(unsigned long);
1686
1687         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1688                                           sizeof(*entry) + size, flags, pc);
1689         if (!event)
1690                 goto out;
1691         entry = ring_buffer_event_data(event);
1692
1693         memset(&entry->caller, 0, size);
1694
1695         if (use_stack)
1696                 memcpy(&entry->caller, trace.entries,
1697                        trace.nr_entries * sizeof(unsigned long));
1698         else {
1699                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1700                 trace.entries           = entry->caller;
1701                 if (regs)
1702                         save_stack_trace_regs(regs, &trace);
1703                 else
1704                         save_stack_trace(&trace);
1705         }
1706
1707         entry->size = trace.nr_entries;
1708
1709         if (!filter_check_discard(call, entry, buffer, event))
1710                 __buffer_unlock_commit(buffer, event);
1711
1712  out:
1713         /* Again, don't let gcc optimize things here */
1714         barrier();
1715         __this_cpu_dec(ftrace_stack_reserve);
1716         preempt_enable_notrace();
1717
1718 }
1719
1720 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1721                              int skip, int pc, struct pt_regs *regs)
1722 {
1723         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1724                 return;
1725
1726         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1727 }
1728
1729 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1730                         int skip, int pc)
1731 {
1732         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1733                 return;
1734
1735         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1736 }
1737
1738 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1739                    int pc)
1740 {
1741         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1742 }
1743
1744 /**
1745  * trace_dump_stack - record a stack back trace in the trace buffer
1746  * @skip: Number of functions to skip (helper handlers)
1747  */
1748 void trace_dump_stack(int skip)
1749 {
1750         unsigned long flags;
1751
1752         if (tracing_disabled || tracing_selftest_running)
1753                 return;
1754
1755         local_save_flags(flags);
1756
1757         /*
1758          * Skip 3 more, seems to get us at the caller of
1759          * this function.
1760          */
1761         skip += 3;
1762         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1763                              flags, skip, preempt_count(), NULL);
1764 }
1765
1766 static DEFINE_PER_CPU(int, user_stack_count);
1767
1768 void
1769 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1770 {
1771         struct ftrace_event_call *call = &event_user_stack;
1772         struct ring_buffer_event *event;
1773         struct userstack_entry *entry;
1774         struct stack_trace trace;
1775
1776         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1777                 return;
1778
1779         /*
1780          * NMIs can not handle page faults, even with fix ups.
1781          * The save user stack can (and often does) fault.
1782          */
1783         if (unlikely(in_nmi()))
1784                 return;
1785
1786         /*
1787          * prevent recursion, since the user stack tracing may
1788          * trigger other kernel events.
1789          */
1790         preempt_disable();
1791         if (__this_cpu_read(user_stack_count))
1792                 goto out;
1793
1794         __this_cpu_inc(user_stack_count);
1795
1796         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1797                                           sizeof(*entry), flags, pc);
1798         if (!event)
1799                 goto out_drop_count;
1800         entry   = ring_buffer_event_data(event);
1801
1802         entry->tgid             = current->tgid;
1803         memset(&entry->caller, 0, sizeof(entry->caller));
1804
1805         trace.nr_entries        = 0;
1806         trace.max_entries       = FTRACE_STACK_ENTRIES;
1807         trace.skip              = 0;
1808         trace.entries           = entry->caller;
1809
1810         save_stack_trace_user(&trace);
1811         if (!filter_check_discard(call, entry, buffer, event))
1812                 __buffer_unlock_commit(buffer, event);
1813
1814  out_drop_count:
1815         __this_cpu_dec(user_stack_count);
1816  out:
1817         preempt_enable();
1818 }
1819
1820 #ifdef UNUSED
1821 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1822 {
1823         ftrace_trace_userstack(tr, flags, preempt_count());
1824 }
1825 #endif /* UNUSED */
1826
1827 #endif /* CONFIG_STACKTRACE */
1828
1829 /* created for use with alloc_percpu */
1830 struct trace_buffer_struct {
1831         char buffer[TRACE_BUF_SIZE];
1832 };
1833
1834 static struct trace_buffer_struct *trace_percpu_buffer;
1835 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1836 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1837 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1838
1839 /*
1840  * The buffer used is dependent on the context. There is a per cpu
1841  * buffer for normal context, softirq contex, hard irq context and
1842  * for NMI context. Thise allows for lockless recording.
1843  *
1844  * Note, if the buffers failed to be allocated, then this returns NULL
1845  */
1846 static char *get_trace_buf(void)
1847 {
1848         struct trace_buffer_struct *percpu_buffer;
1849
1850         /*
1851          * If we have allocated per cpu buffers, then we do not
1852          * need to do any locking.
1853          */
1854         if (in_nmi())
1855                 percpu_buffer = trace_percpu_nmi_buffer;
1856         else if (in_irq())
1857                 percpu_buffer = trace_percpu_irq_buffer;
1858         else if (in_softirq())
1859                 percpu_buffer = trace_percpu_sirq_buffer;
1860         else
1861                 percpu_buffer = trace_percpu_buffer;
1862
1863         if (!percpu_buffer)
1864                 return NULL;
1865
1866         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1867 }
1868
1869 static int alloc_percpu_trace_buffer(void)
1870 {
1871         struct trace_buffer_struct *buffers;
1872         struct trace_buffer_struct *sirq_buffers;
1873         struct trace_buffer_struct *irq_buffers;
1874         struct trace_buffer_struct *nmi_buffers;
1875
1876         buffers = alloc_percpu(struct trace_buffer_struct);
1877         if (!buffers)
1878                 goto err_warn;
1879
1880         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1881         if (!sirq_buffers)
1882                 goto err_sirq;
1883
1884         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1885         if (!irq_buffers)
1886                 goto err_irq;
1887
1888         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1889         if (!nmi_buffers)
1890                 goto err_nmi;
1891
1892         trace_percpu_buffer = buffers;
1893         trace_percpu_sirq_buffer = sirq_buffers;
1894         trace_percpu_irq_buffer = irq_buffers;
1895         trace_percpu_nmi_buffer = nmi_buffers;
1896
1897         return 0;
1898
1899  err_nmi:
1900         free_percpu(irq_buffers);
1901  err_irq:
1902         free_percpu(sirq_buffers);
1903  err_sirq:
1904         free_percpu(buffers);
1905  err_warn:
1906         WARN(1, "Could not allocate percpu trace_printk buffer");
1907         return -ENOMEM;
1908 }
1909
1910 static int buffers_allocated;
1911
1912 void trace_printk_init_buffers(void)
1913 {
1914         if (buffers_allocated)
1915                 return;
1916
1917         if (alloc_percpu_trace_buffer())
1918                 return;
1919
1920         pr_info("ftrace: Allocated trace_printk buffers\n");
1921
1922         /* Expand the buffers to set size */
1923         tracing_update_buffers();
1924
1925         buffers_allocated = 1;
1926
1927         /*
1928          * trace_printk_init_buffers() can be called by modules.
1929          * If that happens, then we need to start cmdline recording
1930          * directly here. If the global_trace.buffer is already
1931          * allocated here, then this was called by module code.
1932          */
1933         if (global_trace.trace_buffer.buffer)
1934                 tracing_start_cmdline_record();
1935 }
1936
1937 void trace_printk_start_comm(void)
1938 {
1939         /* Start tracing comms if trace printk is set */
1940         if (!buffers_allocated)
1941                 return;
1942         tracing_start_cmdline_record();
1943 }
1944
1945 static void trace_printk_start_stop_comm(int enabled)
1946 {
1947         if (!buffers_allocated)
1948                 return;
1949
1950         if (enabled)
1951                 tracing_start_cmdline_record();
1952         else
1953                 tracing_stop_cmdline_record();
1954 }
1955
1956 /**
1957  * trace_vbprintk - write binary msg to tracing buffer
1958  *
1959  */
1960 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1961 {
1962         struct ftrace_event_call *call = &event_bprint;
1963         struct ring_buffer_event *event;
1964         struct ring_buffer *buffer;
1965         struct trace_array *tr = &global_trace;
1966         struct bprint_entry *entry;
1967         unsigned long flags;
1968         char *tbuffer;
1969         int len = 0, size, pc;
1970
1971         if (unlikely(tracing_selftest_running || tracing_disabled))
1972                 return 0;
1973
1974         /* Don't pollute graph traces with trace_vprintk internals */
1975         pause_graph_tracing();
1976
1977         pc = preempt_count();
1978         preempt_disable_notrace();
1979
1980         tbuffer = get_trace_buf();
1981         if (!tbuffer) {
1982                 len = 0;
1983                 goto out;
1984         }
1985
1986         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1987
1988         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1989                 goto out;
1990
1991         local_save_flags(flags);
1992         size = sizeof(*entry) + sizeof(u32) * len;
1993         buffer = tr->trace_buffer.buffer;
1994         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1995                                           flags, pc);
1996         if (!event)
1997                 goto out;
1998         entry = ring_buffer_event_data(event);
1999         entry->ip                       = ip;
2000         entry->fmt                      = fmt;
2001
2002         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2003         if (!filter_check_discard(call, entry, buffer, event)) {
2004                 __buffer_unlock_commit(buffer, event);
2005                 ftrace_trace_stack(buffer, flags, 6, pc);
2006         }
2007
2008 out:
2009         preempt_enable_notrace();
2010         unpause_graph_tracing();
2011
2012         return len;
2013 }
2014 EXPORT_SYMBOL_GPL(trace_vbprintk);
2015
2016 static int
2017 __trace_array_vprintk(struct ring_buffer *buffer,
2018                       unsigned long ip, const char *fmt, va_list args)
2019 {
2020         struct ftrace_event_call *call = &event_print;
2021         struct ring_buffer_event *event;
2022         int len = 0, size, pc;
2023         struct print_entry *entry;
2024         unsigned long flags;
2025         char *tbuffer;
2026
2027         if (tracing_disabled || tracing_selftest_running)
2028                 return 0;
2029
2030         /* Don't pollute graph traces with trace_vprintk internals */
2031         pause_graph_tracing();
2032
2033         pc = preempt_count();
2034         preempt_disable_notrace();
2035
2036
2037         tbuffer = get_trace_buf();
2038         if (!tbuffer) {
2039                 len = 0;
2040                 goto out;
2041         }
2042
2043         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2044         if (len > TRACE_BUF_SIZE)
2045                 goto out;
2046
2047         local_save_flags(flags);
2048         size = sizeof(*entry) + len + 1;
2049         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2050                                           flags, pc);
2051         if (!event)
2052                 goto out;
2053         entry = ring_buffer_event_data(event);
2054         entry->ip = ip;
2055
2056         memcpy(&entry->buf, tbuffer, len);
2057         entry->buf[len] = '\0';
2058         if (!filter_check_discard(call, entry, buffer, event)) {
2059                 __buffer_unlock_commit(buffer, event);
2060                 ftrace_trace_stack(buffer, flags, 6, pc);
2061         }
2062  out:
2063         preempt_enable_notrace();
2064         unpause_graph_tracing();
2065
2066         return len;
2067 }
2068
2069 int trace_array_vprintk(struct trace_array *tr,
2070                         unsigned long ip, const char *fmt, va_list args)
2071 {
2072         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2073 }
2074
2075 int trace_array_printk(struct trace_array *tr,
2076                        unsigned long ip, const char *fmt, ...)
2077 {
2078         int ret;
2079         va_list ap;
2080
2081         if (!(trace_flags & TRACE_ITER_PRINTK))
2082                 return 0;
2083
2084         va_start(ap, fmt);
2085         ret = trace_array_vprintk(tr, ip, fmt, ap);
2086         va_end(ap);
2087         return ret;
2088 }
2089
2090 int trace_array_printk_buf(struct ring_buffer *buffer,
2091                            unsigned long ip, const char *fmt, ...)
2092 {
2093         int ret;
2094         va_list ap;
2095
2096         if (!(trace_flags & TRACE_ITER_PRINTK))
2097                 return 0;
2098
2099         va_start(ap, fmt);
2100         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2101         va_end(ap);
2102         return ret;
2103 }
2104
2105 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2106 {
2107         return trace_array_vprintk(&global_trace, ip, fmt, args);
2108 }
2109 EXPORT_SYMBOL_GPL(trace_vprintk);
2110
2111 static void trace_iterator_increment(struct trace_iterator *iter)
2112 {
2113         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2114
2115         iter->idx++;
2116         if (buf_iter)
2117                 ring_buffer_read(buf_iter, NULL);
2118 }
2119
2120 static struct trace_entry *
2121 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2122                 unsigned long *lost_events)
2123 {
2124         struct ring_buffer_event *event;
2125         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2126
2127         if (buf_iter)
2128                 event = ring_buffer_iter_peek(buf_iter, ts);
2129         else
2130                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2131                                          lost_events);
2132
2133         if (event) {
2134                 iter->ent_size = ring_buffer_event_length(event);
2135                 return ring_buffer_event_data(event);
2136         }
2137         iter->ent_size = 0;
2138         return NULL;
2139 }
2140
2141 static struct trace_entry *
2142 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2143                   unsigned long *missing_events, u64 *ent_ts)
2144 {
2145         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2146         struct trace_entry *ent, *next = NULL;
2147         unsigned long lost_events = 0, next_lost = 0;
2148         int cpu_file = iter->cpu_file;
2149         u64 next_ts = 0, ts;
2150         int next_cpu = -1;
2151         int next_size = 0;
2152         int cpu;
2153
2154         /*
2155          * If we are in a per_cpu trace file, don't bother by iterating over
2156          * all cpu and peek directly.
2157          */
2158         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2159                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2160                         return NULL;
2161                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2162                 if (ent_cpu)
2163                         *ent_cpu = cpu_file;
2164
2165                 return ent;
2166         }
2167
2168         for_each_tracing_cpu(cpu) {
2169
2170                 if (ring_buffer_empty_cpu(buffer, cpu))
2171                         continue;
2172
2173                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2174
2175                 /*
2176                  * Pick the entry with the smallest timestamp:
2177                  */
2178                 if (ent && (!next || ts < next_ts)) {
2179                         next = ent;
2180                         next_cpu = cpu;
2181                         next_ts = ts;
2182                         next_lost = lost_events;
2183                         next_size = iter->ent_size;
2184                 }
2185         }
2186
2187         iter->ent_size = next_size;
2188
2189         if (ent_cpu)
2190                 *ent_cpu = next_cpu;
2191
2192         if (ent_ts)
2193                 *ent_ts = next_ts;
2194
2195         if (missing_events)
2196                 *missing_events = next_lost;
2197
2198         return next;
2199 }
2200
2201 /* Find the next real entry, without updating the iterator itself */
2202 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2203                                           int *ent_cpu, u64 *ent_ts)
2204 {
2205         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2206 }
2207
2208 /* Find the next real entry, and increment the iterator to the next entry */
2209 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2210 {
2211         iter->ent = __find_next_entry(iter, &iter->cpu,
2212                                       &iter->lost_events, &iter->ts);
2213
2214         if (iter->ent)
2215                 trace_iterator_increment(iter);
2216
2217         return iter->ent ? iter : NULL;
2218 }
2219
2220 static void trace_consume(struct trace_iterator *iter)
2221 {
2222         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2223                             &iter->lost_events);
2224 }
2225
2226 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2227 {
2228         struct trace_iterator *iter = m->private;
2229         int i = (int)*pos;
2230         void *ent;
2231
2232         WARN_ON_ONCE(iter->leftover);
2233
2234         (*pos)++;
2235
2236         /* can't go backwards */
2237         if (iter->idx > i)
2238                 return NULL;
2239
2240         if (iter->idx < 0)
2241                 ent = trace_find_next_entry_inc(iter);
2242         else
2243                 ent = iter;
2244
2245         while (ent && iter->idx < i)
2246                 ent = trace_find_next_entry_inc(iter);
2247
2248         iter->pos = *pos;
2249
2250         return ent;
2251 }
2252
2253 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2254 {
2255         struct ring_buffer_event *event;
2256         struct ring_buffer_iter *buf_iter;
2257         unsigned long entries = 0;
2258         u64 ts;
2259
2260         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2261
2262         buf_iter = trace_buffer_iter(iter, cpu);
2263         if (!buf_iter)
2264                 return;
2265
2266         ring_buffer_iter_reset(buf_iter);
2267
2268         /*
2269          * We could have the case with the max latency tracers
2270          * that a reset never took place on a cpu. This is evident
2271          * by the timestamp being before the start of the buffer.
2272          */
2273         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2274                 if (ts >= iter->trace_buffer->time_start)
2275                         break;
2276                 entries++;
2277                 ring_buffer_read(buf_iter, NULL);
2278         }
2279
2280         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2281 }
2282
2283 /*
2284  * The current tracer is copied to avoid a global locking
2285  * all around.
2286  */
2287 static void *s_start(struct seq_file *m, loff_t *pos)
2288 {
2289         struct trace_iterator *iter = m->private;
2290         struct trace_array *tr = iter->tr;
2291         int cpu_file = iter->cpu_file;
2292         void *p = NULL;
2293         loff_t l = 0;
2294         int cpu;
2295
2296         /*
2297          * copy the tracer to avoid using a global lock all around.
2298          * iter->trace is a copy of current_trace, the pointer to the
2299          * name may be used instead of a strcmp(), as iter->trace->name
2300          * will point to the same string as current_trace->name.
2301          */
2302         mutex_lock(&trace_types_lock);
2303         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2304                 *iter->trace = *tr->current_trace;
2305         mutex_unlock(&trace_types_lock);
2306
2307 #ifdef CONFIG_TRACER_MAX_TRACE
2308         if (iter->snapshot && iter->trace->use_max_tr)
2309                 return ERR_PTR(-EBUSY);
2310 #endif
2311
2312         if (!iter->snapshot)
2313                 atomic_inc(&trace_record_cmdline_disabled);
2314
2315         if (*pos != iter->pos) {
2316                 iter->ent = NULL;
2317                 iter->cpu = 0;
2318                 iter->idx = -1;
2319
2320                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2321                         for_each_tracing_cpu(cpu)
2322                                 tracing_iter_reset(iter, cpu);
2323                 } else
2324                         tracing_iter_reset(iter, cpu_file);
2325
2326                 iter->leftover = 0;
2327                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2328                         ;
2329
2330         } else {
2331                 /*
2332                  * If we overflowed the seq_file before, then we want
2333                  * to just reuse the trace_seq buffer again.
2334                  */
2335                 if (iter->leftover)
2336                         p = iter;
2337                 else {
2338                         l = *pos - 1;
2339                         p = s_next(m, p, &l);
2340                 }
2341         }
2342
2343         trace_event_read_lock();
2344         trace_access_lock(cpu_file);
2345         return p;
2346 }
2347
2348 static void s_stop(struct seq_file *m, void *p)
2349 {
2350         struct trace_iterator *iter = m->private;
2351
2352 #ifdef CONFIG_TRACER_MAX_TRACE
2353         if (iter->snapshot && iter->trace->use_max_tr)
2354                 return;
2355 #endif
2356
2357         if (!iter->snapshot)
2358                 atomic_dec(&trace_record_cmdline_disabled);
2359
2360         trace_access_unlock(iter->cpu_file);
2361         trace_event_read_unlock();
2362 }
2363
2364 static void
2365 get_total_entries(struct trace_buffer *buf,
2366                   unsigned long *total, unsigned long *entries)
2367 {
2368         unsigned long count;
2369         int cpu;
2370
2371         *total = 0;
2372         *entries = 0;
2373
2374         for_each_tracing_cpu(cpu) {
2375                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2376                 /*
2377                  * If this buffer has skipped entries, then we hold all
2378                  * entries for the trace and we need to ignore the
2379                  * ones before the time stamp.
2380                  */
2381                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2382                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2383                         /* total is the same as the entries */
2384                         *total += count;
2385                 } else
2386                         *total += count +
2387                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2388                 *entries += count;
2389         }
2390 }
2391
2392 static void print_lat_help_header(struct seq_file *m)
2393 {
2394         seq_puts(m, "#                  _------=> CPU#            \n");
2395         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2396         seq_puts(m, "#                | / _----=> need-resched    \n");
2397         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2398         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2399         seq_puts(m, "#                |||| /     delay             \n");
2400         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2401         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2402 }
2403
2404 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2405 {
2406         unsigned long total;
2407         unsigned long entries;
2408
2409         get_total_entries(buf, &total, &entries);
2410         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2411                    entries, total, num_online_cpus());
2412         seq_puts(m, "#\n");
2413 }
2414
2415 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2416 {
2417         print_event_info(buf, m);
2418         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2419         seq_puts(m, "#              | |       |          |         |\n");
2420 }
2421
2422 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2423 {
2424         print_event_info(buf, m);
2425         seq_puts(m, "#                              _-----=> irqs-off\n");
2426         seq_puts(m, "#                             / _----=> need-resched\n");
2427         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2428         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2429         seq_puts(m, "#                            ||| /     delay\n");
2430         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2431         seq_puts(m, "#              | |       |   ||||       |         |\n");
2432 }
2433
2434 void
2435 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2436 {
2437         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2438         struct trace_buffer *buf = iter->trace_buffer;
2439         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2440         struct tracer *type = iter->trace;
2441         unsigned long entries;
2442         unsigned long total;
2443         const char *name = "preemption";
2444
2445         name = type->name;
2446
2447         get_total_entries(buf, &total, &entries);
2448
2449         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2450                    name, UTS_RELEASE);
2451         seq_puts(m, "# -----------------------------------"
2452                  "---------------------------------\n");
2453         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2454                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2455                    nsecs_to_usecs(data->saved_latency),
2456                    entries,
2457                    total,
2458                    buf->cpu,
2459 #if defined(CONFIG_PREEMPT_NONE)
2460                    "server",
2461 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2462                    "desktop",
2463 #elif defined(CONFIG_PREEMPT)
2464                    "preempt",
2465 #else
2466                    "unknown",
2467 #endif
2468                    /* These are reserved for later use */
2469                    0, 0, 0, 0);
2470 #ifdef CONFIG_SMP
2471         seq_printf(m, " #P:%d)\n", num_online_cpus());
2472 #else
2473         seq_puts(m, ")\n");
2474 #endif
2475         seq_puts(m, "#    -----------------\n");
2476         seq_printf(m, "#    | task: %.16s-%d "
2477                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2478                    data->comm, data->pid,
2479                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2480                    data->policy, data->rt_priority);
2481         seq_puts(m, "#    -----------------\n");
2482
2483         if (data->critical_start) {
2484                 seq_puts(m, "#  => started at: ");
2485                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2486                 trace_print_seq(m, &iter->seq);
2487                 seq_puts(m, "\n#  => ended at:   ");
2488                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2489                 trace_print_seq(m, &iter->seq);
2490                 seq_puts(m, "\n#\n");
2491         }
2492
2493         seq_puts(m, "#\n");
2494 }
2495
2496 static void test_cpu_buff_start(struct trace_iterator *iter)
2497 {
2498         struct trace_seq *s = &iter->seq;
2499
2500         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2501                 return;
2502
2503         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2504                 return;
2505
2506         if (cpumask_test_cpu(iter->cpu, iter->started))
2507                 return;
2508
2509         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2510                 return;
2511
2512         cpumask_set_cpu(iter->cpu, iter->started);
2513
2514         /* Don't print started cpu buffer for the first entry of the trace */
2515         if (iter->idx > 1)
2516                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2517                                 iter->cpu);
2518 }
2519
2520 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2521 {
2522         struct trace_seq *s = &iter->seq;
2523         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2524         struct trace_entry *entry;
2525         struct trace_event *event;
2526
2527         entry = iter->ent;
2528
2529         test_cpu_buff_start(iter);
2530
2531         event = ftrace_find_event(entry->type);
2532
2533         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2534                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2535                         if (!trace_print_lat_context(iter))
2536                                 goto partial;
2537                 } else {
2538                         if (!trace_print_context(iter))
2539                                 goto partial;
2540                 }
2541         }
2542
2543         if (event)
2544                 return event->funcs->trace(iter, sym_flags, event);
2545
2546         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2547                 goto partial;
2548
2549         return TRACE_TYPE_HANDLED;
2550 partial:
2551         return TRACE_TYPE_PARTIAL_LINE;
2552 }
2553
2554 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2555 {
2556         struct trace_seq *s = &iter->seq;
2557         struct trace_entry *entry;
2558         struct trace_event *event;
2559
2560         entry = iter->ent;
2561
2562         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2563                 if (!trace_seq_printf(s, "%d %d %llu ",
2564                                       entry->pid, iter->cpu, iter->ts))
2565                         goto partial;
2566         }
2567
2568         event = ftrace_find_event(entry->type);
2569         if (event)
2570                 return event->funcs->raw(iter, 0, event);
2571
2572         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2573                 goto partial;
2574
2575         return TRACE_TYPE_HANDLED;
2576 partial:
2577         return TRACE_TYPE_PARTIAL_LINE;
2578 }
2579
2580 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2581 {
2582         struct trace_seq *s = &iter->seq;
2583         unsigned char newline = '\n';
2584         struct trace_entry *entry;
2585         struct trace_event *event;
2586
2587         entry = iter->ent;
2588
2589         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2590                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2591                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2592                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2593         }
2594
2595         event = ftrace_find_event(entry->type);
2596         if (event) {
2597                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2598                 if (ret != TRACE_TYPE_HANDLED)
2599                         return ret;
2600         }
2601
2602         SEQ_PUT_FIELD_RET(s, newline);
2603
2604         return TRACE_TYPE_HANDLED;
2605 }
2606
2607 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2608 {
2609         struct trace_seq *s = &iter->seq;
2610         struct trace_entry *entry;
2611         struct trace_event *event;
2612
2613         entry = iter->ent;
2614
2615         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2616                 SEQ_PUT_FIELD_RET(s, entry->pid);
2617                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2618                 SEQ_PUT_FIELD_RET(s, iter->ts);
2619         }
2620
2621         event = ftrace_find_event(entry->type);
2622         return event ? event->funcs->binary(iter, 0, event) :
2623                 TRACE_TYPE_HANDLED;
2624 }
2625
2626 int trace_empty(struct trace_iterator *iter)
2627 {
2628         struct ring_buffer_iter *buf_iter;
2629         int cpu;
2630
2631         /* If we are looking at one CPU buffer, only check that one */
2632         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2633                 cpu = iter->cpu_file;
2634                 buf_iter = trace_buffer_iter(iter, cpu);
2635                 if (buf_iter) {
2636                         if (!ring_buffer_iter_empty(buf_iter))
2637                                 return 0;
2638                 } else {
2639                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2640                                 return 0;
2641                 }
2642                 return 1;
2643         }
2644
2645         for_each_tracing_cpu(cpu) {
2646                 buf_iter = trace_buffer_iter(iter, cpu);
2647                 if (buf_iter) {
2648                         if (!ring_buffer_iter_empty(buf_iter))
2649                                 return 0;
2650                 } else {
2651                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2652                                 return 0;
2653                 }
2654         }
2655
2656         return 1;
2657 }
2658
2659 /*  Called with trace_event_read_lock() held. */
2660 enum print_line_t print_trace_line(struct trace_iterator *iter)
2661 {
2662         enum print_line_t ret;
2663
2664         if (iter->lost_events &&
2665             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2666                                  iter->cpu, iter->lost_events))
2667                 return TRACE_TYPE_PARTIAL_LINE;
2668
2669         if (iter->trace && iter->trace->print_line) {
2670                 ret = iter->trace->print_line(iter);
2671                 if (ret != TRACE_TYPE_UNHANDLED)
2672                         return ret;
2673         }
2674
2675         if (iter->ent->type == TRACE_BPUTS &&
2676                         trace_flags & TRACE_ITER_PRINTK &&
2677                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2678                 return trace_print_bputs_msg_only(iter);
2679
2680         if (iter->ent->type == TRACE_BPRINT &&
2681                         trace_flags & TRACE_ITER_PRINTK &&
2682                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2683                 return trace_print_bprintk_msg_only(iter);
2684
2685         if (iter->ent->type == TRACE_PRINT &&
2686                         trace_flags & TRACE_ITER_PRINTK &&
2687                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2688                 return trace_print_printk_msg_only(iter);
2689
2690         if (trace_flags & TRACE_ITER_BIN)
2691                 return print_bin_fmt(iter);
2692
2693         if (trace_flags & TRACE_ITER_HEX)
2694                 return print_hex_fmt(iter);
2695
2696         if (trace_flags & TRACE_ITER_RAW)
2697                 return print_raw_fmt(iter);
2698
2699         return print_trace_fmt(iter);
2700 }
2701
2702 void trace_latency_header(struct seq_file *m)
2703 {
2704         struct trace_iterator *iter = m->private;
2705
2706         /* print nothing if the buffers are empty */
2707         if (trace_empty(iter))
2708                 return;
2709
2710         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2711                 print_trace_header(m, iter);
2712
2713         if (!(trace_flags & TRACE_ITER_VERBOSE))
2714                 print_lat_help_header(m);
2715 }
2716
2717 void trace_default_header(struct seq_file *m)
2718 {
2719         struct trace_iterator *iter = m->private;
2720
2721         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2722                 return;
2723
2724         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2725                 /* print nothing if the buffers are empty */
2726                 if (trace_empty(iter))
2727                         return;
2728                 print_trace_header(m, iter);
2729                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2730                         print_lat_help_header(m);
2731         } else {
2732                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2733                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2734                                 print_func_help_header_irq(iter->trace_buffer, m);
2735                         else
2736                                 print_func_help_header(iter->trace_buffer, m);
2737                 }
2738         }
2739 }
2740
2741 static void test_ftrace_alive(struct seq_file *m)
2742 {
2743         if (!ftrace_is_dead())
2744                 return;
2745         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2746         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2747 }
2748
2749 #ifdef CONFIG_TRACER_MAX_TRACE
2750 static void show_snapshot_main_help(struct seq_file *m)
2751 {
2752         seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2753         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2754         seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2755         seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2756         seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
2757         seq_printf(m, "#                       is not a '0' or '1')\n");
2758 }
2759
2760 static void show_snapshot_percpu_help(struct seq_file *m)
2761 {
2762         seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2763 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2764         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2765         seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2766 #else
2767         seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2768         seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2769 #endif
2770         seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2771         seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
2772         seq_printf(m, "#                       is not a '0' or '1')\n");
2773 }
2774
2775 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2776 {
2777         if (iter->tr->allocated_snapshot)
2778                 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2779         else
2780                 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2781
2782         seq_printf(m, "# Snapshot commands:\n");
2783         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2784                 show_snapshot_main_help(m);
2785         else
2786                 show_snapshot_percpu_help(m);
2787 }
2788 #else
2789 /* Should never be called */
2790 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2791 #endif
2792
2793 static int s_show(struct seq_file *m, void *v)
2794 {
2795         struct trace_iterator *iter = v;
2796         int ret;
2797
2798         if (iter->ent == NULL) {
2799                 if (iter->tr) {
2800                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2801                         seq_puts(m, "#\n");
2802                         test_ftrace_alive(m);
2803                 }
2804                 if (iter->snapshot && trace_empty(iter))
2805                         print_snapshot_help(m, iter);
2806                 else if (iter->trace && iter->trace->print_header)
2807                         iter->trace->print_header(m);
2808                 else
2809                         trace_default_header(m);
2810
2811         } else if (iter->leftover) {
2812                 /*
2813                  * If we filled the seq_file buffer earlier, we
2814                  * want to just show it now.
2815                  */
2816                 ret = trace_print_seq(m, &iter->seq);
2817
2818                 /* ret should this time be zero, but you never know */
2819                 iter->leftover = ret;
2820
2821         } else {
2822                 print_trace_line(iter);
2823                 ret = trace_print_seq(m, &iter->seq);
2824                 /*
2825                  * If we overflow the seq_file buffer, then it will
2826                  * ask us for this data again at start up.
2827                  * Use that instead.
2828                  *  ret is 0 if seq_file write succeeded.
2829                  *        -1 otherwise.
2830                  */
2831                 iter->leftover = ret;
2832         }
2833
2834         return 0;
2835 }
2836
2837 static const struct seq_operations tracer_seq_ops = {
2838         .start          = s_start,
2839         .next           = s_next,
2840         .stop           = s_stop,
2841         .show           = s_show,
2842 };
2843
2844 static struct trace_iterator *
2845 __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2846                struct inode *inode, struct file *file, bool snapshot)
2847 {
2848         struct trace_iterator *iter;
2849         int cpu;
2850
2851         if (tracing_disabled)
2852                 return ERR_PTR(-ENODEV);
2853
2854         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2855         if (!iter)
2856                 return ERR_PTR(-ENOMEM);
2857
2858         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2859                                     GFP_KERNEL);
2860         if (!iter->buffer_iter)
2861                 goto release;
2862
2863         /*
2864          * We make a copy of the current tracer to avoid concurrent
2865          * changes on it while we are reading.
2866          */
2867         mutex_lock(&trace_types_lock);
2868         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2869         if (!iter->trace)
2870                 goto fail;
2871
2872         *iter->trace = *tr->current_trace;
2873
2874         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2875                 goto fail;
2876
2877         iter->tr = tr;
2878
2879 #ifdef CONFIG_TRACER_MAX_TRACE
2880         /* Currently only the top directory has a snapshot */
2881         if (tr->current_trace->print_max || snapshot)
2882                 iter->trace_buffer = &tr->max_buffer;
2883         else
2884 #endif
2885                 iter->trace_buffer = &tr->trace_buffer;
2886         iter->snapshot = snapshot;
2887         iter->pos = -1;
2888         mutex_init(&iter->mutex);
2889         iter->cpu_file = tc->cpu;
2890
2891         /* Notify the tracer early; before we stop tracing. */
2892         if (iter->trace && iter->trace->open)
2893                 iter->trace->open(iter);
2894
2895         /* Annotate start of buffers if we had overruns */
2896         if (ring_buffer_overruns(iter->trace_buffer->buffer))
2897                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2898
2899         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2900         if (trace_clocks[tr->clock_id].in_ns)
2901                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2902
2903         /* stop the trace while dumping if we are not opening "snapshot" */
2904         if (!iter->snapshot)
2905                 tracing_stop_tr(tr);
2906
2907         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2908                 for_each_tracing_cpu(cpu) {
2909                         iter->buffer_iter[cpu] =
2910                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2911                 }
2912                 ring_buffer_read_prepare_sync();
2913                 for_each_tracing_cpu(cpu) {
2914                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2915                         tracing_iter_reset(iter, cpu);
2916                 }
2917         } else {
2918                 cpu = iter->cpu_file;
2919                 iter->buffer_iter[cpu] =
2920                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2921                 ring_buffer_read_prepare_sync();
2922                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2923                 tracing_iter_reset(iter, cpu);
2924         }
2925
2926         mutex_unlock(&trace_types_lock);
2927
2928         return iter;
2929
2930  fail:
2931         mutex_unlock(&trace_types_lock);
2932         kfree(iter->trace);
2933         kfree(iter->buffer_iter);
2934 release:
2935         seq_release_private(inode, file);
2936         return ERR_PTR(-ENOMEM);
2937 }
2938
2939 int tracing_open_generic(struct inode *inode, struct file *filp)
2940 {
2941         if (tracing_disabled)
2942                 return -ENODEV;
2943
2944         filp->private_data = inode->i_private;
2945         return 0;
2946 }
2947
2948 /*
2949  * Open and update trace_array ref count.
2950  * Must have the current trace_array passed to it.
2951  */
2952 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2953 {
2954         struct trace_array *tr = inode->i_private;
2955
2956         if (tracing_disabled)
2957                 return -ENODEV;
2958
2959         if (trace_array_get(tr) < 0)
2960                 return -ENODEV;
2961
2962         filp->private_data = inode->i_private;
2963
2964         return 0;
2965
2966 }
2967
2968 int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2969 {
2970         struct trace_cpu *tc = inode->i_private;
2971         struct trace_array *tr = tc->tr;
2972
2973         if (tracing_disabled)
2974                 return -ENODEV;
2975
2976         if (trace_array_get(tr) < 0)
2977                 return -ENODEV;
2978
2979         filp->private_data = inode->i_private;
2980
2981         return 0;
2982
2983 }
2984
2985 static int tracing_release(struct inode *inode, struct file *file)
2986 {
2987         struct seq_file *m = file->private_data;
2988         struct trace_iterator *iter;
2989         struct trace_array *tr;
2990         int cpu;
2991
2992         /* Writes do not use seq_file, need to grab tr from inode */
2993         if (!(file->f_mode & FMODE_READ)) {
2994                 struct trace_cpu *tc = inode->i_private;
2995
2996                 trace_array_put(tc->tr);
2997                 return 0;
2998         }
2999
3000         iter = m->private;
3001         tr = iter->tr;
3002
3003         mutex_lock(&trace_types_lock);
3004
3005         for_each_tracing_cpu(cpu) {
3006                 if (iter->buffer_iter[cpu])
3007                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3008         }
3009
3010         if (iter->trace && iter->trace->close)
3011                 iter->trace->close(iter);
3012
3013         if (!iter->snapshot)
3014                 /* reenable tracing if it was previously enabled */
3015                 tracing_start_tr(tr);
3016
3017         __trace_array_put(tr);
3018
3019         mutex_unlock(&trace_types_lock);
3020
3021         mutex_destroy(&iter->mutex);
3022         free_cpumask_var(iter->started);
3023         kfree(iter->trace);
3024         kfree(iter->buffer_iter);
3025         seq_release_private(inode, file);
3026
3027         return 0;
3028 }
3029
3030 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3031 {
3032         struct trace_array *tr = inode->i_private;
3033
3034         trace_array_put(tr);
3035         return 0;
3036 }
3037
3038 static int tracing_release_generic_tc(struct inode *inode, struct file *file)
3039 {
3040         struct trace_cpu *tc = inode->i_private;
3041         struct trace_array *tr = tc->tr;
3042
3043         trace_array_put(tr);
3044         return 0;
3045 }
3046
3047 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3048 {
3049         struct trace_array *tr = inode->i_private;
3050
3051         trace_array_put(tr);
3052
3053         return single_release(inode, file);
3054 }
3055
3056 static int tracing_open(struct inode *inode, struct file *file)
3057 {
3058         struct trace_cpu *tc = inode->i_private;
3059         struct trace_array *tr = tc->tr;
3060         struct trace_iterator *iter;
3061         int ret = 0;
3062
3063         if (trace_array_get(tr) < 0)
3064                 return -ENODEV;
3065
3066         /* If this file was open for write, then erase contents */
3067         if ((file->f_mode & FMODE_WRITE) &&
3068             (file->f_flags & O_TRUNC)) {
3069                 if (tc->cpu == RING_BUFFER_ALL_CPUS)
3070                         tracing_reset_online_cpus(&tr->trace_buffer);
3071                 else
3072                         tracing_reset(&tr->trace_buffer, tc->cpu);
3073         }
3074
3075         if (file->f_mode & FMODE_READ) {
3076                 iter = __tracing_open(tr, tc, inode, file, false);
3077                 if (IS_ERR(iter))
3078                         ret = PTR_ERR(iter);
3079                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3080                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3081         }
3082
3083         if (ret < 0)
3084                 trace_array_put(tr);
3085
3086         return ret;
3087 }
3088
3089 static void *
3090 t_next(struct seq_file *m, void *v, loff_t *pos)
3091 {
3092         struct tracer *t = v;
3093
3094         (*pos)++;
3095
3096         if (t)
3097                 t = t->next;
3098
3099         return t;
3100 }
3101
3102 static void *t_start(struct seq_file *m, loff_t *pos)
3103 {
3104         struct tracer *t;
3105         loff_t l = 0;
3106
3107         mutex_lock(&trace_types_lock);
3108         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
3109                 ;
3110
3111         return t;
3112 }
3113
3114 static void t_stop(struct seq_file *m, void *p)
3115 {
3116         mutex_unlock(&trace_types_lock);
3117 }
3118
3119 static int t_show(struct seq_file *m, void *v)
3120 {
3121         struct tracer *t = v;
3122
3123         if (!t)
3124                 return 0;
3125
3126         seq_printf(m, "%s", t->name);
3127         if (t->next)
3128                 seq_putc(m, ' ');
3129         else
3130                 seq_putc(m, '\n');
3131
3132         return 0;
3133 }
3134
3135 static const struct seq_operations show_traces_seq_ops = {
3136         .start          = t_start,
3137         .next           = t_next,
3138         .stop           = t_stop,
3139         .show           = t_show,
3140 };
3141
3142 static int show_traces_open(struct inode *inode, struct file *file)
3143 {
3144         if (tracing_disabled)
3145                 return -ENODEV;
3146
3147         return seq_open(file, &show_traces_seq_ops);
3148 }
3149
3150 static ssize_t
3151 tracing_write_stub(struct file *filp, const char __user *ubuf,
3152                    size_t count, loff_t *ppos)
3153 {
3154         return count;
3155 }
3156
3157 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3158 {
3159         if (file->f_mode & FMODE_READ)
3160                 return seq_lseek(file, offset, origin);
3161         else
3162                 return 0;
3163 }
3164
3165 static const struct file_operations tracing_fops = {
3166         .open           = tracing_open,
3167         .read           = seq_read,
3168         .write          = tracing_write_stub,
3169         .llseek         = tracing_seek,
3170         .release        = tracing_release,
3171 };
3172
3173 static const struct file_operations show_traces_fops = {
3174         .open           = show_traces_open,
3175         .read           = seq_read,
3176         .release        = seq_release,
3177         .llseek         = seq_lseek,
3178 };
3179
3180 /*
3181  * Only trace on a CPU if the bitmask is set:
3182  */
3183 static cpumask_var_t tracing_cpumask;
3184
3185 /*
3186  * The tracer itself will not take this lock, but still we want
3187  * to provide a consistent cpumask to user-space:
3188  */
3189 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3190
3191 /*
3192  * Temporary storage for the character representation of the
3193  * CPU bitmask (and one more byte for the newline):
3194  */
3195 static char mask_str[NR_CPUS + 1];
3196
3197 static ssize_t
3198 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3199                      size_t count, loff_t *ppos)
3200 {
3201         int len;
3202
3203         mutex_lock(&tracing_cpumask_update_lock);
3204
3205         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
3206         if (count - len < 2) {
3207                 count = -EINVAL;
3208                 goto out_err;
3209         }
3210         len += sprintf(mask_str + len, "\n");
3211         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3212
3213 out_err:
3214         mutex_unlock(&tracing_cpumask_update_lock);
3215
3216         return count;
3217 }
3218
3219 static ssize_t
3220 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3221                       size_t count, loff_t *ppos)
3222 {
3223         struct trace_array *tr = filp->private_data;
3224         cpumask_var_t tracing_cpumask_new;
3225         int err, cpu;
3226
3227         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3228                 return -ENOMEM;
3229
3230         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3231         if (err)
3232                 goto err_unlock;
3233
3234         mutex_lock(&tracing_cpumask_update_lock);
3235
3236         local_irq_disable();
3237         arch_spin_lock(&ftrace_max_lock);
3238         for_each_tracing_cpu(cpu) {
3239                 /*
3240                  * Increase/decrease the disabled counter if we are
3241                  * about to flip a bit in the cpumask:
3242                  */
3243                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3244                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3245                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3246                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3247                 }
3248                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3249                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3250                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3251                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3252                 }
3253         }
3254         arch_spin_unlock(&ftrace_max_lock);
3255         local_irq_enable();
3256
3257         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
3258
3259         mutex_unlock(&tracing_cpumask_update_lock);
3260         free_cpumask_var(tracing_cpumask_new);
3261
3262         return count;
3263
3264 err_unlock:
3265         free_cpumask_var(tracing_cpumask_new);
3266
3267         return err;
3268 }
3269
3270 static const struct file_operations tracing_cpumask_fops = {
3271         .open           = tracing_open_generic,
3272         .read           = tracing_cpumask_read,
3273         .write          = tracing_cpumask_write,
3274         .llseek         = generic_file_llseek,
3275 };
3276
3277 static int tracing_trace_options_show(struct seq_file *m, void *v)
3278 {
3279         struct tracer_opt *trace_opts;
3280         struct trace_array *tr = m->private;
3281         u32 tracer_flags;
3282         int i;
3283
3284         mutex_lock(&trace_types_lock);
3285         tracer_flags = tr->current_trace->flags->val;
3286         trace_opts = tr->current_trace->flags->opts;
3287
3288         for (i = 0; trace_options[i]; i++) {
3289                 if (trace_flags & (1 << i))
3290                         seq_printf(m, "%s\n", trace_options[i]);
3291                 else
3292                         seq_printf(m, "no%s\n", trace_options[i]);
3293         }
3294
3295         for (i = 0; trace_opts[i].name; i++) {
3296                 if (tracer_flags & trace_opts[i].bit)
3297                         seq_printf(m, "%s\n", trace_opts[i].name);
3298                 else
3299                         seq_printf(m, "no%s\n", trace_opts[i].name);
3300         }
3301         mutex_unlock(&trace_types_lock);
3302
3303         return 0;
3304 }
3305
3306 static int __set_tracer_option(struct tracer *trace,
3307                                struct tracer_flags *tracer_flags,
3308                                struct tracer_opt *opts, int neg)
3309 {
3310         int ret;
3311
3312         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3313         if (ret)
3314                 return ret;
3315
3316         if (neg)
3317                 tracer_flags->val &= ~opts->bit;
3318         else
3319                 tracer_flags->val |= opts->bit;
3320         return 0;
3321 }
3322
3323 /* Try to assign a tracer specific option */
3324 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3325 {
3326         struct tracer_flags *tracer_flags = trace->flags;
3327         struct tracer_opt *opts = NULL;
3328         int i;
3329
3330         for (i = 0; tracer_flags->opts[i].name; i++) {
3331                 opts = &tracer_flags->opts[i];
3332
3333                 if (strcmp(cmp, opts->name) == 0)
3334                         return __set_tracer_option(trace, trace->flags,
3335                                                    opts, neg);
3336         }
3337
3338         return -EINVAL;
3339 }
3340
3341 /* Some tracers require overwrite to stay enabled */
3342 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3343 {
3344         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3345                 return -1;
3346
3347         return 0;
3348 }
3349
3350 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3351 {
3352         /* do nothing if flag is already set */
3353         if (!!(trace_flags & mask) == !!enabled)
3354                 return 0;
3355
3356         /* Give the tracer a chance to approve the change */
3357         if (tr->current_trace->flag_changed)
3358                 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
3359                         return -EINVAL;
3360
3361         if (enabled)
3362                 trace_flags |= mask;
3363         else
3364                 trace_flags &= ~mask;
3365
3366         if (mask == TRACE_ITER_RECORD_CMD)
3367                 trace_event_enable_cmd_record(enabled);
3368
3369         if (mask == TRACE_ITER_OVERWRITE) {
3370                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3371 #ifdef CONFIG_TRACER_MAX_TRACE
3372                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3373 #endif
3374         }
3375
3376         if (mask == TRACE_ITER_PRINTK)
3377                 trace_printk_start_stop_comm(enabled);
3378
3379         return 0;
3380 }
3381
3382 static int trace_set_options(struct trace_array *tr, char *option)
3383 {
3384         char *cmp;
3385         int neg = 0;
3386         int ret = -ENODEV;
3387         int i;
3388
3389         cmp = strstrip(option);
3390
3391         if (strncmp(cmp, "no", 2) == 0) {
3392                 neg = 1;
3393                 cmp += 2;
3394         }
3395
3396         mutex_lock(&trace_types_lock);
3397
3398         for (i = 0; trace_options[i]; i++) {
3399                 if (strcmp(cmp, trace_options[i]) == 0) {
3400                         ret = set_tracer_flag(tr, 1 << i, !neg);
3401                         break;
3402                 }
3403         }
3404
3405         /* If no option could be set, test the specific tracer options */
3406         if (!trace_options[i])
3407                 ret = set_tracer_option(tr->current_trace, cmp, neg);
3408
3409         mutex_unlock(&trace_types_lock);
3410
3411         return ret;
3412 }
3413
3414 static ssize_t
3415 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3416                         size_t cnt, loff_t *ppos)
3417 {
3418         struct seq_file *m = filp->private_data;
3419         struct trace_array *tr = m->private;
3420         char buf[64];
3421         int ret;
3422
3423         if (cnt >= sizeof(buf))
3424                 return -EINVAL;
3425
3426         if (copy_from_user(&buf, ubuf, cnt))
3427                 return -EFAULT;
3428
3429         buf[cnt] = 0;
3430
3431         ret = trace_set_options(tr, buf);
3432         if (ret < 0)
3433                 return ret;
3434
3435         *ppos += cnt;
3436
3437         return cnt;
3438 }
3439
3440 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3441 {
3442         struct trace_array *tr = inode->i_private;
3443         int ret;
3444
3445         if (tracing_disabled)
3446                 return -ENODEV;
3447
3448         if (trace_array_get(tr) < 0)
3449                 return -ENODEV;
3450
3451         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3452         if (ret < 0)
3453                 trace_array_put(tr);
3454
3455         return ret;
3456 }
3457
3458 static const struct file_operations tracing_iter_fops = {
3459         .open           = tracing_trace_options_open,
3460         .read           = seq_read,
3461         .llseek         = seq_lseek,
3462         .release        = tracing_single_release_tr,
3463         .write          = tracing_trace_options_write,
3464 };
3465
3466 static const char readme_msg[] =
3467         "tracing mini-HOWTO:\n\n"
3468         "# echo 0 > tracing_on : quick way to disable tracing\n"
3469         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3470         " Important files:\n"
3471         "  trace\t\t\t- The static contents of the buffer\n"
3472         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3473         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3474         "  current_tracer\t- function and latency tracers\n"
3475         "  available_tracers\t- list of configured tracers for current_tracer\n"
3476         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3477         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3478         "  trace_clock\t\t-change the clock used to order events\n"
3479         "       local:   Per cpu clock but may not be synced across CPUs\n"
3480         "      global:   Synced across CPUs but slows tracing down.\n"
3481         "     counter:   Not a clock, but just an increment\n"
3482         "      uptime:   Jiffy counter from time of boot\n"
3483         "        perf:   Same clock that perf events use\n"
3484 #ifdef CONFIG_X86_64
3485         "     x86-tsc:   TSC cycle counter\n"
3486 #endif
3487         "\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3488         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3489         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3490         "\t\t\t  Remove sub-buffer with rmdir\n"
3491         "  trace_options\t\t- Set format or modify how tracing happens\n"
3492         "\t\t\t  Disable an option by adding a suffix 'no' to the option name\n"
3493 #ifdef CONFIG_DYNAMIC_FTRACE
3494         "\n  available_filter_functions - list of functions that can be filtered on\n"
3495         "  set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3496         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3497         "            modules: Can select a group via module\n"
3498         "             Format: :mod:<module-name>\n"
3499         "             example: echo :mod:ext3 > set_ftrace_filter\n"
3500         "            triggers: a command to perform when function is hit\n"
3501         "              Format: <function>:<trigger>[:count]\n"
3502         "             trigger: traceon, traceoff\n"
3503         "                      enable_event:<system>:<event>\n"
3504         "                      disable_event:<system>:<event>\n"
3505 #ifdef CONFIG_STACKTRACE
3506         "                      stacktrace\n"
3507 #endif
3508 #ifdef CONFIG_TRACER_SNAPSHOT
3509         "                      snapshot\n"
3510 #endif
3511         "             example: echo do_fault:traceoff > set_ftrace_filter\n"
3512         "                      echo do_trap:traceoff:3 > set_ftrace_filter\n"
3513         "             The first one will disable tracing every time do_fault is hit\n"
3514         "             The second will disable tracing at most 3 times when do_trap is hit\n"
3515         "               The first time do trap is hit and it disables tracing, the counter\n"
3516         "               will decrement to 2. If tracing is already disabled, the counter\n"
3517         "               will not decrement. It only decrements when the trigger did work\n"
3518         "             To remove trigger without count:\n"
3519         "               echo '!<function>:<trigger> > set_ftrace_filter\n"
3520         "             To remove trigger with a count:\n"
3521         "               echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3522         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3523         "            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3524         "            modules: Can select a group via module command :mod:\n"
3525         "            Does not accept triggers\n"
3526 #endif /* CONFIG_DYNAMIC_FTRACE */
3527 #ifdef CONFIG_FUNCTION_TRACER
3528         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3529 #endif
3530 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3531         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3532         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3533 #endif
3534 #ifdef CONFIG_TRACER_SNAPSHOT
3535         "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3536         "\t\t\t  Read the contents for more information\n"
3537 #endif
3538 #ifdef CONFIG_STACKTRACE
3539         "  stack_trace\t\t- Shows the max stack trace when active\n"
3540         "  stack_max_size\t- Shows current max stack size that was traced\n"
3541         "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
3542 #ifdef CONFIG_DYNAMIC_FTRACE
3543         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3544 #endif
3545 #endif /* CONFIG_STACKTRACE */
3546 ;
3547
3548 static ssize_t
3549 tracing_readme_read(struct file *filp, char __user *ubuf,
3550                        size_t cnt, loff_t *ppos)
3551 {
3552         return simple_read_from_buffer(ubuf, cnt, ppos,
3553                                         readme_msg, strlen(readme_msg));
3554 }
3555
3556 static const struct file_operations tracing_readme_fops = {
3557         .open           = tracing_open_generic,
3558         .read           = tracing_readme_read,
3559         .llseek         = generic_file_llseek,
3560 };
3561
3562 static ssize_t
3563 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3564                                 size_t cnt, loff_t *ppos)
3565 {
3566         char *buf_comm;
3567         char *file_buf;
3568         char *buf;
3569         int len = 0;
3570         int pid;
3571         int i;
3572
3573         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3574         if (!file_buf)
3575                 return -ENOMEM;
3576
3577         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3578         if (!buf_comm) {
3579                 kfree(file_buf);
3580                 return -ENOMEM;
3581         }
3582
3583         buf = file_buf;
3584
3585         for (i = 0; i < SAVED_CMDLINES; i++) {
3586                 int r;
3587
3588                 pid = map_cmdline_to_pid[i];
3589                 if (pid == -1 || pid == NO_CMDLINE_MAP)
3590                         continue;
3591
3592                 trace_find_cmdline(pid, buf_comm);
3593                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3594                 buf += r;
3595                 len += r;
3596         }
3597
3598         len = simple_read_from_buffer(ubuf, cnt, ppos,
3599                                       file_buf, len);
3600
3601         kfree(file_buf);
3602         kfree(buf_comm);
3603
3604         return len;
3605 }
3606
3607 static const struct file_operations tracing_saved_cmdlines_fops = {
3608     .open       = tracing_open_generic,
3609     .read       = tracing_saved_cmdlines_read,
3610     .llseek     = generic_file_llseek,
3611 };
3612
3613 static ssize_t
3614 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3615                        size_t cnt, loff_t *ppos)
3616 {
3617         struct trace_array *tr = filp->private_data;
3618         char buf[MAX_TRACER_SIZE+2];
3619         int r;
3620
3621         mutex_lock(&trace_types_lock);
3622         r = sprintf(buf, "%s\n", tr->current_trace->name);
3623         mutex_unlock(&trace_types_lock);
3624
3625         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3626 }
3627
3628 int tracer_init(struct tracer *t, struct trace_array *tr)
3629 {
3630         tracing_reset_online_cpus(&tr->trace_buffer);
3631         return t->init(tr);
3632 }
3633
3634 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3635 {
3636         int cpu;
3637
3638         for_each_tracing_cpu(cpu)
3639                 per_cpu_ptr(buf->data, cpu)->entries = val;
3640 }
3641
3642 #ifdef CONFIG_TRACER_MAX_TRACE
3643 /* resize @tr's buffer to the size of @size_tr's entries */
3644 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3645                                         struct trace_buffer *size_buf, int cpu_id)
3646 {
3647         int cpu, ret = 0;
3648
3649         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3650                 for_each_tracing_cpu(cpu) {
3651                         ret = ring_buffer_resize(trace_buf->buffer,
3652                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3653                         if (ret < 0)
3654                                 break;
3655                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3656                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3657                 }
3658         } else {
3659                 ret = ring_buffer_resize(trace_buf->buffer,
3660                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3661                 if (ret == 0)
3662                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3663                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3664         }
3665
3666         return ret;
3667 }
3668 #endif /* CONFIG_TRACER_MAX_TRACE */
3669
3670 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3671                                         unsigned long size, int cpu)
3672 {
3673         int ret;
3674
3675         /*
3676          * If kernel or user changes the size of the ring buffer
3677          * we use the size that was given, and we can forget about
3678          * expanding it later.
3679          */
3680         ring_buffer_expanded = true;
3681
3682         /* May be called before buffers are initialized */
3683         if (!tr->trace_buffer.buffer)
3684                 return 0;
3685
3686         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3687         if (ret < 0)
3688                 return ret;
3689
3690 #ifdef CONFIG_TRACER_MAX_TRACE
3691         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3692             !tr->current_trace->use_max_tr)
3693                 goto out;
3694
3695         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3696         if (ret < 0) {
3697                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3698                                                      &tr->trace_buffer, cpu);
3699                 if (r < 0) {
3700                         /*
3701                          * AARGH! We are left with different
3702                          * size max buffer!!!!
3703                          * The max buffer is our "snapshot" buffer.
3704                          * When a tracer needs a snapshot (one of the
3705                          * latency tracers), it swaps the max buffer
3706                          * with the saved snap shot. We succeeded to
3707                          * update the size of the main buffer, but failed to
3708                          * update the size of the max buffer. But when we tried
3709                          * to reset the main buffer to the original size, we
3710                          * failed there too. This is very unlikely to
3711                          * happen, but if it does, warn and kill all
3712                          * tracing.
3713                          */
3714                         WARN_ON(1);
3715                         tracing_disabled = 1;
3716                 }
3717                 return ret;
3718         }
3719
3720         if (cpu == RING_BUFFER_ALL_CPUS)
3721                 set_buffer_entries(&tr->max_buffer, size);
3722         else
3723                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3724
3725  out:
3726 #endif /* CONFIG_TRACER_MAX_TRACE */
3727
3728         if (cpu == RING_BUFFER_ALL_CPUS)
3729                 set_buffer_entries(&tr->trace_buffer, size);
3730         else
3731                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3732
3733         return ret;
3734 }
3735
3736 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3737                                           unsigned long size, int cpu_id)
3738 {
3739         int ret = size;
3740
3741         mutex_lock(&trace_types_lock);
3742
3743         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3744                 /* make sure, this cpu is enabled in the mask */
3745                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3746                         ret = -EINVAL;
3747                         goto out;
3748                 }
3749         }
3750
3751         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3752         if (ret < 0)
3753                 ret = -ENOMEM;
3754
3755 out:
3756         mutex_unlock(&trace_types_lock);
3757
3758         return ret;
3759 }
3760
3761
3762 /**
3763  * tracing_update_buffers - used by tracing facility to expand ring buffers
3764  *
3765  * To save on memory when the tracing is never used on a system with it
3766  * configured in. The ring buffers are set to a minimum size. But once
3767  * a user starts to use the tracing facility, then they need to grow
3768  * to their default size.
3769  *
3770  * This function is to be called when a tracer is about to be used.
3771  */
3772 int tracing_update_buffers(void)
3773 {
3774         int ret = 0;
3775
3776         mutex_lock(&trace_types_lock);
3777         if (!ring_buffer_expanded)
3778                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3779                                                 RING_BUFFER_ALL_CPUS);
3780         mutex_unlock(&trace_types_lock);
3781
3782         return ret;
3783 }
3784
3785 struct trace_option_dentry;
3786
3787 static struct trace_option_dentry *
3788 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3789
3790 static void
3791 destroy_trace_option_files(struct trace_option_dentry *topts);
3792
3793 static int tracing_set_tracer(const char *buf)
3794 {
3795         static struct trace_option_dentry *topts;
3796         struct trace_array *tr = &global_trace;
3797         struct tracer *t;
3798 #ifdef CONFIG_TRACER_MAX_TRACE
3799         bool had_max_tr;
3800 #endif
3801         int ret = 0;
3802
3803         mutex_lock(&trace_types_lock);
3804
3805         if (!ring_buffer_expanded) {
3806                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3807                                                 RING_BUFFER_ALL_CPUS);
3808                 if (ret < 0)
3809                         goto out;
3810                 ret = 0;
3811         }
3812
3813         for (t = trace_types; t; t = t->next) {
3814                 if (strcmp(t->name, buf) == 0)
3815                         break;
3816         }
3817         if (!t) {
3818                 ret = -EINVAL;
3819                 goto out;
3820         }
3821         if (t == tr->current_trace)
3822                 goto out;
3823
3824         trace_branch_disable();
3825
3826         tr->current_trace->enabled = false;
3827
3828         if (tr->current_trace->reset)
3829                 tr->current_trace->reset(tr);
3830
3831         /* Current trace needs to be nop_trace before synchronize_sched */
3832         tr->current_trace = &nop_trace;
3833
3834 #ifdef CONFIG_TRACER_MAX_TRACE
3835         had_max_tr = tr->allocated_snapshot;
3836
3837         if (had_max_tr && !t->use_max_tr) {
3838                 /*
3839                  * We need to make sure that the update_max_tr sees that
3840                  * current_trace changed to nop_trace to keep it from
3841                  * swapping the buffers after we resize it.
3842                  * The update_max_tr is called from interrupts disabled
3843                  * so a synchronized_sched() is sufficient.
3844                  */
3845                 synchronize_sched();
3846                 free_snapshot(tr);
3847         }
3848 #endif
3849         destroy_trace_option_files(topts);
3850
3851         topts = create_trace_option_files(tr, t);
3852
3853 #ifdef CONFIG_TRACER_MAX_TRACE
3854         if (t->use_max_tr && !had_max_tr) {
3855                 ret = alloc_snapshot(tr);
3856                 if (ret < 0)
3857                         goto out;
3858         }
3859 #endif
3860
3861         if (t->init) {
3862                 ret = tracer_init(t, tr);
3863                 if (ret)
3864                         goto out;
3865         }
3866
3867         tr->current_trace = t;
3868         tr->current_trace->enabled = true;
3869         trace_branch_enable(tr);
3870  out:
3871         mutex_unlock(&trace_types_lock);
3872
3873         return ret;
3874 }
3875
3876 static ssize_t
3877 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3878                         size_t cnt, loff_t *ppos)
3879 {
3880         char buf[MAX_TRACER_SIZE+1];
3881         int i;
3882         size_t ret;
3883         int err;
3884
3885         ret = cnt;
3886
3887         if (cnt > MAX_TRACER_SIZE)
3888                 cnt = MAX_TRACER_SIZE;
3889
3890         if (copy_from_user(&buf, ubuf, cnt))
3891                 return -EFAULT;
3892
3893         buf[cnt] = 0;
3894
3895         /* strip ending whitespace. */
3896         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3897                 buf[i] = 0;
3898
3899         err = tracing_set_tracer(buf);
3900         if (err)
3901                 return err;
3902
3903         *ppos += ret;
3904
3905         return ret;
3906 }
3907
3908 static ssize_t
3909 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3910                      size_t cnt, loff_t *ppos)
3911 {
3912         unsigned long *ptr = filp->private_data;
3913         char buf[64];
3914         int r;
3915
3916         r = snprintf(buf, sizeof(buf), "%ld\n",
3917                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3918         if (r > sizeof(buf))
3919                 r = sizeof(buf);
3920         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3921 }
3922
3923 static ssize_t
3924 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3925                       size_t cnt, loff_t *ppos)
3926 {
3927         unsigned long *ptr = filp->private_data;
3928         unsigned long val;
3929         int ret;
3930
3931         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3932         if (ret)
3933                 return ret;
3934
3935         *ptr = val * 1000;
3936
3937         return cnt;
3938 }
3939
3940 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3941 {
3942         struct trace_cpu *tc = inode->i_private;
3943         struct trace_array *tr = tc->tr;
3944         struct trace_iterator *iter;
3945         int ret = 0;
3946
3947         if (tracing_disabled)
3948                 return -ENODEV;
3949
3950         if (trace_array_get(tr) < 0)
3951                 return -ENODEV;
3952
3953         mutex_lock(&trace_types_lock);
3954
3955         /* create a buffer to store the information to pass to userspace */
3956         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3957         if (!iter) {
3958                 ret = -ENOMEM;
3959                 __trace_array_put(tr);
3960                 goto out;
3961         }
3962
3963         /*
3964          * We make a copy of the current tracer to avoid concurrent
3965          * changes on it while we are reading.
3966          */
3967         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3968         if (!iter->trace) {
3969                 ret = -ENOMEM;
3970                 goto fail;
3971         }
3972         *iter->trace = *tr->current_trace;
3973
3974         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3975                 ret = -ENOMEM;
3976                 goto fail;
3977         }
3978
3979         /* trace pipe does not show start of buffer */
3980         cpumask_setall(iter->started);
3981
3982         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3983                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3984
3985         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3986         if (trace_clocks[tr->clock_id].in_ns)
3987                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3988
3989         iter->cpu_file = tc->cpu;
3990         iter->tr = tc->tr;
3991         iter->trace_buffer = &tc->tr->trace_buffer;
3992         mutex_init(&iter->mutex);
3993         filp->private_data = iter;
3994
3995         if (iter->trace->pipe_open)
3996                 iter->trace->pipe_open(iter);
3997
3998         nonseekable_open(inode, filp);
3999 out:
4000         mutex_unlock(&trace_types_lock);
4001         return ret;
4002
4003 fail:
4004         kfree(iter->trace);
4005         kfree(iter);
4006         __trace_array_put(tr);
4007         mutex_unlock(&trace_types_lock);
4008         return ret;
4009 }
4010
4011 static int tracing_release_pipe(struct inode *inode, struct file *file)
4012 {
4013         struct trace_iterator *iter = file->private_data;
4014         struct trace_cpu *tc = inode->i_private;
4015         struct trace_array *tr = tc->tr;
4016
4017         mutex_lock(&trace_types_lock);
4018
4019         if (iter->trace->pipe_close)
4020                 iter->trace->pipe_close(iter);
4021
4022         mutex_unlock(&trace_types_lock);
4023
4024         free_cpumask_var(iter->started);
4025         mutex_destroy(&iter->mutex);
4026         kfree(iter->trace);
4027         kfree(iter);
4028
4029         trace_array_put(tr);
4030
4031         return 0;
4032 }
4033
4034 static unsigned int
4035 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4036 {
4037         /* Iterators are static, they should be filled or empty */
4038         if (trace_buffer_iter(iter, iter->cpu_file))
4039                 return POLLIN | POLLRDNORM;
4040
4041         if (trace_flags & TRACE_ITER_BLOCK)
4042                 /*
4043                  * Always select as readable when in blocking mode
4044                  */
4045                 return POLLIN | POLLRDNORM;
4046         else
4047                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4048                                              filp, poll_table);
4049 }
4050
4051 static unsigned int
4052 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4053 {
4054         struct trace_iterator *iter = filp->private_data;
4055
4056         return trace_poll(iter, filp, poll_table);
4057 }
4058
4059 /*
4060  * This is a make-shift waitqueue.
4061  * A tracer might use this callback on some rare cases:
4062  *
4063  *  1) the current tracer might hold the runqueue lock when it wakes up
4064  *     a reader, hence a deadlock (sched, function, and function graph tracers)
4065  *  2) the function tracers, trace all functions, we don't want
4066  *     the overhead of calling wake_up and friends
4067  *     (and tracing them too)
4068  *
4069  *     Anyway, this is really very primitive wakeup.
4070  */
4071 void poll_wait_pipe(struct trace_iterator *iter)
4072 {
4073         set_current_state(TASK_INTERRUPTIBLE);
4074         /* sleep for 100 msecs, and try again. */
4075         schedule_timeout(HZ / 10);
4076 }
4077
4078 /* Must be called with trace_types_lock mutex held. */
4079 static int tracing_wait_pipe(struct file *filp)
4080 {
4081         struct trace_iterator *iter = filp->private_data;
4082
4083         while (trace_empty(iter)) {
4084
4085                 if ((filp->f_flags & O_NONBLOCK)) {
4086                         return -EAGAIN;
4087                 }
4088
4089                 mutex_unlock(&iter->mutex);
4090
4091                 iter->trace->wait_pipe(iter);
4092
4093                 mutex_lock(&iter->mutex);
4094
4095                 if (signal_pending(current))
4096                         return -EINTR;
4097
4098                 /*
4099                  * We block until we read something and tracing is disabled.
4100                  * We still block if tracing is disabled, but we have never
4101                  * read anything. This allows a user to cat this file, and
4102                  * then enable tracing. But after we have read something,
4103                  * we give an EOF when tracing is again disabled.
4104                  *
4105                  * iter->pos will be 0 if we haven't read anything.
4106                  */
4107                 if (!tracing_is_on() && iter->pos)
4108                         break;
4109         }
4110
4111         return 1;
4112 }
4113
4114 /*
4115  * Consumer reader.
4116  */
4117 static ssize_t
4118 tracing_read_pipe(struct file *filp, char __user *ubuf,
4119                   size_t cnt, loff_t *ppos)
4120 {
4121         struct trace_iterator *iter = filp->private_data;
4122         struct trace_array *tr = iter->tr;
4123         ssize_t sret;
4124
4125         /* return any leftover data */
4126         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4127         if (sret != -EBUSY)
4128                 return sret;
4129
4130         trace_seq_init(&iter->seq);
4131
4132         /* copy the tracer to avoid using a global lock all around */
4133         mutex_lock(&trace_types_lock);
4134         if (unlikely(iter->trace->name != tr->current_trace->name))
4135                 *iter->trace = *tr->current_trace;
4136         mutex_unlock(&trace_types_lock);
4137
4138         /*
4139          * Avoid more than one consumer on a single file descriptor
4140          * This is just a matter of traces coherency, the ring buffer itself
4141          * is protected.
4142          */
4143         mutex_lock(&iter->mutex);
4144         if (iter->trace->read) {
4145                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4146                 if (sret)
4147                         goto out;
4148         }
4149
4150 waitagain:
4151         sret = tracing_wait_pipe(filp);
4152         if (sret <= 0)
4153                 goto out;
4154
4155         /* stop when tracing is finished */
4156         if (trace_empty(iter)) {
4157                 sret = 0;
4158                 goto out;
4159         }
4160
4161         if (cnt >= PAGE_SIZE)
4162                 cnt = PAGE_SIZE - 1;
4163
4164         /* reset all but tr, trace, and overruns */
4165         memset(&iter->seq, 0,
4166                sizeof(struct trace_iterator) -
4167                offsetof(struct trace_iterator, seq));
4168         cpumask_clear(iter->started);
4169         iter->pos = -1;
4170
4171         trace_event_read_lock();
4172         trace_access_lock(iter->cpu_file);
4173         while (trace_find_next_entry_inc(iter) != NULL) {
4174                 enum print_line_t ret;
4175                 int len = iter->seq.len;
4176
4177                 ret = print_trace_line(iter);
4178                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4179                         /* don't print partial lines */
4180                         iter->seq.len = len;
4181                         break;
4182                 }
4183                 if (ret != TRACE_TYPE_NO_CONSUME)
4184                         trace_consume(iter);
4185
4186                 if (iter->seq.len >= cnt)
4187                         break;
4188
4189                 /*
4190                  * Setting the full flag means we reached the trace_seq buffer
4191                  * size and we should leave by partial output condition above.
4192                  * One of the trace_seq_* functions is not used properly.
4193                  */
4194                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4195                           iter->ent->type);
4196         }
4197         trace_access_unlock(iter->cpu_file);
4198         trace_event_read_unlock();
4199
4200         /* Now copy what we have to the user */
4201         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4202         if (iter->seq.readpos >= iter->seq.len)
4203                 trace_seq_init(&iter->seq);
4204
4205         /*
4206          * If there was nothing to send to user, in spite of consuming trace
4207          * entries, go back to wait for more entries.
4208          */
4209         if (sret == -EBUSY)
4210                 goto waitagain;
4211
4212 out:
4213         mutex_unlock(&iter->mutex);
4214
4215         return sret;
4216 }
4217
4218 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4219                                      struct pipe_buffer *buf)
4220 {
4221         __free_page(buf->page);
4222 }
4223
4224 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4225                                      unsigned int idx)
4226 {
4227         __free_page(spd->pages[idx]);
4228 }
4229
4230 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4231         .can_merge              = 0,
4232         .map                    = generic_pipe_buf_map,
4233         .unmap                  = generic_pipe_buf_unmap,
4234         .confirm                = generic_pipe_buf_confirm,
4235         .release                = tracing_pipe_buf_release,
4236         .steal                  = generic_pipe_buf_steal,
4237         .get                    = generic_pipe_buf_get,
4238 };
4239
4240 static size_t
4241 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4242 {
4243         size_t count;
4244         int ret;
4245
4246         /* Seq buffer is page-sized, exactly what we need. */
4247         for (;;) {
4248                 count = iter->seq.len;
4249                 ret = print_trace_line(iter);
4250                 count = iter->seq.len - count;
4251                 if (rem < count) {
4252                         rem = 0;
4253                         iter->seq.len -= count;
4254                         break;
4255                 }
4256                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4257                         iter->seq.len -= count;
4258                         break;
4259                 }
4260
4261                 if (ret != TRACE_TYPE_NO_CONSUME)
4262                         trace_consume(iter);
4263                 rem -= count;
4264                 if (!trace_find_next_entry_inc(iter))   {
4265                         rem = 0;
4266                         iter->ent = NULL;
4267                         break;
4268                 }
4269         }
4270
4271         return rem;
4272 }
4273
4274 static ssize_t tracing_splice_read_pipe(struct file *filp,
4275                                         loff_t *ppos,
4276                                         struct pipe_inode_info *pipe,
4277                                         size_t len,
4278                                         unsigned int flags)
4279 {
4280         struct page *pages_def[PIPE_DEF_BUFFERS];
4281         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4282         struct trace_iterator *iter = filp->private_data;
4283         struct splice_pipe_desc spd = {
4284                 .pages          = pages_def,
4285                 .partial        = partial_def,
4286                 .nr_pages       = 0, /* This gets updated below. */
4287                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4288                 .flags          = flags,
4289                 .ops            = &tracing_pipe_buf_ops,
4290                 .spd_release    = tracing_spd_release_pipe,
4291         };
4292         struct trace_array *tr = iter->tr;
4293         ssize_t ret;
4294         size_t rem;
4295         unsigned int i;
4296
4297         if (splice_grow_spd(pipe, &spd))
4298                 return -ENOMEM;
4299
4300         /* copy the tracer to avoid using a global lock all around */
4301         mutex_lock(&trace_types_lock);
4302         if (unlikely(iter->trace->name != tr->current_trace->name))
4303                 *iter->trace = *tr->current_trace;
4304         mutex_unlock(&trace_types_lock);
4305
4306         mutex_lock(&iter->mutex);
4307
4308         if (iter->trace->splice_read) {
4309                 ret = iter->trace->splice_read(iter, filp,
4310                                                ppos, pipe, len, flags);
4311                 if (ret)
4312                         goto out_err;
4313         }
4314
4315         ret = tracing_wait_pipe(filp);
4316         if (ret <= 0)
4317                 goto out_err;
4318
4319         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4320                 ret = -EFAULT;
4321                 goto out_err;
4322         }
4323
4324         trace_event_read_lock();
4325         trace_access_lock(iter->cpu_file);
4326
4327         /* Fill as many pages as possible. */
4328         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4329                 spd.pages[i] = alloc_page(GFP_KERNEL);
4330                 if (!spd.pages[i])
4331                         break;
4332
4333                 rem = tracing_fill_pipe_page(rem, iter);
4334
4335                 /* Copy the data into the page, so we can start over. */
4336                 ret = trace_seq_to_buffer(&iter->seq,
4337                                           page_address(spd.pages[i]),
4338                                           iter->seq.len);
4339                 if (ret < 0) {
4340                         __free_page(spd.pages[i]);
4341                         break;
4342                 }
4343                 spd.partial[i].offset = 0;
4344                 spd.partial[i].len = iter->seq.len;
4345
4346                 trace_seq_init(&iter->seq);
4347         }
4348
4349         trace_access_unlock(iter->cpu_file);
4350         trace_event_read_unlock();
4351         mutex_unlock(&iter->mutex);
4352
4353         spd.nr_pages = i;
4354
4355         ret = splice_to_pipe(pipe, &spd);
4356 out:
4357         splice_shrink_spd(&spd);
4358         return ret;
4359
4360 out_err:
4361         mutex_unlock(&iter->mutex);
4362         goto out;
4363 }
4364
4365 static ssize_t
4366 tracing_entries_read(struct file *filp, char __user *ubuf,
4367                      size_t cnt, loff_t *ppos)
4368 {
4369         struct trace_cpu *tc = filp->private_data;
4370         struct trace_array *tr = tc->tr;
4371         char buf[64];
4372         int r = 0;
4373         ssize_t ret;
4374
4375         mutex_lock(&trace_types_lock);
4376
4377         if (tc->cpu == RING_BUFFER_ALL_CPUS) {
4378                 int cpu, buf_size_same;
4379                 unsigned long size;
4380
4381                 size = 0;
4382                 buf_size_same = 1;
4383                 /* check if all cpu sizes are same */
4384                 for_each_tracing_cpu(cpu) {
4385                         /* fill in the size from first enabled cpu */
4386                         if (size == 0)
4387                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4388                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4389                                 buf_size_same = 0;
4390                                 break;
4391                         }
4392                 }
4393
4394                 if (buf_size_same) {
4395                         if (!ring_buffer_expanded)
4396                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4397                                             size >> 10,
4398                                             trace_buf_size >> 10);
4399                         else
4400                                 r = sprintf(buf, "%lu\n", size >> 10);
4401                 } else
4402                         r = sprintf(buf, "X\n");
4403         } else
4404                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
4405
4406         mutex_unlock(&trace_types_lock);
4407
4408         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4409         return ret;
4410 }
4411
4412 static ssize_t
4413 tracing_entries_write(struct file *filp, const char __user *ubuf,
4414                       size_t cnt, loff_t *ppos)
4415 {
4416         struct trace_cpu *tc = filp->private_data;
4417         unsigned long val;
4418         int ret;
4419
4420         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4421         if (ret)
4422                 return ret;
4423
4424         /* must have at least 1 entry */
4425         if (!val)
4426                 return -EINVAL;
4427
4428         /* value is in KB */
4429         val <<= 10;
4430
4431         ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
4432         if (ret < 0)
4433                 return ret;
4434
4435         *ppos += cnt;
4436
4437         return cnt;
4438 }
4439
4440 static ssize_t
4441 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4442                                 size_t cnt, loff_t *ppos)
4443 {
4444         struct trace_array *tr = filp->private_data;
4445         char buf[64];
4446         int r, cpu;
4447         unsigned long size = 0, expanded_size = 0;
4448
4449         mutex_lock(&trace_types_lock);
4450         for_each_tracing_cpu(cpu) {
4451                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4452                 if (!ring_buffer_expanded)
4453                         expanded_size += trace_buf_size >> 10;
4454         }
4455         if (ring_buffer_expanded)
4456                 r = sprintf(buf, "%lu\n", size);
4457         else
4458                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4459         mutex_unlock(&trace_types_lock);
4460
4461         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4462 }
4463
4464 static ssize_t
4465 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4466                           size_t cnt, loff_t *ppos)
4467 {
4468         /*
4469          * There is no need to read what the user has written, this function
4470          * is just to make sure that there is no error when "echo" is used
4471          */
4472
4473         *ppos += cnt;
4474
4475         return cnt;
4476 }
4477
4478 static int
4479 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4480 {
4481         struct trace_array *tr = inode->i_private;
4482
4483         /* disable tracing ? */
4484         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4485                 tracer_tracing_off(tr);
4486         /* resize the ring buffer to 0 */
4487         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4488
4489         trace_array_put(tr);
4490
4491         return 0;
4492 }
4493
4494 static ssize_t
4495 tracing_mark_write(struct file *filp, const char __user *ubuf,
4496                                         size_t cnt, loff_t *fpos)
4497 {
4498         unsigned long addr = (unsigned long)ubuf;
4499         struct trace_array *tr = filp->private_data;
4500         struct ring_buffer_event *event;
4501         struct ring_buffer *buffer;
4502         struct print_entry *entry;
4503         unsigned long irq_flags;
4504         struct page *pages[2];
4505         void *map_page[2];
4506         int nr_pages = 1;
4507         ssize_t written;
4508         int offset;
4509         int size;
4510         int len;
4511         int ret;
4512         int i;
4513
4514         if (tracing_disabled)
4515                 return -EINVAL;
4516
4517         if (!(trace_flags & TRACE_ITER_MARKERS))
4518                 return -EINVAL;
4519
4520         if (cnt > TRACE_BUF_SIZE)
4521                 cnt = TRACE_BUF_SIZE;
4522
4523         /*
4524          * Userspace is injecting traces into the kernel trace buffer.
4525          * We want to be as non intrusive as possible.
4526          * To do so, we do not want to allocate any special buffers
4527          * or take any locks, but instead write the userspace data
4528          * straight into the ring buffer.
4529          *
4530          * First we need to pin the userspace buffer into memory,
4531          * which, most likely it is, because it just referenced it.
4532          * But there's no guarantee that it is. By using get_user_pages_fast()
4533          * and kmap_atomic/kunmap_atomic() we can get access to the
4534          * pages directly. We then write the data directly into the
4535          * ring buffer.
4536          */
4537         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4538
4539         /* check if we cross pages */
4540         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4541                 nr_pages = 2;
4542
4543         offset = addr & (PAGE_SIZE - 1);
4544         addr &= PAGE_MASK;
4545
4546         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4547         if (ret < nr_pages) {
4548                 while (--ret >= 0)
4549                         put_page(pages[ret]);
4550                 written = -EFAULT;
4551                 goto out;
4552         }
4553
4554         for (i = 0; i < nr_pages; i++)
4555                 map_page[i] = kmap_atomic(pages[i]);
4556
4557         local_save_flags(irq_flags);
4558         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4559         buffer = tr->trace_buffer.buffer;
4560         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4561                                           irq_flags, preempt_count());
4562         if (!event) {
4563                 /* Ring buffer disabled, return as if not open for write */
4564                 written = -EBADF;
4565                 goto out_unlock;
4566         }
4567
4568         entry = ring_buffer_event_data(event);
4569         entry->ip = _THIS_IP_;
4570
4571         if (nr_pages == 2) {
4572                 len = PAGE_SIZE - offset;
4573                 memcpy(&entry->buf, map_page[0] + offset, len);
4574                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4575         } else
4576                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4577
4578         if (entry->buf[cnt - 1] != '\n') {
4579                 entry->buf[cnt] = '\n';
4580                 entry->buf[cnt + 1] = '\0';
4581         } else
4582                 entry->buf[cnt] = '\0';
4583
4584         __buffer_unlock_commit(buffer, event);
4585
4586         written = cnt;
4587
4588         *fpos += written;
4589
4590  out_unlock:
4591         for (i = 0; i < nr_pages; i++){
4592                 kunmap_atomic(map_page[i]);
4593                 put_page(pages[i]);
4594         }
4595  out:
4596         return written;
4597 }
4598
4599 static int tracing_clock_show(struct seq_file *m, void *v)
4600 {
4601         struct trace_array *tr = m->private;
4602         int i;
4603
4604         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4605                 seq_printf(m,
4606                         "%s%s%s%s", i ? " " : "",
4607                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4608                         i == tr->clock_id ? "]" : "");
4609         seq_putc(m, '\n');
4610
4611         return 0;
4612 }
4613
4614 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4615                                    size_t cnt, loff_t *fpos)
4616 {
4617         struct seq_file *m = filp->private_data;
4618         struct trace_array *tr = m->private;
4619         char buf[64];
4620         const char *clockstr;
4621         int i;
4622
4623         if (cnt >= sizeof(buf))
4624                 return -EINVAL;
4625
4626         if (copy_from_user(&buf, ubuf, cnt))
4627                 return -EFAULT;
4628
4629         buf[cnt] = 0;
4630
4631         clockstr = strstrip(buf);
4632
4633         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4634                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4635                         break;
4636         }
4637         if (i == ARRAY_SIZE(trace_clocks))
4638                 return -EINVAL;
4639
4640         mutex_lock(&trace_types_lock);
4641
4642         tr->clock_id = i;
4643
4644         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4645
4646         /*
4647          * New clock may not be consistent with the previous clock.
4648          * Reset the buffer so that it doesn't have incomparable timestamps.
4649          */
4650         tracing_reset_online_cpus(&tr->trace_buffer);
4651
4652 #ifdef CONFIG_TRACER_MAX_TRACE
4653         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4654                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4655         tracing_reset_online_cpus(&tr->max_buffer);
4656 #endif
4657
4658         mutex_unlock(&trace_types_lock);
4659
4660         *fpos += cnt;
4661
4662         return cnt;
4663 }
4664
4665 static int tracing_clock_open(struct inode *inode, struct file *file)
4666 {
4667         struct trace_array *tr = inode->i_private;
4668         int ret;
4669
4670         if (tracing_disabled)
4671                 return -ENODEV;
4672
4673         if (trace_array_get(tr))
4674                 return -ENODEV;
4675
4676         ret = single_open(file, tracing_clock_show, inode->i_private);
4677         if (ret < 0)
4678                 trace_array_put(tr);
4679
4680         return ret;
4681 }
4682
4683 struct ftrace_buffer_info {
4684         struct trace_iterator   iter;
4685         void                    *spare;
4686         unsigned int            read;
4687 };
4688
4689 #ifdef CONFIG_TRACER_SNAPSHOT
4690 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4691 {
4692         struct trace_cpu *tc = inode->i_private;
4693         struct trace_array *tr = tc->tr;
4694         struct trace_iterator *iter;
4695         struct seq_file *m;
4696         int ret = 0;
4697
4698         if (trace_array_get(tr) < 0)
4699                 return -ENODEV;
4700
4701         if (file->f_mode & FMODE_READ) {
4702                 iter = __tracing_open(tr, tc, inode, file, true);
4703                 if (IS_ERR(iter))
4704                         ret = PTR_ERR(iter);
4705         } else {
4706                 /* Writes still need the seq_file to hold the private data */
4707                 ret = -ENOMEM;
4708                 m = kzalloc(sizeof(*m), GFP_KERNEL);
4709                 if (!m)
4710                         goto out;
4711                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4712                 if (!iter) {
4713                         kfree(m);
4714                         goto out;
4715                 }
4716                 ret = 0;
4717
4718                 iter->tr = tr;
4719                 iter->trace_buffer = &tc->tr->max_buffer;
4720                 iter->cpu_file = tc->cpu;
4721                 m->private = iter;
4722                 file->private_data = m;
4723         }
4724 out:
4725         if (ret < 0)
4726                 trace_array_put(tr);
4727
4728         return ret;
4729 }
4730
4731 static ssize_t
4732 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4733                        loff_t *ppos)
4734 {
4735         struct seq_file *m = filp->private_data;
4736         struct trace_iterator *iter = m->private;
4737         struct trace_array *tr = iter->tr;
4738         unsigned long val;
4739         int ret;
4740
4741         ret = tracing_update_buffers();
4742         if (ret < 0)
4743                 return ret;
4744
4745         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4746         if (ret)
4747                 return ret;
4748
4749         mutex_lock(&trace_types_lock);
4750
4751         if (tr->current_trace->use_max_tr) {
4752                 ret = -EBUSY;
4753                 goto out;
4754         }
4755
4756         switch (val) {
4757         case 0:
4758                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4759                         ret = -EINVAL;
4760                         break;
4761                 }
4762                 if (tr->allocated_snapshot)
4763                         free_snapshot(tr);
4764                 break;
4765         case 1:
4766 /* Only allow per-cpu swap if the ring buffer supports it */
4767 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4768                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4769                         ret = -EINVAL;
4770                         break;
4771                 }
4772 #endif
4773                 if (!tr->allocated_snapshot) {
4774                         ret = alloc_snapshot(tr);
4775                         if (ret < 0)
4776                                 break;
4777                 }
4778                 local_irq_disable();
4779                 /* Now, we're going to swap */
4780                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4781                         update_max_tr(tr, current, smp_processor_id());
4782                 else
4783                         update_max_tr_single(tr, current, iter->cpu_file);
4784                 local_irq_enable();
4785                 break;
4786         default:
4787                 if (tr->allocated_snapshot) {
4788                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4789                                 tracing_reset_online_cpus(&tr->max_buffer);
4790                         else
4791                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
4792                 }
4793                 break;
4794         }
4795
4796         if (ret >= 0) {
4797                 *ppos += cnt;
4798                 ret = cnt;
4799         }
4800 out:
4801         mutex_unlock(&trace_types_lock);
4802         return ret;
4803 }
4804
4805 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4806 {
4807         struct seq_file *m = file->private_data;
4808         int ret;
4809
4810         ret = tracing_release(inode, file);
4811
4812         if (file->f_mode & FMODE_READ)
4813                 return ret;
4814
4815         /* If write only, the seq_file is just a stub */
4816         if (m)
4817                 kfree(m->private);
4818         kfree(m);
4819
4820         return 0;
4821 }
4822
4823 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4824 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4825                                     size_t count, loff_t *ppos);
4826 static int tracing_buffers_release(struct inode *inode, struct file *file);
4827 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4828                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4829
4830 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4831 {
4832         struct ftrace_buffer_info *info;
4833         int ret;
4834
4835         ret = tracing_buffers_open(inode, filp);
4836         if (ret < 0)
4837                 return ret;
4838
4839         info = filp->private_data;
4840
4841         if (info->iter.trace->use_max_tr) {
4842                 tracing_buffers_release(inode, filp);
4843                 return -EBUSY;
4844         }
4845
4846         info->iter.snapshot = true;
4847         info->iter.trace_buffer = &info->iter.tr->max_buffer;
4848
4849         return ret;
4850 }
4851
4852 #endif /* CONFIG_TRACER_SNAPSHOT */
4853
4854
4855 static const struct file_operations tracing_max_lat_fops = {
4856         .open           = tracing_open_generic,
4857         .read           = tracing_max_lat_read,
4858         .write          = tracing_max_lat_write,
4859         .llseek         = generic_file_llseek,
4860 };
4861
4862 static const struct file_operations set_tracer_fops = {
4863         .open           = tracing_open_generic,
4864         .read           = tracing_set_trace_read,
4865         .write          = tracing_set_trace_write,
4866         .llseek         = generic_file_llseek,
4867 };
4868
4869 static const struct file_operations tracing_pipe_fops = {
4870         .open           = tracing_open_pipe,
4871         .poll           = tracing_poll_pipe,
4872         .read           = tracing_read_pipe,
4873         .splice_read    = tracing_splice_read_pipe,
4874         .release        = tracing_release_pipe,
4875         .llseek         = no_llseek,
4876 };
4877
4878 static const struct file_operations tracing_entries_fops = {
4879         .open           = tracing_open_generic_tc,
4880         .read           = tracing_entries_read,
4881         .write          = tracing_entries_write,
4882         .llseek         = generic_file_llseek,
4883         .release        = tracing_release_generic_tc,
4884 };
4885
4886 static const struct file_operations tracing_total_entries_fops = {
4887         .open           = tracing_open_generic_tr,
4888         .read           = tracing_total_entries_read,
4889         .llseek         = generic_file_llseek,
4890         .release        = tracing_release_generic_tr,
4891 };
4892
4893 static const struct file_operations tracing_free_buffer_fops = {
4894         .open           = tracing_open_generic_tr,
4895         .write          = tracing_free_buffer_write,
4896         .release        = tracing_free_buffer_release,
4897 };
4898
4899 static const struct file_operations tracing_mark_fops = {
4900         .open           = tracing_open_generic_tr,
4901         .write          = tracing_mark_write,
4902         .llseek         = generic_file_llseek,
4903         .release        = tracing_release_generic_tr,
4904 };
4905
4906 static const struct file_operations trace_clock_fops = {
4907         .open           = tracing_clock_open,
4908         .read           = seq_read,
4909         .llseek         = seq_lseek,
4910         .release        = tracing_single_release_tr,
4911         .write          = tracing_clock_write,
4912 };
4913
4914 #ifdef CONFIG_TRACER_SNAPSHOT
4915 static const struct file_operations snapshot_fops = {
4916         .open           = tracing_snapshot_open,
4917         .read           = seq_read,
4918         .write          = tracing_snapshot_write,
4919         .llseek         = tracing_seek,
4920         .release        = tracing_snapshot_release,
4921 };
4922
4923 static const struct file_operations snapshot_raw_fops = {
4924         .open           = snapshot_raw_open,
4925         .read           = tracing_buffers_read,
4926         .release        = tracing_buffers_release,
4927         .splice_read    = tracing_buffers_splice_read,
4928         .llseek         = no_llseek,
4929 };
4930
4931 #endif /* CONFIG_TRACER_SNAPSHOT */
4932
4933 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4934 {
4935         struct trace_cpu *tc = inode->i_private;
4936         struct trace_array *tr = tc->tr;
4937         struct ftrace_buffer_info *info;
4938         int ret;
4939
4940         if (tracing_disabled)
4941                 return -ENODEV;
4942
4943         if (trace_array_get(tr) < 0)
4944                 return -ENODEV;
4945
4946         info = kzalloc(sizeof(*info), GFP_KERNEL);
4947         if (!info) {
4948                 trace_array_put(tr);
4949                 return -ENOMEM;
4950         }
4951
4952         mutex_lock(&trace_types_lock);
4953
4954         info->iter.tr           = tr;
4955         info->iter.cpu_file     = tc->cpu;
4956         info->iter.trace        = tr->current_trace;
4957         info->iter.trace_buffer = &tr->trace_buffer;
4958         info->spare             = NULL;
4959         /* Force reading ring buffer for first read */
4960         info->read              = (unsigned int)-1;
4961
4962         filp->private_data = info;
4963
4964         mutex_unlock(&trace_types_lock);
4965
4966         ret = nonseekable_open(inode, filp);
4967         if (ret < 0)
4968                 trace_array_put(tr);
4969
4970         return ret;
4971 }
4972
4973 static unsigned int
4974 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4975 {
4976         struct ftrace_buffer_info *info = filp->private_data;
4977         struct trace_iterator *iter = &info->iter;
4978
4979         return trace_poll(iter, filp, poll_table);
4980 }
4981
4982 static ssize_t
4983 tracing_buffers_read(struct file *filp, char __user *ubuf,
4984                      size_t count, loff_t *ppos)
4985 {
4986         struct ftrace_buffer_info *info = filp->private_data;
4987         struct trace_iterator *iter = &info->iter;
4988         ssize_t ret;
4989         ssize_t size;
4990
4991         if (!count)
4992                 return 0;
4993
4994         mutex_lock(&trace_types_lock);
4995
4996 #ifdef CONFIG_TRACER_MAX_TRACE
4997         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4998                 size = -EBUSY;
4999                 goto out_unlock;
5000         }
5001 #endif
5002
5003         if (!info->spare)
5004                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5005                                                           iter->cpu_file);
5006         size = -ENOMEM;
5007         if (!info->spare)
5008                 goto out_unlock;
5009
5010         /* Do we have previous read data to read? */
5011         if (info->read < PAGE_SIZE)
5012                 goto read;
5013
5014  again:
5015         trace_access_lock(iter->cpu_file);
5016         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5017                                     &info->spare,
5018                                     count,
5019                                     iter->cpu_file, 0);
5020         trace_access_unlock(iter->cpu_file);
5021
5022         if (ret < 0) {
5023                 if (trace_empty(iter)) {
5024                         if ((filp->f_flags & O_NONBLOCK)) {
5025                                 size = -EAGAIN;
5026                                 goto out_unlock;
5027                         }
5028                         mutex_unlock(&trace_types_lock);
5029                         iter->trace->wait_pipe(iter);
5030                         mutex_lock(&trace_types_lock);
5031                         if (signal_pending(current)) {
5032                                 size = -EINTR;
5033                                 goto out_unlock;
5034                         }
5035                         goto again;
5036                 }
5037                 size = 0;
5038                 goto out_unlock;
5039         }
5040
5041         info->read = 0;
5042  read:
5043         size = PAGE_SIZE - info->read;
5044         if (size > count)
5045                 size = count;
5046
5047         ret = copy_to_user(ubuf, info->spare + info->read, size);
5048         if (ret == size) {
5049                 size = -EFAULT;
5050                 goto out_unlock;
5051         }
5052         size -= ret;
5053
5054         *ppos += size;
5055         info->read += size;
5056
5057  out_unlock:
5058         mutex_unlock(&trace_types_lock);
5059
5060         return size;
5061 }
5062
5063 static int tracing_buffers_release(struct inode *inode, struct file *file)
5064 {
5065         struct ftrace_buffer_info *info = file->private_data;
5066         struct trace_iterator *iter = &info->iter;
5067
5068         mutex_lock(&trace_types_lock);
5069
5070         __trace_array_put(iter->tr);
5071
5072         if (info->spare)
5073                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5074         kfree(info);
5075
5076         mutex_unlock(&trace_types_lock);
5077
5078         return 0;
5079 }
5080
5081 struct buffer_ref {
5082         struct ring_buffer      *buffer;
5083         void                    *page;
5084         int                     ref;
5085 };
5086
5087 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5088                                     struct pipe_buffer *buf)
5089 {
5090         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5091
5092         if (--ref->ref)
5093                 return;
5094
5095         ring_buffer_free_read_page(ref->buffer, ref->page);
5096         kfree(ref);
5097         buf->private = 0;
5098 }
5099
5100 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5101                                 struct pipe_buffer *buf)
5102 {
5103         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5104
5105         ref->ref++;
5106 }
5107
5108 /* Pipe buffer operations for a buffer. */
5109 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5110         .can_merge              = 0,
5111         .map                    = generic_pipe_buf_map,
5112         .unmap                  = generic_pipe_buf_unmap,
5113         .confirm                = generic_pipe_buf_confirm,
5114         .release                = buffer_pipe_buf_release,
5115         .steal                  = generic_pipe_buf_steal,
5116         .get                    = buffer_pipe_buf_get,
5117 };
5118
5119 /*
5120  * Callback from splice_to_pipe(), if we need to release some pages
5121  * at the end of the spd in case we error'ed out in filling the pipe.
5122  */
5123 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5124 {
5125         struct buffer_ref *ref =
5126                 (struct buffer_ref *)spd->partial[i].private;
5127
5128         if (--ref->ref)
5129                 return;
5130
5131         ring_buffer_free_read_page(ref->buffer, ref->page);
5132         kfree(ref);
5133         spd->partial[i].private = 0;
5134 }
5135
5136 static ssize_t
5137 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5138                             struct pipe_inode_info *pipe, size_t len,
5139                             unsigned int flags)
5140 {
5141         struct ftrace_buffer_info *info = file->private_data;
5142         struct trace_iterator *iter = &info->iter;
5143         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5144         struct page *pages_def[PIPE_DEF_BUFFERS];
5145         struct splice_pipe_desc spd = {
5146                 .pages          = pages_def,
5147                 .partial        = partial_def,
5148                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5149                 .flags          = flags,
5150                 .ops            = &buffer_pipe_buf_ops,
5151                 .spd_release    = buffer_spd_release,
5152         };
5153         struct buffer_ref *ref;
5154         int entries, size, i;
5155         ssize_t ret;
5156
5157         mutex_lock(&trace_types_lock);
5158
5159 #ifdef CONFIG_TRACER_MAX_TRACE
5160         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5161                 ret = -EBUSY;
5162                 goto out;
5163         }
5164 #endif
5165
5166         if (splice_grow_spd(pipe, &spd)) {
5167                 ret = -ENOMEM;
5168                 goto out;
5169         }
5170
5171         if (*ppos & (PAGE_SIZE - 1)) {
5172                 ret = -EINVAL;
5173                 goto out;
5174         }
5175
5176         if (len & (PAGE_SIZE - 1)) {
5177                 if (len < PAGE_SIZE) {
5178                         ret = -EINVAL;
5179                         goto out;
5180                 }
5181                 len &= PAGE_MASK;
5182         }
5183
5184  again:
5185         trace_access_lock(iter->cpu_file);
5186         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5187
5188         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
5189                 struct page *page;
5190                 int r;
5191
5192                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5193                 if (!ref)
5194                         break;
5195
5196                 ref->ref = 1;
5197                 ref->buffer = iter->trace_buffer->buffer;
5198                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5199                 if (!ref->page) {
5200                         kfree(ref);
5201                         break;
5202                 }
5203
5204                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5205                                           len, iter->cpu_file, 1);
5206                 if (r < 0) {
5207                         ring_buffer_free_read_page(ref->buffer, ref->page);
5208                         kfree(ref);
5209                         break;
5210                 }
5211
5212                 /*
5213                  * zero out any left over data, this is going to
5214                  * user land.
5215                  */
5216                 size = ring_buffer_page_len(ref->page);
5217                 if (size < PAGE_SIZE)
5218                         memset(ref->page + size, 0, PAGE_SIZE - size);
5219
5220                 page = virt_to_page(ref->page);
5221
5222                 spd.pages[i] = page;
5223                 spd.partial[i].len = PAGE_SIZE;
5224                 spd.partial[i].offset = 0;
5225                 spd.partial[i].private = (unsigned long)ref;
5226                 spd.nr_pages++;
5227                 *ppos += PAGE_SIZE;
5228
5229                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5230         }
5231
5232         trace_access_unlock(iter->cpu_file);
5233         spd.nr_pages = i;
5234
5235         /* did we read anything? */
5236         if (!spd.nr_pages) {
5237                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5238                         ret = -EAGAIN;
5239                         goto out;
5240                 }
5241                 mutex_unlock(&trace_types_lock);
5242                 iter->trace->wait_pipe(iter);
5243                 mutex_lock(&trace_types_lock);
5244                 if (signal_pending(current)) {
5245                         ret = -EINTR;
5246                         goto out;
5247                 }
5248                 goto again;
5249         }
5250
5251         ret = splice_to_pipe(pipe, &spd);
5252         splice_shrink_spd(&spd);
5253 out:
5254         mutex_unlock(&trace_types_lock);
5255
5256         return ret;
5257 }
5258
5259 static const struct file_operations tracing_buffers_fops = {
5260         .open           = tracing_buffers_open,
5261         .read           = tracing_buffers_read,
5262         .poll           = tracing_buffers_poll,
5263         .release        = tracing_buffers_release,
5264         .splice_read    = tracing_buffers_splice_read,
5265         .llseek         = no_llseek,
5266 };
5267
5268 static ssize_t
5269 tracing_stats_read(struct file *filp, char __user *ubuf,
5270                    size_t count, loff_t *ppos)
5271 {
5272         struct trace_cpu *tc = filp->private_data;
5273         struct trace_array *tr = tc->tr;
5274         struct trace_buffer *trace_buf = &tr->trace_buffer;
5275         struct trace_seq *s;
5276         unsigned long cnt;
5277         unsigned long long t;
5278         unsigned long usec_rem;
5279         int cpu = tc->cpu;
5280
5281         s = kmalloc(sizeof(*s), GFP_KERNEL);
5282         if (!s)
5283                 return -ENOMEM;
5284
5285         trace_seq_init(s);
5286
5287         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5288         trace_seq_printf(s, "entries: %ld\n", cnt);
5289
5290         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5291         trace_seq_printf(s, "overrun: %ld\n", cnt);
5292
5293         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5294         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5295
5296         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5297         trace_seq_printf(s, "bytes: %ld\n", cnt);
5298
5299         if (trace_clocks[tr->clock_id].in_ns) {
5300                 /* local or global for trace_clock */
5301                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5302                 usec_rem = do_div(t, USEC_PER_SEC);
5303                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5304                                                                 t, usec_rem);
5305
5306                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5307                 usec_rem = do_div(t, USEC_PER_SEC);
5308                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5309         } else {
5310                 /* counter or tsc mode for trace_clock */
5311                 trace_seq_printf(s, "oldest event ts: %llu\n",
5312                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5313
5314                 trace_seq_printf(s, "now ts: %llu\n",
5315                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5316         }
5317
5318         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5319         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5320
5321         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5322         trace_seq_printf(s, "read events: %ld\n", cnt);
5323
5324         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5325
5326         kfree(s);
5327
5328         return count;
5329 }
5330
5331 static const struct file_operations tracing_stats_fops = {
5332         .open           = tracing_open_generic_tc,
5333         .read           = tracing_stats_read,
5334         .llseek         = generic_file_llseek,
5335         .release        = tracing_release_generic_tc,
5336 };
5337
5338 #ifdef CONFIG_DYNAMIC_FTRACE
5339
5340 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5341 {
5342         return 0;
5343 }
5344
5345 static ssize_t
5346 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5347                   size_t cnt, loff_t *ppos)
5348 {
5349         static char ftrace_dyn_info_buffer[1024];
5350         static DEFINE_MUTEX(dyn_info_mutex);
5351         unsigned long *p = filp->private_data;
5352         char *buf = ftrace_dyn_info_buffer;
5353         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5354         int r;
5355
5356         mutex_lock(&dyn_info_mutex);
5357         r = sprintf(buf, "%ld ", *p);
5358
5359         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5360         buf[r++] = '\n';
5361
5362         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5363
5364         mutex_unlock(&dyn_info_mutex);
5365
5366         return r;
5367 }
5368
5369 static const struct file_operations tracing_dyn_info_fops = {
5370         .open           = tracing_open_generic,
5371         .read           = tracing_read_dyn_info,
5372         .llseek         = generic_file_llseek,
5373 };
5374 #endif /* CONFIG_DYNAMIC_FTRACE */
5375
5376 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5377 static void
5378 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5379 {
5380         tracing_snapshot();
5381 }
5382
5383 static void
5384 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5385 {
5386         unsigned long *count = (long *)data;
5387
5388         if (!*count)
5389                 return;
5390
5391         if (*count != -1)
5392                 (*count)--;
5393
5394         tracing_snapshot();
5395 }
5396
5397 static int
5398 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5399                       struct ftrace_probe_ops *ops, void *data)
5400 {
5401         long count = (long)data;
5402
5403         seq_printf(m, "%ps:", (void *)ip);
5404
5405         seq_printf(m, "snapshot");
5406
5407         if (count == -1)
5408                 seq_printf(m, ":unlimited\n");
5409         else
5410                 seq_printf(m, ":count=%ld\n", count);
5411
5412         return 0;
5413 }
5414
5415 static struct ftrace_probe_ops snapshot_probe_ops = {
5416         .func                   = ftrace_snapshot,
5417         .print                  = ftrace_snapshot_print,
5418 };
5419
5420 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5421         .func                   = ftrace_count_snapshot,
5422         .print                  = ftrace_snapshot_print,
5423 };
5424
5425 static int
5426 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5427                                char *glob, char *cmd, char *param, int enable)
5428 {
5429         struct ftrace_probe_ops *ops;
5430         void *count = (void *)-1;
5431         char *number;
5432         int ret;
5433
5434         /* hash funcs only work with set_ftrace_filter */
5435         if (!enable)
5436                 return -EINVAL;
5437
5438         ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
5439
5440         if (glob[0] == '!') {
5441                 unregister_ftrace_function_probe_func(glob+1, ops);
5442                 return 0;
5443         }
5444
5445         if (!param)
5446                 goto out_reg;
5447
5448         number = strsep(&param, ":");
5449
5450         if (!strlen(number))
5451                 goto out_reg;
5452
5453         /*
5454          * We use the callback data field (which is a pointer)
5455          * as our counter.
5456          */
5457         ret = kstrtoul(number, 0, (unsigned long *)&count);
5458         if (ret)
5459                 return ret;
5460
5461  out_reg:
5462         ret = register_ftrace_function_probe(glob, ops, count);
5463
5464         if (ret >= 0)
5465                 alloc_snapshot(&global_trace);
5466
5467         return ret < 0 ? ret : 0;
5468 }
5469
5470 static struct ftrace_func_command ftrace_snapshot_cmd = {
5471         .name                   = "snapshot",
5472         .func                   = ftrace_trace_snapshot_callback,
5473 };
5474
5475 static int register_snapshot_cmd(void)
5476 {
5477         return register_ftrace_command(&ftrace_snapshot_cmd);
5478 }
5479 #else
5480 static inline int register_snapshot_cmd(void) { return 0; }
5481 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5482
5483 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5484 {
5485         if (tr->dir)
5486                 return tr->dir;
5487
5488         if (!debugfs_initialized())
5489                 return NULL;
5490
5491         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5492                 tr->dir = debugfs_create_dir("tracing", NULL);
5493
5494         if (!tr->dir)
5495                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5496
5497         return tr->dir;
5498 }
5499
5500 struct dentry *tracing_init_dentry(void)
5501 {
5502         return tracing_init_dentry_tr(&global_trace);
5503 }
5504
5505 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5506 {
5507         struct dentry *d_tracer;
5508
5509         if (tr->percpu_dir)
5510                 return tr->percpu_dir;
5511
5512         d_tracer = tracing_init_dentry_tr(tr);
5513         if (!d_tracer)
5514                 return NULL;
5515
5516         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5517
5518         WARN_ONCE(!tr->percpu_dir,
5519                   "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5520
5521         return tr->percpu_dir;
5522 }
5523
5524 static void
5525 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5526 {
5527         struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
5528         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5529         struct dentry *d_cpu;
5530         char cpu_dir[30]; /* 30 characters should be more than enough */
5531
5532         if (!d_percpu)
5533                 return;
5534
5535         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5536         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5537         if (!d_cpu) {
5538                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5539                 return;
5540         }
5541
5542         /* per cpu trace_pipe */
5543         trace_create_file("trace_pipe", 0444, d_cpu,
5544                         (void *)&data->trace_cpu, &tracing_pipe_fops);
5545
5546         /* per cpu trace */
5547         trace_create_file("trace", 0644, d_cpu,
5548                         (void *)&data->trace_cpu, &tracing_fops);
5549
5550         trace_create_file("trace_pipe_raw", 0444, d_cpu,
5551                         (void *)&data->trace_cpu, &tracing_buffers_fops);
5552
5553         trace_create_file("stats", 0444, d_cpu,
5554                         (void *)&data->trace_cpu, &tracing_stats_fops);
5555
5556         trace_create_file("buffer_size_kb", 0444, d_cpu,
5557                         (void *)&data->trace_cpu, &tracing_entries_fops);
5558
5559 #ifdef CONFIG_TRACER_SNAPSHOT
5560         trace_create_file("snapshot", 0644, d_cpu,
5561                           (void *)&data->trace_cpu, &snapshot_fops);
5562
5563         trace_create_file("snapshot_raw", 0444, d_cpu,
5564                         (void *)&data->trace_cpu, &snapshot_raw_fops);
5565 #endif
5566 }
5567
5568 #ifdef CONFIG_FTRACE_SELFTEST
5569 /* Let selftest have access to static functions in this file */
5570 #include "trace_selftest.c"
5571 #endif
5572
5573 struct trace_option_dentry {
5574         struct tracer_opt               *opt;
5575         struct tracer_flags             *flags;
5576         struct trace_array              *tr;
5577         struct dentry                   *entry;
5578 };
5579
5580 static ssize_t
5581 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5582                         loff_t *ppos)
5583 {
5584         struct trace_option_dentry *topt = filp->private_data;
5585         char *buf;
5586
5587         if (topt->flags->val & topt->opt->bit)
5588                 buf = "1\n";
5589         else
5590                 buf = "0\n";
5591
5592         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5593 }
5594
5595 static ssize_t
5596 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5597                          loff_t *ppos)
5598 {
5599         struct trace_option_dentry *topt = filp->private_data;
5600         unsigned long val;
5601         int ret;
5602
5603         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5604         if (ret)
5605                 return ret;
5606
5607         if (val != 0 && val != 1)
5608                 return -EINVAL;
5609
5610         if (!!(topt->flags->val & topt->opt->bit) != val) {
5611                 mutex_lock(&trace_types_lock);
5612                 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
5613                                           topt->opt, !val);
5614                 mutex_unlock(&trace_types_lock);
5615                 if (ret)
5616                         return ret;
5617         }
5618
5619         *ppos += cnt;
5620
5621         return cnt;
5622 }
5623
5624
5625 static const struct file_operations trace_options_fops = {
5626         .open = tracing_open_generic,
5627         .read = trace_options_read,
5628         .write = trace_options_write,
5629         .llseek = generic_file_llseek,
5630 };
5631
5632 static ssize_t
5633 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5634                         loff_t *ppos)
5635 {
5636         long index = (long)filp->private_data;
5637         char *buf;
5638
5639         if (trace_flags & (1 << index))
5640                 buf = "1\n";
5641         else
5642                 buf = "0\n";
5643
5644         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5645 }
5646
5647 static ssize_t
5648 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5649                          loff_t *ppos)
5650 {
5651         struct trace_array *tr = &global_trace;
5652         long index = (long)filp->private_data;
5653         unsigned long val;
5654         int ret;
5655
5656         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5657         if (ret)
5658                 return ret;
5659
5660         if (val != 0 && val != 1)
5661                 return -EINVAL;
5662
5663         mutex_lock(&trace_types_lock);
5664         ret = set_tracer_flag(tr, 1 << index, val);
5665         mutex_unlock(&trace_types_lock);
5666
5667         if (ret < 0)
5668                 return ret;
5669
5670         *ppos += cnt;
5671
5672         return cnt;
5673 }
5674
5675 static const struct file_operations trace_options_core_fops = {
5676         .open = tracing_open_generic,
5677         .read = trace_options_core_read,
5678         .write = trace_options_core_write,
5679         .llseek = generic_file_llseek,
5680 };
5681
5682 struct dentry *trace_create_file(const char *name,
5683                                  umode_t mode,
5684                                  struct dentry *parent,
5685                                  void *data,
5686                                  const struct file_operations *fops)
5687 {
5688         struct dentry *ret;
5689
5690         ret = debugfs_create_file(name, mode, parent, data, fops);
5691         if (!ret)
5692                 pr_warning("Could not create debugfs '%s' entry\n", name);
5693
5694         return ret;
5695 }
5696
5697
5698 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5699 {
5700         struct dentry *d_tracer;
5701
5702         if (tr->options)
5703                 return tr->options;
5704
5705         d_tracer = tracing_init_dentry_tr(tr);
5706         if (!d_tracer)
5707                 return NULL;
5708
5709         tr->options = debugfs_create_dir("options", d_tracer);
5710         if (!tr->options) {
5711                 pr_warning("Could not create debugfs directory 'options'\n");
5712                 return NULL;
5713         }
5714
5715         return tr->options;
5716 }
5717
5718 static void
5719 create_trace_option_file(struct trace_array *tr,
5720                          struct trace_option_dentry *topt,
5721                          struct tracer_flags *flags,
5722                          struct tracer_opt *opt)
5723 {
5724         struct dentry *t_options;
5725
5726         t_options = trace_options_init_dentry(tr);
5727         if (!t_options)
5728                 return;
5729
5730         topt->flags = flags;
5731         topt->opt = opt;
5732         topt->tr = tr;
5733
5734         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5735                                     &trace_options_fops);
5736
5737 }
5738
5739 static struct trace_option_dentry *
5740 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5741 {
5742         struct trace_option_dentry *topts;
5743         struct tracer_flags *flags;
5744         struct tracer_opt *opts;
5745         int cnt;
5746
5747         if (!tracer)
5748                 return NULL;
5749
5750         flags = tracer->flags;
5751
5752         if (!flags || !flags->opts)
5753                 return NULL;
5754
5755         opts = flags->opts;
5756
5757         for (cnt = 0; opts[cnt].name; cnt++)
5758                 ;
5759
5760         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5761         if (!topts)
5762                 return NULL;
5763
5764         for (cnt = 0; opts[cnt].name; cnt++)
5765                 create_trace_option_file(tr, &topts[cnt], flags,
5766                                          &opts[cnt]);
5767
5768         return topts;
5769 }
5770
5771 static void
5772 destroy_trace_option_files(struct trace_option_dentry *topts)
5773 {
5774         int cnt;
5775
5776         if (!topts)
5777                 return;
5778
5779         for (cnt = 0; topts[cnt].opt; cnt++) {
5780                 if (topts[cnt].entry)
5781                         debugfs_remove(topts[cnt].entry);
5782         }
5783
5784         kfree(topts);
5785 }
5786
5787 static struct dentry *
5788 create_trace_option_core_file(struct trace_array *tr,
5789                               const char *option, long index)
5790 {
5791         struct dentry *t_options;
5792
5793         t_options = trace_options_init_dentry(tr);
5794         if (!t_options)
5795                 return NULL;
5796
5797         return trace_create_file(option, 0644, t_options, (void *)index,
5798                                     &trace_options_core_fops);
5799 }
5800
5801 static __init void create_trace_options_dir(struct trace_array *tr)
5802 {
5803         struct dentry *t_options;
5804         int i;
5805
5806         t_options = trace_options_init_dentry(tr);
5807         if (!t_options)
5808                 return;
5809
5810         for (i = 0; trace_options[i]; i++)
5811                 create_trace_option_core_file(tr, trace_options[i], i);
5812 }
5813
5814 static ssize_t
5815 rb_simple_read(struct file *filp, char __user *ubuf,
5816                size_t cnt, loff_t *ppos)
5817 {
5818         struct trace_array *tr = filp->private_data;
5819         char buf[64];
5820         int r;
5821
5822         r = tracer_tracing_is_on(tr);
5823         r = sprintf(buf, "%d\n", r);
5824
5825         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5826 }
5827
5828 static ssize_t
5829 rb_simple_write(struct file *filp, const char __user *ubuf,
5830                 size_t cnt, loff_t *ppos)
5831 {
5832         struct trace_array *tr = filp->private_data;
5833         struct ring_buffer *buffer = tr->trace_buffer.buffer;
5834         unsigned long val;
5835         int ret;
5836
5837         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5838         if (ret)
5839                 return ret;
5840
5841         if (buffer) {
5842                 mutex_lock(&trace_types_lock);
5843                 if (val) {
5844                         tracer_tracing_on(tr);
5845                         if (tr->current_trace->start)
5846                                 tr->current_trace->start(tr);
5847                 } else {
5848                         tracer_tracing_off(tr);
5849                         if (tr->current_trace->stop)
5850                                 tr->current_trace->stop(tr);
5851                 }
5852                 mutex_unlock(&trace_types_lock);
5853         }
5854
5855         (*ppos)++;
5856
5857         return cnt;
5858 }
5859
5860 static const struct file_operations rb_simple_fops = {
5861         .open           = tracing_open_generic_tr,
5862         .read           = rb_simple_read,
5863         .write          = rb_simple_write,
5864         .release        = tracing_release_generic_tr,
5865         .llseek         = default_llseek,
5866 };
5867
5868 struct dentry *trace_instance_dir;
5869
5870 static void
5871 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5872
5873 static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5874 {
5875         int cpu;
5876
5877         for_each_tracing_cpu(cpu) {
5878                 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5879                 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5880                 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5881         }
5882 }
5883
5884 static int
5885 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5886 {
5887         enum ring_buffer_flags rb_flags;
5888
5889         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5890
5891         buf->buffer = ring_buffer_alloc(size, rb_flags);
5892         if (!buf->buffer)
5893                 return -ENOMEM;
5894
5895         buf->data = alloc_percpu(struct trace_array_cpu);
5896         if (!buf->data) {
5897                 ring_buffer_free(buf->buffer);
5898                 return -ENOMEM;
5899         }
5900
5901         init_trace_buffers(tr, buf);
5902
5903         /* Allocate the first page for all buffers */
5904         set_buffer_entries(&tr->trace_buffer,
5905                            ring_buffer_size(tr->trace_buffer.buffer, 0));
5906
5907         return 0;
5908 }
5909
5910 static int allocate_trace_buffers(struct trace_array *tr, int size)
5911 {
5912         int ret;
5913
5914         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5915         if (ret)
5916                 return ret;
5917
5918 #ifdef CONFIG_TRACER_MAX_TRACE
5919         ret = allocate_trace_buffer(tr, &tr->max_buffer,
5920                                     allocate_snapshot ? size : 1);
5921         if (WARN_ON(ret)) {
5922                 ring_buffer_free(tr->trace_buffer.buffer);
5923                 free_percpu(tr->trace_buffer.data);
5924                 return -ENOMEM;
5925         }
5926         tr->allocated_snapshot = allocate_snapshot;
5927
5928         /*
5929          * Only the top level trace array gets its snapshot allocated
5930          * from the kernel command line.
5931          */
5932         allocate_snapshot = false;
5933 #endif
5934         return 0;
5935 }
5936
5937 static int new_instance_create(const char *name)
5938 {
5939         struct trace_array *tr;
5940         int ret;
5941
5942         mutex_lock(&trace_types_lock);
5943
5944         ret = -EEXIST;
5945         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5946                 if (tr->name && strcmp(tr->name, name) == 0)
5947                         goto out_unlock;
5948         }
5949
5950         ret = -ENOMEM;
5951         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5952         if (!tr)
5953                 goto out_unlock;
5954
5955         tr->name = kstrdup(name, GFP_KERNEL);
5956         if (!tr->name)
5957                 goto out_free_tr;
5958
5959         raw_spin_lock_init(&tr->start_lock);
5960
5961         tr->current_trace = &nop_trace;
5962
5963         INIT_LIST_HEAD(&tr->systems);
5964         INIT_LIST_HEAD(&tr->events);
5965
5966         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
5967                 goto out_free_tr;
5968
5969         /* Holder for file callbacks */
5970         tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5971         tr->trace_cpu.tr = tr;
5972
5973         tr->dir = debugfs_create_dir(name, trace_instance_dir);
5974         if (!tr->dir)
5975                 goto out_free_tr;
5976
5977         ret = event_trace_add_tracer(tr->dir, tr);
5978         if (ret) {
5979                 debugfs_remove_recursive(tr->dir);
5980                 goto out_free_tr;
5981         }
5982
5983         init_tracer_debugfs(tr, tr->dir);
5984
5985         list_add(&tr->list, &ftrace_trace_arrays);
5986
5987         mutex_unlock(&trace_types_lock);
5988
5989         return 0;
5990
5991  out_free_tr:
5992         if (tr->trace_buffer.buffer)
5993                 ring_buffer_free(tr->trace_buffer.buffer);
5994         kfree(tr->name);
5995         kfree(tr);
5996
5997  out_unlock:
5998         mutex_unlock(&trace_types_lock);
5999
6000         return ret;
6001
6002 }
6003
6004 static int instance_delete(const char *name)
6005 {
6006         struct trace_array *tr;
6007         int found = 0;
6008         int ret;
6009
6010         mutex_lock(&trace_types_lock);
6011
6012         ret = -ENODEV;
6013         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6014                 if (tr->name && strcmp(tr->name, name) == 0) {
6015                         found = 1;
6016                         break;
6017                 }
6018         }
6019         if (!found)
6020                 goto out_unlock;
6021
6022         ret = -EBUSY;
6023         if (tr->ref)
6024                 goto out_unlock;
6025
6026         list_del(&tr->list);
6027
6028         event_trace_del_tracer(tr);
6029         debugfs_remove_recursive(tr->dir);
6030         free_percpu(tr->trace_buffer.data);
6031         ring_buffer_free(tr->trace_buffer.buffer);
6032
6033         kfree(tr->name);
6034         kfree(tr);
6035
6036         ret = 0;
6037
6038  out_unlock:
6039         mutex_unlock(&trace_types_lock);
6040
6041         return ret;
6042 }
6043
6044 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6045 {
6046         struct dentry *parent;
6047         int ret;
6048
6049         /* Paranoid: Make sure the parent is the "instances" directory */
6050         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6051         if (WARN_ON_ONCE(parent != trace_instance_dir))
6052                 return -ENOENT;
6053
6054         /*
6055          * The inode mutex is locked, but debugfs_create_dir() will also
6056          * take the mutex. As the instances directory can not be destroyed
6057          * or changed in any other way, it is safe to unlock it, and
6058          * let the dentry try. If two users try to make the same dir at
6059          * the same time, then the new_instance_create() will determine the
6060          * winner.
6061          */
6062         mutex_unlock(&inode->i_mutex);
6063
6064         ret = new_instance_create(dentry->d_iname);
6065
6066         mutex_lock(&inode->i_mutex);
6067
6068         return ret;
6069 }
6070
6071 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6072 {
6073         struct dentry *parent;
6074         int ret;
6075
6076         /* Paranoid: Make sure the parent is the "instances" directory */
6077         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6078         if (WARN_ON_ONCE(parent != trace_instance_dir))
6079                 return -ENOENT;
6080
6081         /* The caller did a dget() on dentry */
6082         mutex_unlock(&dentry->d_inode->i_mutex);
6083
6084         /*
6085          * The inode mutex is locked, but debugfs_create_dir() will also
6086          * take the mutex. As the instances directory can not be destroyed
6087          * or changed in any other way, it is safe to unlock it, and
6088          * let the dentry try. If two users try to make the same dir at
6089          * the same time, then the instance_delete() will determine the
6090          * winner.
6091          */
6092         mutex_unlock(&inode->i_mutex);
6093
6094         ret = instance_delete(dentry->d_iname);
6095
6096         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6097         mutex_lock(&dentry->d_inode->i_mutex);
6098
6099         return ret;
6100 }
6101
6102 static const struct inode_operations instance_dir_inode_operations = {
6103         .lookup         = simple_lookup,
6104         .mkdir          = instance_mkdir,
6105         .rmdir          = instance_rmdir,
6106 };
6107
6108 static __init void create_trace_instances(struct dentry *d_tracer)
6109 {
6110         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6111         if (WARN_ON(!trace_instance_dir))
6112                 return;
6113
6114         /* Hijack the dir inode operations, to allow mkdir */
6115         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6116 }
6117
6118 static void
6119 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6120 {
6121         int cpu;
6122
6123         trace_create_file("trace_options", 0644, d_tracer,
6124                           tr, &tracing_iter_fops);
6125
6126         trace_create_file("trace", 0644, d_tracer,
6127                         (void *)&tr->trace_cpu, &tracing_fops);
6128
6129         trace_create_file("trace_pipe", 0444, d_tracer,
6130                         (void *)&tr->trace_cpu, &tracing_pipe_fops);
6131
6132         trace_create_file("buffer_size_kb", 0644, d_tracer,
6133                         (void *)&tr->trace_cpu, &tracing_entries_fops);
6134
6135         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6136                           tr, &tracing_total_entries_fops);
6137
6138         trace_create_file("free_buffer", 0644, d_tracer,
6139                           tr, &tracing_free_buffer_fops);
6140
6141         trace_create_file("trace_marker", 0220, d_tracer,
6142                           tr, &tracing_mark_fops);
6143
6144         trace_create_file("trace_clock", 0644, d_tracer, tr,
6145                           &trace_clock_fops);
6146
6147         trace_create_file("tracing_on", 0644, d_tracer,
6148                             tr, &rb_simple_fops);
6149
6150 #ifdef CONFIG_TRACER_SNAPSHOT
6151         trace_create_file("snapshot", 0644, d_tracer,
6152                           (void *)&tr->trace_cpu, &snapshot_fops);
6153 #endif
6154
6155         for_each_tracing_cpu(cpu)
6156                 tracing_init_debugfs_percpu(tr, cpu);
6157
6158 }
6159
6160 static __init int tracer_init_debugfs(void)
6161 {
6162         struct dentry *d_tracer;
6163
6164         trace_access_lock_init();
6165
6166         d_tracer = tracing_init_dentry();
6167         if (!d_tracer)
6168                 return 0;
6169
6170         init_tracer_debugfs(&global_trace, d_tracer);
6171
6172         trace_create_file("tracing_cpumask", 0644, d_tracer,
6173                         &global_trace, &tracing_cpumask_fops);
6174
6175         trace_create_file("available_tracers", 0444, d_tracer,
6176                         &global_trace, &show_traces_fops);
6177
6178         trace_create_file("current_tracer", 0644, d_tracer,
6179                         &global_trace, &set_tracer_fops);
6180
6181 #ifdef CONFIG_TRACER_MAX_TRACE
6182         trace_create_file("tracing_max_latency", 0644, d_tracer,
6183                         &tracing_max_latency, &tracing_max_lat_fops);
6184 #endif
6185
6186         trace_create_file("tracing_thresh", 0644, d_tracer,
6187                         &tracing_thresh, &tracing_max_lat_fops);
6188
6189         trace_create_file("README", 0444, d_tracer,
6190                         NULL, &tracing_readme_fops);
6191
6192         trace_create_file("saved_cmdlines", 0444, d_tracer,
6193                         NULL, &tracing_saved_cmdlines_fops);
6194
6195 #ifdef CONFIG_DYNAMIC_FTRACE
6196         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6197                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6198 #endif
6199
6200         create_trace_instances(d_tracer);
6201
6202         create_trace_options_dir(&global_trace);
6203
6204         return 0;
6205 }
6206
6207 static int trace_panic_handler(struct notifier_block *this,
6208                                unsigned long event, void *unused)
6209 {
6210         if (ftrace_dump_on_oops)
6211                 ftrace_dump(ftrace_dump_on_oops);
6212         return NOTIFY_OK;
6213 }
6214
6215 static struct notifier_block trace_panic_notifier = {
6216         .notifier_call  = trace_panic_handler,
6217         .next           = NULL,
6218         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6219 };
6220
6221 static int trace_die_handler(struct notifier_block *self,
6222                              unsigned long val,
6223                              void *data)
6224 {
6225         switch (val) {
6226         case DIE_OOPS:
6227                 if (ftrace_dump_on_oops)
6228                         ftrace_dump(ftrace_dump_on_oops);
6229                 break;
6230         default:
6231                 break;
6232         }
6233         return NOTIFY_OK;
6234 }
6235
6236 static struct notifier_block trace_die_notifier = {
6237         .notifier_call = trace_die_handler,
6238         .priority = 200
6239 };
6240
6241 /*
6242  * printk is set to max of 1024, we really don't need it that big.
6243  * Nothing should be printing 1000 characters anyway.
6244  */
6245 #define TRACE_MAX_PRINT         1000
6246
6247 /*
6248  * Define here KERN_TRACE so that we have one place to modify
6249  * it if we decide to change what log level the ftrace dump
6250  * should be at.
6251  */
6252 #define KERN_TRACE              KERN_EMERG
6253
6254 void
6255 trace_printk_seq(struct trace_seq *s)
6256 {
6257         /* Probably should print a warning here. */
6258         if (s->len >= TRACE_MAX_PRINT)
6259                 s->len = TRACE_MAX_PRINT;
6260
6261         /* should be zero ended, but we are paranoid. */
6262         s->buffer[s->len] = 0;
6263
6264         printk(KERN_TRACE "%s", s->buffer);
6265
6266         trace_seq_init(s);
6267 }
6268
6269 void trace_init_global_iter(struct trace_iterator *iter)
6270 {
6271         iter->tr = &global_trace;
6272         iter->trace = iter->tr->current_trace;
6273         iter->cpu_file = RING_BUFFER_ALL_CPUS;
6274         iter->trace_buffer = &global_trace.trace_buffer;
6275 }
6276
6277 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6278 {
6279         /* use static because iter can be a bit big for the stack */
6280         static struct trace_iterator iter;
6281         static atomic_t dump_running;
6282         unsigned int old_userobj;
6283         unsigned long flags;
6284         int cnt = 0, cpu;
6285
6286         /* Only allow one dump user at a time. */
6287         if (atomic_inc_return(&dump_running) != 1) {
6288                 atomic_dec(&dump_running);
6289                 return;
6290         }
6291
6292         /*
6293          * Always turn off tracing when we dump.
6294          * We don't need to show trace output of what happens
6295          * between multiple crashes.
6296          *
6297          * If the user does a sysrq-z, then they can re-enable
6298          * tracing with echo 1 > tracing_on.
6299          */
6300         tracing_off();
6301
6302         local_irq_save(flags);
6303
6304         /* Simulate the iterator */
6305         trace_init_global_iter(&iter);
6306
6307         for_each_tracing_cpu(cpu) {
6308                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6309         }
6310
6311         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6312
6313         /* don't look at user memory in panic mode */
6314         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6315
6316         switch (oops_dump_mode) {
6317         case DUMP_ALL:
6318                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6319                 break;
6320         case DUMP_ORIG:
6321                 iter.cpu_file = raw_smp_processor_id();
6322                 break;
6323         case DUMP_NONE:
6324                 goto out_enable;
6325         default:
6326                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6327                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6328         }
6329
6330         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6331
6332         /* Did function tracer already get disabled? */
6333         if (ftrace_is_dead()) {
6334                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6335                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6336         }
6337
6338         /*
6339          * We need to stop all tracing on all CPUS to read the
6340          * the next buffer. This is a bit expensive, but is
6341          * not done often. We fill all what we can read,
6342          * and then release the locks again.
6343          */
6344
6345         while (!trace_empty(&iter)) {
6346
6347                 if (!cnt)
6348                         printk(KERN_TRACE "---------------------------------\n");
6349
6350                 cnt++;
6351
6352                 /* reset all but tr, trace, and overruns */
6353                 memset(&iter.seq, 0,
6354                        sizeof(struct trace_iterator) -
6355                        offsetof(struct trace_iterator, seq));
6356                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6357                 iter.pos = -1;
6358
6359                 if (trace_find_next_entry_inc(&iter) != NULL) {
6360                         int ret;
6361
6362                         ret = print_trace_line(&iter);
6363                         if (ret != TRACE_TYPE_NO_CONSUME)
6364                                 trace_consume(&iter);
6365                 }
6366                 touch_nmi_watchdog();
6367
6368                 trace_printk_seq(&iter.seq);
6369         }
6370
6371         if (!cnt)
6372                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6373         else
6374                 printk(KERN_TRACE "---------------------------------\n");
6375
6376  out_enable:
6377         trace_flags |= old_userobj;
6378
6379         for_each_tracing_cpu(cpu) {
6380                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6381         }
6382         atomic_dec(&dump_running);
6383         local_irq_restore(flags);
6384 }
6385 EXPORT_SYMBOL_GPL(ftrace_dump);
6386
6387 __init static int tracer_alloc_buffers(void)
6388 {
6389         int ring_buf_size;
6390         int ret = -ENOMEM;
6391
6392
6393         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6394                 goto out;
6395
6396         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
6397                 goto out_free_buffer_mask;
6398
6399         /* Only allocate trace_printk buffers if a trace_printk exists */
6400         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6401                 /* Must be called before global_trace.buffer is allocated */
6402                 trace_printk_init_buffers();
6403
6404         /* To save memory, keep the ring buffer size to its minimum */
6405         if (ring_buffer_expanded)
6406                 ring_buf_size = trace_buf_size;
6407         else
6408                 ring_buf_size = 1;
6409
6410         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6411         cpumask_copy(tracing_cpumask, cpu_all_mask);
6412
6413         raw_spin_lock_init(&global_trace.start_lock);
6414
6415         /* TODO: make the number of buffers hot pluggable with CPUS */
6416         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6417                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6418                 WARN_ON(1);
6419                 goto out_free_cpumask;
6420         }
6421
6422         if (global_trace.buffer_disabled)
6423                 tracing_off();
6424
6425         trace_init_cmdlines();
6426
6427         /*
6428          * register_tracer() might reference current_trace, so it
6429          * needs to be set before we register anything. This is
6430          * just a bootstrap of current_trace anyway.
6431          */
6432         global_trace.current_trace = &nop_trace;
6433
6434         register_tracer(&nop_trace);
6435
6436         /* All seems OK, enable tracing */
6437         tracing_disabled = 0;
6438
6439         atomic_notifier_chain_register(&panic_notifier_list,
6440                                        &trace_panic_notifier);
6441
6442         register_die_notifier(&trace_die_notifier);
6443
6444         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6445
6446         /* Holder for file callbacks */
6447         global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6448         global_trace.trace_cpu.tr = &global_trace;
6449
6450         INIT_LIST_HEAD(&global_trace.systems);
6451         INIT_LIST_HEAD(&global_trace.events);
6452         list_add(&global_trace.list, &ftrace_trace_arrays);
6453
6454         while (trace_boot_options) {
6455                 char *option;
6456
6457                 option = strsep(&trace_boot_options, ",");
6458                 trace_set_options(&global_trace, option);
6459         }
6460
6461         register_snapshot_cmd();
6462
6463         return 0;
6464
6465 out_free_cpumask:
6466         free_percpu(global_trace.trace_buffer.data);
6467 #ifdef CONFIG_TRACER_MAX_TRACE
6468         free_percpu(global_trace.max_buffer.data);
6469 #endif
6470         free_cpumask_var(tracing_cpumask);
6471 out_free_buffer_mask:
6472         free_cpumask_var(tracing_buffer_mask);
6473 out:
6474         return ret;
6475 }
6476
6477 __init static int clear_boot_tracer(void)
6478 {
6479         /*
6480          * The default tracer at boot buffer is an init section.
6481          * This function is called in lateinit. If we did not
6482          * find the boot tracer, then clear it out, to prevent
6483          * later registration from accessing the buffer that is
6484          * about to be freed.
6485          */
6486         if (!default_bootup_tracer)
6487                 return 0;
6488
6489         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6490                default_bootup_tracer);
6491         default_bootup_tracer = NULL;
6492
6493         return 0;
6494 }
6495
6496 early_initcall(tracer_alloc_buffers);
6497 fs_initcall(tracer_init_debugfs);
6498 late_initcall(clear_boot_tracer);