[firefly-linux-kernel-4.4.55.git] / kernel / trace / trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 bool ring_buffer_expanded;
51
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will look into the ring-buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring-buffer, such as those from trace_printk,
57  * could occur at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* Pipe tracepoints to printk */
67 struct trace_iterator *tracepoint_print_iter;
68 int tracepoint_printk;
69
70 /* For tracers that don't implement custom flags */
71 static struct tracer_opt dummy_tracer_opt[] = {
72         { }
73 };
74
75 static struct tracer_flags dummy_tracer_flags = {
76         .val = 0,
77         .opts = dummy_tracer_opt
78 };
79
80 static int
81 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
82 {
83         return 0;
84 }
85
86 /*
87  * To prevent the comm cache from being overwritten when no
88  * tracing is active, only save the comm when a trace event
89  * occurs.
90  */
91 static DEFINE_PER_CPU(bool, trace_cmdline_save);
92
93 /*
94  * Kill all tracing for good (never come back).
95  * It is initialized to 1 but will turn to zero if the initialization
96  * of the tracer is successful. But that is the only place that sets
97  * this back to zero.
98  */
99 static int tracing_disabled = 1;
100
101 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
102
103 cpumask_var_t __read_mostly     tracing_buffer_mask;
104
105 /*
106  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107  *
108  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109  * is set, then ftrace_dump is called. This will output the contents
110  * of the ftrace buffers to the console.  This is very useful for
111  * capturing traces that lead to crashes and outputing it to a
112  * serial console.
113  *
114  * It is default off, but you can enable it with either specifying
115  * "ftrace_dump_on_oops" in the kernel command line, or setting
116  * /proc/sys/kernel/ftrace_dump_on_oops
117  * Set 1 if you want to dump buffers of all CPUs
118  * Set 2 if you want to dump the buffer of the CPU that triggered oops
119  */
120
121 enum ftrace_dump_mode ftrace_dump_on_oops;
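
/*
 * Illustrative forms of the two ways mentioned above to enable this:
 *
 *   ftrace_dump_on_oops=orig_cpu          (on the kernel command line)
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops    (at run time)
 */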
122
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
125
126 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
127
128 #define MAX_TRACER_SIZE         100
129 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130 static char *default_bootup_tracer;
131
132 static bool allocate_snapshot;
133
134 static int __init set_cmdline_ftrace(char *str)
135 {
136         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
137         default_bootup_tracer = bootup_tracer_buf;
138         /* We are using ftrace early, expand it */
139         ring_buffer_expanded = true;
140         return 1;
141 }
142 __setup("ftrace=", set_cmdline_ftrace);
143
144 static int __init set_ftrace_dump_on_oops(char *str)
145 {
146         if (*str++ != '=' || !*str) {
147                 ftrace_dump_on_oops = DUMP_ALL;
148                 return 1;
149         }
150
151         if (!strcmp("orig_cpu", str)) {
152                 ftrace_dump_on_oops = DUMP_ORIG;
153                 return 1;
154         }
155
156         return 0;
157 }
158 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
159
160 static int __init stop_trace_on_warning(char *str)
161 {
162         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163                 __disable_trace_on_warning = 1;
164         return 1;
165 }
166 __setup("traceoff_on_warning", stop_trace_on_warning);
167
168 static int __init boot_alloc_snapshot(char *str)
169 {
170         allocate_snapshot = true;
171         /* We also need the main ring buffer expanded */
172         ring_buffer_expanded = true;
173         return 1;
174 }
175 __setup("alloc_snapshot", boot_alloc_snapshot);
176
177
178 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179 static char *trace_boot_options __initdata;
180
181 static int __init set_trace_boot_options(char *str)
182 {
183         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
184         trace_boot_options = trace_boot_options_buf;
185         return 0;
186 }
187 __setup("trace_options=", set_trace_boot_options);
188
189 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190 static char *trace_boot_clock __initdata;
191
192 static int __init set_trace_boot_clock(char *str)
193 {
194         strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195         trace_boot_clock = trace_boot_clock_buf;
196         return 0;
197 }
198 __setup("trace_clock=", set_trace_boot_clock);
199
200 static int __init set_tracepoint_printk(char *str)
201 {
202         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203                 tracepoint_printk = 1;
204         return 1;
205 }
206 __setup("tp_printk", set_tracepoint_printk);
207
208 unsigned long long ns2usecs(cycle_t nsec)
209 {
210         nsec += 500;
211         do_div(nsec, 1000);
212         return nsec;
213 }
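
/*
 * For example, the +500 above rounds to the nearest microsecond:
 * ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */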
214
215 /*
216  * The global_trace is the descriptor that holds the tracing
217  * buffers for the live tracing. For each CPU, it contains
218  * a linked list of pages that will store trace entries. The
219  * page descriptor of the pages in memory is used to hold
220  * the linked list by linking the lru item in the page descriptor
221  * to each of the pages in the buffer per CPU.
222  *
223  * For each active CPU there is a data field that holds the
224  * pages for the buffer for that CPU. Each CPU has the same number
225  * of pages allocated for its buffer.
226  */
227 static struct trace_array       global_trace;
228
229 LIST_HEAD(ftrace_trace_arrays);
230
231 int trace_array_get(struct trace_array *this_tr)
232 {
233         struct trace_array *tr;
234         int ret = -ENODEV;
235
236         mutex_lock(&trace_types_lock);
237         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
238                 if (tr == this_tr) {
239                         tr->ref++;
240                         ret = 0;
241                         break;
242                 }
243         }
244         mutex_unlock(&trace_types_lock);
245
246         return ret;
247 }
248
249 static void __trace_array_put(struct trace_array *this_tr)
250 {
251         WARN_ON(!this_tr->ref);
252         this_tr->ref--;
253 }
254
255 void trace_array_put(struct trace_array *this_tr)
256 {
257         mutex_lock(&trace_types_lock);
258         __trace_array_put(this_tr);
259         mutex_unlock(&trace_types_lock);
260 }
261
262 int filter_check_discard(struct ftrace_event_file *file, void *rec,
263                          struct ring_buffer *buffer,
264                          struct ring_buffer_event *event)
265 {
266         if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267             !filter_match_preds(file->filter, rec)) {
268                 ring_buffer_discard_commit(buffer, event);
269                 return 1;
270         }
271
272         return 0;
273 }
274 EXPORT_SYMBOL_GPL(filter_check_discard);
275
276 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277                               struct ring_buffer *buffer,
278                               struct ring_buffer_event *event)
279 {
280         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281             !filter_match_preds(call->filter, rec)) {
282                 ring_buffer_discard_commit(buffer, event);
283                 return 1;
284         }
285
286         return 0;
287 }
288 EXPORT_SYMBOL_GPL(call_filter_check_discard);
289
290 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
291 {
292         u64 ts;
293
294         /* Early boot up does not have a buffer yet */
295         if (!buf->buffer)
296                 return trace_clock_local();
297
298         ts = ring_buffer_time_stamp(buf->buffer, cpu);
299         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
300
301         return ts;
302 }
303
304 cycle_t ftrace_now(int cpu)
305 {
306         return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
307 }
308
309 /**
310  * tracing_is_enabled - Show if global_trace has been disabled
311  *
312  * Shows if the global trace has been enabled or not. It uses the
313  * mirror flag "buffer_disabled" to be used in fast paths such as for
314  * the irqsoff tracer. But it may be inaccurate due to races. If you
315  * need to know the accurate state, use tracing_is_on() which is a little
316  * slower, but accurate.
317  */
318 int tracing_is_enabled(void)
319 {
320         /*
321          * For quick access (irqsoff uses this in fast path), just
322          * return the mirror variable of the state of the ring buffer.
323          * It's a little racy, but we don't really care.
324          */
325         smp_rmb();
326         return !global_trace.buffer_disabled;
327 }
328
329 /*
330  * trace_buf_size is the size in bytes that is allocated
331  * for a buffer. Note, the number of bytes is always rounded
332  * to page size.
333  *
334  * This number is purposely set to a low number of 16384.
335  * If the dump on oops happens, it will be much appreciated not to
336  * have to wait for all that output. Anyway, this is configurable at
337  * both boot time and run time.
338  */
339 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
340
341 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
342
343 /* trace_types holds a linked list of available tracers. */
344 static struct tracer            *trace_types __read_mostly;
345
346 /*
347  * trace_types_lock is used to protect the trace_types list.
348  */
349 DEFINE_MUTEX(trace_types_lock);
350
351 /*
352  * serialize the access of the ring buffer
353  *
354  * The ring buffer serializes readers, but that is only low level protection.
355  * The validity of the events (returned by ring_buffer_peek() etc.)
356  * is not protected by the ring buffer.
357  *
358  * The content of events may become garbage if we allow other processes to
359  * consume these events concurrently:
360  *   A) the page of the consumed events may become a normal page
361  *      (not a reader page) in the ring buffer, and this page will be
362  *      rewritten by the event producer.
363  *   B) the page of the consumed events may become a page for splice_read,
364  *      and this page will be returned to the system.
365  *
366  * These primitives allow multiple processes to access different CPU ring
367  * buffers concurrently.
368  *
369  * These primitives don't distinguish read-only and read-consume access.
370  * Multiple read-only accesses are also serialized.
371  */
372
373 #ifdef CONFIG_SMP
374 static DECLARE_RWSEM(all_cpu_access_lock);
375 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
376
377 static inline void trace_access_lock(int cpu)
378 {
379         if (cpu == RING_BUFFER_ALL_CPUS) {
380                 /* gain it for accessing the whole ring buffer. */
381                 down_write(&all_cpu_access_lock);
382         } else {
383                 /* gain it for accessing a cpu ring buffer. */
384
385                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
386                 down_read(&all_cpu_access_lock);
387
388                 /* Secondly block other access to this @cpu ring buffer. */
389                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
390         }
391 }
392
393 static inline void trace_access_unlock(int cpu)
394 {
395         if (cpu == RING_BUFFER_ALL_CPUS) {
396                 up_write(&all_cpu_access_lock);
397         } else {
398                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399                 up_read(&all_cpu_access_lock);
400         }
401 }
402
403 static inline void trace_access_lock_init(void)
404 {
405         int cpu;
406
407         for_each_possible_cpu(cpu)
408                 mutex_init(&per_cpu(cpu_access_lock, cpu));
409 }
410
411 #else
412
413 static DEFINE_MUTEX(access_lock);
414
415 static inline void trace_access_lock(int cpu)
416 {
417         (void)cpu;
418         mutex_lock(&access_lock);
419 }
420
421 static inline void trace_access_unlock(int cpu)
422 {
423         (void)cpu;
424         mutex_unlock(&access_lock);
425 }
426
427 static inline void trace_access_lock_init(void)
428 {
429 }
430
431 #endif
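
/*
 * A sketch of how a reader typically uses the primitives above when
 * consuming one CPU buffer ("cpu" is whichever cpu_file the reader
 * targets; the read itself would go through ring_buffer_consume() or
 * a similar helper):
 *
 *	trace_access_lock(cpu);
 *	... read or consume events of that cpu ...
 *	trace_access_unlock(cpu);
 *
 * Readers that walk every CPU pass RING_BUFFER_ALL_CPUS instead, which
 * takes all_cpu_access_lock for writing and so excludes per-cpu readers.
 */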
432
433 /* trace_flags holds trace_options default values */
434 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
435         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
436         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
437         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
438
439 static void tracer_tracing_on(struct trace_array *tr)
440 {
441         if (tr->trace_buffer.buffer)
442                 ring_buffer_record_on(tr->trace_buffer.buffer);
443         /*
444          * This flag is looked at when buffers haven't been allocated
445          * yet, or by some tracers (like irqsoff), that just want to
446          * know if the ring buffer has been disabled, but it can handle
447          * races of where it gets disabled but we still do a record.
448          * As the check is in the fast path of the tracers, it is more
449          * important to be fast than accurate.
450          */
451         tr->buffer_disabled = 0;
452         /* Make the flag seen by readers */
453         smp_wmb();
454 }
455
456 /**
457  * tracing_on - enable tracing buffers
458  *
459  * This function enables tracing buffers that may have been
460  * disabled with tracing_off.
461  */
462 void tracing_on(void)
463 {
464         tracer_tracing_on(&global_trace);
465 }
466 EXPORT_SYMBOL_GPL(tracing_on);
467
468 /**
469  * __trace_puts - write a constant string into the trace buffer.
470  * @ip:    The address of the caller
471  * @str:   The constant string to write
472  * @size:  The size of the string.
473  */
474 int __trace_puts(unsigned long ip, const char *str, int size)
475 {
476         struct ring_buffer_event *event;
477         struct ring_buffer *buffer;
478         struct print_entry *entry;
479         unsigned long irq_flags;
480         int alloc;
481         int pc;
482
483         if (!(trace_flags & TRACE_ITER_PRINTK))
484                 return 0;
485
486         pc = preempt_count();
487
488         if (unlikely(tracing_selftest_running || tracing_disabled))
489                 return 0;
490
491         alloc = sizeof(*entry) + size + 2; /* possible \n added */
492
493         local_save_flags(irq_flags);
494         buffer = global_trace.trace_buffer.buffer;
495         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
496                                           irq_flags, pc);
497         if (!event)
498                 return 0;
499
500         entry = ring_buffer_event_data(event);
501         entry->ip = ip;
502
503         memcpy(&entry->buf, str, size);
504
505         /* Add a newline if necessary */
506         if (entry->buf[size - 1] != '\n') {
507                 entry->buf[size] = '\n';
508                 entry->buf[size + 1] = '\0';
509         } else
510                 entry->buf[size] = '\0';
511
512         __buffer_unlock_commit(buffer, event);
513         ftrace_trace_stack(buffer, irq_flags, 4, pc);
514
515         return size;
516 }
517 EXPORT_SYMBOL_GPL(__trace_puts);
518
519 /**
520  * __trace_bputs - write the pointer to a constant string into trace buffer
521  * @ip:    The address of the caller
522  * @str:   The constant string to write to the buffer to
523  */
524 int __trace_bputs(unsigned long ip, const char *str)
525 {
526         struct ring_buffer_event *event;
527         struct ring_buffer *buffer;
528         struct bputs_entry *entry;
529         unsigned long irq_flags;
530         int size = sizeof(struct bputs_entry);
531         int pc;
532
533         if (!(trace_flags & TRACE_ITER_PRINTK))
534                 return 0;
535
536         pc = preempt_count();
537
538         if (unlikely(tracing_selftest_running || tracing_disabled))
539                 return 0;
540
541         local_save_flags(irq_flags);
542         buffer = global_trace.trace_buffer.buffer;
543         event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
544                                           irq_flags, pc);
545         if (!event)
546                 return 0;
547
548         entry = ring_buffer_event_data(event);
549         entry->ip                       = ip;
550         entry->str                      = str;
551
552         __buffer_unlock_commit(buffer, event);
553         ftrace_trace_stack(buffer, irq_flags, 4, pc);
554
555         return 1;
556 }
557 EXPORT_SYMBOL_GPL(__trace_bputs);
558
559 #ifdef CONFIG_TRACER_SNAPSHOT
560 /**
561  * trace_snapshot - take a snapshot of the current buffer.
562  *
563  * This causes a swap between the snapshot buffer and the current live
564  * tracing buffer. You can use this to take snapshots of the live
565  * trace when some condition is triggered, but continue to trace.
566  *
567  * Note, make sure to allocate the snapshot with either
568  * a tracing_snapshot_alloc(), or by doing it manually
569  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
570  *
571  * If the snapshot buffer is not allocated, it will stop tracing.
572  * Basically making a permanent snapshot.
573  */
574 void tracing_snapshot(void)
575 {
576         struct trace_array *tr = &global_trace;
577         struct tracer *tracer = tr->current_trace;
578         unsigned long flags;
579
580         if (in_nmi()) {
581                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582                 internal_trace_puts("*** snapshot is being ignored        ***\n");
583                 return;
584         }
585
586         if (!tr->allocated_snapshot) {
587                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588                 internal_trace_puts("*** stopping trace here!   ***\n");
589                 tracing_off();
590                 return;
591         }
592
593         /* Note, snapshot can not be used when the tracer uses it */
594         if (tracer->use_max_tr) {
595                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
597                 return;
598         }
599
600         local_irq_save(flags);
601         update_max_tr(tr, current, smp_processor_id());
602         local_irq_restore(flags);
603 }
604 EXPORT_SYMBOL_GPL(tracing_snapshot);
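
/*
 * A minimal sketch of the usage described above (my_condition() is a
 * placeholder for whatever event the caller cares about, and the
 * snapshot buffer must already have been allocated):
 *
 *	if (my_condition())
 *		tracing_snapshot();
 *
 * The captured data can then be read from the "snapshot" file in the
 * tracing directory without stopping the live trace.
 */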
605
606 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607                                         struct trace_buffer *size_buf, int cpu_id);
608 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
609
610 static int alloc_snapshot(struct trace_array *tr)
611 {
612         int ret;
613
614         if (!tr->allocated_snapshot) {
615
616                 /* allocate spare buffer */
617                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
618                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
619                 if (ret < 0)
620                         return ret;
621
622                 tr->allocated_snapshot = true;
623         }
624
625         return 0;
626 }
627
628 static void free_snapshot(struct trace_array *tr)
629 {
630         /*
631          * We don't free the ring buffer; instead, we resize it because
632          * the max_tr ring buffer has some state (e.g. ring->clock) and
633          * we want to preserve it.
634          */
635         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636         set_buffer_entries(&tr->max_buffer, 1);
637         tracing_reset_online_cpus(&tr->max_buffer);
638         tr->allocated_snapshot = false;
639 }
640
641 /**
642  * tracing_alloc_snapshot - allocate snapshot buffer.
643  *
644  * This only allocates the snapshot buffer if it isn't already
645  * allocated - it doesn't also take a snapshot.
646  *
647  * This is meant to be used in cases where the snapshot buffer needs
648  * to be set up for events that can't sleep but need to be able to
649  * trigger a snapshot.
650  */
651 int tracing_alloc_snapshot(void)
652 {
653         struct trace_array *tr = &global_trace;
654         int ret;
655
656         ret = alloc_snapshot(tr);
657         WARN_ON(ret < 0);
658
659         return ret;
660 }
661 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
662
663 /**
664  * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
665  *
666  * This is similar to trace_snapshot(), but it will allocate the
667  * snapshot buffer if it isn't already allocated. Use this only
668  * where it is safe to sleep, as the allocation may sleep.
669  *
670  * This causes a swap between the snapshot buffer and the current live
671  * tracing buffer. You can use this to take snapshots of the live
672  * trace when some condition is triggered, but continue to trace.
673  */
674 void tracing_snapshot_alloc(void)
675 {
676         int ret;
677
678         ret = tracing_alloc_snapshot();
679         if (ret < 0)
680                 return;
681
682         tracing_snapshot();
683 }
684 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
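
/*
 * A sketch contrasting the two entry points above; the split below is
 * hypothetical but matches the intended use:
 *
 *	From process context, where sleeping is allowed:
 *
 *		tracing_alloc_snapshot();
 *
 *	Later, from a context that cannot sleep (e.g. an interrupt
 *	handler), with the buffer already allocated:
 *
 *		tracing_snapshot();
 */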
685 #else
686 void tracing_snapshot(void)
687 {
688         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
689 }
690 EXPORT_SYMBOL_GPL(tracing_snapshot);
691 int tracing_alloc_snapshot(void)
692 {
693         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
694         return -ENODEV;
695 }
696 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
697 void tracing_snapshot_alloc(void)
698 {
699         /* Give warning */
700         tracing_snapshot();
701 }
702 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
703 #endif /* CONFIG_TRACER_SNAPSHOT */
704
705 static void tracer_tracing_off(struct trace_array *tr)
706 {
707         if (tr->trace_buffer.buffer)
708                 ring_buffer_record_off(tr->trace_buffer.buffer);
709         /*
710          * This flag is looked at when buffers haven't been allocated
711          * yet, or by some tracers (like irqsoff), that just want to
712          * know if the ring buffer has been disabled, but it can handle
713          * races of where it gets disabled but we still do a record.
714          * As the check is in the fast path of the tracers, it is more
715          * important to be fast than accurate.
716          */
717         tr->buffer_disabled = 1;
718         /* Make the flag seen by readers */
719         smp_wmb();
720 }
721
722 /**
723  * tracing_off - turn off tracing buffers
724  *
725  * This function stops the tracing buffers from recording data.
726  * It does not disable any overhead the tracers themselves may
727  * be causing. This function simply causes all recording to
728  * the ring buffers to fail.
729  */
730 void tracing_off(void)
731 {
732         tracer_tracing_off(&global_trace);
733 }
734 EXPORT_SYMBOL_GPL(tracing_off);
735
736 void disable_trace_on_warning(void)
737 {
738         if (__disable_trace_on_warning)
739                 tracing_off();
740 }
741
742 /**
743  * tracer_tracing_is_on - show real state of ring buffer enabled
744  * @tr : the trace array to know if ring buffer is enabled
745  *
746  * Shows real state of the ring buffer if it is enabled or not.
747  */
748 static int tracer_tracing_is_on(struct trace_array *tr)
749 {
750         if (tr->trace_buffer.buffer)
751                 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752         return !tr->buffer_disabled;
753 }
754
755 /**
756  * tracing_is_on - show state of ring buffers enabled
757  */
758 int tracing_is_on(void)
759 {
760         return tracer_tracing_is_on(&global_trace);
761 }
762 EXPORT_SYMBOL_GPL(tracing_is_on);
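
/*
 * A sketch of bracketing a region of interest with the helpers above
 * (do_something_interesting() is a placeholder). Only ring buffer
 * recording is toggled; the tracers themselves keep running:
 *
 *	tracing_on();
 *	do_something_interesting();
 *	tracing_off();
 */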
763
764 static int __init set_buf_size(char *str)
765 {
766         unsigned long buf_size;
767
768         if (!str)
769                 return 0;
770         buf_size = memparse(str, &str);
771         /* nr_entries can not be zero */
772         if (buf_size == 0)
773                 return 0;
774         trace_buf_size = buf_size;
775         return 1;
776 }
777 __setup("trace_buf_size=", set_buf_size);
778
779 static int __init set_tracing_thresh(char *str)
780 {
781         unsigned long threshold;
782         int ret;
783
784         if (!str)
785                 return 0;
786         ret = kstrtoul(str, 0, &threshold);
787         if (ret < 0)
788                 return 0;
789         tracing_thresh = threshold * 1000;
790         return 1;
791 }
792 __setup("tracing_thresh=", set_tracing_thresh);
793
794 unsigned long nsecs_to_usecs(unsigned long nsecs)
795 {
796         return nsecs / 1000;
797 }
798
799 /* These must match the bit positions in trace_iterator_flags */
800 static const char *trace_options[] = {
801         "print-parent",
802         "sym-offset",
803         "sym-addr",
804         "verbose",
805         "raw",
806         "hex",
807         "bin",
808         "block",
809         "stacktrace",
810         "trace_printk",
811         "ftrace_preempt",
812         "branch",
813         "annotate",
814         "userstacktrace",
815         "sym-userobj",
816         "printk-msg-only",
817         "context-info",
818         "latency-format",
819         "sleep-time",
820         "graph-time",
821         "record-cmd",
822         "overwrite",
823         "disable_on_free",
824         "irq-info",
825         "markers",
826         "function-trace",
827         NULL
828 };
829
830 static struct {
831         u64 (*func)(void);
832         const char *name;
833         int in_ns;              /* is this clock in nanoseconds? */
834 } trace_clocks[] = {
835         { trace_clock_local,            "local",        1 },
836         { trace_clock_global,           "global",       1 },
837         { trace_clock_counter,          "counter",      0 },
838         { trace_clock_jiffies,          "uptime",       0 },
839         { trace_clock,                  "perf",         1 },
840         { ktime_get_mono_fast_ns,       "mono",         1 },
841         ARCH_TRACE_CLOCKS
842 };
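
/*
 * The clock can also be switched at run time through the "trace_clock"
 * file in the tracing directory (a sketch assuming the usual debugfs
 * mount point); names must match the entries in the table above:
 *
 *   cat /sys/kernel/debug/tracing/trace_clock
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */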
843
844 /*
845  * trace_parser_get_init - gets the buffer for trace parser
846  */
847 int trace_parser_get_init(struct trace_parser *parser, int size)
848 {
849         memset(parser, 0, sizeof(*parser));
850
851         parser->buffer = kmalloc(size, GFP_KERNEL);
852         if (!parser->buffer)
853                 return 1;
854
855         parser->size = size;
856         return 0;
857 }
858
859 /*
860  * trace_parser_put - frees the buffer for trace parser
861  */
862 void trace_parser_put(struct trace_parser *parser)
863 {
864         kfree(parser->buffer);
865 }
866
867 /*
868  * trace_get_user - reads the user input string separated by space
869  * (matched by isspace(ch))
870  *
871  * For each string found the 'struct trace_parser' is updated,
872  * and the function returns.
873  *
874  * Returns number of bytes read.
875  *
876  * See kernel/trace/trace.h for 'struct trace_parser' details.
877  */
878 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
879         size_t cnt, loff_t *ppos)
880 {
881         char ch;
882         size_t read = 0;
883         ssize_t ret;
884
885         if (!*ppos)
886                 trace_parser_clear(parser);
887
888         ret = get_user(ch, ubuf++);
889         if (ret)
890                 goto out;
891
892         read++;
893         cnt--;
894
895         /*
896          * If the parser has not finished with the last write,
897          * continue reading the user input without skipping spaces.
898          */
899         if (!parser->cont) {
900                 /* skip white space */
901                 while (cnt && isspace(ch)) {
902                         ret = get_user(ch, ubuf++);
903                         if (ret)
904                                 goto out;
905                         read++;
906                         cnt--;
907                 }
908
909                 /* only spaces were written */
910                 if (isspace(ch)) {
911                         *ppos += read;
912                         ret = read;
913                         goto out;
914                 }
915
916                 parser->idx = 0;
917         }
918
919         /* read the non-space input */
920         while (cnt && !isspace(ch)) {
921                 if (parser->idx < parser->size - 1)
922                         parser->buffer[parser->idx++] = ch;
923                 else {
924                         ret = -EINVAL;
925                         goto out;
926                 }
927                 ret = get_user(ch, ubuf++);
928                 if (ret)
929                         goto out;
930                 read++;
931                 cnt--;
932         }
933
934         /* We either got finished input or we have to wait for another call. */
935         if (isspace(ch)) {
936                 parser->buffer[parser->idx] = 0;
937                 parser->cont = false;
938         } else if (parser->idx < parser->size - 1) {
939                 parser->cont = true;
940                 parser->buffer[parser->idx++] = ch;
941         } else {
942                 ret = -EINVAL;
943                 goto out;
944         }
945
946         *ppos += read;
947         ret = read;
948
949 out:
950         return ret;
951 }
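
/*
 * A sketch of the parser lifecycle in a write() handler (MY_BUF_SIZE
 * and handle_token() are placeholders). trace_get_user() returns one
 * whitespace-separated token per call and sets parser.cont while a
 * token is still incomplete:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, MY_BUF_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;
 */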
952
953 /* TODO add a seq_buf_to_buffer() */
954 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
955 {
956         int len;
957
958         if (trace_seq_used(s) <= s->seq.readpos)
959                 return -EBUSY;
960
961         len = trace_seq_used(s) - s->seq.readpos;
962         if (cnt > len)
963                 cnt = len;
964         memcpy(buf, s->buffer + s->seq.readpos, cnt);
965
966         s->seq.readpos += cnt;
967         return cnt;
968 }
969
970 unsigned long __read_mostly     tracing_thresh;
971
972 #ifdef CONFIG_TRACER_MAX_TRACE
973 /*
974  * Copy the new maximum trace into the separate maximum-trace
975  * structure. (this way the maximum trace is permanently saved,
976  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
977  */
978 static void
979 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
980 {
981         struct trace_buffer *trace_buf = &tr->trace_buffer;
982         struct trace_buffer *max_buf = &tr->max_buffer;
983         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
984         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
985
986         max_buf->cpu = cpu;
987         max_buf->time_start = data->preempt_timestamp;
988
989         max_data->saved_latency = tr->max_latency;
990         max_data->critical_start = data->critical_start;
991         max_data->critical_end = data->critical_end;
992
993         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
994         max_data->pid = tsk->pid;
995         /*
996          * If tsk == current, then use current_uid(), as that does not use
997          * RCU. The irq tracer can be called out of RCU scope.
998          */
999         if (tsk == current)
1000                 max_data->uid = current_uid();
1001         else
1002                 max_data->uid = task_uid(tsk);
1003
1004         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1005         max_data->policy = tsk->policy;
1006         max_data->rt_priority = tsk->rt_priority;
1007
1008         /* record this task's comm */
1009         tracing_record_cmdline(tsk);
1010 }
1011
1012 /**
1013  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1014  * @tr: tracer
1015  * @tsk: the task with the latency
1016  * @cpu: The cpu that initiated the trace.
1017  *
1018  * Flip the buffers between the @tr and the max_tr and record information
1019  * about which task was the cause of this latency.
1020  */
1021 void
1022 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1023 {
1024         struct ring_buffer *buf;
1025
1026         if (tr->stop_count)
1027                 return;
1028
1029         WARN_ON_ONCE(!irqs_disabled());
1030
1031         if (!tr->allocated_snapshot) {
1032                 /* Only the nop tracer should hit this when disabling */
1033                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1034                 return;
1035         }
1036
1037         arch_spin_lock(&tr->max_lock);
1038
1039         buf = tr->trace_buffer.buffer;
1040         tr->trace_buffer.buffer = tr->max_buffer.buffer;
1041         tr->max_buffer.buffer = buf;
1042
1043         __update_max_tr(tr, tsk, cpu);
1044         arch_spin_unlock(&tr->max_lock);
1045 }
1046
1047 /**
1048  * update_max_tr_single - only copy one trace over, and reset the rest
1049  * @tr: tracer
1050  * @tsk: task with the latency
1051  * @cpu: the cpu of the buffer to copy.
1052  *
1053  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1054  */
1055 void
1056 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1057 {
1058         int ret;
1059
1060         if (tr->stop_count)
1061                 return;
1062
1063         WARN_ON_ONCE(!irqs_disabled());
1064         if (!tr->allocated_snapshot) {
1065                 /* Only the nop tracer should hit this when disabling */
1066                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1067                 return;
1068         }
1069
1070         arch_spin_lock(&tr->max_lock);
1071
1072         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1073
1074         if (ret == -EBUSY) {
1075                 /*
1076                  * We failed to swap the buffer due to a commit taking
1077                  * place on this CPU. We fail to record, but we reset
1078                  * the max trace buffer (no one writes directly to it)
1079                  * and flag that it failed.
1080                  */
1081                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1082                         "Failed to swap buffers due to commit in progress\n");
1083         }
1084
1085         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1086
1087         __update_max_tr(tr, tsk, cpu);
1088         arch_spin_unlock(&tr->max_lock);
1089 }
1090 #endif /* CONFIG_TRACER_MAX_TRACE */
1091
1092 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1093 {
1094         /* Iterators are static, they should be filled or empty */
1095         if (trace_buffer_iter(iter, iter->cpu_file))
1096                 return 0;
1097
1098         return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1099                                 full);
1100 }
1101
1102 #ifdef CONFIG_FTRACE_STARTUP_TEST
1103 static int run_tracer_selftest(struct tracer *type)
1104 {
1105         struct trace_array *tr = &global_trace;
1106         struct tracer *saved_tracer = tr->current_trace;
1107         int ret;
1108
1109         if (!type->selftest || tracing_selftest_disabled)
1110                 return 0;
1111
1112         /*
1113          * Run a selftest on this tracer.
1114          * Here we reset the trace buffer, and set the current
1115          * tracer to be this tracer. The tracer can then run some
1116          * internal tracing to verify that everything is in order.
1117          * If we fail, we do not register this tracer.
1118          */
1119         tracing_reset_online_cpus(&tr->trace_buffer);
1120
1121         tr->current_trace = type;
1122
1123 #ifdef CONFIG_TRACER_MAX_TRACE
1124         if (type->use_max_tr) {
1125                 /* If we expanded the buffers, make sure the max is expanded too */
1126                 if (ring_buffer_expanded)
1127                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1128                                            RING_BUFFER_ALL_CPUS);
1129                 tr->allocated_snapshot = true;
1130         }
1131 #endif
1132
1133         /* the test is responsible for initializing and enabling */
1134         pr_info("Testing tracer %s: ", type->name);
1135         ret = type->selftest(type, tr);
1136         /* the test is responsible for resetting too */
1137         tr->current_trace = saved_tracer;
1138         if (ret) {
1139                 printk(KERN_CONT "FAILED!\n");
1140                 /* Add the warning after printing 'FAILED' */
1141                 WARN_ON(1);
1142                 return -1;
1143         }
1144         /* Only reset on passing, to avoid touching corrupted buffers */
1145         tracing_reset_online_cpus(&tr->trace_buffer);
1146
1147 #ifdef CONFIG_TRACER_MAX_TRACE
1148         if (type->use_max_tr) {
1149                 tr->allocated_snapshot = false;
1150
1151                 /* Shrink the max buffer again */
1152                 if (ring_buffer_expanded)
1153                         ring_buffer_resize(tr->max_buffer.buffer, 1,
1154                                            RING_BUFFER_ALL_CPUS);
1155         }
1156 #endif
1157
1158         printk(KERN_CONT "PASSED\n");
1159         return 0;
1160 }
1161 #else
1162 static inline int run_tracer_selftest(struct tracer *type)
1163 {
1164         return 0;
1165 }
1166 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1167
1168 /**
1169  * register_tracer - register a tracer with the ftrace system.
1170  * @type: the plugin for the tracer
1171  *
1172  * Register a new plugin tracer.
1173  */
1174 int register_tracer(struct tracer *type)
1175 {
1176         struct tracer *t;
1177         int ret = 0;
1178
1179         if (!type->name) {
1180                 pr_info("Tracer must have a name\n");
1181                 return -1;
1182         }
1183
1184         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1185                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1186                 return -1;
1187         }
1188
1189         mutex_lock(&trace_types_lock);
1190
1191         tracing_selftest_running = true;
1192
1193         for (t = trace_types; t; t = t->next) {
1194                 if (strcmp(type->name, t->name) == 0) {
1195                         /* already found */
1196                         pr_info("Tracer %s already registered\n",
1197                                 type->name);
1198                         ret = -1;
1199                         goto out;
1200                 }
1201         }
1202
1203         if (!type->set_flag)
1204                 type->set_flag = &dummy_set_flag;
1205         if (!type->flags)
1206                 type->flags = &dummy_tracer_flags;
1207         else
1208                 if (!type->flags->opts)
1209                         type->flags->opts = dummy_tracer_opt;
1210
1211         ret = run_tracer_selftest(type);
1212         if (ret < 0)
1213                 goto out;
1214
1215         type->next = trace_types;
1216         trace_types = type;
1217
1218  out:
1219         tracing_selftest_running = false;
1220         mutex_unlock(&trace_types_lock);
1221
1222         if (ret || !default_bootup_tracer)
1223                 goto out_unlock;
1224
1225         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1226                 goto out_unlock;
1227
1228         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1229         /* Do we want this tracer to start on bootup? */
1230         tracing_set_tracer(&global_trace, type->name);
1231         default_bootup_tracer = NULL;
1232         /* disable other selftests, since this will break them. */
1233         tracing_selftest_disabled = true;
1234 #ifdef CONFIG_FTRACE_STARTUP_TEST
1235         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1236                type->name);
1237 #endif
1238
1239  out_unlock:
1240         return ret;
1241 }
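
/*
 * A minimal sketch of a tracer plugin registered through the function
 * above (illustrative only; a real tracer arms and disarms its hooks
 * from the init/reset callbacks):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static int __init init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */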
1242
1243 void tracing_reset(struct trace_buffer *buf, int cpu)
1244 {
1245         struct ring_buffer *buffer = buf->buffer;
1246
1247         if (!buffer)
1248                 return;
1249
1250         ring_buffer_record_disable(buffer);
1251
1252         /* Make sure all commits have finished */
1253         synchronize_sched();
1254         ring_buffer_reset_cpu(buffer, cpu);
1255
1256         ring_buffer_record_enable(buffer);
1257 }
1258
1259 void tracing_reset_online_cpus(struct trace_buffer *buf)
1260 {
1261         struct ring_buffer *buffer = buf->buffer;
1262         int cpu;
1263
1264         if (!buffer)
1265                 return;
1266
1267         ring_buffer_record_disable(buffer);
1268
1269         /* Make sure all commits have finished */
1270         synchronize_sched();
1271
1272         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1273
1274         for_each_online_cpu(cpu)
1275                 ring_buffer_reset_cpu(buffer, cpu);
1276
1277         ring_buffer_record_enable(buffer);
1278 }
1279
1280 /* Must have trace_types_lock held */
1281 void tracing_reset_all_online_cpus(void)
1282 {
1283         struct trace_array *tr;
1284
1285         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1286                 tracing_reset_online_cpus(&tr->trace_buffer);
1287 #ifdef CONFIG_TRACER_MAX_TRACE
1288                 tracing_reset_online_cpus(&tr->max_buffer);
1289 #endif
1290         }
1291 }
1292
1293 #define SAVED_CMDLINES_DEFAULT 128
1294 #define NO_CMDLINE_MAP UINT_MAX
1295 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1296 struct saved_cmdlines_buffer {
1297         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298         unsigned *map_cmdline_to_pid;
1299         unsigned cmdline_num;
1300         int cmdline_idx;
1301         char *saved_cmdlines;
1302 };
1303 static struct saved_cmdlines_buffer *savedcmd;
1304
1305 /* temporary disable recording */
1306 static atomic_t trace_record_cmdline_disabled __read_mostly;
1307
1308 static inline char *get_saved_cmdlines(int idx)
1309 {
1310         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311 }
1312
1313 static inline void set_cmdline(int idx, const char *cmdline)
1314 {
1315         memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316 }
1317
1318 static int allocate_cmdlines_buffer(unsigned int val,
1319                                     struct saved_cmdlines_buffer *s)
1320 {
1321         s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322                                         GFP_KERNEL);
1323         if (!s->map_cmdline_to_pid)
1324                 return -ENOMEM;
1325
1326         s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327         if (!s->saved_cmdlines) {
1328                 kfree(s->map_cmdline_to_pid);
1329                 return -ENOMEM;
1330         }
1331
1332         s->cmdline_idx = 0;
1333         s->cmdline_num = val;
1334         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335                sizeof(s->map_pid_to_cmdline));
1336         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337                val * sizeof(*s->map_cmdline_to_pid));
1338
1339         return 0;
1340 }
1341
1342 static int trace_create_savedcmd(void)
1343 {
1344         int ret;
1345
1346         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1347         if (!savedcmd)
1348                 return -ENOMEM;
1349
1350         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351         if (ret < 0) {
1352                 kfree(savedcmd);
1353                 savedcmd = NULL;
1354                 return -ENOMEM;
1355         }
1356
1357         return 0;
1358 }
1359
1360 int is_tracing_stopped(void)
1361 {
1362         return global_trace.stop_count;
1363 }
1364
1365 /**
1366  * tracing_start - quick start of the tracer
1367  *
1368  * If tracing is enabled but was stopped by tracing_stop,
1369  * this will start the tracer back up.
1370  */
1371 void tracing_start(void)
1372 {
1373         struct ring_buffer *buffer;
1374         unsigned long flags;
1375
1376         if (tracing_disabled)
1377                 return;
1378
1379         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380         if (--global_trace.stop_count) {
1381                 if (global_trace.stop_count < 0) {
1382                         /* Someone screwed up their debugging */
1383                         WARN_ON_ONCE(1);
1384                         global_trace.stop_count = 0;
1385                 }
1386                 goto out;
1387         }
1388
1389         /* Prevent the buffers from switching */
1390         arch_spin_lock(&global_trace.max_lock);
1391
1392         buffer = global_trace.trace_buffer.buffer;
1393         if (buffer)
1394                 ring_buffer_record_enable(buffer);
1395
1396 #ifdef CONFIG_TRACER_MAX_TRACE
1397         buffer = global_trace.max_buffer.buffer;
1398         if (buffer)
1399                 ring_buffer_record_enable(buffer);
1400 #endif
1401
1402         arch_spin_unlock(&global_trace.max_lock);
1403
1404  out:
1405         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406 }
1407
1408 static void tracing_start_tr(struct trace_array *tr)
1409 {
1410         struct ring_buffer *buffer;
1411         unsigned long flags;
1412
1413         if (tracing_disabled)
1414                 return;
1415
1416         /* If global, we need to also start the max tracer */
1417         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418                 return tracing_start();
1419
1420         raw_spin_lock_irqsave(&tr->start_lock, flags);
1421
1422         if (--tr->stop_count) {
1423                 if (tr->stop_count < 0) {
1424                         /* Someone screwed up their debugging */
1425                         WARN_ON_ONCE(1);
1426                         tr->stop_count = 0;
1427                 }
1428                 goto out;
1429         }
1430
1431         buffer = tr->trace_buffer.buffer;
1432         if (buffer)
1433                 ring_buffer_record_enable(buffer);
1434
1435  out:
1436         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1437 }
1438
1439 /**
1440  * tracing_stop - quick stop of the tracer
1441  *
1442  * Light weight way to stop tracing. Use in conjunction with
1443  * tracing_start.
1444  */
1445 void tracing_stop(void)
1446 {
1447         struct ring_buffer *buffer;
1448         unsigned long flags;
1449
1450         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451         if (global_trace.stop_count++)
1452                 goto out;
1453
1454         /* Prevent the buffers from switching */
1455         arch_spin_lock(&global_trace.max_lock);
1456
1457         buffer = global_trace.trace_buffer.buffer;
1458         if (buffer)
1459                 ring_buffer_record_disable(buffer);
1460
1461 #ifdef CONFIG_TRACER_MAX_TRACE
1462         buffer = global_trace.max_buffer.buffer;
1463         if (buffer)
1464                 ring_buffer_record_disable(buffer);
1465 #endif
1466
1467         arch_spin_unlock(&global_trace.max_lock);
1468
1469  out:
1470         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471 }
1472
1473 static void tracing_stop_tr(struct trace_array *tr)
1474 {
1475         struct ring_buffer *buffer;
1476         unsigned long flags;
1477
1478         /* If global, we need to also stop the max tracer */
1479         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480                 return tracing_stop();
1481
1482         raw_spin_lock_irqsave(&tr->start_lock, flags);
1483         if (tr->stop_count++)
1484                 goto out;
1485
1486         buffer = tr->trace_buffer.buffer;
1487         if (buffer)
1488                 ring_buffer_record_disable(buffer);
1489
1490  out:
1491         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1492 }
1493
1494 void trace_stop_cmdline_recording(void);
1495
1496 static int trace_save_cmdline(struct task_struct *tsk)
1497 {
1498         unsigned pid, idx;
1499
1500         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1501                 return 0;
1502
1503         /*
1504          * It's not the end of the world if we don't get
1505          * the lock, but we also don't want to spin
1506          * nor do we want to disable interrupts,
1507          * so if we miss here, then better luck next time.
1508          */
1509         if (!arch_spin_trylock(&trace_cmdline_lock))
1510                 return 0;
1511
1512         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1513         if (idx == NO_CMDLINE_MAP) {
1514                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1515
1516                 /*
1517                  * Check whether the cmdline buffer at idx has a pid
1518                  * mapped. We are going to overwrite that entry so we
1519                  * need to clear the map_pid_to_cmdline. Otherwise we
1520                  * would read the new comm for the old pid.
1521                  */
1522                 pid = savedcmd->map_cmdline_to_pid[idx];
1523                 if (pid != NO_CMDLINE_MAP)
1524                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1525
1526                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1528
1529                 savedcmd->cmdline_idx = idx;
1530         }
1531
1532         set_cmdline(idx, tsk->comm);
1533
1534         arch_spin_unlock(&trace_cmdline_lock);
1535
1536         return 1;
1537 }
1538
1539 static void __trace_find_cmdline(int pid, char comm[])
1540 {
1541         unsigned map;
1542
1543         if (!pid) {
1544                 strcpy(comm, "<idle>");
1545                 return;
1546         }
1547
1548         if (WARN_ON_ONCE(pid < 0)) {
1549                 strcpy(comm, "<XXX>");
1550                 return;
1551         }
1552
1553         if (pid > PID_MAX_DEFAULT) {
1554                 strcpy(comm, "<...>");
1555                 return;
1556         }
1557
1558         map = savedcmd->map_pid_to_cmdline[pid];
1559         if (map != NO_CMDLINE_MAP)
1560                 strcpy(comm, get_saved_cmdlines(map));
1561         else
1562                 strcpy(comm, "<...>");
1563 }
1564
1565 void trace_find_cmdline(int pid, char comm[])
1566 {
1567         preempt_disable();
1568         arch_spin_lock(&trace_cmdline_lock);
1569
1570         __trace_find_cmdline(pid, comm);
1571
1572         arch_spin_unlock(&trace_cmdline_lock);
1573         preempt_enable();
1574 }
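
/*
 * A sketch of looking up the saved comm for a trace entry; the buffer
 * must be at least TASK_COMM_LEN bytes, and "<...>" is returned when
 * the pid has aged out of the cmdline cache:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 */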
1575
1576 void tracing_record_cmdline(struct task_struct *tsk)
1577 {
1578         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1579                 return;
1580
1581         if (!__this_cpu_read(trace_cmdline_save))
1582                 return;
1583
1584         if (trace_save_cmdline(tsk))
1585                 __this_cpu_write(trace_cmdline_save, false);
1586 }
1587
1588 void
1589 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590                              int pc)
1591 {
1592         struct task_struct *tsk = current;
1593
1594         entry->preempt_count            = pc & 0xff;
1595         entry->pid                      = (tsk) ? tsk->pid : 0;
1596         entry->flags =
1597 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1598                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1599 #else
1600                 TRACE_FLAG_IRQS_NOSUPPORT |
1601 #endif
1602                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1604                 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1606 }
1607 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1608
1609 struct ring_buffer_event *
1610 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611                           int type,
1612                           unsigned long len,
1613                           unsigned long flags, int pc)
1614 {
1615         struct ring_buffer_event *event;
1616
1617         event = ring_buffer_lock_reserve(buffer, len);
1618         if (event != NULL) {
1619                 struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621                 tracing_generic_entry_update(ent, flags, pc);
1622                 ent->type = type;
1623         }
1624
1625         return event;
1626 }
1627
1628 void
1629 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630 {
1631         __this_cpu_write(trace_cmdline_save, true);
1632         ring_buffer_unlock_commit(buffer, event);
1633 }
1634
1635 static inline void
1636 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637                              struct ring_buffer_event *event,
1638                              unsigned long flags, int pc)
1639 {
1640         __buffer_unlock_commit(buffer, event);
1641
1642         ftrace_trace_stack(buffer, flags, 6, pc);
1643         ftrace_trace_userstack(buffer, flags, pc);
1644 }
1645
1646 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647                                 struct ring_buffer_event *event,
1648                                 unsigned long flags, int pc)
1649 {
1650         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1651 }
1652 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1653
1654 static struct ring_buffer *temp_buffer;
1655
1656 struct ring_buffer_event *
1657 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658                           struct ftrace_event_file *ftrace_file,
1659                           int type, unsigned long len,
1660                           unsigned long flags, int pc)
1661 {
1662         struct ring_buffer_event *entry;
1663
1664         *current_rb = ftrace_file->tr->trace_buffer.buffer;
1665         entry = trace_buffer_lock_reserve(*current_rb,
1666                                          type, len, flags, pc);
1667         /*
1668          * If tracing is off, but we have triggers enabled,
1669          * we still need to look at the event data. Use the temp_buffer
1670          * to store the trace event for the trigger to use. It's recursion
1671          * safe and will not be recorded anywhere.
1672          */
1673         if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674                 *current_rb = temp_buffer;
1675                 entry = trace_buffer_lock_reserve(*current_rb,
1676                                                   type, len, flags, pc);
1677         }
1678         return entry;
1679 }
1680 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
1682 struct ring_buffer_event *
1683 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684                                   int type, unsigned long len,
1685                                   unsigned long flags, int pc)
1686 {
1687         *current_rb = global_trace.trace_buffer.buffer;
1688         return trace_buffer_lock_reserve(*current_rb,
1689                                          type, len, flags, pc);
1690 }
1691 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1692
1693 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694                                         struct ring_buffer_event *event,
1695                                         unsigned long flags, int pc)
1696 {
1697         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1698 }
1699 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
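
/*
 * A minimal usage sketch, kept under UNUSED so it is never built: the usual
 * reserve/fill/commit sequence on top of the two exported helpers above.
 * The entry layout follows struct print_entry; everything prefixed example_
 * is hypothetical, and the real print paths later in this file also run the
 * event filter before committing.
 */
#ifdef UNUSED
static void example_record_print_entry(unsigned long ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_current_buffer_lock_reserve(&buffer, TRACE_PRINT,
						  sizeof(*entry) + 1,
						  flags, pc);
	if (!event)
		return;		/* ring buffer full or tracing is off */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->buf[0] = '\0';	/* empty message, just to show the fill step */

	trace_current_buffer_unlock_commit(buffer, event, flags, pc);
}
#endif /* UNUSED */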
1700
1701 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702                                      struct ring_buffer_event *event,
1703                                      unsigned long flags, int pc,
1704                                      struct pt_regs *regs)
1705 {
1706         __buffer_unlock_commit(buffer, event);
1707
1708         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709         ftrace_trace_userstack(buffer, flags, pc);
1710 }
1711 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1712
1713 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714                                          struct ring_buffer_event *event)
1715 {
1716         ring_buffer_discard_commit(buffer, event);
1717 }
1718 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1719
1720 void
1721 trace_function(struct trace_array *tr,
1722                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723                int pc)
1724 {
1725         struct ftrace_event_call *call = &event_function;
1726         struct ring_buffer *buffer = tr->trace_buffer.buffer;
1727         struct ring_buffer_event *event;
1728         struct ftrace_entry *entry;
1729
1730         /* If we are reading the ring buffer, don't trace */
1731         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1732                 return;
1733
1734         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1735                                           flags, pc);
1736         if (!event)
1737                 return;
1738         entry   = ring_buffer_event_data(event);
1739         entry->ip                       = ip;
1740         entry->parent_ip                = parent_ip;
1741
1742         if (!call_filter_check_discard(call, entry, buffer, event))
1743                 __buffer_unlock_commit(buffer, event);
1744 }
1745
1746 #ifdef CONFIG_STACKTRACE
1747
1748 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749 struct ftrace_stack {
1750         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1751 };
1752
1753 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
1756 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1757                                  unsigned long flags,
1758                                  int skip, int pc, struct pt_regs *regs)
1759 {
1760         struct ftrace_event_call *call = &event_kernel_stack;
1761         struct ring_buffer_event *event;
1762         struct stack_entry *entry;
1763         struct stack_trace trace;
1764         int use_stack;
1765         int size = FTRACE_STACK_ENTRIES;
1766
1767         trace.nr_entries        = 0;
1768         trace.skip              = skip;
1769
1770         /*
1771          * Since events can happen in NMIs, there is no safe way to
1772          * use the per-cpu ftrace_stacks. We reserve it, and if an
1773          * interrupt or NMI comes in, it will just have to use the
1774          * default FTRACE_STACK_ENTRIES sized stack in the entry itself.
1775          */
1776         preempt_disable_notrace();
1777
1778         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1779         /*
1780          * We don't need any atomic variables, just a barrier.
1781          * If an interrupt comes in, we don't care, because it would
1782          * have exited and put the counter back to what we want.
1783          * We just need a barrier to keep gcc from moving things
1784          * around.
1785          */
1786         barrier();
1787         if (use_stack == 1) {
1788                 trace.entries           = this_cpu_ptr(ftrace_stack.calls);
1789                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1790
1791                 if (regs)
1792                         save_stack_trace_regs(regs, &trace);
1793                 else
1794                         save_stack_trace(&trace);
1795
1796                 if (trace.nr_entries > size)
1797                         size = trace.nr_entries;
1798         } else
1799                 /* From now on, use_stack is a boolean */
1800                 use_stack = 0;
1801
1802         size *= sizeof(unsigned long);
1803
1804         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805                                           sizeof(*entry) + size, flags, pc);
1806         if (!event)
1807                 goto out;
1808         entry = ring_buffer_event_data(event);
1809
1810         memset(&entry->caller, 0, size);
1811
1812         if (use_stack)
1813                 memcpy(&entry->caller, trace.entries,
1814                        trace.nr_entries * sizeof(unsigned long));
1815         else {
1816                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1817                 trace.entries           = entry->caller;
1818                 if (regs)
1819                         save_stack_trace_regs(regs, &trace);
1820                 else
1821                         save_stack_trace(&trace);
1822         }
1823
1824         entry->size = trace.nr_entries;
1825
1826         if (!call_filter_check_discard(call, entry, buffer, event))
1827                 __buffer_unlock_commit(buffer, event);
1828
1829  out:
1830         /* Again, don't let gcc optimize things here */
1831         barrier();
1832         __this_cpu_dec(ftrace_stack_reserve);
1833         preempt_enable_notrace();
1834
1835 }
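
/*
 * A stripped-down sketch of the reservation scheme used above, never built:
 * the first (outermost) context on a CPU gets to use the big per-cpu scratch
 * area, while nested irq/NMI contexts fall back to a smaller local one.
 * The example_ names are hypothetical.
 */
#ifdef UNUSED
static DEFINE_PER_CPU(int, example_stack_reserve);

static void example_nesting_safe_section(void)
{
	int nesting;

	preempt_disable_notrace();
	nesting = __this_cpu_inc_return(example_stack_reserve);
	barrier();	/* keep the compiler from reordering around the count */

	if (nesting == 1) {
		/* outermost user on this CPU: the per-cpu area is ours */
	} else {
		/* nested in an interrupt or NMI: use a small local buffer */
	}

	barrier();
	__this_cpu_dec(example_stack_reserve);
	preempt_enable_notrace();
}
#endif /* UNUSED */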
1836
1837 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838                              int skip, int pc, struct pt_regs *regs)
1839 {
1840         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841                 return;
1842
1843         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844 }
1845
1846 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847                         int skip, int pc)
1848 {
1849         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850                 return;
1851
1852         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1853 }
1854
1855 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856                    int pc)
1857 {
1858         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1859 }
1860
1861 /**
1862  * trace_dump_stack - record a stack back trace in the trace buffer
1863  * @skip: Number of functions to skip (helper handlers)
1864  */
1865 void trace_dump_stack(int skip)
1866 {
1867         unsigned long flags;
1868
1869         if (tracing_disabled || tracing_selftest_running)
1870                 return;
1871
1872         local_save_flags(flags);
1873
1874         /*
1875          * Skip 3 more, seems to get us at the caller of
1876          * this function.
1877          */
1878         skip += 3;
1879         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880                              flags, skip, preempt_count(), NULL);
1881 }
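
/*
 * Usage sketch, never built: dump the current call chain into the trace
 * buffer. A skip of 0 starts at the immediate caller; wrappers pass a
 * larger skip to hide themselves from the dump.
 */
#ifdef UNUSED
static void example_dump_current_stack(void)
{
	trace_dump_stack(0);
}
#endif /* UNUSED */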
1882
1883 static DEFINE_PER_CPU(int, user_stack_count);
1884
1885 void
1886 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1887 {
1888         struct ftrace_event_call *call = &event_user_stack;
1889         struct ring_buffer_event *event;
1890         struct userstack_entry *entry;
1891         struct stack_trace trace;
1892
1893         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894                 return;
1895
1896         /*
1897          * NMIs cannot handle page faults, even with fixups.
1898          * Saving the user stack can (and often does) fault.
1899          */
1900         if (unlikely(in_nmi()))
1901                 return;
1902
1903         /*
1904          * prevent recursion, since the user stack tracing may
1905          * trigger other kernel events.
1906          */
1907         preempt_disable();
1908         if (__this_cpu_read(user_stack_count))
1909                 goto out;
1910
1911         __this_cpu_inc(user_stack_count);
1912
1913         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1914                                           sizeof(*entry), flags, pc);
1915         if (!event)
1916                 goto out_drop_count;
1917         entry   = ring_buffer_event_data(event);
1918
1919         entry->tgid             = current->tgid;
1920         memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922         trace.nr_entries        = 0;
1923         trace.max_entries       = FTRACE_STACK_ENTRIES;
1924         trace.skip              = 0;
1925         trace.entries           = entry->caller;
1926
1927         save_stack_trace_user(&trace);
1928         if (!call_filter_check_discard(call, entry, buffer, event))
1929                 __buffer_unlock_commit(buffer, event);
1930
1931  out_drop_count:
1932         __this_cpu_dec(user_stack_count);
1933  out:
1934         preempt_enable();
1935 }
1936
1937 #ifdef UNUSED
1938 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1939 {
1940         ftrace_trace_userstack(tr, flags, preempt_count());
1941 }
1942 #endif /* UNUSED */
1943
1944 #endif /* CONFIG_STACKTRACE */
1945
1946 /* created for use with alloc_percpu */
1947 struct trace_buffer_struct {
1948         char buffer[TRACE_BUF_SIZE];
1949 };
1950
1951 static struct trace_buffer_struct *trace_percpu_buffer;
1952 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956 /*
1957  * The buffer used is dependent on the context. There is a per cpu
1958  * buffer for normal context, softirq context, hard irq context and
1959  * for NMI context. This allows for lockless recording.
1960  *
1961  * Note, if the buffers failed to be allocated, then this returns NULL
1962  */
1963 static char *get_trace_buf(void)
1964 {
1965         struct trace_buffer_struct *percpu_buffer;
1966
1967         /*
1968          * If we have allocated per cpu buffers, then we do not
1969          * need to do any locking.
1970          */
1971         if (in_nmi())
1972                 percpu_buffer = trace_percpu_nmi_buffer;
1973         else if (in_irq())
1974                 percpu_buffer = trace_percpu_irq_buffer;
1975         else if (in_softirq())
1976                 percpu_buffer = trace_percpu_sirq_buffer;
1977         else
1978                 percpu_buffer = trace_percpu_buffer;
1979
1980         if (!percpu_buffer)
1981                 return NULL;
1982
1983         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1984 }
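
/*
 * Usage sketch, never built: callers bracket get_trace_buf() with
 * preempt_disable_notrace()/preempt_enable_notrace() so the CPU, and
 * therefore the buffer, cannot change underneath them; trace_vbprintk()
 * and __trace_array_vprintk() below do the same. The name is hypothetical.
 */
#ifdef UNUSED
static void example_use_trace_buf(void)
{
	char *tbuf;

	preempt_disable_notrace();
	tbuf = get_trace_buf();
	if (tbuf)
		snprintf(tbuf, TRACE_BUF_SIZE, "scratch text");
	preempt_enable_notrace();
}
#endif /* UNUSED */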
1985
1986 static int alloc_percpu_trace_buffer(void)
1987 {
1988         struct trace_buffer_struct *buffers;
1989         struct trace_buffer_struct *sirq_buffers;
1990         struct trace_buffer_struct *irq_buffers;
1991         struct trace_buffer_struct *nmi_buffers;
1992
1993         buffers = alloc_percpu(struct trace_buffer_struct);
1994         if (!buffers)
1995                 goto err_warn;
1996
1997         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998         if (!sirq_buffers)
1999                 goto err_sirq;
2000
2001         irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002         if (!irq_buffers)
2003                 goto err_irq;
2004
2005         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006         if (!nmi_buffers)
2007                 goto err_nmi;
2008
2009         trace_percpu_buffer = buffers;
2010         trace_percpu_sirq_buffer = sirq_buffers;
2011         trace_percpu_irq_buffer = irq_buffers;
2012         trace_percpu_nmi_buffer = nmi_buffers;
2013
2014         return 0;
2015
2016  err_nmi:
2017         free_percpu(irq_buffers);
2018  err_irq:
2019         free_percpu(sirq_buffers);
2020  err_sirq:
2021         free_percpu(buffers);
2022  err_warn:
2023         WARN(1, "Could not allocate percpu trace_printk buffer");
2024         return -ENOMEM;
2025 }
2026
2027 static int buffers_allocated;
2028
2029 void trace_printk_init_buffers(void)
2030 {
2031         if (buffers_allocated)
2032                 return;
2033
2034         if (alloc_percpu_trace_buffer())
2035                 return;
2036
2037         /* trace_printk() is for debug use only. Don't use it in production. */
2038
2039         pr_warning("\n");
2040         pr_warning("**********************************************************\n");
2041         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2042         pr_warning("**                                                      **\n");
2043         pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2044         pr_warning("**                                                      **\n");
2045         pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2046         pr_warning("** unsafe for production use.                           **\n");
2047         pr_warning("**                                                      **\n");
2048         pr_warning("** If you see this message and you are not debugging    **\n");
2049         pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2050         pr_warning("**                                                      **\n");
2051         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2052         pr_warning("**********************************************************\n");
2053
2054         /* Expand the buffers to set size */
2055         tracing_update_buffers();
2056
2057         buffers_allocated = 1;
2058
2059         /*
2060          * trace_printk_init_buffers() can be called by modules.
2061          * If that happens, then we need to start cmdline recording
2062          * directly here. If the global_trace.buffer is already
2063          * allocated here, then this was called by module code.
2064          */
2065         if (global_trace.trace_buffer.buffer)
2066                 tracing_start_cmdline_record();
2067 }
2068
2069 void trace_printk_start_comm(void)
2070 {
2071         /* Start tracing comms if trace printk is set */
2072         if (!buffers_allocated)
2073                 return;
2074         tracing_start_cmdline_record();
2075 }
2076
2077 static void trace_printk_start_stop_comm(int enabled)
2078 {
2079         if (!buffers_allocated)
2080                 return;
2081
2082         if (enabled)
2083                 tracing_start_cmdline_record();
2084         else
2085                 tracing_stop_cmdline_record();
2086 }
2087
2088 /**
2089  * trace_vbprintk - write binary msg to tracing buffer
2090  * @ip: caller address; @fmt: binary format string; @args: va_list for @fmt
2091  */
2092 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2093 {
2094         struct ftrace_event_call *call = &event_bprint;
2095         struct ring_buffer_event *event;
2096         struct ring_buffer *buffer;
2097         struct trace_array *tr = &global_trace;
2098         struct bprint_entry *entry;
2099         unsigned long flags;
2100         char *tbuffer;
2101         int len = 0, size, pc;
2102
2103         if (unlikely(tracing_selftest_running || tracing_disabled))
2104                 return 0;
2105
2106         /* Don't pollute graph traces with trace_vprintk internals */
2107         pause_graph_tracing();
2108
2109         pc = preempt_count();
2110         preempt_disable_notrace();
2111
2112         tbuffer = get_trace_buf();
2113         if (!tbuffer) {
2114                 len = 0;
2115                 goto out;
2116         }
2117
2118         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2119
2120         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2121                 goto out;
2122
2123         local_save_flags(flags);
2124         size = sizeof(*entry) + sizeof(u32) * len;
2125         buffer = tr->trace_buffer.buffer;
2126         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2127                                           flags, pc);
2128         if (!event)
2129                 goto out;
2130         entry = ring_buffer_event_data(event);
2131         entry->ip                       = ip;
2132         entry->fmt                      = fmt;
2133
2134         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2135         if (!call_filter_check_discard(call, entry, buffer, event)) {
2136                 __buffer_unlock_commit(buffer, event);
2137                 ftrace_trace_stack(buffer, flags, 6, pc);
2138         }
2139
2140 out:
2141         preempt_enable_notrace();
2142         unpause_graph_tracing();
2143
2144         return len;
2145 }
2146 EXPORT_SYMBOL_GPL(trace_vbprintk);
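
/*
 * Sketch of a varargs front end for trace_vbprintk(), similar in shape to
 * the wrapper used by trace_bprintk(); the name here is hypothetical and
 * the block is never built.
 */
#ifdef UNUSED
static int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}
#endif /* UNUSED */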
2147
2148 static int
2149 __trace_array_vprintk(struct ring_buffer *buffer,
2150                       unsigned long ip, const char *fmt, va_list args)
2151 {
2152         struct ftrace_event_call *call = &event_print;
2153         struct ring_buffer_event *event;
2154         int len = 0, size, pc;
2155         struct print_entry *entry;
2156         unsigned long flags;
2157         char *tbuffer;
2158
2159         if (tracing_disabled || tracing_selftest_running)
2160                 return 0;
2161
2162         /* Don't pollute graph traces with trace_vprintk internals */
2163         pause_graph_tracing();
2164
2165         pc = preempt_count();
2166         preempt_disable_notrace();
2167
2168
2169         tbuffer = get_trace_buf();
2170         if (!tbuffer) {
2171                 len = 0;
2172                 goto out;
2173         }
2174
2175         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2176
2177         local_save_flags(flags);
2178         size = sizeof(*entry) + len + 1;
2179         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2180                                           flags, pc);
2181         if (!event)
2182                 goto out;
2183         entry = ring_buffer_event_data(event);
2184         entry->ip = ip;
2185
2186         memcpy(&entry->buf, tbuffer, len + 1);
2187         if (!call_filter_check_discard(call, entry, buffer, event)) {
2188                 __buffer_unlock_commit(buffer, event);
2189                 ftrace_trace_stack(buffer, flags, 6, pc);
2190         }
2191  out:
2192         preempt_enable_notrace();
2193         unpause_graph_tracing();
2194
2195         return len;
2196 }
2197
2198 int trace_array_vprintk(struct trace_array *tr,
2199                         unsigned long ip, const char *fmt, va_list args)
2200 {
2201         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2202 }
2203
2204 int trace_array_printk(struct trace_array *tr,
2205                        unsigned long ip, const char *fmt, ...)
2206 {
2207         int ret;
2208         va_list ap;
2209
2210         if (!(trace_flags & TRACE_ITER_PRINTK))
2211                 return 0;
2212
2213         va_start(ap, fmt);
2214         ret = trace_array_vprintk(tr, ip, fmt, ap);
2215         va_end(ap);
2216         return ret;
2217 }
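
/*
 * Usage sketch, never built: writing a formatted message into a specific
 * trace instance's buffer rather than the global one. _THIS_IP_ supplies
 * the caller's address; the value printed here is arbitrary.
 */
#ifdef UNUSED
static void example_instance_printk(struct trace_array *tr)
{
	trace_array_printk(tr, _THIS_IP_, "example state=%d\n", 42);
}
#endif /* UNUSED */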
2218
2219 int trace_array_printk_buf(struct ring_buffer *buffer,
2220                            unsigned long ip, const char *fmt, ...)
2221 {
2222         int ret;
2223         va_list ap;
2224
2225         if (!(trace_flags & TRACE_ITER_PRINTK))
2226                 return 0;
2227
2228         va_start(ap, fmt);
2229         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2230         va_end(ap);
2231         return ret;
2232 }
2233
2234 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2235 {
2236         return trace_array_vprintk(&global_trace, ip, fmt, args);
2237 }
2238 EXPORT_SYMBOL_GPL(trace_vprintk);
2239
2240 static void trace_iterator_increment(struct trace_iterator *iter)
2241 {
2242         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2243
2244         iter->idx++;
2245         if (buf_iter)
2246                 ring_buffer_read(buf_iter, NULL);
2247 }
2248
2249 static struct trace_entry *
2250 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2251                 unsigned long *lost_events)
2252 {
2253         struct ring_buffer_event *event;
2254         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2255
2256         if (buf_iter)
2257                 event = ring_buffer_iter_peek(buf_iter, ts);
2258         else
2259                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2260                                          lost_events);
2261
2262         if (event) {
2263                 iter->ent_size = ring_buffer_event_length(event);
2264                 return ring_buffer_event_data(event);
2265         }
2266         iter->ent_size = 0;
2267         return NULL;
2268 }
2269
2270 static struct trace_entry *
2271 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2272                   unsigned long *missing_events, u64 *ent_ts)
2273 {
2274         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2275         struct trace_entry *ent, *next = NULL;
2276         unsigned long lost_events = 0, next_lost = 0;
2277         int cpu_file = iter->cpu_file;
2278         u64 next_ts = 0, ts;
2279         int next_cpu = -1;
2280         int next_size = 0;
2281         int cpu;
2282
2283         /*
2284          * If we are in a per_cpu trace file, don't bother iterating
2285          * over all the CPUs; peek at that CPU directly.
2286          */
2287         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2288                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2289                         return NULL;
2290                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2291                 if (ent_cpu)
2292                         *ent_cpu = cpu_file;
2293
2294                 return ent;
2295         }
2296
2297         for_each_tracing_cpu(cpu) {
2298
2299                 if (ring_buffer_empty_cpu(buffer, cpu))
2300                         continue;
2301
2302                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2303
2304                 /*
2305                  * Pick the entry with the smallest timestamp:
2306                  */
2307                 if (ent && (!next || ts < next_ts)) {
2308                         next = ent;
2309                         next_cpu = cpu;
2310                         next_ts = ts;
2311                         next_lost = lost_events;
2312                         next_size = iter->ent_size;
2313                 }
2314         }
2315
2316         iter->ent_size = next_size;
2317
2318         if (ent_cpu)
2319                 *ent_cpu = next_cpu;
2320
2321         if (ent_ts)
2322                 *ent_ts = next_ts;
2323
2324         if (missing_events)
2325                 *missing_events = next_lost;
2326
2327         return next;
2328 }
2329
2330 /* Find the next real entry, without updating the iterator itself */
2331 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2332                                           int *ent_cpu, u64 *ent_ts)
2333 {
2334         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2335 }
2336
2337 /* Find the next real entry, and increment the iterator to the next entry */
2338 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2339 {
2340         iter->ent = __find_next_entry(iter, &iter->cpu,
2341                                       &iter->lost_events, &iter->ts);
2342
2343         if (iter->ent)
2344                 trace_iterator_increment(iter);
2345
2346         return iter->ent ? iter : NULL;
2347 }
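
/*
 * Iteration sketch, never built: walk every buffered entry in timestamp
 * order and print it. A real reader, like s_start()/s_show() below, holds
 * trace_event_read_lock() and trace_access_lock() around the walk.
 */
#ifdef UNUSED
static void example_walk_entries(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter))
		print_trace_line(iter);
}
#endif /* UNUSED */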
2348
2349 static void trace_consume(struct trace_iterator *iter)
2350 {
2351         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2352                             &iter->lost_events);
2353 }
2354
2355 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2356 {
2357         struct trace_iterator *iter = m->private;
2358         int i = (int)*pos;
2359         void *ent;
2360
2361         WARN_ON_ONCE(iter->leftover);
2362
2363         (*pos)++;
2364
2365         /* can't go backwards */
2366         if (iter->idx > i)
2367                 return NULL;
2368
2369         if (iter->idx < 0)
2370                 ent = trace_find_next_entry_inc(iter);
2371         else
2372                 ent = iter;
2373
2374         while (ent && iter->idx < i)
2375                 ent = trace_find_next_entry_inc(iter);
2376
2377         iter->pos = *pos;
2378
2379         return ent;
2380 }
2381
2382 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2383 {
2384         struct ring_buffer_event *event;
2385         struct ring_buffer_iter *buf_iter;
2386         unsigned long entries = 0;
2387         u64 ts;
2388
2389         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2390
2391         buf_iter = trace_buffer_iter(iter, cpu);
2392         if (!buf_iter)
2393                 return;
2394
2395         ring_buffer_iter_reset(buf_iter);
2396
2397         /*
2398          * With the max latency tracers, a reset may never have taken
2399          * place on a cpu. This is evident
2400          * by the timestamp being before the start of the buffer.
2401          */
2402         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2403                 if (ts >= iter->trace_buffer->time_start)
2404                         break;
2405                 entries++;
2406                 ring_buffer_read(buf_iter, NULL);
2407         }
2408
2409         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2410 }
2411
2412 /*
2413  * The current tracer is copied to avoid taking a global lock
2414  * all around.
2415  */
2416 static void *s_start(struct seq_file *m, loff_t *pos)
2417 {
2418         struct trace_iterator *iter = m->private;
2419         struct trace_array *tr = iter->tr;
2420         int cpu_file = iter->cpu_file;
2421         void *p = NULL;
2422         loff_t l = 0;
2423         int cpu;
2424
2425         /*
2426          * copy the tracer to avoid using a global lock all around.
2427          * iter->trace is a copy of current_trace, the pointer to the
2428          * name may be used instead of a strcmp(), as iter->trace->name
2429          * will point to the same string as current_trace->name.
2430          */
2431         mutex_lock(&trace_types_lock);
2432         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2433                 *iter->trace = *tr->current_trace;
2434         mutex_unlock(&trace_types_lock);
2435
2436 #ifdef CONFIG_TRACER_MAX_TRACE
2437         if (iter->snapshot && iter->trace->use_max_tr)
2438                 return ERR_PTR(-EBUSY);
2439 #endif
2440
2441         if (!iter->snapshot)
2442                 atomic_inc(&trace_record_cmdline_disabled);
2443
2444         if (*pos != iter->pos) {
2445                 iter->ent = NULL;
2446                 iter->cpu = 0;
2447                 iter->idx = -1;
2448
2449                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2450                         for_each_tracing_cpu(cpu)
2451                                 tracing_iter_reset(iter, cpu);
2452                 } else
2453                         tracing_iter_reset(iter, cpu_file);
2454
2455                 iter->leftover = 0;
2456                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2457                         ;
2458
2459         } else {
2460                 /*
2461                  * If we overflowed the seq_file before, then we want
2462                  * to just reuse the trace_seq buffer again.
2463                  */
2464                 if (iter->leftover)
2465                         p = iter;
2466                 else {
2467                         l = *pos - 1;
2468                         p = s_next(m, p, &l);
2469                 }
2470         }
2471
2472         trace_event_read_lock();
2473         trace_access_lock(cpu_file);
2474         return p;
2475 }
2476
2477 static void s_stop(struct seq_file *m, void *p)
2478 {
2479         struct trace_iterator *iter = m->private;
2480
2481 #ifdef CONFIG_TRACER_MAX_TRACE
2482         if (iter->snapshot && iter->trace->use_max_tr)
2483                 return;
2484 #endif
2485
2486         if (!iter->snapshot)
2487                 atomic_dec(&trace_record_cmdline_disabled);
2488
2489         trace_access_unlock(iter->cpu_file);
2490         trace_event_read_unlock();
2491 }
2492
2493 static void
2494 get_total_entries(struct trace_buffer *buf,
2495                   unsigned long *total, unsigned long *entries)
2496 {
2497         unsigned long count;
2498         int cpu;
2499
2500         *total = 0;
2501         *entries = 0;
2502
2503         for_each_tracing_cpu(cpu) {
2504                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2505                 /*
2506                  * If this buffer has skipped entries, then we hold all
2507                  * entries for the trace and we need to ignore the
2508                  * ones before the time stamp.
2509                  */
2510                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2511                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2512                         /* total is the same as the entries */
2513                         *total += count;
2514                 } else
2515                         *total += count +
2516                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2517                 *entries += count;
2518         }
2519 }
2520
2521 static void print_lat_help_header(struct seq_file *m)
2522 {
2523         seq_puts(m, "#                  _------=> CPU#            \n"
2524                     "#                 / _-----=> irqs-off        \n"
2525                     "#                | / _----=> need-resched    \n"
2526                     "#                || / _---=> hardirq/softirq \n"
2527                     "#                ||| / _--=> preempt-depth   \n"
2528                     "#                |||| /     delay            \n"
2529                     "#  cmd     pid   ||||| time  |   caller      \n"
2530                     "#     \\   /      |||||  \\    |   /         \n");
2531 }
2532
2533 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2534 {
2535         unsigned long total;
2536         unsigned long entries;
2537
2538         get_total_entries(buf, &total, &entries);
2539         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2540                    entries, total, num_online_cpus());
2541         seq_puts(m, "#\n");
2542 }
2543
2544 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2545 {
2546         print_event_info(buf, m);
2547         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2548                     "#              | |       |          |         |\n");
2549 }
2550
2551 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2552 {
2553         print_event_info(buf, m);
2554         seq_puts(m, "#                              _-----=> irqs-off\n"
2555                     "#                             / _----=> need-resched\n"
2556                     "#                            | / _---=> hardirq/softirq\n"
2557                     "#                            || / _--=> preempt-depth\n"
2558                     "#                            ||| /     delay\n"
2559                     "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2560                     "#              | |       |   ||||       |         |\n");
2561 }
2562
2563 void
2564 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2565 {
2566         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2567         struct trace_buffer *buf = iter->trace_buffer;
2568         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2569         struct tracer *type = iter->trace;
2570         unsigned long entries;
2571         unsigned long total;
2572         const char *name = "preemption";
2573
2574         name = type->name;
2575
2576         get_total_entries(buf, &total, &entries);
2577
2578         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2579                    name, UTS_RELEASE);
2580         seq_puts(m, "# -----------------------------------"
2581                  "---------------------------------\n");
2582         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2583                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2584                    nsecs_to_usecs(data->saved_latency),
2585                    entries,
2586                    total,
2587                    buf->cpu,
2588 #if defined(CONFIG_PREEMPT_NONE)
2589                    "server",
2590 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2591                    "desktop",
2592 #elif defined(CONFIG_PREEMPT)
2593                    "preempt",
2594 #else
2595                    "unknown",
2596 #endif
2597                    /* These are reserved for later use */
2598                    0, 0, 0, 0);
2599 #ifdef CONFIG_SMP
2600         seq_printf(m, " #P:%d)\n", num_online_cpus());
2601 #else
2602         seq_puts(m, ")\n");
2603 #endif
2604         seq_puts(m, "#    -----------------\n");
2605         seq_printf(m, "#    | task: %.16s-%d "
2606                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2607                    data->comm, data->pid,
2608                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2609                    data->policy, data->rt_priority);
2610         seq_puts(m, "#    -----------------\n");
2611
2612         if (data->critical_start) {
2613                 seq_puts(m, "#  => started at: ");
2614                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2615                 trace_print_seq(m, &iter->seq);
2616                 seq_puts(m, "\n#  => ended at:   ");
2617                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2618                 trace_print_seq(m, &iter->seq);
2619                 seq_puts(m, "\n#\n");
2620         }
2621
2622         seq_puts(m, "#\n");
2623 }
2624
2625 static void test_cpu_buff_start(struct trace_iterator *iter)
2626 {
2627         struct trace_seq *s = &iter->seq;
2628
2629         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2630                 return;
2631
2632         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2633                 return;
2634
2635         if (cpumask_test_cpu(iter->cpu, iter->started))
2636                 return;
2637
2638         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2639                 return;
2640
2641         cpumask_set_cpu(iter->cpu, iter->started);
2642
2643         /* Don't print started cpu buffer for the first entry of the trace */
2644         if (iter->idx > 1)
2645                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2646                                 iter->cpu);
2647 }
2648
2649 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2650 {
2651         struct trace_seq *s = &iter->seq;
2652         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2653         struct trace_entry *entry;
2654         struct trace_event *event;
2655
2656         entry = iter->ent;
2657
2658         test_cpu_buff_start(iter);
2659
2660         event = ftrace_find_event(entry->type);
2661
2662         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2663                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2664                         trace_print_lat_context(iter);
2665                 else
2666                         trace_print_context(iter);
2667         }
2668
2669         if (trace_seq_has_overflowed(s))
2670                 return TRACE_TYPE_PARTIAL_LINE;
2671
2672         if (event)
2673                 return event->funcs->trace(iter, sym_flags, event);
2674
2675         trace_seq_printf(s, "Unknown type %d\n", entry->type);
2676
2677         return trace_handle_return(s);
2678 }
2679
2680 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2681 {
2682         struct trace_seq *s = &iter->seq;
2683         struct trace_entry *entry;
2684         struct trace_event *event;
2685
2686         entry = iter->ent;
2687
2688         if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2689                 trace_seq_printf(s, "%d %d %llu ",
2690                                  entry->pid, iter->cpu, iter->ts);
2691
2692         if (trace_seq_has_overflowed(s))
2693                 return TRACE_TYPE_PARTIAL_LINE;
2694
2695         event = ftrace_find_event(entry->type);
2696         if (event)
2697                 return event->funcs->raw(iter, 0, event);
2698
2699         trace_seq_printf(s, "%d ?\n", entry->type);
2700
2701         return trace_handle_return(s);
2702 }
2703
2704 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2705 {
2706         struct trace_seq *s = &iter->seq;
2707         unsigned char newline = '\n';
2708         struct trace_entry *entry;
2709         struct trace_event *event;
2710
2711         entry = iter->ent;
2712
2713         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2714                 SEQ_PUT_HEX_FIELD(s, entry->pid);
2715                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2716                 SEQ_PUT_HEX_FIELD(s, iter->ts);
2717                 if (trace_seq_has_overflowed(s))
2718                         return TRACE_TYPE_PARTIAL_LINE;
2719         }
2720
2721         event = ftrace_find_event(entry->type);
2722         if (event) {
2723                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2724                 if (ret != TRACE_TYPE_HANDLED)
2725                         return ret;
2726         }
2727
2728         SEQ_PUT_FIELD(s, newline);
2729
2730         return trace_handle_return(s);
2731 }
2732
2733 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2734 {
2735         struct trace_seq *s = &iter->seq;
2736         struct trace_entry *entry;
2737         struct trace_event *event;
2738
2739         entry = iter->ent;
2740
2741         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2742                 SEQ_PUT_FIELD(s, entry->pid);
2743                 SEQ_PUT_FIELD(s, iter->cpu);
2744                 SEQ_PUT_FIELD(s, iter->ts);
2745                 if (trace_seq_has_overflowed(s))
2746                         return TRACE_TYPE_PARTIAL_LINE;
2747         }
2748
2749         event = ftrace_find_event(entry->type);
2750         return event ? event->funcs->binary(iter, 0, event) :
2751                 TRACE_TYPE_HANDLED;
2752 }
2753
2754 int trace_empty(struct trace_iterator *iter)
2755 {
2756         struct ring_buffer_iter *buf_iter;
2757         int cpu;
2758
2759         /* If we are looking at one CPU buffer, only check that one */
2760         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2761                 cpu = iter->cpu_file;
2762                 buf_iter = trace_buffer_iter(iter, cpu);
2763                 if (buf_iter) {
2764                         if (!ring_buffer_iter_empty(buf_iter))
2765                                 return 0;
2766                 } else {
2767                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2768                                 return 0;
2769                 }
2770                 return 1;
2771         }
2772
2773         for_each_tracing_cpu(cpu) {
2774                 buf_iter = trace_buffer_iter(iter, cpu);
2775                 if (buf_iter) {
2776                         if (!ring_buffer_iter_empty(buf_iter))
2777                                 return 0;
2778                 } else {
2779                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2780                                 return 0;
2781                 }
2782         }
2783
2784         return 1;
2785 }
2786
2787 /*  Called with trace_event_read_lock() held. */
2788 enum print_line_t print_trace_line(struct trace_iterator *iter)
2789 {
2790         enum print_line_t ret;
2791
2792         if (iter->lost_events) {
2793                 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2794                                  iter->cpu, iter->lost_events);
2795                 if (trace_seq_has_overflowed(&iter->seq))
2796                         return TRACE_TYPE_PARTIAL_LINE;
2797         }
2798
2799         if (iter->trace && iter->trace->print_line) {
2800                 ret = iter->trace->print_line(iter);
2801                 if (ret != TRACE_TYPE_UNHANDLED)
2802                         return ret;
2803         }
2804
2805         if (iter->ent->type == TRACE_BPUTS &&
2806                         trace_flags & TRACE_ITER_PRINTK &&
2807                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2808                 return trace_print_bputs_msg_only(iter);
2809
2810         if (iter->ent->type == TRACE_BPRINT &&
2811                         trace_flags & TRACE_ITER_PRINTK &&
2812                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2813                 return trace_print_bprintk_msg_only(iter);
2814
2815         if (iter->ent->type == TRACE_PRINT &&
2816                         trace_flags & TRACE_ITER_PRINTK &&
2817                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2818                 return trace_print_printk_msg_only(iter);
2819
2820         if (trace_flags & TRACE_ITER_BIN)
2821                 return print_bin_fmt(iter);
2822
2823         if (trace_flags & TRACE_ITER_HEX)
2824                 return print_hex_fmt(iter);
2825
2826         if (trace_flags & TRACE_ITER_RAW)
2827                 return print_raw_fmt(iter);
2828
2829         return print_trace_fmt(iter);
2830 }
2831
2832 void trace_latency_header(struct seq_file *m)
2833 {
2834         struct trace_iterator *iter = m->private;
2835
2836         /* print nothing if the buffers are empty */
2837         if (trace_empty(iter))
2838                 return;
2839
2840         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2841                 print_trace_header(m, iter);
2842
2843         if (!(trace_flags & TRACE_ITER_VERBOSE))
2844                 print_lat_help_header(m);
2845 }
2846
2847 void trace_default_header(struct seq_file *m)
2848 {
2849         struct trace_iterator *iter = m->private;
2850
2851         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2852                 return;
2853
2854         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2855                 /* print nothing if the buffers are empty */
2856                 if (trace_empty(iter))
2857                         return;
2858                 print_trace_header(m, iter);
2859                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2860                         print_lat_help_header(m);
2861         } else {
2862                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2863                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2864                                 print_func_help_header_irq(iter->trace_buffer, m);
2865                         else
2866                                 print_func_help_header(iter->trace_buffer, m);
2867                 }
2868         }
2869 }
2870
2871 static void test_ftrace_alive(struct seq_file *m)
2872 {
2873         if (!ftrace_is_dead())
2874                 return;
2875         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2876                     "#          MAY BE MISSING FUNCTION EVENTS\n");
2877 }
2878
2879 #ifdef CONFIG_TRACER_MAX_TRACE
2880 static void show_snapshot_main_help(struct seq_file *m)
2881 {
2882         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2883                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2884                     "#                      Takes a snapshot of the main buffer.\n"
2885                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2886                     "#                      (Doesn't have to be '2'; works with any number that\n"
2887                     "#                       is not a '0' or '1')\n");
2888 }
2889
2890 static void show_snapshot_percpu_help(struct seq_file *m)
2891 {
2892         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2893 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2894         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2895                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
2896 #else
2897         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2898                     "#                     Must use main snapshot file to allocate.\n");
2899 #endif
2900         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2901                     "#                      (Doesn't have to be '2'; works with any number that\n"
2902                     "#                       is not a '0' or '1')\n");
2903 }
2904
2905 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2906 {
2907         if (iter->tr->allocated_snapshot)
2908                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2909         else
2910                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2911
2912         seq_puts(m, "# Snapshot commands:\n");
2913         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2914                 show_snapshot_main_help(m);
2915         else
2916                 show_snapshot_percpu_help(m);
2917 }
2918 #else
2919 /* Should never be called */
2920 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2921 #endif
2922
2923 static int s_show(struct seq_file *m, void *v)
2924 {
2925         struct trace_iterator *iter = v;
2926         int ret;
2927
2928         if (iter->ent == NULL) {
2929                 if (iter->tr) {
2930                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2931                         seq_puts(m, "#\n");
2932                         test_ftrace_alive(m);
2933                 }
2934                 if (iter->snapshot && trace_empty(iter))
2935                         print_snapshot_help(m, iter);
2936                 else if (iter->trace && iter->trace->print_header)
2937                         iter->trace->print_header(m);
2938                 else
2939                         trace_default_header(m);
2940
2941         } else if (iter->leftover) {
2942                 /*
2943                  * If we filled the seq_file buffer earlier, we
2944                  * want to just show it now.
2945                  */
2946                 ret = trace_print_seq(m, &iter->seq);
2947
2948                 /* ret should this time be zero, but you never know */
2949                 iter->leftover = ret;
2950
2951         } else {
2952                 print_trace_line(iter);
2953                 ret = trace_print_seq(m, &iter->seq);
2954                 /*
2955                  * If we overflow the seq_file buffer, then it will
2956                  * ask us for this data again at start up.
2957                  * Use that instead.
2958                  *  ret is 0 if seq_file write succeeded.
2959                  *        -1 otherwise.
2960                  */
2961                 iter->leftover = ret;
2962         }
2963
2964         return 0;
2965 }
2966
2967 /*
2968  * Should be used after trace_array_get(), trace_types_lock
2969  * ensures that i_cdev was already initialized.
2970  */
2971 static inline int tracing_get_cpu(struct inode *inode)
2972 {
2973         if (inode->i_cdev) /* See trace_create_cpu_file() */
2974                 return (long)inode->i_cdev - 1;
2975         return RING_BUFFER_ALL_CPUS;
2976 }
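
/*
 * Sketch of the encoding tracing_get_cpu() expects, never built: per-cpu
 * files stash cpu + 1 in i_cdev when they are created, so a NULL i_cdev
 * still means "all CPUs". Simplified from trace_create_cpu_file().
 */
#ifdef UNUSED
static void example_stash_cpu(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}
#endif /* UNUSED */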
2977
2978 static const struct seq_operations tracer_seq_ops = {
2979         .start          = s_start,
2980         .next           = s_next,
2981         .stop           = s_stop,
2982         .show           = s_show,
2983 };
2984
2985 static struct trace_iterator *
2986 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2987 {
2988         struct trace_array *tr = inode->i_private;
2989         struct trace_iterator *iter;
2990         int cpu;
2991
2992         if (tracing_disabled)
2993                 return ERR_PTR(-ENODEV);
2994
2995         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2996         if (!iter)
2997                 return ERR_PTR(-ENOMEM);
2998
2999         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3000                                     GFP_KERNEL);
3001         if (!iter->buffer_iter)
3002                 goto release;
3003
3004         /*
3005          * We make a copy of the current tracer to avoid concurrent
3006          * changes on it while we are reading.
3007          */
3008         mutex_lock(&trace_types_lock);
3009         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3010         if (!iter->trace)
3011                 goto fail;
3012
3013         *iter->trace = *tr->current_trace;
3014
3015         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3016                 goto fail;
3017
3018         iter->tr = tr;
3019
3020 #ifdef CONFIG_TRACER_MAX_TRACE
3021         /* Currently only the top directory has a snapshot */
3022         if (tr->current_trace->print_max || snapshot)
3023                 iter->trace_buffer = &tr->max_buffer;
3024         else
3025 #endif
3026                 iter->trace_buffer = &tr->trace_buffer;
3027         iter->snapshot = snapshot;
3028         iter->pos = -1;
3029         iter->cpu_file = tracing_get_cpu(inode);
3030         mutex_init(&iter->mutex);
3031
3032         /* Notify the tracer early; before we stop tracing. */
3033         if (iter->trace && iter->trace->open)
3034                 iter->trace->open(iter);
3035
3036         /* Annotate start of buffers if we had overruns */
3037         if (ring_buffer_overruns(iter->trace_buffer->buffer))
3038                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3039
3040         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3041         if (trace_clocks[tr->clock_id].in_ns)
3042                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3043
3044         /* stop the trace while dumping if we are not opening "snapshot" */
3045         if (!iter->snapshot)
3046                 tracing_stop_tr(tr);
3047
3048         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3049                 for_each_tracing_cpu(cpu) {
3050                         iter->buffer_iter[cpu] =
3051                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3052                 }
3053                 ring_buffer_read_prepare_sync();
3054                 for_each_tracing_cpu(cpu) {
3055                         ring_buffer_read_start(iter->buffer_iter[cpu]);
3056                         tracing_iter_reset(iter, cpu);
3057                 }
3058         } else {
3059                 cpu = iter->cpu_file;
3060                 iter->buffer_iter[cpu] =
3061                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3062                 ring_buffer_read_prepare_sync();
3063                 ring_buffer_read_start(iter->buffer_iter[cpu]);
3064                 tracing_iter_reset(iter, cpu);
3065         }
3066
3067         mutex_unlock(&trace_types_lock);
3068
3069         return iter;
3070
3071  fail:
3072         mutex_unlock(&trace_types_lock);
3073         kfree(iter->trace);
3074         kfree(iter->buffer_iter);
3075 release:
3076         seq_release_private(inode, file);
3077         return ERR_PTR(-ENOMEM);
3078 }
3079
3080 int tracing_open_generic(struct inode *inode, struct file *filp)
3081 {
3082         if (tracing_disabled)
3083                 return -ENODEV;
3084
3085         filp->private_data = inode->i_private;
3086         return 0;
3087 }
3088
3089 bool tracing_is_disabled(void)
3090 {
3091         return (tracing_disabled) ? true : false;
3092 }
3093
3094 /*
3095  * Open and update trace_array ref count.
3096  * Must have the current trace_array passed to it.
3097  */
3098 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3099 {
3100         struct trace_array *tr = inode->i_private;
3101
3102         if (tracing_disabled)
3103                 return -ENODEV;
3104
3105         if (trace_array_get(tr) < 0)
3106                 return -ENODEV;
3107
3108         filp->private_data = inode->i_private;
3109
3110         return 0;
3111 }
3112
3113 static int tracing_release(struct inode *inode, struct file *file)
3114 {
3115         struct trace_array *tr = inode->i_private;
3116         struct seq_file *m = file->private_data;
3117         struct trace_iterator *iter;
3118         int cpu;
3119
3120         if (!(file->f_mode & FMODE_READ)) {
3121                 trace_array_put(tr);
3122                 return 0;
3123         }
3124
3125         /* Writes do not use seq_file */
3126         iter = m->private;
3127         mutex_lock(&trace_types_lock);
3128
3129         for_each_tracing_cpu(cpu) {
3130                 if (iter->buffer_iter[cpu])
3131                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3132         }
3133
3134         if (iter->trace && iter->trace->close)
3135                 iter->trace->close(iter);
3136
3137         if (!iter->snapshot)
3138                 /* reenable tracing if it was previously enabled */
3139                 tracing_start_tr(tr);
3140
3141         __trace_array_put(tr);
3142
3143         mutex_unlock(&trace_types_lock);
3144
3145         mutex_destroy(&iter->mutex);
3146         free_cpumask_var(iter->started);
3147         kfree(iter->trace);
3148         kfree(iter->buffer_iter);
3149         seq_release_private(inode, file);
3150
3151         return 0;
3152 }
3153
3154 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3155 {
3156         struct trace_array *tr = inode->i_private;
3157
3158         trace_array_put(tr);
3159         return 0;
3160 }
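
/*
 * Pairing sketch, never built: a trace_array backed file keeps the ref
 * count balanced by pairing the _tr open with the matching release. The
 * fops name is hypothetical; real users appear further down this file.
 */
#ifdef UNUSED
static const struct file_operations example_tr_fops = {
	.open		= tracing_open_generic_tr,
	.release	= tracing_release_generic_tr,
	/* .read/.write for the file's actual payload would go here */
};
#endif /* UNUSED */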
3161
3162 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3163 {
3164         struct trace_array *tr = inode->i_private;
3165
3166         trace_array_put(tr);
3167
3168         return single_release(inode, file);
3169 }
3170
3171 static int tracing_open(struct inode *inode, struct file *file)
3172 {
3173         struct trace_array *tr = inode->i_private;
3174         struct trace_iterator *iter;
3175         int ret = 0;
3176
3177         if (trace_array_get(tr) < 0)
3178                 return -ENODEV;
3179
3180         /* If this file was open for write, then erase contents */
3181         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3182                 int cpu = tracing_get_cpu(inode);
3183
3184                 if (cpu == RING_BUFFER_ALL_CPUS)
3185                         tracing_reset_online_cpus(&tr->trace_buffer);
3186                 else
3187                         tracing_reset(&tr->trace_buffer, cpu);
3188         }
3189
3190         if (file->f_mode & FMODE_READ) {
3191                 iter = __tracing_open(inode, file, false);
3192                 if (IS_ERR(iter))
3193                         ret = PTR_ERR(iter);
3194                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3195                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3196         }
3197
3198         if (ret < 0)
3199                 trace_array_put(tr);
3200
3201         return ret;
3202 }
3203
3204 /*
3205  * Some tracers are not suitable for instance buffers.
3206  * A tracer is always available for the global array (toplevel)
3207  * or if it explicitly states that it is.
3208  */
3209 static bool
3210 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3211 {
3212         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3213 }
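
/*
 * Opt-in sketch, never built: a tracer that wants to be usable inside
 * instance buffers simply sets .allow_instances; everything else here is
 * hypothetical.
 */
#ifdef UNUSED
static struct tracer example_tracer __read_mostly = {
	.name		= "example",
	.allow_instances = true,
};
#endif /* UNUSED */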
3214
3215 /* Find the next tracer that this trace array may use */
3216 static struct tracer *
3217 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3218 {
3219         while (t && !trace_ok_for_array(t, tr))
3220                 t = t->next;
3221
3222         return t;
3223 }
3224
3225 static void *
3226 t_next(struct seq_file *m, void *v, loff_t *pos)
3227 {
3228         struct trace_array *tr = m->private;
3229         struct tracer *t = v;
3230
3231         (*pos)++;
3232
3233         if (t)
3234                 t = get_tracer_for_array(tr, t->next);
3235
3236         return t;
3237 }
3238
3239 static void *t_start(struct seq_file *m, loff_t *pos)
3240 {
3241         struct trace_array *tr = m->private;
3242         struct tracer *t;
3243         loff_t l = 0;
3244
3245         mutex_lock(&trace_types_lock);
3246
3247         t = get_tracer_for_array(tr, trace_types);
3248         for (; t && l < *pos; t = t_next(m, t, &l))
3249                         ;
3250
3251         return t;
3252 }
3253
3254 static void t_stop(struct seq_file *m, void *p)
3255 {
3256         mutex_unlock(&trace_types_lock);
3257 }
3258
3259 static int t_show(struct seq_file *m, void *v)
3260 {
3261         struct tracer *t = v;
3262
3263         if (!t)
3264                 return 0;
3265
3266         seq_puts(m, t->name);
3267         if (t->next)
3268                 seq_putc(m, ' ');
3269         else
3270                 seq_putc(m, '\n');
3271
3272         return 0;
3273 }
3274
3275 static const struct seq_operations show_traces_seq_ops = {
3276         .start          = t_start,
3277         .next           = t_next,
3278         .stop           = t_stop,
3279         .show           = t_show,
3280 };
3281
3282 static int show_traces_open(struct inode *inode, struct file *file)
3283 {
3284         struct trace_array *tr = inode->i_private;
3285         struct seq_file *m;
3286         int ret;
3287
3288         if (tracing_disabled)
3289                 return -ENODEV;
3290
3291         ret = seq_open(file, &show_traces_seq_ops);
3292         if (ret)
3293                 return ret;
3294
3295         m = file->private_data;
3296         m->private = tr;
3297
3298         return 0;
3299 }
3300
3301 static ssize_t
3302 tracing_write_stub(struct file *filp, const char __user *ubuf,
3303                    size_t count, loff_t *ppos)
3304 {
3305         return count;
3306 }
3307
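/*
 * lseek helper shared by tracing files that use seq_file for reads:
 * if the file was opened for reading, defer to seq_lseek(); otherwise
 * (write-only open) just reset the file position to zero.
 */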
3308 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3309 {
3310         int ret;
3311
3312         if (file->f_mode & FMODE_READ)
3313                 ret = seq_lseek(file, offset, whence);
3314         else
3315                 file->f_pos = ret = 0;
3316
3317         return ret;
3318 }
3319
3320 static const struct file_operations tracing_fops = {
3321         .open           = tracing_open,
3322         .read           = seq_read,
3323         .write          = tracing_write_stub,
3324         .llseek         = tracing_lseek,
3325         .release        = tracing_release,
3326 };
3327
3328 static const struct file_operations show_traces_fops = {
3329         .open           = show_traces_open,
3330         .read           = seq_read,
3331         .release        = seq_release,
3332         .llseek         = seq_lseek,
3333 };
3334
3335 /*
3336  * The tracer itself will not take this lock, but still we want
3337  * to provide a consistent cpumask to user-space:
3338  */
3339 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3340
3341 /*
3342  * Temporary storage for the character representation of the
3343  * CPU bitmask (and one more byte for the newline):
3344  */
3345 static char mask_str[NR_CPUS + 1];
3346
3347 static ssize_t
3348 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3349                      size_t count, loff_t *ppos)
3350 {
3351         struct trace_array *tr = file_inode(filp)->i_private;
3352         int len;
3353
3354         mutex_lock(&tracing_cpumask_update_lock);
3355
3356         len = snprintf(mask_str, sizeof(mask_str), "%*pb\n",
3357                        cpumask_pr_args(tr->tracing_cpumask));
3358         if (len >= count) {
3359                 count = -EINVAL;
3360                 goto out_err;
3361         }
3362         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
3363
3364 out_err:
3365         mutex_unlock(&tracing_cpumask_update_lock);
3366
3367         return count;
3368 }
3369
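/*
 * Writing a hex CPU mask (e.g. "echo 3 > tracing_cpumask" to limit
 * tracing to CPUs 0 and 1) updates tr->tracing_cpumask. For every CPU
 * whose bit flips, the per-cpu disabled counter and the ring buffer
 * record state are adjusted under tr->max_lock with IRQs off, so that
 * events stop (or resume) being recorded on that CPU.
 */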
3370 static ssize_t
3371 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3372                       size_t count, loff_t *ppos)
3373 {
3374         struct trace_array *tr = file_inode(filp)->i_private;
3375         cpumask_var_t tracing_cpumask_new;
3376         int err, cpu;
3377
3378         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3379                 return -ENOMEM;
3380
3381         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3382         if (err)
3383                 goto err_unlock;
3384
3385         mutex_lock(&tracing_cpumask_update_lock);
3386
3387         local_irq_disable();
3388         arch_spin_lock(&tr->max_lock);
3389         for_each_tracing_cpu(cpu) {
3390                 /*
3391                  * Increase/decrease the disabled counter if we are
3392                  * about to flip a bit in the cpumask:
3393                  */
3394                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3395                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3396                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3397                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3398                 }
3399                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3400                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3401                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3402                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3403                 }
3404         }
3405         arch_spin_unlock(&tr->max_lock);
3406         local_irq_enable();
3407
3408         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3409
3410         mutex_unlock(&tracing_cpumask_update_lock);
3411         free_cpumask_var(tracing_cpumask_new);
3412
3413         return count;
3414
3415 err_unlock:
3416         free_cpumask_var(tracing_cpumask_new);
3417
3418         return err;
3419 }
3420
3421 static const struct file_operations tracing_cpumask_fops = {
3422         .open           = tracing_open_generic_tr,
3423         .read           = tracing_cpumask_read,
3424         .write          = tracing_cpumask_write,
3425         .release        = tracing_release_generic_tr,
3426         .llseek         = generic_file_llseek,
3427 };
3428
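/*
 * Show handler for the trace_options file: prints one option per line,
 * first the generic trace_options flags and then the current tracer's
 * own flags, prefixing "no" for options that are currently cleared.
 */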
3429 static int tracing_trace_options_show(struct seq_file *m, void *v)
3430 {
3431         struct tracer_opt *trace_opts;
3432         struct trace_array *tr = m->private;
3433         u32 tracer_flags;
3434         int i;
3435
3436         mutex_lock(&trace_types_lock);
3437         tracer_flags = tr->current_trace->flags->val;
3438         trace_opts = tr->current_trace->flags->opts;
3439
3440         for (i = 0; trace_options[i]; i++) {
3441                 if (trace_flags & (1 << i))
3442                         seq_printf(m, "%s\n", trace_options[i]);
3443                 else
3444                         seq_printf(m, "no%s\n", trace_options[i]);
3445         }
3446
3447         for (i = 0; trace_opts[i].name; i++) {
3448                 if (tracer_flags & trace_opts[i].bit)
3449                         seq_printf(m, "%s\n", trace_opts[i].name);
3450                 else
3451                         seq_printf(m, "no%s\n", trace_opts[i].name);
3452         }
3453         mutex_unlock(&trace_types_lock);
3454
3455         return 0;
3456 }
3457
3458 static int __set_tracer_option(struct trace_array *tr,
3459                                struct tracer_flags *tracer_flags,
3460                                struct tracer_opt *opts, int neg)
3461 {
3462         struct tracer *trace = tr->current_trace;
3463         int ret;
3464
3465         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3466         if (ret)
3467                 return ret;
3468
3469         if (neg)
3470                 tracer_flags->val &= ~opts->bit;
3471         else
3472                 tracer_flags->val |= opts->bit;
3473         return 0;
3474 }
3475
3476 /* Try to assign a tracer specific option */
3477 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3478 {
3479         struct tracer *trace = tr->current_trace;
3480         struct tracer_flags *tracer_flags = trace->flags;
3481         struct tracer_opt *opts = NULL;
3482         int i;
3483
3484         for (i = 0; tracer_flags->opts[i].name; i++) {
3485                 opts = &tracer_flags->opts[i];
3486
3487                 if (strcmp(cmp, opts->name) == 0)
3488                         return __set_tracer_option(tr, trace->flags, opts, neg);
3489         }
3490
3491         return -EINVAL;
3492 }
3493
3494 /* Some tracers require overwrite to stay enabled */
3495 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3496 {
3497         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3498                 return -1;
3499
3500         return 0;
3501 }
3502
3503 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3504 {
3505         /* do nothing if flag is already set */
3506         if (!!(trace_flags & mask) == !!enabled)
3507                 return 0;
3508
3509         /* Give the tracer a chance to approve the change */
3510         if (tr->current_trace->flag_changed)
3511                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3512                         return -EINVAL;
3513
3514         if (enabled)
3515                 trace_flags |= mask;
3516         else
3517                 trace_flags &= ~mask;
3518
3519         if (mask == TRACE_ITER_RECORD_CMD)
3520                 trace_event_enable_cmd_record(enabled);
3521
3522         if (mask == TRACE_ITER_OVERWRITE) {
3523                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3524 #ifdef CONFIG_TRACER_MAX_TRACE
3525                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3526 #endif
3527         }
3528
3529         if (mask == TRACE_ITER_PRINTK)
3530                 trace_printk_start_stop_comm(enabled);
3531
3532         return 0;
3533 }
3534
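/*
 * Parse a single option string written to trace_options. A leading
 * "no" clears the option, otherwise it is set (e.g.
 * "echo no<option> > trace_options"). The generic options are tried
 * first, then the tracer-specific ones.
 */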
3535 static int trace_set_options(struct trace_array *tr, char *option)
3536 {
3537         char *cmp;
3538         int neg = 0;
3539         int ret = -ENODEV;
3540         int i;
3541
3542         cmp = strstrip(option);
3543
3544         if (strncmp(cmp, "no", 2) == 0) {
3545                 neg = 1;
3546                 cmp += 2;
3547         }
3548
3549         mutex_lock(&trace_types_lock);
3550
3551         for (i = 0; trace_options[i]; i++) {
3552                 if (strcmp(cmp, trace_options[i]) == 0) {
3553                         ret = set_tracer_flag(tr, 1 << i, !neg);
3554                         break;
3555                 }
3556         }
3557
3558         /* If no option could be set, test the specific tracer options */
3559         if (!trace_options[i])
3560                 ret = set_tracer_option(tr, cmp, neg);
3561
3562         mutex_unlock(&trace_types_lock);
3563
3564         return ret;
3565 }
3566
3567 static ssize_t
3568 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3569                         size_t cnt, loff_t *ppos)
3570 {
3571         struct seq_file *m = filp->private_data;
3572         struct trace_array *tr = m->private;
3573         char buf[64];
3574         int ret;
3575
3576         if (cnt >= sizeof(buf))
3577                 return -EINVAL;
3578
3579         if (copy_from_user(&buf, ubuf, cnt))
3580                 return -EFAULT;
3581
3582         buf[cnt] = 0;
3583
3584         ret = trace_set_options(tr, buf);
3585         if (ret < 0)
3586                 return ret;
3587
3588         *ppos += cnt;
3589
3590         return cnt;
3591 }
3592
3593 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3594 {
3595         struct trace_array *tr = inode->i_private;
3596         int ret;
3597
3598         if (tracing_disabled)
3599                 return -ENODEV;
3600
3601         if (trace_array_get(tr) < 0)
3602                 return -ENODEV;
3603
3604         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3605         if (ret < 0)
3606                 trace_array_put(tr);
3607
3608         return ret;
3609 }
3610
3611 static const struct file_operations tracing_iter_fops = {
3612         .open           = tracing_trace_options_open,
3613         .read           = seq_read,
3614         .llseek         = seq_lseek,
3615         .release        = tracing_single_release_tr,
3616         .write          = tracing_trace_options_write,
3617 };
3618
3619 static const char readme_msg[] =
3620         "tracing mini-HOWTO:\n\n"
3621         "# echo 0 > tracing_on : quick way to disable tracing\n"
3622         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3623         " Important files:\n"
3624         "  trace\t\t\t- The static contents of the buffer\n"
3625         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3626         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3627         "  current_tracer\t- function and latency tracers\n"
3628         "  available_tracers\t- list of configured tracers for current_tracer\n"
3629         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3630         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3631         "  trace_clock\t\t- change the clock used to order events\n"
3632         "       local:   Per cpu clock but may not be synced across CPUs\n"
3633         "      global:   Synced across CPUs but slows tracing down.\n"
3634         "     counter:   Not a clock, but just an increment\n"
3635         "      uptime:   Jiffy counter from time of boot\n"
3636         "        perf:   Same clock that perf events use\n"
3637 #ifdef CONFIG_X86_64
3638         "     x86-tsc:   TSC cycle counter\n"
3639 #endif
3640         "\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3641         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3642         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3643         "\t\t\t  Remove sub-buffer with rmdir\n"
3644         "  trace_options\t\t- Set format or modify how tracing happens\n"
3645         "\t\t\t  Disable an option by prefixing 'no' to the\n"
3646         "\t\t\t  option name\n"
3647         "  saved_cmdlines_size\t- echo the number of entries to store in the comm-pid list\n"
3648 #ifdef CONFIG_DYNAMIC_FTRACE
3649         "\n  available_filter_functions - list of functions that can be filtered on\n"
3650         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
3651         "\t\t\t  functions\n"
3652         "\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3653         "\t     modules: Can select a group via module\n"
3654         "\t      Format: :mod:<module-name>\n"
3655         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3656         "\t    triggers: a command to perform when function is hit\n"
3657         "\t      Format: <function>:<trigger>[:count]\n"
3658         "\t     trigger: traceon, traceoff\n"
3659         "\t\t      enable_event:<system>:<event>\n"
3660         "\t\t      disable_event:<system>:<event>\n"
3661 #ifdef CONFIG_STACKTRACE
3662         "\t\t      stacktrace\n"
3663 #endif
3664 #ifdef CONFIG_TRACER_SNAPSHOT
3665         "\t\t      snapshot\n"
3666 #endif
3667         "\t\t      dump\n"
3668         "\t\t      cpudump\n"
3669         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3670         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3671         "\t     The first one will disable tracing every time do_fault is hit\n"
3672         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3673         "\t       The first time do_trap is hit and it disables tracing, the\n"
3674         "\t       counter will decrement to 2. If tracing is already disabled,\n"
3675         "\t       the counter will not decrement. It only decrements when the\n"
3676         "\t       trigger did work\n"
3677         "\t     To remove trigger without count:\n"
3678         "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3679         "\t     To remove trigger with a count:\n"
3680         "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3681         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3682         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3683         "\t    modules: Can select a group via module command :mod:\n"
3684         "\t    Does not accept triggers\n"
3685 #endif /* CONFIG_DYNAMIC_FTRACE */
3686 #ifdef CONFIG_FUNCTION_TRACER
3687         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3688         "\t\t    (function)\n"
3689 #endif
3690 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3691         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3692         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3693         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3694 #endif
3695 #ifdef CONFIG_TRACER_SNAPSHOT
3696         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3697         "\t\t\t  snapshot buffer. Read the contents for more\n"
3698         "\t\t\t  information\n"
3699 #endif
3700 #ifdef CONFIG_STACK_TRACER
3701         "  stack_trace\t\t- Shows the max stack trace when active\n"
3702         "  stack_max_size\t- Shows current max stack size that was traced\n"
3703         "\t\t\t  Write into this file to reset the max size (trigger a\n"
3704         "\t\t\t  new trace)\n"
3705 #ifdef CONFIG_DYNAMIC_FTRACE
3706         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3707         "\t\t\t  traces\n"
3708 #endif
3709 #endif /* CONFIG_STACK_TRACER */
3710         "  events/\t\t- Directory containing all trace event subsystems:\n"
3711         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3712         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
3713         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3714         "\t\t\t  events\n"
3715         "      filter\t\t- If set, only events passing filter are traced\n"
3716         "  events/<system>/<event>/\t- Directory containing control files for\n"
3717         "\t\t\t  <event>:\n"
3718         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3719         "      filter\t\t- If set, only events passing filter are traced\n"
3720         "      trigger\t\t- If set, a command to perform when event is hit\n"
3721         "\t    Format: <trigger>[:count][if <filter>]\n"
3722         "\t   trigger: traceon, traceoff\n"
3723         "\t            enable_event:<system>:<event>\n"
3724         "\t            disable_event:<system>:<event>\n"
3725 #ifdef CONFIG_STACKTRACE
3726         "\t\t    stacktrace\n"
3727 #endif
3728 #ifdef CONFIG_TRACER_SNAPSHOT
3729         "\t\t    snapshot\n"
3730 #endif
3731         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3732         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3733         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3734         "\t                  events/block/block_unplug/trigger\n"
3735         "\t   The first disables tracing every time block_unplug is hit.\n"
3736         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3737         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3738         "\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
3739         "\t   Like function triggers, the counter is only decremented if it\n"
3740         "\t    enabled or disabled tracing.\n"
3741         "\t   To remove a trigger without a count:\n"
3742         "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3743         "\t   To remove a trigger with a count:\n"
3744         "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3745         "\t   Filters can be ignored when removing a trigger.\n"
3746 ;
3747
3748 static ssize_t
3749 tracing_readme_read(struct file *filp, char __user *ubuf,
3750                        size_t cnt, loff_t *ppos)
3751 {
3752         return simple_read_from_buffer(ubuf, cnt, ppos,
3753                                         readme_msg, strlen(readme_msg));
3754 }
3755
3756 static const struct file_operations tracing_readme_fops = {
3757         .open           = tracing_open_generic,
3758         .read           = tracing_readme_read,
3759         .llseek         = generic_file_llseek,
3760 };
3761
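/*
 * seq_file iteration for the saved_cmdlines file: walks the
 * map_cmdline_to_pid array with trace_cmdline_lock held (and preemption
 * disabled) and prints one "pid comm" pair per recorded entry.
 */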
3762 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3763 {
3764         unsigned int *ptr = v;
3765
3766         if (*pos || m->count)
3767                 ptr++;
3768
3769         (*pos)++;
3770
3771         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3772              ptr++) {
3773                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3774                         continue;
3775
3776                 return ptr;
3777         }
3778
3779         return NULL;
3780 }
3781
3782 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3783 {
3784         void *v;
3785         loff_t l = 0;
3786
3787         preempt_disable();
3788         arch_spin_lock(&trace_cmdline_lock);
3789
3790         v = &savedcmd->map_cmdline_to_pid[0];
3791         while (l <= *pos) {
3792                 v = saved_cmdlines_next(m, v, &l);
3793                 if (!v)
3794                         return NULL;
3795         }
3796
3797         return v;
3798 }
3799
3800 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3801 {
3802         arch_spin_unlock(&trace_cmdline_lock);
3803         preempt_enable();
3804 }
3805
3806 static int saved_cmdlines_show(struct seq_file *m, void *v)
3807 {
3808         char buf[TASK_COMM_LEN];
3809         unsigned int *pid = v;
3810
3811         __trace_find_cmdline(*pid, buf);
3812         seq_printf(m, "%d %s\n", *pid, buf);
3813         return 0;
3814 }
3815
3816 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3817         .start          = saved_cmdlines_start,
3818         .next           = saved_cmdlines_next,
3819         .stop           = saved_cmdlines_stop,
3820         .show           = saved_cmdlines_show,
3821 };
3822
3823 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3824 {
3825         if (tracing_disabled)
3826                 return -ENODEV;
3827
3828         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3829 }
3830
3831 static const struct file_operations tracing_saved_cmdlines_fops = {
3832         .open           = tracing_saved_cmdlines_open,
3833         .read           = seq_read,
3834         .llseek         = seq_lseek,
3835         .release        = seq_release,
3836 };
3837
3838 static ssize_t
3839 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3840                                  size_t cnt, loff_t *ppos)
3841 {
3842         char buf[64];
3843         int r;
3844
3845         arch_spin_lock(&trace_cmdline_lock);
3846         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3847         arch_spin_unlock(&trace_cmdline_lock);
3848
3849         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3850 }
3851
3852 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3853 {
3854         kfree(s->saved_cmdlines);
3855         kfree(s->map_cmdline_to_pid);
3856         kfree(s);
3857 }
3858
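/*
 * Resize the saved cmdlines cache: allocate and populate a new buffer
 * first, swap it in under trace_cmdline_lock, and free the old buffer
 * only after the lock is dropped.
 */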
3859 static int tracing_resize_saved_cmdlines(unsigned int val)
3860 {
3861         struct saved_cmdlines_buffer *s, *savedcmd_temp;
3862
3863         s = kmalloc(sizeof(*s), GFP_KERNEL);
3864         if (!s)
3865                 return -ENOMEM;
3866
3867         if (allocate_cmdlines_buffer(val, s) < 0) {
3868                 kfree(s);
3869                 return -ENOMEM;
3870         }
3871
3872         arch_spin_lock(&trace_cmdline_lock);
3873         savedcmd_temp = savedcmd;
3874         savedcmd = s;
3875         arch_spin_unlock(&trace_cmdline_lock);
3876         free_saved_cmdlines_buffer(savedcmd_temp);
3877
3878         return 0;
3879 }
3880
3881 static ssize_t
3882 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3883                                   size_t cnt, loff_t *ppos)
3884 {
3885         unsigned long val;
3886         int ret;
3887
3888         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3889         if (ret)
3890                 return ret;
3891
3892         /* must have at least 1 entry and at most PID_MAX_DEFAULT */
3893         if (!val || val > PID_MAX_DEFAULT)
3894                 return -EINVAL;
3895
3896         ret = tracing_resize_saved_cmdlines((unsigned int)val);
3897         if (ret < 0)
3898                 return ret;
3899
3900         *ppos += cnt;
3901
3902         return cnt;
3903 }
3904
3905 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3906         .open           = tracing_open_generic,
3907         .read           = tracing_saved_cmdlines_size_read,
3908         .write          = tracing_saved_cmdlines_size_write,
3909 };
3910
3911 static void trace_insert_enum_map(struct trace_enum_map **start, int len)
3912 {
3913         struct trace_enum_map **map;
3914
3915         if (len <= 0)
3916                 return;
3917
3918         map = start;
3919
3920         trace_event_enum_update(map, len);
3921 }
3922
3923 static ssize_t
3924 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3925                        size_t cnt, loff_t *ppos)
3926 {
3927         struct trace_array *tr = filp->private_data;
3928         char buf[MAX_TRACER_SIZE+2];
3929         int r;
3930
3931         mutex_lock(&trace_types_lock);
3932         r = sprintf(buf, "%s\n", tr->current_trace->name);
3933         mutex_unlock(&trace_types_lock);
3934
3935         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3936 }
3937
3938 int tracer_init(struct tracer *t, struct trace_array *tr)
3939 {
3940         tracing_reset_online_cpus(&tr->trace_buffer);
3941         return t->init(tr);
3942 }
3943
3944 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3945 {
3946         int cpu;
3947
3948         for_each_tracing_cpu(cpu)
3949                 per_cpu_ptr(buf->data, cpu)->entries = val;
3950 }
3951
3952 #ifdef CONFIG_TRACER_MAX_TRACE
3953 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3954 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3955                                         struct trace_buffer *size_buf, int cpu_id)
3956 {
3957         int cpu, ret = 0;
3958
3959         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3960                 for_each_tracing_cpu(cpu) {
3961                         ret = ring_buffer_resize(trace_buf->buffer,
3962                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3963                         if (ret < 0)
3964                                 break;
3965                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3966                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3967                 }
3968         } else {
3969                 ret = ring_buffer_resize(trace_buf->buffer,
3970                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3971                 if (ret == 0)
3972                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3973                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3974         }
3975
3976         return ret;
3977 }
3978 #endif /* CONFIG_TRACER_MAX_TRACE */
3979
3980 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3981                                         unsigned long size, int cpu)
3982 {
3983         int ret;
3984
3985         /*
3986          * If kernel or user changes the size of the ring buffer
3987          * we use the size that was given, and we can forget about
3988          * expanding it later.
3989          */
3990         ring_buffer_expanded = true;
3991
3992         /* May be called before buffers are initialized */
3993         if (!tr->trace_buffer.buffer)
3994                 return 0;
3995
3996         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3997         if (ret < 0)
3998                 return ret;
3999
4000 #ifdef CONFIG_TRACER_MAX_TRACE
4001         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4002             !tr->current_trace->use_max_tr)
4003                 goto out;
4004
4005         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4006         if (ret < 0) {
4007                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4008                                                      &tr->trace_buffer, cpu);
4009                 if (r < 0) {
4010                         /*
4011                          * AARGH! We are left with different
4012                          * size max buffer!!!!
4013                          * The max buffer is our "snapshot" buffer.
4014                          * When a tracer needs a snapshot (one of the
4015                          * latency tracers), it swaps the max buffer
4016                          * with the saved snap shot. We succeeded to
4017                          * update the size of the main buffer, but failed to
4018                          * update the size of the max buffer. But when we tried
4019                          * to reset the main buffer to the original size, we
4020                          * failed there too. This is very unlikely to
4021                          * happen, but if it does, warn and kill all
4022                          * tracing.
4023                          */
4024                         WARN_ON(1);
4025                         tracing_disabled = 1;
4026                 }
4027                 return ret;
4028         }
4029
4030         if (cpu == RING_BUFFER_ALL_CPUS)
4031                 set_buffer_entries(&tr->max_buffer, size);
4032         else
4033                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4034
4035  out:
4036 #endif /* CONFIG_TRACER_MAX_TRACE */
4037
4038         if (cpu == RING_BUFFER_ALL_CPUS)
4039                 set_buffer_entries(&tr->trace_buffer, size);
4040         else
4041                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4042
4043         return ret;
4044 }
4045
4046 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4047                                           unsigned long size, int cpu_id)
4048 {
4049         int ret = size;
4050
4051         mutex_lock(&trace_types_lock);
4052
4053         if (cpu_id != RING_BUFFER_ALL_CPUS) {
4054                 /* make sure this cpu is enabled in the mask */
4055                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4056                         ret = -EINVAL;
4057                         goto out;
4058                 }
4059         }
4060
4061         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4062         if (ret < 0)
4063                 ret = -ENOMEM;
4064
4065 out:
4066         mutex_unlock(&trace_types_lock);
4067
4068         return ret;
4069 }
4070
4071
4072 /**
4073  * tracing_update_buffers - used by tracing facility to expand ring buffers
4074  *
4075  * To save memory when tracing is never used on a system that has it
4076  * configured in, the ring buffers are set to a minimum size. Once
4077  * a user starts to use the tracing facility, they need to grow
4078  * to their default size.
4079  *
4080  * This function is to be called when a tracer is about to be used.
4081  */
4082 int tracing_update_buffers(void)
4083 {
4084         int ret = 0;
4085
4086         mutex_lock(&trace_types_lock);
4087         if (!ring_buffer_expanded)
4088                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4089                                                 RING_BUFFER_ALL_CPUS);
4090         mutex_unlock(&trace_types_lock);
4091
4092         return ret;
4093 }
4094
4095 struct trace_option_dentry;
4096
4097 static struct trace_option_dentry *
4098 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4099
4100 static void
4101 destroy_trace_option_files(struct trace_option_dentry *topts);
4102
4103 /*
4104  * Used to clear out the tracer before deletion of an instance.
4105  * Must have trace_types_lock held.
4106  */
4107 static void tracing_set_nop(struct trace_array *tr)
4108 {
4109         if (tr->current_trace == &nop_trace)
4110                 return;
4111
4112         tr->current_trace->enabled--;
4113
4114         if (tr->current_trace->reset)
4115                 tr->current_trace->reset(tr);
4116
4117         tr->current_trace = &nop_trace;
4118 }
4119
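/*
 * Switch the current tracer of @tr, e.g. in response to
 * "echo function > current_tracer". The ring buffer is expanded if
 * needed, the old tracer is reset and replaced by nop_trace, the
 * snapshot (max) buffer is freed or allocated to match the new
 * tracer's needs, and finally the new tracer's init() is called.
 */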
4120 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4121 {
4122         static struct trace_option_dentry *topts;
4123         struct tracer *t;
4124 #ifdef CONFIG_TRACER_MAX_TRACE
4125         bool had_max_tr;
4126 #endif
4127         int ret = 0;
4128
4129         mutex_lock(&trace_types_lock);
4130
4131         if (!ring_buffer_expanded) {
4132                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4133                                                 RING_BUFFER_ALL_CPUS);
4134                 if (ret < 0)
4135                         goto out;
4136                 ret = 0;
4137         }
4138
4139         for (t = trace_types; t; t = t->next) {
4140                 if (strcmp(t->name, buf) == 0)
4141                         break;
4142         }
4143         if (!t) {
4144                 ret = -EINVAL;
4145                 goto out;
4146         }
4147         if (t == tr->current_trace)
4148                 goto out;
4149
4150         /* Some tracers are only allowed for the top level buffer */
4151         if (!trace_ok_for_array(t, tr)) {
4152                 ret = -EINVAL;
4153                 goto out;
4154         }
4155
4156         /* If trace pipe files are being read, we can't change the tracer */
4157         if (tr->current_trace->ref) {
4158                 ret = -EBUSY;
4159                 goto out;
4160         }
4161
4162         trace_branch_disable();
4163
4164         tr->current_trace->enabled--;
4165
4166         if (tr->current_trace->reset)
4167                 tr->current_trace->reset(tr);
4168
4169         /* Current trace needs to be nop_trace before synchronize_sched */
4170         tr->current_trace = &nop_trace;
4171
4172 #ifdef CONFIG_TRACER_MAX_TRACE
4173         had_max_tr = tr->allocated_snapshot;
4174
4175         if (had_max_tr && !t->use_max_tr) {
4176                 /*
4177                  * We need to make sure that the update_max_tr sees that
4178                  * current_trace changed to nop_trace to keep it from
4179                  * swapping the buffers after we resize it.
4180                  * The update_max_tr is called with interrupts disabled,
4181                  * so a synchronize_sched() is sufficient.
4182                  */
4183                 synchronize_sched();
4184                 free_snapshot(tr);
4185         }
4186 #endif
4187         /* Currently, only the top instance has options */
4188         if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4189                 destroy_trace_option_files(topts);
4190                 topts = create_trace_option_files(tr, t);
4191         }
4192
4193 #ifdef CONFIG_TRACER_MAX_TRACE
4194         if (t->use_max_tr && !had_max_tr) {
4195                 ret = alloc_snapshot(tr);
4196                 if (ret < 0)
4197                         goto out;
4198         }
4199 #endif
4200
4201         if (t->init) {
4202                 ret = tracer_init(t, tr);
4203                 if (ret)
4204                         goto out;
4205         }
4206
4207         tr->current_trace = t;
4208         tr->current_trace->enabled++;
4209         trace_branch_enable(tr);
4210  out:
4211         mutex_unlock(&trace_types_lock);
4212
4213         return ret;
4214 }
4215
4216 static ssize_t
4217 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4218                         size_t cnt, loff_t *ppos)
4219 {
4220         struct trace_array *tr = filp->private_data;
4221         char buf[MAX_TRACER_SIZE+1];
4222         int i;
4223         size_t ret;
4224         int err;
4225
4226         ret = cnt;
4227
4228         if (cnt > MAX_TRACER_SIZE)
4229                 cnt = MAX_TRACER_SIZE;
4230
4231         if (copy_from_user(&buf, ubuf, cnt))
4232                 return -EFAULT;
4233
4234         buf[cnt] = 0;
4235
4236         /* strip ending whitespace. */
4237         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4238                 buf[i] = 0;
4239
4240         err = tracing_set_tracer(tr, buf);
4241         if (err)
4242                 return err;
4243
4244         *ppos += ret;
4245
4246         return ret;
4247 }
4248
4249 static ssize_t
4250 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4251                    size_t cnt, loff_t *ppos)
4252 {
4253         char buf[64];
4254         int r;
4255
4256         r = snprintf(buf, sizeof(buf), "%ld\n",
4257                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4258         if (r > sizeof(buf))
4259                 r = sizeof(buf);
4260         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4261 }
4262
4263 static ssize_t
4264 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4265                     size_t cnt, loff_t *ppos)
4266 {
4267         unsigned long val;
4268         int ret;
4269
4270         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4271         if (ret)
4272                 return ret;
4273
4274         *ptr = val * 1000;
4275
4276         return cnt;
4277 }
4278
4279 static ssize_t
4280 tracing_thresh_read(struct file *filp, char __user *ubuf,
4281                     size_t cnt, loff_t *ppos)
4282 {
4283         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4284 }
4285
4286 static ssize_t
4287 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4288                      size_t cnt, loff_t *ppos)
4289 {
4290         struct trace_array *tr = filp->private_data;
4291         int ret;
4292
4293         mutex_lock(&trace_types_lock);
4294         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4295         if (ret < 0)
4296                 goto out;
4297
4298         if (tr->current_trace->update_thresh) {
4299                 ret = tr->current_trace->update_thresh(tr);
4300                 if (ret < 0)
4301                         goto out;
4302         }
4303
4304         ret = cnt;
4305 out:
4306         mutex_unlock(&trace_types_lock);
4307
4308         return ret;
4309 }
4310
4311 static ssize_t
4312 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4313                      size_t cnt, loff_t *ppos)
4314 {
4315         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4316 }
4317
4318 static ssize_t
4319 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4320                       size_t cnt, loff_t *ppos)
4321 {
4322         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4323 }
4324
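/*
 * Open handler for trace_pipe (a consuming reader). Each open gets its
 * own iterator; current_trace->ref is bumped so the tracer cannot be
 * changed while the pipe is held open (tracing_set_tracer returns
 * -EBUSY in that case).
 */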
4325 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4326 {
4327         struct trace_array *tr = inode->i_private;
4328         struct trace_iterator *iter;
4329         int ret = 0;
4330
4331         if (tracing_disabled)
4332                 return -ENODEV;
4333
4334         if (trace_array_get(tr) < 0)
4335                 return -ENODEV;
4336
4337         mutex_lock(&trace_types_lock);
4338
4339         /* create a buffer to store the information to pass to userspace */
4340         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4341         if (!iter) {
4342                 ret = -ENOMEM;
4343                 __trace_array_put(tr);
4344                 goto out;
4345         }
4346
4347         trace_seq_init(&iter->seq);
4348         iter->trace = tr->current_trace;
4349
4350         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4351                 ret = -ENOMEM;
4352                 goto fail;
4353         }
4354
4355         /* trace pipe does not show start of buffer */
4356         cpumask_setall(iter->started);
4357
4358         if (trace_flags & TRACE_ITER_LATENCY_FMT)
4359                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4360
4361         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4362         if (trace_clocks[tr->clock_id].in_ns)
4363                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4364
4365         iter->tr = tr;
4366         iter->trace_buffer = &tr->trace_buffer;
4367         iter->cpu_file = tracing_get_cpu(inode);
4368         mutex_init(&iter->mutex);
4369         filp->private_data = iter;
4370
4371         if (iter->trace->pipe_open)
4372                 iter->trace->pipe_open(iter);
4373
4374         nonseekable_open(inode, filp);
4375
4376         tr->current_trace->ref++;
4377 out:
4378         mutex_unlock(&trace_types_lock);
4379         return ret;
4380
4381 fail:
4382         kfree(iter->trace);
4383         kfree(iter);
4384         __trace_array_put(tr);
4385         mutex_unlock(&trace_types_lock);
4386         return ret;
4387 }
4388
4389 static int tracing_release_pipe(struct inode *inode, struct file *file)
4390 {
4391         struct trace_iterator *iter = file->private_data;
4392         struct trace_array *tr = inode->i_private;
4393
4394         mutex_lock(&trace_types_lock);
4395
4396         tr->current_trace->ref--;
4397
4398         if (iter->trace->pipe_close)
4399                 iter->trace->pipe_close(iter);
4400
4401         mutex_unlock(&trace_types_lock);
4402
4403         free_cpumask_var(iter->started);
4404         mutex_destroy(&iter->mutex);
4405         kfree(iter);
4406
4407         trace_array_put(tr);
4408
4409         return 0;
4410 }
4411
4412 static unsigned int
4413 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4414 {
4415         /* Iterators are static, they should be filled or empty */
4416         if (trace_buffer_iter(iter, iter->cpu_file))
4417                 return POLLIN | POLLRDNORM;
4418
4419         if (trace_flags & TRACE_ITER_BLOCK)
4420                 /*
4421                  * Always select as readable when in blocking mode
4422                  */
4423                 return POLLIN | POLLRDNORM;
4424         else
4425                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4426                                              filp, poll_table);
4427 }
4428
4429 static unsigned int
4430 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4431 {
4432         struct trace_iterator *iter = filp->private_data;
4433
4434         return trace_poll(iter, filp, poll_table);
4435 }
4436
4437 /* Must be called with iter->mutex held. */
4438 static int tracing_wait_pipe(struct file *filp)
4439 {
4440         struct trace_iterator *iter = filp->private_data;
4441         int ret;
4442
4443         while (trace_empty(iter)) {
4444
4445                 if ((filp->f_flags & O_NONBLOCK)) {
4446                         return -EAGAIN;
4447                 }
4448
4449                 /*
4450                  * We only stop blocking once we have read something and
4451                  * tracing has been disabled. If tracing is disabled but
4452                  * nothing has been read yet, we keep blocking: a user can
4453                  * cat this file and then enable tracing. But after we have
4454                  * read something, we give an EOF when tracing is disabled.
4455                  *
4456                  * iter->pos will be 0 if we haven't read anything.
4457                  */
4458                 if (!tracing_is_on() && iter->pos)
4459                         break;
4460
4461                 mutex_unlock(&iter->mutex);
4462
4463                 ret = wait_on_pipe(iter, false);
4464
4465                 mutex_lock(&iter->mutex);
4466
4467                 if (ret)
4468                         return ret;
4469         }
4470
4471         return 1;
4472 }
4473
4474 /*
4475  * Consumer reader.
4476  */
4477 static ssize_t
4478 tracing_read_pipe(struct file *filp, char __user *ubuf,
4479                   size_t cnt, loff_t *ppos)
4480 {
4481         struct trace_iterator *iter = filp->private_data;
4482         ssize_t sret;
4483
4484         /* return any leftover data */
4485         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4486         if (sret != -EBUSY)
4487                 return sret;
4488
4489         trace_seq_init(&iter->seq);
4490
4491         /*
4492          * Avoid more than one consumer on a single file descriptor
4493          * This is just a matter of traces coherency, the ring buffer itself
4494          * is protected.
4495          */
4496         mutex_lock(&iter->mutex);
4497         if (iter->trace->read) {
4498                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4499                 if (sret)
4500                         goto out;
4501         }
4502
4503 waitagain:
4504         sret = tracing_wait_pipe(filp);
4505         if (sret <= 0)
4506                 goto out;
4507
4508         /* stop when tracing is finished */
4509         if (trace_empty(iter)) {
4510                 sret = 0;
4511                 goto out;
4512         }
4513
4514         if (cnt >= PAGE_SIZE)
4515                 cnt = PAGE_SIZE - 1;
4516
4517         /* reset all but tr, trace, and overruns */
4518         memset(&iter->seq, 0,
4519                sizeof(struct trace_iterator) -
4520                offsetof(struct trace_iterator, seq));
4521         cpumask_clear(iter->started);
4522         iter->pos = -1;
4523
4524         trace_event_read_lock();
4525         trace_access_lock(iter->cpu_file);
4526         while (trace_find_next_entry_inc(iter) != NULL) {
4527                 enum print_line_t ret;
4528                 int save_len = iter->seq.seq.len;
4529
4530                 ret = print_trace_line(iter);
4531                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4532                         /* don't print partial lines */
4533                         iter->seq.seq.len = save_len;
4534                         break;
4535                 }
4536                 if (ret != TRACE_TYPE_NO_CONSUME)
4537                         trace_consume(iter);
4538
4539                 if (trace_seq_used(&iter->seq) >= cnt)
4540                         break;
4541
4542                 /*
4543                  * Setting the full flag means we reached the trace_seq buffer
4544                  * size and we should leave by partial output condition above.
4545                  * One of the trace_seq_* functions is not used properly.
4546                  */
4547                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4548                           iter->ent->type);
4549         }
4550         trace_access_unlock(iter->cpu_file);
4551         trace_event_read_unlock();
4552
4553         /* Now copy what we have to the user */
4554         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4555         if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4556                 trace_seq_init(&iter->seq);
4557
4558         /*
4559          * If there was nothing to send to user, in spite of consuming trace
4560          * entries, go back to wait for more entries.
4561          */
4562         if (sret == -EBUSY)
4563                 goto waitagain;
4564
4565 out:
4566         mutex_unlock(&iter->mutex);
4567
4568         return sret;
4569 }
4570
4571 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4572                                      unsigned int idx)
4573 {
4574         __free_page(spd->pages[idx]);
4575 }
4576
4577 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4578         .can_merge              = 0,
4579         .confirm                = generic_pipe_buf_confirm,
4580         .release                = generic_pipe_buf_release,
4581         .steal                  = generic_pipe_buf_steal,
4582         .get                    = generic_pipe_buf_get,
4583 };
4584
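/*
 * Fill iter->seq with formatted trace lines for the splice path,
 * consuming entries as they are printed, until the seq buffer fills,
 * @rem bytes have been produced, or the trace runs out of entries.
 */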
4585 static size_t
4586 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4587 {
4588         size_t count;
4589         int save_len;
4590         int ret;
4591
4592         /* Seq buffer is page-sized, exactly what we need. */
4593         for (;;) {
4594                 save_len = iter->seq.seq.len;
4595                 ret = print_trace_line(iter);
4596
4597                 if (trace_seq_has_overflowed(&iter->seq)) {
4598                         iter->seq.seq.len = save_len;
4599                         break;
4600                 }
4601
4602                 /*
4603                  * This should not be hit, because it should only
4604                  * be set if the iter->seq overflowed. But check it
4605                  * anyway to be safe.
4606                  */
4607                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4608                         iter->seq.seq.len = save_len;
4609                         break;
4610                 }
4611
4612                 count = trace_seq_used(&iter->seq) - save_len;
4613                 if (rem < count) {
4614                         rem = 0;
4615                         iter->seq.seq.len = save_len;
4616                         break;
4617                 }
4618
4619                 if (ret != TRACE_TYPE_NO_CONSUME)
4620                         trace_consume(iter);
4621                 rem -= count;
4622                 if (!trace_find_next_entry_inc(iter))   {
4623                         rem = 0;
4624                         iter->ent = NULL;
4625                         break;
4626                 }
4627         }
4628
4629         return rem;
4630 }
4631
4632 static ssize_t tracing_splice_read_pipe(struct file *filp,
4633                                         loff_t *ppos,
4634                                         struct pipe_inode_info *pipe,
4635                                         size_t len,
4636                                         unsigned int flags)
4637 {
4638         struct page *pages_def[PIPE_DEF_BUFFERS];
4639         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4640         struct trace_iterator *iter = filp->private_data;
4641         struct splice_pipe_desc spd = {
4642                 .pages          = pages_def,
4643                 .partial        = partial_def,
4644                 .nr_pages       = 0, /* This gets updated below. */
4645                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4646                 .flags          = flags,
4647                 .ops            = &tracing_pipe_buf_ops,
4648                 .spd_release    = tracing_spd_release_pipe,
4649         };
4650         ssize_t ret;
4651         size_t rem;
4652         unsigned int i;
4653
4654         if (splice_grow_spd(pipe, &spd))
4655                 return -ENOMEM;
4656
4657         mutex_lock(&iter->mutex);
4658
4659         if (iter->trace->splice_read) {
4660                 ret = iter->trace->splice_read(iter, filp,
4661                                                ppos, pipe, len, flags);
4662                 if (ret)
4663                         goto out_err;
4664         }
4665
4666         ret = tracing_wait_pipe(filp);
4667         if (ret <= 0)
4668                 goto out_err;
4669
4670         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4671                 ret = -EFAULT;
4672                 goto out_err;
4673         }
4674
4675         trace_event_read_lock();
4676         trace_access_lock(iter->cpu_file);
4677
4678         /* Fill as many pages as possible. */
4679         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4680                 spd.pages[i] = alloc_page(GFP_KERNEL);
4681                 if (!spd.pages[i])
4682                         break;
4683
4684                 rem = tracing_fill_pipe_page(rem, iter);
4685
4686                 /* Copy the data into the page, so we can start over. */
4687                 ret = trace_seq_to_buffer(&iter->seq,
4688                                           page_address(spd.pages[i]),
4689                                           trace_seq_used(&iter->seq));
4690                 if (ret < 0) {
4691                         __free_page(spd.pages[i]);
4692                         break;
4693                 }
4694                 spd.partial[i].offset = 0;
4695                 spd.partial[i].len = trace_seq_used(&iter->seq);
4696
4697                 trace_seq_init(&iter->seq);
4698         }
4699
4700         trace_access_unlock(iter->cpu_file);
4701         trace_event_read_unlock();
4702         mutex_unlock(&iter->mutex);
4703
4704         spd.nr_pages = i;
4705
4706         ret = splice_to_pipe(pipe, &spd);
4707 out:
4708         splice_shrink_spd(&spd);
4709         return ret;
4710
4711 out_err:
4712         mutex_unlock(&iter->mutex);
4713         goto out;
4714 }
4715
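/*
 * Read handler for buffer_size_kb: reports the per-cpu buffer size in
 * KB. When reading the top-level (all CPUs) file it prints a single
 * value if every CPU buffer is the same size, or "X" if they differ.
 * Writing a value, e.g. "echo 4096 > buffer_size_kb", resizes the
 * buffer(s) accordingly.
 */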
4716 static ssize_t
4717 tracing_entries_read(struct file *filp, char __user *ubuf,
4718                      size_t cnt, loff_t *ppos)
4719 {
4720         struct inode *inode = file_inode(filp);
4721         struct trace_array *tr = inode->i_private;
4722         int cpu = tracing_get_cpu(inode);
4723         char buf[64];
4724         int r = 0;
4725         ssize_t ret;
4726
4727         mutex_lock(&trace_types_lock);
4728
4729         if (cpu == RING_BUFFER_ALL_CPUS) {
4730                 int cpu, buf_size_same;
4731                 unsigned long size;
4732
4733                 size = 0;
4734                 buf_size_same = 1;
4735                 /* check if all cpu sizes are same */
4736                 for_each_tracing_cpu(cpu) {
4737                         /* fill in the size from first enabled cpu */
4738                         if (size == 0)
4739                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4740                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4741                                 buf_size_same = 0;
4742                                 break;
4743                         }
4744                 }
4745
4746                 if (buf_size_same) {
4747                         if (!ring_buffer_expanded)
4748                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4749                                             size >> 10,
4750                                             trace_buf_size >> 10);
4751                         else
4752                                 r = sprintf(buf, "%lu\n", size >> 10);
4753                 } else
4754                         r = sprintf(buf, "X\n");
4755         } else
4756                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4757
4758         mutex_unlock(&trace_types_lock);
4759
4760         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4761         return ret;
4762 }
4763
4764 static ssize_t
4765 tracing_entries_write(struct file *filp, const char __user *ubuf,
4766                       size_t cnt, loff_t *ppos)
4767 {
4768         struct inode *inode = file_inode(filp);
4769         struct trace_array *tr = inode->i_private;
4770         unsigned long val;
4771         int ret;
4772
4773         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4774         if (ret)
4775                 return ret;
4776
4777         /* must have at least 1 entry */
4778         if (!val)
4779                 return -EINVAL;
4780
4781         /* value is in KB */
4782         val <<= 10;
4783         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4784         if (ret < 0)
4785                 return ret;
4786
4787         *ppos += cnt;
4788
4789         return cnt;
4790 }
4791
4792 static ssize_t
4793 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4794                                 size_t cnt, loff_t *ppos)
4795 {
4796         struct trace_array *tr = filp->private_data;
4797         char buf[64];
4798         int r, cpu;
4799         unsigned long size = 0, expanded_size = 0;
4800
4801         mutex_lock(&trace_types_lock);
4802         for_each_tracing_cpu(cpu) {
4803                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4804                 if (!ring_buffer_expanded)
4805                         expanded_size += trace_buf_size >> 10;
4806         }
4807         if (ring_buffer_expanded)
4808                 r = sprintf(buf, "%lu\n", size);
4809         else
4810                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4811         mutex_unlock(&trace_types_lock);
4812
4813         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4814 }
4815
4816 static ssize_t
4817 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4818                           size_t cnt, loff_t *ppos)
4819 {
4820         /*
4821          * There is no need to read what the user has written; this function
4822          * only exists so that an "echo" into the file does not return an error.
4823          */
4824
4825         *ppos += cnt;
4826
4827         return cnt;
4828 }
4829
4830 static int
4831 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4832 {
4833         struct trace_array *tr = inode->i_private;
4834
4835         /* disable tracing ? */
4836         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4837                 tracer_tracing_off(tr);
4838         /* resize the ring buffer to 0 */
4839         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4840
4841         trace_array_put(tr);
4842
4843         return 0;
4844 }
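/*
 * Usage sketch for the write-only free_buffer file (see
 * tracing_free_buffer_fops further down): any write is accepted, and when
 * the file is released the ring buffer is resized to zero; if the
 * stop-on-free trace option is set, tracing is turned off first:
 *
 *   # echo > /sys/kernel/debug/tracing/free_buffer
 */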
4845
4846 static ssize_t
4847 tracing_mark_write(struct file *filp, const char __user *ubuf,
4848                                         size_t cnt, loff_t *fpos)
4849 {
4850         unsigned long addr = (unsigned long)ubuf;
4851         struct trace_array *tr = filp->private_data;
4852         struct ring_buffer_event *event;
4853         struct ring_buffer *buffer;
4854         struct print_entry *entry;
4855         unsigned long irq_flags;
4856         struct page *pages[2];
4857         void *map_page[2];
4858         int nr_pages = 1;
4859         ssize_t written;
4860         int offset;
4861         int size;
4862         int len;
4863         int ret;
4864         int i;
4865
4866         if (tracing_disabled)
4867                 return -EINVAL;
4868
4869         if (!(trace_flags & TRACE_ITER_MARKERS))
4870                 return -EINVAL;
4871
4872         if (cnt > TRACE_BUF_SIZE)
4873                 cnt = TRACE_BUF_SIZE;
4874
4875         /*
4876          * Userspace is injecting traces into the kernel trace buffer.
4877          * We want to be as non-intrusive as possible.
4878          * To do so, we do not want to allocate any special buffers
4879          * or take any locks, but instead write the userspace data
4880          * straight into the ring buffer.
4881          *
4882          * First we need to pin the userspace buffer into memory.
4883          * It most likely is already resident, since userspace just
4884          * referenced it, but there is no guarantee of that. By using
4885          * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
4886          * can access the pages directly and then write the data
4887          * straight into the ring buffer.
4888          */
4889         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4890
4891         /* check if we cross pages */
4892         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4893                 nr_pages = 2;
4894
4895         offset = addr & (PAGE_SIZE - 1);
4896         addr &= PAGE_MASK;
4897
4898         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4899         if (ret < nr_pages) {
4900                 while (--ret >= 0)
4901                         put_page(pages[ret]);
4902                 written = -EFAULT;
4903                 goto out;
4904         }
4905
4906         for (i = 0; i < nr_pages; i++)
4907                 map_page[i] = kmap_atomic(pages[i]);
4908
4909         local_save_flags(irq_flags);
4910         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4911         buffer = tr->trace_buffer.buffer;
4912         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4913                                           irq_flags, preempt_count());
4914         if (!event) {
4915                 /* Ring buffer disabled, return as if not open for write */
4916                 written = -EBADF;
4917                 goto out_unlock;
4918         }
4919
4920         entry = ring_buffer_event_data(event);
4921         entry->ip = _THIS_IP_;
4922
4923         if (nr_pages == 2) {
4924                 len = PAGE_SIZE - offset;
4925                 memcpy(&entry->buf, map_page[0] + offset, len);
4926                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4927         } else
4928                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4929
4930         if (entry->buf[cnt - 1] != '\n') {
4931                 entry->buf[cnt] = '\n';
4932                 entry->buf[cnt + 1] = '\0';
4933         } else
4934                 entry->buf[cnt] = '\0';
4935
4936         __buffer_unlock_commit(buffer, event);
4937
4938         written = cnt;
4939
4940         *fpos += written;
4941
4942  out_unlock:
4943         for (i = nr_pages - 1; i >= 0; i--) {
4944                 kunmap_atomic(map_page[i]);
4945                 put_page(pages[i]);
4946         }
4947  out:
4948         return written;
4949 }
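/*
 * Usage sketch for the trace_marker file serviced by tracing_mark_write()
 * above (path assumes tracefs/debugfs mounted at /sys/kernel/debug and the
 * "markers" trace option enabled):
 *
 *   # echo "hello from user space" > /sys/kernel/debug/tracing/trace_marker
 *
 * The text is copied straight from the pinned user pages into a TRACE_PRINT
 * entry (two pages are mapped when the write straddles a page boundary) and
 * shows up in the trace output alongside the current tracer's events.
 */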
4950
4951 static int tracing_clock_show(struct seq_file *m, void *v)
4952 {
4953         struct trace_array *tr = m->private;
4954         int i;
4955
4956         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4957                 seq_printf(m,
4958                         "%s%s%s%s", i ? " " : "",
4959                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4960                         i == tr->clock_id ? "]" : "");
4961         seq_putc(m, '\n');
4962
4963         return 0;
4964 }
4965
4966 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4967 {
4968         int i;
4969
4970         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4971                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4972                         break;
4973         }
4974         if (i == ARRAY_SIZE(trace_clocks))
4975                 return -EINVAL;
4976
4977         mutex_lock(&trace_types_lock);
4978
4979         tr->clock_id = i;
4980
4981         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4982
4983         /*
4984          * New clock may not be consistent with the previous clock.
4985          * Reset the buffer so that it doesn't have incomparable timestamps.
4986          */
4987         tracing_reset_online_cpus(&tr->trace_buffer);
4988
4989 #ifdef CONFIG_TRACER_MAX_TRACE
4990         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4991                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4992         tracing_reset_online_cpus(&tr->max_buffer);
4993 #endif
4994
4995         mutex_unlock(&trace_types_lock);
4996
4997         return 0;
4998 }
4999
5000 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5001                                    size_t cnt, loff_t *fpos)
5002 {
5003         struct seq_file *m = filp->private_data;
5004         struct trace_array *tr = m->private;
5005         char buf[64];
5006         const char *clockstr;
5007         int ret;
5008
5009         if (cnt >= sizeof(buf))
5010                 return -EINVAL;
5011
5012         if (copy_from_user(&buf, ubuf, cnt))
5013                 return -EFAULT;
5014
5015         buf[cnt] = 0;
5016
5017         clockstr = strstrip(buf);
5018
5019         ret = tracing_set_clock(tr, clockstr);
5020         if (ret)
5021                 return ret;
5022
5023         *fpos += cnt;
5024
5025         return cnt;
5026 }
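/*
 * Usage sketch for the trace_clock file handled above: a read lists the
 * clocks from trace_clocks[] with the active one in brackets, and writing
 * one of the listed names (e.g. "global") switches the clock and resets the
 * buffers, since the old timestamps would not be comparable:
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 */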
5027
5028 static int tracing_clock_open(struct inode *inode, struct file *file)
5029 {
5030         struct trace_array *tr = inode->i_private;
5031         int ret;
5032
5033         if (tracing_disabled)
5034                 return -ENODEV;
5035
5036         if (trace_array_get(tr))
5037                 return -ENODEV;
5038
5039         ret = single_open(file, tracing_clock_show, inode->i_private);
5040         if (ret < 0)
5041                 trace_array_put(tr);
5042
5043         return ret;
5044 }
5045
5046 struct ftrace_buffer_info {
5047         struct trace_iterator   iter;
5048         void                    *spare;
5049         unsigned int            read;
5050 };
5051
5052 #ifdef CONFIG_TRACER_SNAPSHOT
5053 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5054 {
5055         struct trace_array *tr = inode->i_private;
5056         struct trace_iterator *iter;
5057         struct seq_file *m;
5058         int ret = 0;
5059
5060         if (trace_array_get(tr) < 0)
5061                 return -ENODEV;
5062
5063         if (file->f_mode & FMODE_READ) {
5064                 iter = __tracing_open(inode, file, true);
5065                 if (IS_ERR(iter))
5066                         ret = PTR_ERR(iter);
5067         } else {
5068                 /* Writes still need the seq_file to hold the private data */
5069                 ret = -ENOMEM;
5070                 m = kzalloc(sizeof(*m), GFP_KERNEL);
5071                 if (!m)
5072                         goto out;
5073                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5074                 if (!iter) {
5075                         kfree(m);
5076                         goto out;
5077                 }
5078                 ret = 0;
5079
5080                 iter->tr = tr;
5081                 iter->trace_buffer = &tr->max_buffer;
5082                 iter->cpu_file = tracing_get_cpu(inode);
5083                 m->private = iter;
5084                 file->private_data = m;
5085         }
5086 out:
5087         if (ret < 0)
5088                 trace_array_put(tr);
5089
5090         return ret;
5091 }
5092
5093 static ssize_t
5094 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5095                        loff_t *ppos)
5096 {
5097         struct seq_file *m = filp->private_data;
5098         struct trace_iterator *iter = m->private;
5099         struct trace_array *tr = iter->tr;
5100         unsigned long val;
5101         int ret;
5102
5103         ret = tracing_update_buffers();
5104         if (ret < 0)
5105                 return ret;
5106
5107         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5108         if (ret)
5109                 return ret;
5110
5111         mutex_lock(&trace_types_lock);
5112
5113         if (tr->current_trace->use_max_tr) {
5114                 ret = -EBUSY;
5115                 goto out;
5116         }
5117
5118         switch (val) {
5119         case 0:
5120                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5121                         ret = -EINVAL;
5122                         break;
5123                 }
5124                 if (tr->allocated_snapshot)
5125                         free_snapshot(tr);
5126                 break;
5127         case 1:
5128 /* Only allow per-cpu swap if the ring buffer supports it */
5129 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5130                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5131                         ret = -EINVAL;
5132                         break;
5133                 }
5134 #endif
5135                 if (!tr->allocated_snapshot) {
5136                         ret = alloc_snapshot(tr);
5137                         if (ret < 0)
5138                                 break;
5139                 }
5140                 local_irq_disable();
5141                 /* Now, we're going to swap */
5142                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5143                         update_max_tr(tr, current, smp_processor_id());
5144                 else
5145                         update_max_tr_single(tr, current, iter->cpu_file);
5146                 local_irq_enable();
5147                 break;
5148         default:
5149                 if (tr->allocated_snapshot) {
5150                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5151                                 tracing_reset_online_cpus(&tr->max_buffer);
5152                         else
5153                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
5154                 }
5155                 break;
5156         }
5157
5158         if (ret >= 0) {
5159                 *ppos += cnt;
5160                 ret = cnt;
5161         }
5162 out:
5163         mutex_unlock(&trace_types_lock);
5164         return ret;
5165 }
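/*
 * Usage sketch for the snapshot file handled by tracing_snapshot_write()
 * above: "0" frees the snapshot buffer, "1" allocates it if needed and swaps
 * it with the live buffer, and any other value just clears the snapshot
 * buffer:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/snapshot
 *   # cat /sys/kernel/debug/tracing/snapshot
 *   # echo 0 > /sys/kernel/debug/tracing/snapshot
 */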
5166
5167 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5168 {
5169         struct seq_file *m = file->private_data;
5170         int ret;
5171
5172         ret = tracing_release(inode, file);
5173
5174         if (file->f_mode & FMODE_READ)
5175                 return ret;
5176
5177         /* If write only, the seq_file is just a stub */
5178         if (m)
5179                 kfree(m->private);
5180         kfree(m);
5181
5182         return 0;
5183 }
5184
5185 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5186 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5187                                     size_t count, loff_t *ppos);
5188 static int tracing_buffers_release(struct inode *inode, struct file *file);
5189 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5190                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5191
5192 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5193 {
5194         struct ftrace_buffer_info *info;
5195         int ret;
5196
5197         ret = tracing_buffers_open(inode, filp);
5198         if (ret < 0)
5199                 return ret;
5200
5201         info = filp->private_data;
5202
5203         if (info->iter.trace->use_max_tr) {
5204                 tracing_buffers_release(inode, filp);
5205                 return -EBUSY;
5206         }
5207
5208         info->iter.snapshot = true;
5209         info->iter.trace_buffer = &info->iter.tr->max_buffer;
5210
5211         return ret;
5212 }
5213
5214 #endif /* CONFIG_TRACER_SNAPSHOT */
5215
5216
5217 static const struct file_operations tracing_thresh_fops = {
5218         .open           = tracing_open_generic,
5219         .read           = tracing_thresh_read,
5220         .write          = tracing_thresh_write,
5221         .llseek         = generic_file_llseek,
5222 };
5223
5224 static const struct file_operations tracing_max_lat_fops = {
5225         .open           = tracing_open_generic,
5226         .read           = tracing_max_lat_read,
5227         .write          = tracing_max_lat_write,
5228         .llseek         = generic_file_llseek,
5229 };
5230
5231 static const struct file_operations set_tracer_fops = {
5232         .open           = tracing_open_generic,
5233         .read           = tracing_set_trace_read,
5234         .write          = tracing_set_trace_write,
5235         .llseek         = generic_file_llseek,
5236 };
5237
5238 static const struct file_operations tracing_pipe_fops = {
5239         .open           = tracing_open_pipe,
5240         .poll           = tracing_poll_pipe,
5241         .read           = tracing_read_pipe,
5242         .splice_read    = tracing_splice_read_pipe,
5243         .release        = tracing_release_pipe,
5244         .llseek         = no_llseek,
5245 };
5246
5247 static const struct file_operations tracing_entries_fops = {
5248         .open           = tracing_open_generic_tr,
5249         .read           = tracing_entries_read,
5250         .write          = tracing_entries_write,
5251         .llseek         = generic_file_llseek,
5252         .release        = tracing_release_generic_tr,
5253 };
5254
5255 static const struct file_operations tracing_total_entries_fops = {
5256         .open           = tracing_open_generic_tr,
5257         .read           = tracing_total_entries_read,
5258         .llseek         = generic_file_llseek,
5259         .release        = tracing_release_generic_tr,
5260 };
5261
5262 static const struct file_operations tracing_free_buffer_fops = {
5263         .open           = tracing_open_generic_tr,
5264         .write          = tracing_free_buffer_write,
5265         .release        = tracing_free_buffer_release,
5266 };
5267
5268 static const struct file_operations tracing_mark_fops = {
5269         .open           = tracing_open_generic_tr,
5270         .write          = tracing_mark_write,
5271         .llseek         = generic_file_llseek,
5272         .release        = tracing_release_generic_tr,
5273 };
5274
5275 static const struct file_operations trace_clock_fops = {
5276         .open           = tracing_clock_open,
5277         .read           = seq_read,
5278         .llseek         = seq_lseek,
5279         .release        = tracing_single_release_tr,
5280         .write          = tracing_clock_write,
5281 };
5282
5283 #ifdef CONFIG_TRACER_SNAPSHOT
5284 static const struct file_operations snapshot_fops = {
5285         .open           = tracing_snapshot_open,
5286         .read           = seq_read,
5287         .write          = tracing_snapshot_write,
5288         .llseek         = tracing_lseek,
5289         .release        = tracing_snapshot_release,
5290 };
5291
5292 static const struct file_operations snapshot_raw_fops = {
5293         .open           = snapshot_raw_open,
5294         .read           = tracing_buffers_read,
5295         .release        = tracing_buffers_release,
5296         .splice_read    = tracing_buffers_splice_read,
5297         .llseek         = no_llseek,
5298 };
5299
5300 #endif /* CONFIG_TRACER_SNAPSHOT */
5301
5302 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5303 {
5304         struct trace_array *tr = inode->i_private;
5305         struct ftrace_buffer_info *info;
5306         int ret;
5307
5308         if (tracing_disabled)
5309                 return -ENODEV;
5310
5311         if (trace_array_get(tr) < 0)
5312                 return -ENODEV;
5313
5314         info = kzalloc(sizeof(*info), GFP_KERNEL);
5315         if (!info) {
5316                 trace_array_put(tr);
5317                 return -ENOMEM;
5318         }
5319
5320         mutex_lock(&trace_types_lock);
5321
5322         info->iter.tr           = tr;
5323         info->iter.cpu_file     = tracing_get_cpu(inode);
5324         info->iter.trace        = tr->current_trace;
5325         info->iter.trace_buffer = &tr->trace_buffer;
5326         info->spare             = NULL;
5327         /* Force reading ring buffer for first read */
5328         info->read              = (unsigned int)-1;
5329
5330         filp->private_data = info;
5331
5332         tr->current_trace->ref++;
5333
5334         mutex_unlock(&trace_types_lock);
5335
5336         ret = nonseekable_open(inode, filp);
5337         if (ret < 0)
5338                 trace_array_put(tr);
5339
5340         return ret;
5341 }
5342
5343 static unsigned int
5344 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5345 {
5346         struct ftrace_buffer_info *info = filp->private_data;
5347         struct trace_iterator *iter = &info->iter;
5348
5349         return trace_poll(iter, filp, poll_table);
5350 }
5351
5352 static ssize_t
5353 tracing_buffers_read(struct file *filp, char __user *ubuf,
5354                      size_t count, loff_t *ppos)
5355 {
5356         struct ftrace_buffer_info *info = filp->private_data;
5357         struct trace_iterator *iter = &info->iter;
5358         ssize_t ret;
5359         ssize_t size;
5360
5361         if (!count)
5362                 return 0;
5363
5364 #ifdef CONFIG_TRACER_MAX_TRACE
5365         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5366                 return -EBUSY;
5367 #endif
5368
5369         if (!info->spare)
5370                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5371                                                           iter->cpu_file);
5372         if (!info->spare)
5373                 return -ENOMEM;
5374
5375         /* Do we have previous read data to read? */
5376         if (info->read < PAGE_SIZE)
5377                 goto read;
5378
5379  again:
5380         trace_access_lock(iter->cpu_file);
5381         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5382                                     &info->spare,
5383                                     count,
5384                                     iter->cpu_file, 0);
5385         trace_access_unlock(iter->cpu_file);
5386
5387         if (ret < 0) {
5388                 if (trace_empty(iter)) {
5389                         if ((filp->f_flags & O_NONBLOCK))
5390                                 return -EAGAIN;
5391
5392                         ret = wait_on_pipe(iter, false);
5393                         if (ret)
5394                                 return ret;
5395
5396                         goto again;
5397                 }
5398                 return 0;
5399         }
5400
5401         info->read = 0;
5402  read:
5403         size = PAGE_SIZE - info->read;
5404         if (size > count)
5405                 size = count;
5406
5407         ret = copy_to_user(ubuf, info->spare + info->read, size);
5408         if (ret == size)
5409                 return -EFAULT;
5410
5411         size -= ret;
5412
5413         *ppos += size;
5414         info->read += size;
5415
5416         return size;
5417 }
5418
5419 static int tracing_buffers_release(struct inode *inode, struct file *file)
5420 {
5421         struct ftrace_buffer_info *info = file->private_data;
5422         struct trace_iterator *iter = &info->iter;
5423
5424         mutex_lock(&trace_types_lock);
5425
5426         iter->tr->current_trace->ref--;
5427
5428         __trace_array_put(iter->tr);
5429
5430         if (info->spare)
5431                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5432         kfree(info);
5433
5434         mutex_unlock(&trace_types_lock);
5435
5436         return 0;
5437 }
5438
5439 struct buffer_ref {
5440         struct ring_buffer      *buffer;
5441         void                    *page;
5442         int                     ref;
5443 };
5444
5445 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5446                                     struct pipe_buffer *buf)
5447 {
5448         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5449
5450         if (--ref->ref)
5451                 return;
5452
5453         ring_buffer_free_read_page(ref->buffer, ref->page);
5454         kfree(ref);
5455         buf->private = 0;
5456 }
5457
5458 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5459                                 struct pipe_buffer *buf)
5460 {
5461         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5462
5463         ref->ref++;
5464 }
5465
5466 /* Pipe buffer operations for a buffer. */
5467 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5468         .can_merge              = 0,
5469         .confirm                = generic_pipe_buf_confirm,
5470         .release                = buffer_pipe_buf_release,
5471         .steal                  = generic_pipe_buf_steal,
5472         .get                    = buffer_pipe_buf_get,
5473 };
5474
5475 /*
5476  * Callback from splice_to_pipe(): release the pages left in the spd
5477  * if we errored out while filling the pipe.
5478  */
5479 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5480 {
5481         struct buffer_ref *ref =
5482                 (struct buffer_ref *)spd->partial[i].private;
5483
5484         if (--ref->ref)
5485                 return;
5486
5487         ring_buffer_free_read_page(ref->buffer, ref->page);
5488         kfree(ref);
5489         spd->partial[i].private = 0;
5490 }
5491
5492 static ssize_t
5493 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5494                             struct pipe_inode_info *pipe, size_t len,
5495                             unsigned int flags)
5496 {
5497         struct ftrace_buffer_info *info = file->private_data;
5498         struct trace_iterator *iter = &info->iter;
5499         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5500         struct page *pages_def[PIPE_DEF_BUFFERS];
5501         struct splice_pipe_desc spd = {
5502                 .pages          = pages_def,
5503                 .partial        = partial_def,
5504                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5505                 .flags          = flags,
5506                 .ops            = &buffer_pipe_buf_ops,
5507                 .spd_release    = buffer_spd_release,
5508         };
5509         struct buffer_ref *ref;
5510         int entries, size, i;
5511         ssize_t ret = 0;
5512
5513 #ifdef CONFIG_TRACER_MAX_TRACE
5514         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5515                 return -EBUSY;
5516 #endif
5517
5518         if (*ppos & (PAGE_SIZE - 1))
5519                 return -EINVAL;
5520
5521         if (len & (PAGE_SIZE - 1)) {
5522                 if (len < PAGE_SIZE)
5523                         return -EINVAL;
5524                 len &= PAGE_MASK;
5525         }
5526
5527         if (splice_grow_spd(pipe, &spd))
5528                 return -ENOMEM;
5529
5530  again:
5531         trace_access_lock(iter->cpu_file);
5532         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5533
5534         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5535                 struct page *page;
5536                 int r;
5537
5538                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5539                 if (!ref) {
5540                         ret = -ENOMEM;
5541                         break;
5542                 }
5543
5544                 ref->ref = 1;
5545                 ref->buffer = iter->trace_buffer->buffer;
5546                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5547                 if (!ref->page) {
5548                         ret = -ENOMEM;
5549                         kfree(ref);
5550                         break;
5551                 }
5552
5553                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5554                                           len, iter->cpu_file, 1);
5555                 if (r < 0) {
5556                         ring_buffer_free_read_page(ref->buffer, ref->page);
5557                         kfree(ref);
5558                         break;
5559                 }
5560
5561                 /*
5562                  * Zero out any leftover data; this page is going
5563                  * to user land.
5564                  */
5565                 size = ring_buffer_page_len(ref->page);
5566                 if (size < PAGE_SIZE)
5567                         memset(ref->page + size, 0, PAGE_SIZE - size);
5568
5569                 page = virt_to_page(ref->page);
5570
5571                 spd.pages[i] = page;
5572                 spd.partial[i].len = PAGE_SIZE;
5573                 spd.partial[i].offset = 0;
5574                 spd.partial[i].private = (unsigned long)ref;
5575                 spd.nr_pages++;
5576                 *ppos += PAGE_SIZE;
5577
5578                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5579         }
5580
5581         trace_access_unlock(iter->cpu_file);
5582         spd.nr_pages = i;
5583
5584         /* did we read anything? */
5585         if (!spd.nr_pages) {
5586                 if (ret)
5587                         goto out;
5588
5589                 ret = -EAGAIN;
5590                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5591                         goto out;
5592
5593                 ret = wait_on_pipe(iter, true);
5594                 if (ret)
5595                         goto out;
5596                 goto again;
5597         }
5598
5599         ret = splice_to_pipe(pipe, &spd);
5600 out:
5601         splice_shrink_spd(&spd);
5602         return ret;
5603 }
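/*
 * Usage sketch for the per-CPU trace_pipe_raw file backed by
 * tracing_buffers_read() and tracing_buffers_splice_read(): data is handed
 * out page by page in the raw ring-buffer format, so consumers typically
 * read or splice it in PAGE_SIZE chunks (4096 bytes on most systems):
 *
 *   # dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *        of=/tmp/cpu0.raw bs=4096
 */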
5604
5605 static const struct file_operations tracing_buffers_fops = {
5606         .open           = tracing_buffers_open,
5607         .read           = tracing_buffers_read,
5608         .poll           = tracing_buffers_poll,
5609         .release        = tracing_buffers_release,
5610         .splice_read    = tracing_buffers_splice_read,
5611         .llseek         = no_llseek,
5612 };
5613
5614 static ssize_t
5615 tracing_stats_read(struct file *filp, char __user *ubuf,
5616                    size_t count, loff_t *ppos)
5617 {
5618         struct inode *inode = file_inode(filp);
5619         struct trace_array *tr = inode->i_private;
5620         struct trace_buffer *trace_buf = &tr->trace_buffer;
5621         int cpu = tracing_get_cpu(inode);
5622         struct trace_seq *s;
5623         unsigned long cnt;
5624         unsigned long long t;
5625         unsigned long usec_rem;
5626
5627         s = kmalloc(sizeof(*s), GFP_KERNEL);
5628         if (!s)
5629                 return -ENOMEM;
5630
5631         trace_seq_init(s);
5632
5633         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5634         trace_seq_printf(s, "entries: %ld\n", cnt);
5635
5636         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5637         trace_seq_printf(s, "overrun: %ld\n", cnt);
5638
5639         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5640         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5641
5642         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5643         trace_seq_printf(s, "bytes: %ld\n", cnt);
5644
5645         if (trace_clocks[tr->clock_id].in_ns) {
5646                 /* local or global for trace_clock */
5647                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5648                 usec_rem = do_div(t, USEC_PER_SEC);
5649                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5650                                                                 t, usec_rem);
5651
5652                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5653                 usec_rem = do_div(t, USEC_PER_SEC);
5654                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5655         } else {
5656                 /* counter or tsc mode for trace_clock */
5657                 trace_seq_printf(s, "oldest event ts: %llu\n",
5658                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5659
5660                 trace_seq_printf(s, "now ts: %llu\n",
5661                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5662         }
5663
5664         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5665         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5666
5667         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5668         trace_seq_printf(s, "read events: %ld\n", cnt);
5669
5670         count = simple_read_from_buffer(ubuf, count, ppos,
5671                                         s->buffer, trace_seq_used(s));
5672
5673         kfree(s);
5674
5675         return count;
5676 }
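/*
 * Sketch of the per-CPU stats output built above: one "name: value" line
 * each for entries, overrun, commit overrun, bytes, oldest event ts, now ts,
 * dropped events and read events:
 *
 *   # cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 */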
5677
5678 static const struct file_operations tracing_stats_fops = {
5679         .open           = tracing_open_generic_tr,
5680         .read           = tracing_stats_read,
5681         .llseek         = generic_file_llseek,
5682         .release        = tracing_release_generic_tr,
5683 };
5684
5685 #ifdef CONFIG_DYNAMIC_FTRACE
5686
5687 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5688 {
5689         return 0;
5690 }
5691
5692 static ssize_t
5693 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5694                   size_t cnt, loff_t *ppos)
5695 {
5696         static char ftrace_dyn_info_buffer[1024];
5697         static DEFINE_MUTEX(dyn_info_mutex);
5698         unsigned long *p = filp->private_data;
5699         char *buf = ftrace_dyn_info_buffer;
5700         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5701         int r;
5702
5703         mutex_lock(&dyn_info_mutex);
5704         r = sprintf(buf, "%ld ", *p);
5705
5706         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5707         buf[r++] = '\n';
5708
5709         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5710
5711         mutex_unlock(&dyn_info_mutex);
5712
5713         return r;
5714 }
5715
5716 static const struct file_operations tracing_dyn_info_fops = {
5717         .open           = tracing_open_generic,
5718         .read           = tracing_read_dyn_info,
5719         .llseek         = generic_file_llseek,
5720 };
5721 #endif /* CONFIG_DYNAMIC_FTRACE */
5722
5723 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5724 static void
5725 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5726 {
5727         tracing_snapshot();
5728 }
5729
5730 static void
5731 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5732 {
5733         unsigned long *count = (long *)data;
5734
5735         if (!*count)
5736                 return;
5737
5738         if (*count != -1)
5739                 (*count)--;
5740
5741         tracing_snapshot();
5742 }
5743
5744 static int
5745 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5746                       struct ftrace_probe_ops *ops, void *data)
5747 {
5748         long count = (long)data;
5749
5750         seq_printf(m, "%ps:", (void *)ip);
5751
5752         seq_puts(m, "snapshot");
5753
5754         if (count == -1)
5755                 seq_puts(m, ":unlimited\n");
5756         else
5757                 seq_printf(m, ":count=%ld\n", count);
5758
5759         return 0;
5760 }
5761
5762 static struct ftrace_probe_ops snapshot_probe_ops = {
5763         .func                   = ftrace_snapshot,
5764         .print                  = ftrace_snapshot_print,
5765 };
5766
5767 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5768         .func                   = ftrace_count_snapshot,
5769         .print                  = ftrace_snapshot_print,
5770 };
5771
5772 static int
5773 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5774                                char *glob, char *cmd, char *param, int enable)
5775 {
5776         struct ftrace_probe_ops *ops;
5777         void *count = (void *)-1;
5778         char *number;
5779         int ret;
5780
5781         /* hash funcs only work with set_ftrace_filter */
5782         if (!enable)
5783                 return -EINVAL;
5784
5785         ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
5786
5787         if (glob[0] == '!') {
5788                 unregister_ftrace_function_probe_func(glob+1, ops);
5789                 return 0;
5790         }
5791
5792         if (!param)
5793                 goto out_reg;
5794
5795         number = strsep(&param, ":");
5796
5797         if (!strlen(number))
5798                 goto out_reg;
5799
5800         /*
5801          * We use the callback data field (which is a pointer)
5802          * as our counter.
5803          */
5804         ret = kstrtoul(number, 0, (unsigned long *)&count);
5805         if (ret)
5806                 return ret;
5807
5808  out_reg:
5809         ret = register_ftrace_function_probe(glob, ops, count);
5810
5811         if (ret >= 0)
5812                 alloc_snapshot(&global_trace);
5813
5814         return ret < 0 ? ret : 0;
5815 }
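/*
 * Usage sketch for the "snapshot" command registered below: written to
 * set_ftrace_filter, "<func>:snapshot" takes a snapshot every time <func>
 * is hit, an optional ":<count>" limits how many times, and a leading '!'
 * removes the probe (the function name here is only an example):
 *
 *   # echo 'do_exit:snapshot:1' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   # echo '!do_exit:snapshot' >> /sys/kernel/debug/tracing/set_ftrace_filter
 */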
5816
5817 static struct ftrace_func_command ftrace_snapshot_cmd = {
5818         .name                   = "snapshot",
5819         .func                   = ftrace_trace_snapshot_callback,
5820 };
5821
5822 static __init int register_snapshot_cmd(void)
5823 {
5824         return register_ftrace_command(&ftrace_snapshot_cmd);
5825 }
5826 #else
5827 static inline __init int register_snapshot_cmd(void) { return 0; }
5828 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5829
5830 static struct dentry *tracing_get_dentry(struct trace_array *tr)
5831 {
5832         return tr->dir;
5833 }
5834
5835 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5836 {
5837         struct dentry *d_tracer;
5838
5839         if (tr->percpu_dir)
5840                 return tr->percpu_dir;
5841
5842         d_tracer = tracing_get_dentry(tr);
5843         if (IS_ERR(d_tracer))
5844                 return NULL;
5845
5846         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5847
5848         WARN_ONCE(!tr->percpu_dir,
5849                   "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5850
5851         return tr->percpu_dir;
5852 }
5853
5854 static struct dentry *
5855 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5856                       void *data, long cpu, const struct file_operations *fops)
5857 {
5858         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5859
5860         if (ret) /* See tracing_get_cpu() */
5861                 ret->d_inode->i_cdev = (void *)(cpu + 1);
5862         return ret;
5863 }
5864
5865 static void
5866 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5867 {
5868         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5869         struct dentry *d_cpu;
5870         char cpu_dir[30]; /* 30 characters should be more than enough */
5871
5872         if (!d_percpu)
5873                 return;
5874
5875         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5876         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5877         if (!d_cpu) {
5878                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5879                 return;
5880         }
5881
5882         /* per cpu trace_pipe */
5883         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5884                                 tr, cpu, &tracing_pipe_fops);
5885
5886         /* per cpu trace */
5887         trace_create_cpu_file("trace", 0644, d_cpu,
5888                                 tr, cpu, &tracing_fops);
5889
5890         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5891                                 tr, cpu, &tracing_buffers_fops);
5892
5893         trace_create_cpu_file("stats", 0444, d_cpu,
5894                                 tr, cpu, &tracing_stats_fops);
5895
5896         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5897                                 tr, cpu, &tracing_entries_fops);
5898
5899 #ifdef CONFIG_TRACER_SNAPSHOT
5900         trace_create_cpu_file("snapshot", 0644, d_cpu,
5901                                 tr, cpu, &snapshot_fops);
5902
5903         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5904                                 tr, cpu, &snapshot_raw_fops);
5905 #endif
5906 }
5907
5908 #ifdef CONFIG_FTRACE_SELFTEST
5909 /* Let selftest have access to static functions in this file */
5910 #include "trace_selftest.c"
5911 #endif
5912
5913 struct trace_option_dentry {
5914         struct tracer_opt               *opt;
5915         struct tracer_flags             *flags;
5916         struct trace_array              *tr;
5917         struct dentry                   *entry;
5918 };
5919
5920 static ssize_t
5921 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5922                         loff_t *ppos)
5923 {
5924         struct trace_option_dentry *topt = filp->private_data;
5925         char *buf;
5926
5927         if (topt->flags->val & topt->opt->bit)
5928                 buf = "1\n";
5929         else
5930                 buf = "0\n";
5931
5932         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5933 }
5934
5935 static ssize_t
5936 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5937                          loff_t *ppos)
5938 {
5939         struct trace_option_dentry *topt = filp->private_data;
5940         unsigned long val;
5941         int ret;
5942
5943         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5944         if (ret)
5945                 return ret;
5946
5947         if (val != 0 && val != 1)
5948                 return -EINVAL;
5949
5950         if (!!(topt->flags->val & topt->opt->bit) != val) {
5951                 mutex_lock(&trace_types_lock);
5952                 ret = __set_tracer_option(topt->tr, topt->flags,
5953                                           topt->opt, !val);
5954                 mutex_unlock(&trace_types_lock);
5955                 if (ret)
5956                         return ret;
5957         }
5958
5959         *ppos += cnt;
5960
5961         return cnt;
5962 }
5963
5964
5965 static const struct file_operations trace_options_fops = {
5966         .open = tracing_open_generic,
5967         .read = trace_options_read,
5968         .write = trace_options_write,
5969         .llseek = generic_file_llseek,
5970 };
5971
5972 static ssize_t
5973 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5974                         loff_t *ppos)
5975 {
5976         long index = (long)filp->private_data;
5977         char *buf;
5978
5979         if (trace_flags & (1 << index))
5980                 buf = "1\n";
5981         else
5982                 buf = "0\n";
5983
5984         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5985 }
5986
5987 static ssize_t
5988 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5989                          loff_t *ppos)
5990 {
5991         struct trace_array *tr = &global_trace;
5992         long index = (long)filp->private_data;
5993         unsigned long val;
5994         int ret;
5995
5996         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5997         if (ret)
5998                 return ret;
5999
6000         if (val != 0 && val != 1)
6001                 return -EINVAL;
6002
6003         mutex_lock(&trace_types_lock);
6004         ret = set_tracer_flag(tr, 1 << index, val);
6005         mutex_unlock(&trace_types_lock);
6006
6007         if (ret < 0)
6008                 return ret;
6009
6010         *ppos += cnt;
6011
6012         return cnt;
6013 }
6014
6015 static const struct file_operations trace_options_core_fops = {
6016         .open = tracing_open_generic,
6017         .read = trace_options_core_read,
6018         .write = trace_options_core_write,
6019         .llseek = generic_file_llseek,
6020 };
6021
6022 struct dentry *trace_create_file(const char *name,
6023                                  umode_t mode,
6024                                  struct dentry *parent,
6025                                  void *data,
6026                                  const struct file_operations *fops)
6027 {
6028         struct dentry *ret;
6029
6030         ret = debugfs_create_file(name, mode, parent, data, fops);
6031         if (!ret)
6032                 pr_warning("Could not create debugfs '%s' entry\n", name);
6033
6034         return ret;
6035 }
6036
6037
6038 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6039 {
6040         struct dentry *d_tracer;
6041
6042         if (tr->options)
6043                 return tr->options;
6044
6045         d_tracer = tracing_get_dentry(tr);
6046         if (IS_ERR(d_tracer))
6047                 return NULL;
6048
6049         tr->options = debugfs_create_dir("options", d_tracer);
6050         if (!tr->options) {
6051                 pr_warning("Could not create debugfs directory 'options'\n");
6052                 return NULL;
6053         }
6054
6055         return tr->options;
6056 }
6057
6058 static void
6059 create_trace_option_file(struct trace_array *tr,
6060                          struct trace_option_dentry *topt,
6061                          struct tracer_flags *flags,
6062                          struct tracer_opt *opt)
6063 {
6064         struct dentry *t_options;
6065
6066         t_options = trace_options_init_dentry(tr);
6067         if (!t_options)
6068                 return;
6069
6070         topt->flags = flags;
6071         topt->opt = opt;
6072         topt->tr = tr;
6073
6074         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6075                                     &trace_options_fops);
6076
6077 }
6078
6079 static struct trace_option_dentry *
6080 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6081 {
6082         struct trace_option_dentry *topts;
6083         struct tracer_flags *flags;
6084         struct tracer_opt *opts;
6085         int cnt;
6086
6087         if (!tracer)
6088                 return NULL;
6089
6090         flags = tracer->flags;
6091
6092         if (!flags || !flags->opts)
6093                 return NULL;
6094
6095         opts = flags->opts;
6096
6097         for (cnt = 0; opts[cnt].name; cnt++)
6098                 ;
6099
6100         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6101         if (!topts)
6102                 return NULL;
6103
6104         for (cnt = 0; opts[cnt].name; cnt++)
6105                 create_trace_option_file(tr, &topts[cnt], flags,
6106                                          &opts[cnt]);
6107
6108         return topts;
6109 }
6110
6111 static void
6112 destroy_trace_option_files(struct trace_option_dentry *topts)
6113 {
6114         int cnt;
6115
6116         if (!topts)
6117                 return;
6118
6119         for (cnt = 0; topts[cnt].opt; cnt++)
6120                 debugfs_remove(topts[cnt].entry);
6121
6122         kfree(topts);
6123 }
6124
6125 static struct dentry *
6126 create_trace_option_core_file(struct trace_array *tr,
6127                               const char *option, long index)
6128 {
6129         struct dentry *t_options;
6130
6131         t_options = trace_options_init_dentry(tr);
6132         if (!t_options)
6133                 return NULL;
6134
6135         return trace_create_file(option, 0644, t_options, (void *)index,
6136                                     &trace_options_core_fops);
6137 }
6138
6139 static __init void create_trace_options_dir(struct trace_array *tr)
6140 {
6141         struct dentry *t_options;
6142         int i;
6143
6144         t_options = trace_options_init_dentry(tr);
6145         if (!t_options)
6146                 return;
6147
6148         for (i = 0; trace_options[i]; i++)
6149                 create_trace_option_core_file(tr, trace_options[i], i);
6150 }
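/*
 * Usage sketch for the options directory populated above: every core trace
 * flag and every tracer-specific flag gets a 0644 file under options/, and
 * writing "0" or "1" toggles it (the flag name below is only an example):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/options/sym-offset
 */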
6151
6152 static ssize_t
6153 rb_simple_read(struct file *filp, char __user *ubuf,
6154                size_t cnt, loff_t *ppos)
6155 {
6156         struct trace_array *tr = filp->private_data;
6157         char buf[64];
6158         int r;
6159
6160         r = tracer_tracing_is_on(tr);
6161         r = sprintf(buf, "%d\n", r);
6162
6163         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6164 }
6165
6166 static ssize_t
6167 rb_simple_write(struct file *filp, const char __user *ubuf,
6168                 size_t cnt, loff_t *ppos)
6169 {
6170         struct trace_array *tr = filp->private_data;
6171         struct ring_buffer *buffer = tr->trace_buffer.buffer;
6172         unsigned long val;
6173         int ret;
6174
6175         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6176         if (ret)
6177                 return ret;
6178
6179         if (buffer) {
6180                 mutex_lock(&trace_types_lock);
6181                 if (val) {
6182                         tracer_tracing_on(tr);
6183                         if (tr->current_trace->start)
6184                                 tr->current_trace->start(tr);
6185                 } else {
6186                         tracer_tracing_off(tr);
6187                         if (tr->current_trace->stop)
6188                                 tr->current_trace->stop(tr);
6189                 }
6190                 mutex_unlock(&trace_types_lock);
6191         }
6192
6193         (*ppos)++;
6194
6195         return cnt;
6196 }
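/*
 * Usage sketch for the tracing_on file backed by rb_simple_read() and
 * rb_simple_write(): writing "0" stops recording into the ring buffer (and
 * calls the current tracer's ->stop() hook), writing "1" turns recording
 * back on, and a read reports the current state:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_on
 *   # cat /sys/kernel/debug/tracing/tracing_on
 */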
6197
6198 static const struct file_operations rb_simple_fops = {
6199         .open           = tracing_open_generic_tr,
6200         .read           = rb_simple_read,
6201         .write          = rb_simple_write,
6202         .release        = tracing_release_generic_tr,
6203         .llseek         = default_llseek,
6204 };
6205
6206 struct dentry *trace_instance_dir;
6207
6208 static void
6209 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6210
6211 static int
6212 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6213 {
6214         enum ring_buffer_flags rb_flags;
6215
6216         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6217
6218         buf->tr = tr;
6219
6220         buf->buffer = ring_buffer_alloc(size, rb_flags);
6221         if (!buf->buffer)
6222                 return -ENOMEM;
6223
6224         buf->data = alloc_percpu(struct trace_array_cpu);
6225         if (!buf->data) {
6226                 ring_buffer_free(buf->buffer);
6227                 return -ENOMEM;
6228         }
6229
6230         /* Allocate the first page for all buffers */
6231         set_buffer_entries(&tr->trace_buffer,
6232                            ring_buffer_size(tr->trace_buffer.buffer, 0));
6233
6234         return 0;
6235 }
6236
6237 static int allocate_trace_buffers(struct trace_array *tr, int size)
6238 {
6239         int ret;
6240
6241         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6242         if (ret)
6243                 return ret;
6244
6245 #ifdef CONFIG_TRACER_MAX_TRACE
6246         ret = allocate_trace_buffer(tr, &tr->max_buffer,
6247                                     allocate_snapshot ? size : 1);
6248         if (WARN_ON(ret)) {
6249                 ring_buffer_free(tr->trace_buffer.buffer);
6250                 free_percpu(tr->trace_buffer.data);
6251                 return -ENOMEM;
6252         }
6253         tr->allocated_snapshot = allocate_snapshot;
6254
6255         /*
6256          * Only the top level trace array gets its snapshot allocated
6257          * from the kernel command line.
6258          */
6259         allocate_snapshot = false;
6260 #endif
6261         return 0;
6262 }
6263
6264 static void free_trace_buffer(struct trace_buffer *buf)
6265 {
6266         if (buf->buffer) {
6267                 ring_buffer_free(buf->buffer);
6268                 buf->buffer = NULL;
6269                 free_percpu(buf->data);
6270                 buf->data = NULL;
6271         }
6272 }
6273
6274 static void free_trace_buffers(struct trace_array *tr)
6275 {
6276         if (!tr)
6277                 return;
6278
6279         free_trace_buffer(&tr->trace_buffer);
6280
6281 #ifdef CONFIG_TRACER_MAX_TRACE
6282         free_trace_buffer(&tr->max_buffer);
6283 #endif
6284 }
6285
6286 static int new_instance_create(const char *name)
6287 {
6288         struct trace_array *tr;
6289         int ret;
6290
6291         mutex_lock(&trace_types_lock);
6292
6293         ret = -EEXIST;
6294         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6295                 if (tr->name && strcmp(tr->name, name) == 0)
6296                         goto out_unlock;
6297         }
6298
6299         ret = -ENOMEM;
6300         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6301         if (!tr)
6302                 goto out_unlock;
6303
6304         tr->name = kstrdup(name, GFP_KERNEL);
6305         if (!tr->name)
6306                 goto out_free_tr;
6307
6308         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6309                 goto out_free_tr;
6310
6311         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6312
6313         raw_spin_lock_init(&tr->start_lock);
6314
6315         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6316
6317         tr->current_trace = &nop_trace;
6318
6319         INIT_LIST_HEAD(&tr->systems);
6320         INIT_LIST_HEAD(&tr->events);
6321
6322         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6323                 goto out_free_tr;
6324
6325         tr->dir = debugfs_create_dir(name, trace_instance_dir);
6326         if (!tr->dir)
6327                 goto out_free_tr;
6328
6329         ret = event_trace_add_tracer(tr->dir, tr);
6330         if (ret) {
6331                 debugfs_remove_recursive(tr->dir);
6332                 goto out_free_tr;
6333         }
6334
6335         init_tracer_debugfs(tr, tr->dir);
6336
6337         list_add(&tr->list, &ftrace_trace_arrays);
6338
6339         mutex_unlock(&trace_types_lock);
6340
6341         return 0;
6342
6343  out_free_tr:
6344         free_trace_buffers(tr);
6345         free_cpumask_var(tr->tracing_cpumask);
6346         kfree(tr->name);
6347         kfree(tr);
6348
6349  out_unlock:
6350         mutex_unlock(&trace_types_lock);
6351
6352         return ret;
6353
6354 }
6355
6356 static int instance_delete(const char *name)
6357 {
6358         struct trace_array *tr;
6359         int found = 0;
6360         int ret;
6361
6362         mutex_lock(&trace_types_lock);
6363
6364         ret = -ENODEV;
6365         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6366                 if (tr->name && strcmp(tr->name, name) == 0) {
6367                         found = 1;
6368                         break;
6369                 }
6370         }
6371         if (!found)
6372                 goto out_unlock;
6373
6374         ret = -EBUSY;
6375         if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6376                 goto out_unlock;
6377
6378         list_del(&tr->list);
6379
6380         tracing_set_nop(tr);
6381         event_trace_del_tracer(tr);
6382         ftrace_destroy_function_files(tr);
6383         debugfs_remove_recursive(tr->dir);
6384         free_trace_buffers(tr);
6385
6386         kfree(tr->name);
6387         kfree(tr);
6388
6389         ret = 0;
6390
6391  out_unlock:
6392         mutex_unlock(&trace_types_lock);
6393
6394         return ret;
6395 }
6396
6397 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6398 {
6399         struct dentry *parent;
6400         int ret;
6401
6402         /* Paranoid: Make sure the parent is the "instances" directory */
6403         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6404         if (WARN_ON_ONCE(parent != trace_instance_dir))
6405                 return -ENOENT;
6406
6407         /*
6408          * The inode mutex is locked, but debugfs_create_dir() will also
6409          * take the mutex. As the instances directory can not be destroyed
6410          * or changed in any other way, it is safe to unlock it, and
6411          * let the dentry try. If two users try to make the same dir at
6412          * the same time, then the new_instance_create() will determine the
6413          * winner.
6414          */
6415         mutex_unlock(&inode->i_mutex);
6416
6417         ret = new_instance_create(dentry->d_iname);
6418
6419         mutex_lock(&inode->i_mutex);
6420
6421         return ret;
6422 }
6423
6424 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6425 {
6426         struct dentry *parent;
6427         int ret;
6428
6429         /* Paranoid: Make sure the parent is the "instances" directory */
6430         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6431         if (WARN_ON_ONCE(parent != trace_instance_dir))
6432                 return -ENOENT;
6433
6434         /* The caller did a dget() on dentry */
6435         mutex_unlock(&dentry->d_inode->i_mutex);
6436
6437         /*
6438          * The inode mutex is locked, but debugfs_remove_recursive() will
6439          * also take the mutex. As the instances directory can not be
6440          * destroyed or changed in any other way, it is safe to unlock it,
6441          * and let the dentry try. If two users try to remove the same
6442          * instance at the same time, then instance_delete() will determine
6443          * the winner.
6444          */
6445         mutex_unlock(&inode->i_mutex);
6446
6447         ret = instance_delete(dentry->d_iname);
6448
6449         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6450         mutex_lock(&dentry->d_inode->i_mutex);
6451
6452         return ret;
6453 }
6454
6455 static const struct inode_operations instance_dir_inode_operations = {
6456         .lookup         = simple_lookup,
6457         .mkdir          = instance_mkdir,
6458         .rmdir          = instance_rmdir,
6459 };
6460
6461 static __init void create_trace_instances(struct dentry *d_tracer)
6462 {
6463         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6464         if (WARN_ON(!trace_instance_dir))
6465                 return;
6466
6467         /* Hijack the dir inode operations, to allow mkdir */
6468         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6469 }
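/*
 * For illustration only: with the inode operations hijacked above, trace
 * instances are created and removed from userspace with plain mkdir/rmdir
 * on the "instances" directory (assuming debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo   -> new_instance_create("foo")
 *   rmdir /sys/kernel/debug/tracing/instances/foo   -> instance_delete("foo")
 *
 * Each instance gets its own trace buffers plus the per-instance control
 * files created by init_tracer_debugfs() below.
 */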
6470
6471 static void
6472 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6473 {
6474         int cpu;
6475
6476         trace_create_file("available_tracers", 0444, d_tracer,
6477                         tr, &show_traces_fops);
6478
6479         trace_create_file("current_tracer", 0644, d_tracer,
6480                         tr, &set_tracer_fops);
6481
6482         trace_create_file("tracing_cpumask", 0644, d_tracer,
6483                           tr, &tracing_cpumask_fops);
6484
6485         trace_create_file("trace_options", 0644, d_tracer,
6486                           tr, &tracing_iter_fops);
6487
6488         trace_create_file("trace", 0644, d_tracer,
6489                           tr, &tracing_fops);
6490
6491         trace_create_file("trace_pipe", 0444, d_tracer,
6492                           tr, &tracing_pipe_fops);
6493
6494         trace_create_file("buffer_size_kb", 0644, d_tracer,
6495                           tr, &tracing_entries_fops);
6496
6497         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6498                           tr, &tracing_total_entries_fops);
6499
6500         trace_create_file("free_buffer", 0200, d_tracer,
6501                           tr, &tracing_free_buffer_fops);
6502
6503         trace_create_file("trace_marker", 0220, d_tracer,
6504                           tr, &tracing_mark_fops);
6505
6506         trace_create_file("trace_clock", 0644, d_tracer, tr,
6507                           &trace_clock_fops);
6508
6509         trace_create_file("tracing_on", 0644, d_tracer,
6510                           tr, &rb_simple_fops);
6511
6512 #ifdef CONFIG_TRACER_MAX_TRACE
6513         trace_create_file("tracing_max_latency", 0644, d_tracer,
6514                         &tr->max_latency, &tracing_max_lat_fops);
6515 #endif
6516
6517         if (ftrace_create_function_files(tr, d_tracer))
6518                 WARN(1, "Could not allocate function filter files");
6519
6520 #ifdef CONFIG_TRACER_SNAPSHOT
6521         trace_create_file("snapshot", 0644, d_tracer,
6522                           tr, &snapshot_fops);
6523 #endif
6524
6525         for_each_tracing_cpu(cpu)
6526                 tracing_init_debugfs_percpu(tr, cpu);
6527
6528 }
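/*
 * For illustration, a minimal sketch of driving the control files created
 * above from a shell (paths are relative to the tracing directory; instance
 * directories under instances/ expose the same files):
 *
 *   echo function > current_tracer     # pick one of available_tracers
 *   echo 1 > tracing_on                # let events be recorded
 *   cat trace                          # non-consuming snapshot of the buffer
 *   cat trace_pipe                     # blocking, consuming read
 *   echo 4096 > buffer_size_kb         # resize the per-cpu ring buffers
 *   echo hello > trace_marker          # inject a marker from userspace
 */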
6529
6530 /**
6531  * tracing_init_dentry - initialize the top level trace array
6532  *
6533  * This is called when creating files or directories in the tracing
6534  * directory. It is called by any of the boot-up code, via fs_initcall(),
6535  * and returns the dentry of the top level tracing directory.
6536  */
6537 struct dentry *tracing_init_dentry(void)
6538 {
6539         struct trace_array *tr = &global_trace;
6540
6541         if (tr->dir)
6542                 return tr->dir;
6543
6544         if (WARN_ON(!debugfs_initialized()))
6545                 return ERR_PTR(-ENODEV);
6546
6547         tr->dir = debugfs_create_dir("tracing", NULL);
6548
6549         if (!tr->dir) {
6550                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6551                 return ERR_PTR(-ENOMEM);
6552         }
6553
6554         return tr->dir;
6555 }
6556
6557 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6558 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6559
6560 static void __init trace_enum_init(void)
6561 {
6562         int len;
6563
6564         len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6565         trace_insert_enum_map(__start_ftrace_enum_maps, len);
6566 }
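/*
 * For illustration: the section walked above is populated at build time by
 * TRACE_DEFINE_ENUM() in trace event headers, which records the numeric
 * value of an enum so that enum names appearing in event format strings
 * can be resolved for userspace. A sketch (MY_ENUM_VALUE is a hypothetical
 * enum used by some event):
 *
 *   TRACE_DEFINE_ENUM(MY_ENUM_VALUE);
 *
 * Each such line contributes an entry between __start_ftrace_enum_maps
 * and __stop_ftrace_enum_maps.
 */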
6567
6568 #ifdef CONFIG_MODULES
6569 static void trace_module_add_enums(struct module *mod)
6570 {
6571         if (!mod->num_trace_enums)
6572                 return;
6573
6574         /*
6575          * Modules with a bad taint do not have events created;
6576          * do not bother with their enums either.
6577          */
6578         if (trace_module_has_bad_taint(mod))
6579                 return;
6580
6581         trace_insert_enum_map(mod->trace_enums, mod->num_trace_enums);
6582 }
6583
6584 static int trace_module_notify(struct notifier_block *self,
6585                                unsigned long val, void *data)
6586 {
6587         struct module *mod = data;
6588
6589         switch (val) {
6590         case MODULE_STATE_COMING:
6591                 trace_module_add_enums(mod);
6592                 break;
6593         }
6594
6595         return 0;
6596 }
6597
6598 static struct notifier_block trace_module_nb = {
6599         .notifier_call = trace_module_notify,
6600         .priority = 0,
6601 };
6602 #endif
6603
6604 static __init int tracer_init_debugfs(void)
6605 {
6606         struct dentry *d_tracer;
6607
6608         trace_access_lock_init();
6609
6610         d_tracer = tracing_init_dentry();
6611         if (IS_ERR(d_tracer))
6612                 return 0;
6613
6614         init_tracer_debugfs(&global_trace, d_tracer);
6615
6616         trace_create_file("tracing_thresh", 0644, d_tracer,
6617                         &global_trace, &tracing_thresh_fops);
6618
6619         trace_create_file("README", 0444, d_tracer,
6620                         NULL, &tracing_readme_fops);
6621
6622         trace_create_file("saved_cmdlines", 0444, d_tracer,
6623                         NULL, &tracing_saved_cmdlines_fops);
6624
6625         trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6626                           NULL, &tracing_saved_cmdlines_size_fops);
6627
6628         trace_enum_init();
6629
6630 #ifdef CONFIG_MODULES
6631         register_module_notifier(&trace_module_nb);
6632 #endif
6633
6634 #ifdef CONFIG_DYNAMIC_FTRACE
6635         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6636                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6637 #endif
6638
6639         create_trace_instances(d_tracer);
6640
6641         create_trace_options_dir(&global_trace);
6642
6643         return 0;
6644 }
6645
6646 static int trace_panic_handler(struct notifier_block *this,
6647                                unsigned long event, void *unused)
6648 {
6649         if (ftrace_dump_on_oops)
6650                 ftrace_dump(ftrace_dump_on_oops);
6651         return NOTIFY_OK;
6652 }
6653
6654 static struct notifier_block trace_panic_notifier = {
6655         .notifier_call  = trace_panic_handler,
6656         .next           = NULL,
6657         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6658 };
6659
6660 static int trace_die_handler(struct notifier_block *self,
6661                              unsigned long val,
6662                              void *data)
6663 {
6664         switch (val) {
6665         case DIE_OOPS:
6666                 if (ftrace_dump_on_oops)
6667                         ftrace_dump(ftrace_dump_on_oops);
6668                 break;
6669         default:
6670                 break;
6671         }
6672         return NOTIFY_OK;
6673 }
6674
6675 static struct notifier_block trace_die_notifier = {
6676         .notifier_call = trace_die_handler,
6677         .priority = 200
6678 };
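/*
 * For illustration: the panic and die notifiers above only dump the buffer
 * when ftrace_dump_on_oops is non-zero. That is typically arranged with the
 * "ftrace_dump_on_oops" (all CPUs) or "ftrace_dump_on_oops=orig_cpu" boot
 * parameter, or at run time through the kernel.ftrace_dump_on_oops sysctl.
 */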
6679
6680 /*
6681  * printk is limited to a maximum of 1024 bytes; we really don't need it
6682  * that big. Nothing should be printing 1000 characters in one line anyway.
6683  */
6684 #define TRACE_MAX_PRINT         1000
6685
6686 /*
6687  * Define here KERN_TRACE so that we have one place to modify
6688  * it if we decide to change what log level the ftrace dump
6689  * should be at.
6690  */
6691 #define KERN_TRACE              KERN_EMERG
6692
6693 void
6694 trace_printk_seq(struct trace_seq *s)
6695 {
6696         /* Probably should print a warning here. */
6697         if (s->seq.len >= TRACE_MAX_PRINT)
6698                 s->seq.len = TRACE_MAX_PRINT;
6699
6700         /*
6701          * More paranoid code. Although the buffer size is set to
6702          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6703          * an extra layer of protection.
6704          */
6705         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6706                 s->seq.len = s->seq.size - 1;
6707
6708         /* Should already be NUL-terminated, but we are paranoid. */
6709         s->buffer[s->seq.len] = 0;
6710
6711         printk(KERN_TRACE "%s", s->buffer);
6712
6713         trace_seq_init(s);
6714 }
6715
6716 void trace_init_global_iter(struct trace_iterator *iter)
6717 {
6718         iter->tr = &global_trace;
6719         iter->trace = iter->tr->current_trace;
6720         iter->cpu_file = RING_BUFFER_ALL_CPUS;
6721         iter->trace_buffer = &global_trace.trace_buffer;
6722
6723         if (iter->trace && iter->trace->open)
6724                 iter->trace->open(iter);
6725
6726         /* Annotate start of buffers if we had overruns */
6727         if (ring_buffer_overruns(iter->trace_buffer->buffer))
6728                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6729
6730         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6731         if (trace_clocks[iter->tr->clock_id].in_ns)
6732                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6733 }
6734
6735 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6736 {
6737         /* use static because iter can be a bit big for the stack */
6738         static struct trace_iterator iter;
6739         static atomic_t dump_running;
6740         unsigned int old_userobj;
6741         unsigned long flags;
6742         int cnt = 0, cpu;
6743
6744         /* Only allow one dump user at a time. */
6745         if (atomic_inc_return(&dump_running) != 1) {
6746                 atomic_dec(&dump_running);
6747                 return;
6748         }
6749
6750         /*
6751          * Always turn off tracing when we dump.
6752          * We don't need to show trace output of what happens
6753          * between multiple crashes.
6754          *
6755          * If the user does a sysrq-z, then they can re-enable
6756          * tracing with echo 1 > tracing_on.
6757          */
6758         tracing_off();
6759
6760         local_irq_save(flags);
6761
6762         /* Simulate the iterator */
6763         trace_init_global_iter(&iter);
6764
6765         for_each_tracing_cpu(cpu) {
6766                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6767         }
6768
6769         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6770
6771         /* don't look at user memory in panic mode */
6772         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6773
6774         switch (oops_dump_mode) {
6775         case DUMP_ALL:
6776                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6777                 break;
6778         case DUMP_ORIG:
6779                 iter.cpu_file = raw_smp_processor_id();
6780                 break;
6781         case DUMP_NONE:
6782                 goto out_enable;
6783         default:
6784                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6785                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6786         }
6787
6788         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6789
6790         /* Did function tracer already get disabled? */
6791         if (ftrace_is_dead()) {
6792                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6793                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6794         }
6795
6796         /*
6797          * We need to stop all tracing on all CPUs to read
6798          * the next buffer. This is a bit expensive, but is
6799          * not done often. We read all that we can, and then
6800          * release the locks again.
6801          */
6802
6803         while (!trace_empty(&iter)) {
6804
6805                 if (!cnt)
6806                         printk(KERN_TRACE "---------------------------------\n");
6807
6808                 cnt++;
6809
6810                 /* reset all but tr, trace, and overruns */
6811                 memset(&iter.seq, 0,
6812                        sizeof(struct trace_iterator) -
6813                        offsetof(struct trace_iterator, seq));
6814                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6815                 iter.pos = -1;
6816
6817                 if (trace_find_next_entry_inc(&iter) != NULL) {
6818                         int ret;
6819
6820                         ret = print_trace_line(&iter);
6821                         if (ret != TRACE_TYPE_NO_CONSUME)
6822                                 trace_consume(&iter);
6823                 }
6824                 touch_nmi_watchdog();
6825
6826                 trace_printk_seq(&iter.seq);
6827         }
6828
6829         if (!cnt)
6830                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6831         else
6832                 printk(KERN_TRACE "---------------------------------\n");
6833
6834  out_enable:
6835         trace_flags |= old_userobj;
6836
6837         for_each_tracing_cpu(cpu) {
6838                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6839         }
6840         atomic_dec(&dump_running);
6841         local_irq_restore(flags);
6842 }
6843 EXPORT_SYMBOL_GPL(ftrace_dump);
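/*
 * For illustration, a minimal sketch of how a debugging aid might use the
 * exported ftrace_dump() to spill the trace buffers to the console when it
 * detects trouble (the condition below is hypothetical):
 *
 *   if (something_went_badly_wrong())
 *           ftrace_dump(DUMP_ALL);   // dump every CPU, like sysrq-z
 *
 * DUMP_ORIG instead limits the dump to the CPU that called ftrace_dump().
 */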
6844
6845 __init static int tracer_alloc_buffers(void)
6846 {
6847         int ring_buf_size;
6848         int ret = -ENOMEM;
6849
6850         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6851                 goto out;
6852
6853         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6854                 goto out_free_buffer_mask;
6855
6856         /* Only allocate trace_printk buffers if a trace_printk exists */
6857         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6858                 /* Must be called before global_trace.buffer is allocated */
6859                 trace_printk_init_buffers();
6860
6861         /* To save memory, keep the ring buffer size to its minimum */
6862         if (ring_buffer_expanded)
6863                 ring_buf_size = trace_buf_size;
6864         else
6865                 ring_buf_size = 1;
6866
6867         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6868         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6869
6870         raw_spin_lock_init(&global_trace.start_lock);
6871
6872         /* Used for event triggers */
6873         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6874         if (!temp_buffer)
6875                 goto out_free_cpumask;
6876
6877         if (trace_create_savedcmd() < 0)
6878                 goto out_free_temp_buffer;
6879
6880         /* TODO: make the number of buffers hot pluggable with CPUs */
6881         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6882                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6883                 WARN_ON(1);
6884                 goto out_free_savedcmd;
6885         }
6886
6887         if (global_trace.buffer_disabled)
6888                 tracing_off();
6889
6890         if (trace_boot_clock) {
6891                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6892                 if (ret < 0)
6893                         pr_warning("Trace clock %s not defined, going back to default\n",
6894                                    trace_boot_clock);
6895         }
6896
6897         /*
6898          * register_tracer() might reference current_trace, so it
6899          * needs to be set before we register anything. This is
6900          * just a bootstrap of current_trace anyway.
6901          */
6902         global_trace.current_trace = &nop_trace;
6903
6904         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6905
6906         ftrace_init_global_array_ops(&global_trace);
6907
6908         register_tracer(&nop_trace);
6909
6910         /* All seems OK, enable tracing */
6911         tracing_disabled = 0;
6912
6913         atomic_notifier_chain_register(&panic_notifier_list,
6914                                        &trace_panic_notifier);
6915
6916         register_die_notifier(&trace_die_notifier);
6917
6918         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6919
6920         INIT_LIST_HEAD(&global_trace.systems);
6921         INIT_LIST_HEAD(&global_trace.events);
6922         list_add(&global_trace.list, &ftrace_trace_arrays);
6923
6924         while (trace_boot_options) {
6925                 char *option;
6926
6927                 option = strsep(&trace_boot_options, ",");
6928                 trace_set_options(&global_trace, option);
6929         }
6930
6931         register_snapshot_cmd();
6932
6933         return 0;
6934
6935 out_free_savedcmd:
6936         free_saved_cmdlines_buffer(savedcmd);
6937 out_free_temp_buffer:
6938         ring_buffer_free(temp_buffer);
6939 out_free_cpumask:
6940         free_cpumask_var(global_trace.tracing_cpumask);
6941 out_free_buffer_mask:
6942         free_cpumask_var(tracing_buffer_mask);
6943 out:
6944         return ret;
6945 }
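/*
 * For illustration: several of the variables consumed above come from
 * kernel command line parameters parsed earlier in this file, e.g.:
 *
 *   ftrace=function               -> default_bootup_tracer
 *   trace_buf_size=1048576        -> trace_buf_size (per-cpu buffer size)
 *   trace_clock=global            -> trace_boot_clock
 *   trace_options=stacktrace      -> trace_boot_options (strsep() loop above)
 */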
6946
6947 void __init trace_init(void)
6948 {
6949         if (tracepoint_printk) {
6950                 tracepoint_print_iter =
6951                         kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6952                 if (WARN_ON(!tracepoint_print_iter))
6953                         tracepoint_printk = 0;
6954         }
6955         tracer_alloc_buffers();
6956         trace_event_init();
6957 }
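/*
 * For illustration: tracepoint_printk is normally enabled with the
 * "tp_printk" boot parameter (parsed earlier in this file), which routes
 * trace events to printk through the tracepoint_print_iter allocated above.
 */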
6958
6959 __init static int clear_boot_tracer(void)
6960 {
6961         /*
6962          * The default bootup tracer name is stored in an init section.
6963          * This function is called at late_initcall time. If the boot
6964          * tracer was never registered, clear the pointer to prevent
6965          * later registration from accessing the buffer that is
6966          * about to be freed.
6967          */
6968         if (!default_bootup_tracer)
6969                 return 0;
6970
6971         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6972                default_bootup_tracer);
6973         default_bootup_tracer = NULL;
6974
6975         return 0;
6976 }
6977
6978 fs_initcall(tracer_init_debugfs);
6979 late_initcall(clear_boot_tracer);