kernel/trace/trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 bool ring_buffer_expanded;
51
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will look into the ring-buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring-buffer, such as trace_printk, could occur
57  * at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
68         { }
69 };
70
71 static struct tracer_flags dummy_tracer_flags = {
72         .val = 0,
73         .opts = dummy_tracer_opt
74 };
75
76 static int
77 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
78 {
79         return 0;
80 }
81
82 /*
83  * To prevent the comm cache from being overwritten when no
84  * tracing is active, only save the comm when a trace event
85  * occurred.
86  */
87 static DEFINE_PER_CPU(bool, trace_cmdline_save);
88
89 /*
90  * Kill all tracing for good (never come back).
91  * It is initialized to 1 and is set back to zero only if the
92  * initialization of the tracer is successful; that is the only
93  * place that clears it.
94  */
95 static int tracing_disabled = 1;
96
97 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
98
99 cpumask_var_t __read_mostly     tracing_buffer_mask;
100
101 /*
102  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103  *
104  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105  * is set, then ftrace_dump is called. This will output the contents
106  * of the ftrace buffers to the console.  This is very useful for
107  * capturing traces that lead to crashes and outputting them to a
108  * serial console.
109  *
110  * It is off by default, but you can enable it either by specifying
111  * "ftrace_dump_on_oops" on the kernel command line, or by setting
112  * /proc/sys/kernel/ftrace_dump_on_oops.
113  * Set it to 1 to dump the buffers of all CPUs, or to 2 to dump only
114  * the buffer of the CPU that triggered the oops.
115  */
116
117 enum ftrace_dump_mode ftrace_dump_on_oops;
118
119 /* When set, tracing will stop when a WARN*() is hit */
120 int __disable_trace_on_warning;
121
122 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
123
124 #define MAX_TRACER_SIZE         100
125 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
126 static char *default_bootup_tracer;
127
128 static bool allocate_snapshot;
129
130 static int __init set_cmdline_ftrace(char *str)
131 {
132         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
133         default_bootup_tracer = bootup_tracer_buf;
134         /* We are using ftrace early, expand it */
135         ring_buffer_expanded = true;
136         return 1;
137 }
138 __setup("ftrace=", set_cmdline_ftrace);
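/*
 * Illustrative note (editor's sketch, not part of the original file):
 * "ftrace=" selects the tracer to start at boot, e.g. on the kernel
 * command line:
 *
 *        ftrace=function_graph
 *
 * The name must match a tracer registered with register_tracer(); it is
 * picked up there through default_bootup_tracer.
 */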
139
140 static int __init set_ftrace_dump_on_oops(char *str)
141 {
142         if (*str++ != '=' || !*str) {
143                 ftrace_dump_on_oops = DUMP_ALL;
144                 return 1;
145         }
146
147         if (!strcmp("orig_cpu", str)) {
148                 ftrace_dump_on_oops = DUMP_ORIG;
149                 return 1;
150         }
151
152         return 0;
153 }
154 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
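/*
 * Illustrative note (editor's sketch): given the parsing above, both of
 * these command-line forms are accepted:
 *
 *        ftrace_dump_on_oops              (dump the buffers of all CPUs, DUMP_ALL)
 *        ftrace_dump_on_oops=orig_cpu     (dump only the oopsing CPU, DUMP_ORIG)
 */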
155
156 static int __init stop_trace_on_warning(char *str)
157 {
158         __disable_trace_on_warning = 1;
159         return 1;
160 }
161 __setup("traceoff_on_warning=", stop_trace_on_warning);
162
163 static int __init boot_alloc_snapshot(char *str)
164 {
165         allocate_snapshot = true;
166         /* We also need the main ring buffer expanded */
167         ring_buffer_expanded = true;
168         return 1;
169 }
170 __setup("alloc_snapshot", boot_alloc_snapshot);
171
172
173 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
174 static char *trace_boot_options __initdata;
175
176 static int __init set_trace_boot_options(char *str)
177 {
178         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
179         trace_boot_options = trace_boot_options_buf;
180         return 0;
181 }
182 __setup("trace_options=", set_trace_boot_options);
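/*
 * Illustrative note (editor's sketch): trace_options= takes a
 * comma-separated list of the option names from the trace_options[]
 * table below, optionally prefixed with "no" to clear one, e.g.:
 *
 *        trace_options=sym-addr,stacktrace,nooverwrite
 *
 * The saved string is only applied later, once tracing is set up.
 */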
183
184 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
185 static char *trace_boot_clock __initdata;
186
187 static int __init set_trace_boot_clock(char *str)
188 {
189         strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
190         trace_boot_clock = trace_boot_clock_buf;
191         return 0;
192 }
193 __setup("trace_clock=", set_trace_boot_clock);
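/*
 * Illustrative note (editor's sketch): trace_clock= takes one of the
 * clock names from the trace_clocks[] table below, e.g.:
 *
 *        trace_clock=global
 */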
194
195
196 unsigned long long ns2usecs(cycle_t nsec)
197 {
198         nsec += 500;
199         do_div(nsec, 1000);
200         return nsec;
201 }
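/*
 * Illustrative note (editor's sketch): ns2usecs() rounds to the nearest
 * microsecond rather than truncating, e.g. ns2usecs(1499) == 1 but
 * ns2usecs(1500) == 2, because 500 is added before dividing by 1000.
 */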
202
203 /*
204  * The global_trace is the descriptor that holds the tracing
205  * buffers for the live tracing. For each CPU, it contains
206  * a linked list of pages that will store trace entries. The
207  * page descriptor of the pages in memory is used to hold
208  * the linked list, by linking the lru item in the page descriptor
209  * to each of the pages in the buffer for that CPU.
210  *
211  * For each active CPU there is a data field that holds the
212  * pages for the buffer for that CPU. Each CPU has the same number
213  * of pages allocated for its buffer.
214  */
215 static struct trace_array       global_trace;
216
217 LIST_HEAD(ftrace_trace_arrays);
218
219 int trace_array_get(struct trace_array *this_tr)
220 {
221         struct trace_array *tr;
222         int ret = -ENODEV;
223
224         mutex_lock(&trace_types_lock);
225         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
226                 if (tr == this_tr) {
227                         tr->ref++;
228                         ret = 0;
229                         break;
230                 }
231         }
232         mutex_unlock(&trace_types_lock);
233
234         return ret;
235 }
236
237 static void __trace_array_put(struct trace_array *this_tr)
238 {
239         WARN_ON(!this_tr->ref);
240         this_tr->ref--;
241 }
242
243 void trace_array_put(struct trace_array *this_tr)
244 {
245         mutex_lock(&trace_types_lock);
246         __trace_array_put(this_tr);
247         mutex_unlock(&trace_types_lock);
248 }
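/*
 * Illustrative sketch (editor's addition; the caller shown is
 * hypothetical): users of a trace_array pair these calls so that the
 * instance cannot go away underneath them:
 *
 *        if (trace_array_get(tr))
 *                return -ENODEV;     (tr is not on ftrace_trace_arrays)
 *        ... use tr; the reference keeps it alive ...
 *        trace_array_put(tr);
 */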
249
250 int filter_check_discard(struct ftrace_event_file *file, void *rec,
251                          struct ring_buffer *buffer,
252                          struct ring_buffer_event *event)
253 {
254         if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
255             !filter_match_preds(file->filter, rec)) {
256                 ring_buffer_discard_commit(buffer, event);
257                 return 1;
258         }
259
260         return 0;
261 }
262 EXPORT_SYMBOL_GPL(filter_check_discard);
263
264 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
265                               struct ring_buffer *buffer,
266                               struct ring_buffer_event *event)
267 {
268         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
269             !filter_match_preds(call->filter, rec)) {
270                 ring_buffer_discard_commit(buffer, event);
271                 return 1;
272         }
273
274         return 0;
275 }
276 EXPORT_SYMBOL_GPL(call_filter_check_discard);
277
278 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
279 {
280         u64 ts;
281
282         /* Early boot up does not have a buffer yet */
283         if (!buf->buffer)
284                 return trace_clock_local();
285
286         ts = ring_buffer_time_stamp(buf->buffer, cpu);
287         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
288
289         return ts;
290 }
291
292 cycle_t ftrace_now(int cpu)
293 {
294         return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
295 }
296
297 /**
298  * tracing_is_enabled - Show if global_trace has been disabled
299  *
300  * Shows if the global trace has been enabled or not. It uses the
301  * mirror flag "buffer_disabled" so it can be used in fast paths such
302  * as the irqsoff tracer, but it may be inaccurate due to races. If you
303  * need to know the accurate state, use tracing_is_on(), which is a
304  * little slower but accurate.
305  */
306 int tracing_is_enabled(void)
307 {
308         /*
309          * For quick access (irqsoff uses this in fast path), just
310          * return the mirror variable of the state of the ring buffer.
311          * It's a little racy, but we don't really care.
312          */
313         smp_rmb();
314         return !global_trace.buffer_disabled;
315 }
316
317 /*
318  * trace_buf_size is the size in bytes that is allocated
319  * for a buffer. Note, the number of bytes is always rounded
320  * to page size.
321  *
322  * This number is purposely set to a low number of 16384.
323  * If a dump on oops happens, it is much appreciated not to have
324  * to wait for all that output. In any case, this is configurable
325  * at both boot time and run time.
326  */
327 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
328
329 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
330
331 /* trace_types holds a link list of available tracers. */
332 static struct tracer            *trace_types __read_mostly;
333
334 /*
335  * trace_types_lock is used to protect the trace_types list.
336  */
337 DEFINE_MUTEX(trace_types_lock);
338
339 /*
340  * Serialize access to the ring buffer.
341  *
342  * The ring buffer serializes readers, but that is only low-level protection.
343  * The validity of the events (returned by ring_buffer_peek() etc.)
344  * is not protected by the ring buffer.
345  *
346  * The content of events may become garbage if we allow other processes to
347  * consume these events concurrently:
348  *   A) the page of the consumed events may become a normal page
349  *      (not a reader page) in the ring buffer, and this page will be
350  *      rewritten by the event producer.
351  *   B) the page of the consumed events may become a page for splice_read,
352  *      and this page will be returned to the system.
353  *
354  * These primitives allow multiple processes to access different per-cpu
355  * ring buffers concurrently.
356  *
357  * These primitives don't distinguish read-only and read-consume access.
358  * Multiple read-only accesses are also serialized.
359  */
360
361 #ifdef CONFIG_SMP
362 static DECLARE_RWSEM(all_cpu_access_lock);
363 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
364
365 static inline void trace_access_lock(int cpu)
366 {
367         if (cpu == RING_BUFFER_ALL_CPUS) {
368                 /* gain it for accessing the whole ring buffer. */
369                 down_write(&all_cpu_access_lock);
370         } else {
371                 /* gain it for accessing a cpu ring buffer. */
372
373                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
374                 down_read(&all_cpu_access_lock);
375
376                 /* Secondly block other access to this @cpu ring buffer. */
377                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
378         }
379 }
380
381 static inline void trace_access_unlock(int cpu)
382 {
383         if (cpu == RING_BUFFER_ALL_CPUS) {
384                 up_write(&all_cpu_access_lock);
385         } else {
386                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
387                 up_read(&all_cpu_access_lock);
388         }
389 }
390
391 static inline void trace_access_lock_init(void)
392 {
393         int cpu;
394
395         for_each_possible_cpu(cpu)
396                 mutex_init(&per_cpu(cpu_access_lock, cpu));
397 }
398
399 #else
400
401 static DEFINE_MUTEX(access_lock);
402
403 static inline void trace_access_lock(int cpu)
404 {
405         (void)cpu;
406         mutex_lock(&access_lock);
407 }
408
409 static inline void trace_access_unlock(int cpu)
410 {
411         (void)cpu;
412         mutex_unlock(&access_lock);
413 }
414
415 static inline void trace_access_lock_init(void)
416 {
417 }
418
419 #endif
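/*
 * Illustrative sketch (editor's addition, hypothetical reader path):
 * with either implementation above, a consumer of a single per-cpu
 * buffer brackets its accesses like this, while a consumer of all
 * buffers passes RING_BUFFER_ALL_CPUS to take exclusive access:
 *
 *        trace_access_lock(cpu);
 *        ... peek at or consume events from that cpu's buffer ...
 *        trace_access_unlock(cpu);
 */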
420
421 /* trace_flags holds trace_options default values */
422 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
423         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
424         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
425         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
426
427 static void tracer_tracing_on(struct trace_array *tr)
428 {
429         if (tr->trace_buffer.buffer)
430                 ring_buffer_record_on(tr->trace_buffer.buffer);
431         /*
432          * This flag is looked at when buffers haven't been allocated
433          * yet, or by some tracers (like irqsoff) that just want to
434          * know if the ring buffer has been disabled, but can handle
435          * the race where it gets disabled while we still do a record.
436          * As the check is in the fast path of the tracers, it is more
437          * important to be fast than accurate.
438          */
439         tr->buffer_disabled = 0;
440         /* Make the flag seen by readers */
441         smp_wmb();
442 }
443
444 /**
445  * tracing_on - enable tracing buffers
446  *
447  * This function enables tracing buffers that may have been
448  * disabled with tracing_off.
449  */
450 void tracing_on(void)
451 {
452         tracer_tracing_on(&global_trace);
453 }
454 EXPORT_SYMBOL_GPL(tracing_on);
455
456 /**
457  * __trace_puts - write a constant string into the trace buffer.
458  * @ip:    The address of the caller
459  * @str:   The constant string to write
460  * @size:  The size of the string.
461  */
462 int __trace_puts(unsigned long ip, const char *str, int size)
463 {
464         struct ring_buffer_event *event;
465         struct ring_buffer *buffer;
466         struct print_entry *entry;
467         unsigned long irq_flags;
468         int alloc;
469         int pc;
470
471         if (!(trace_flags & TRACE_ITER_PRINTK))
472                 return 0;
473
474         pc = preempt_count();
475
476         if (unlikely(tracing_selftest_running || tracing_disabled))
477                 return 0;
478
479         alloc = sizeof(*entry) + size + 2; /* possible \n added */
480
481         local_save_flags(irq_flags);
482         buffer = global_trace.trace_buffer.buffer;
483         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
484                                           irq_flags, pc);
485         if (!event)
486                 return 0;
487
488         entry = ring_buffer_event_data(event);
489         entry->ip = ip;
490
491         memcpy(&entry->buf, str, size);
492
493         /* Add a newline if necessary */
494         if (entry->buf[size - 1] != '\n') {
495                 entry->buf[size] = '\n';
496                 entry->buf[size + 1] = '\0';
497         } else
498                 entry->buf[size] = '\0';
499
500         __buffer_unlock_commit(buffer, event);
501         ftrace_trace_stack(buffer, irq_flags, 4, pc);
502
503         return size;
504 }
505 EXPORT_SYMBOL_GPL(__trace_puts);
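/*
 * Illustrative note (editor's sketch): __trace_puts() is normally not
 * called directly; the trace_puts() macro picks between __trace_puts()
 * and __trace_bputs() depending on whether the string is a built-in
 * constant, e.g.:
 *
 *        trace_puts("reached the slow path\n");
 */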
506
507 /**
508  * __trace_bputs - write the pointer to a constant string into trace buffer
509  * @ip:    The address of the caller
510  * @str:   The constant string to write to the buffer
511  */
512 int __trace_bputs(unsigned long ip, const char *str)
513 {
514         struct ring_buffer_event *event;
515         struct ring_buffer *buffer;
516         struct bputs_entry *entry;
517         unsigned long irq_flags;
518         int size = sizeof(struct bputs_entry);
519         int pc;
520
521         if (!(trace_flags & TRACE_ITER_PRINTK))
522                 return 0;
523
524         pc = preempt_count();
525
526         if (unlikely(tracing_selftest_running || tracing_disabled))
527                 return 0;
528
529         local_save_flags(irq_flags);
530         buffer = global_trace.trace_buffer.buffer;
531         event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
532                                           irq_flags, pc);
533         if (!event)
534                 return 0;
535
536         entry = ring_buffer_event_data(event);
537         entry->ip                       = ip;
538         entry->str                      = str;
539
540         __buffer_unlock_commit(buffer, event);
541         ftrace_trace_stack(buffer, irq_flags, 4, pc);
542
543         return 1;
544 }
545 EXPORT_SYMBOL_GPL(__trace_bputs);
546
547 #ifdef CONFIG_TRACER_SNAPSHOT
548 /**
549  * tracing_snapshot - take a snapshot of the current buffer.
550  *
551  * This causes a swap between the snapshot buffer and the current live
552  * tracing buffer. You can use this to take snapshots of the live
553  * trace when some condition is triggered, but continue to trace.
554  *
555  * Note, make sure to allocate the snapshot with either
556  * a tracing_snapshot_alloc(), or by doing it manually
557  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
558  *
559  * If the snapshot buffer is not allocated, it will stop tracing,
560  * basically making a permanent snapshot.
561  */
562 void tracing_snapshot(void)
563 {
564         struct trace_array *tr = &global_trace;
565         struct tracer *tracer = tr->current_trace;
566         unsigned long flags;
567
568         if (in_nmi()) {
569                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
570                 internal_trace_puts("*** snapshot is being ignored        ***\n");
571                 return;
572         }
573
574         if (!tr->allocated_snapshot) {
575                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
576                 internal_trace_puts("*** stopping trace here!   ***\n");
577                 tracing_off();
578                 return;
579         }
580
581         /* Note, snapshot can not be used when the tracer uses it */
582         if (tracer->use_max_tr) {
583                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
584                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
585                 return;
586         }
587
588         local_irq_save(flags);
589         update_max_tr(tr, current, smp_processor_id());
590         local_irq_restore(flags);
591 }
592 EXPORT_SYMBOL_GPL(tracing_snapshot);
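/*
 * Illustrative sketch (editor's addition, hypothetical call site): a
 * typical user allocates the snapshot buffer once from sleepable
 * context and then triggers snapshots wherever the interesting
 * condition is detected:
 *
 *        tracing_alloc_snapshot();      (or: echo 1 > tracing/snapshot)
 *        ...
 *        if (looks_interesting())       (hypothetical condition)
 *                tracing_snapshot();
 */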
593
594 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
595                                         struct trace_buffer *size_buf, int cpu_id);
596 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
597
598 static int alloc_snapshot(struct trace_array *tr)
599 {
600         int ret;
601
602         if (!tr->allocated_snapshot) {
603
604                 /* allocate spare buffer */
605                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
606                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
607                 if (ret < 0)
608                         return ret;
609
610                 tr->allocated_snapshot = true;
611         }
612
613         return 0;
614 }
615
616 static void free_snapshot(struct trace_array *tr)
617 {
618         /*
619          * We don't free the ring buffer; instead, we resize it because
620          * the max_tr ring buffer has some state (e.g. ring->clock) and
621          * we want to preserve it.
622          */
623         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
624         set_buffer_entries(&tr->max_buffer, 1);
625         tracing_reset_online_cpus(&tr->max_buffer);
626         tr->allocated_snapshot = false;
627 }
628
629 /**
630  * tracing_alloc_snapshot - allocate snapshot buffer.
631  *
632  * This only allocates the snapshot buffer if it isn't already
633  * allocated - it doesn't also take a snapshot.
634  *
635  * This is meant to be used in cases where the snapshot buffer needs
636  * to be set up for events that can't sleep but need to be able to
637  * trigger a snapshot.
638  */
639 int tracing_alloc_snapshot(void)
640 {
641         struct trace_array *tr = &global_trace;
642         int ret;
643
644         ret = alloc_snapshot(tr);
645         WARN_ON(ret < 0);
646
647         return ret;
648 }
649 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
650
651 /**
652  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
653  *
654  * This is similar to tracing_snapshot(), but it will allocate the
655  * snapshot buffer if it isn't already allocated. Use this only
656  * where it is safe to sleep, as the allocation may sleep.
657  *
658  * This causes a swap between the snapshot buffer and the current live
659  * tracing buffer. You can use this to take snapshots of the live
660  * trace when some condition is triggered, but continue to trace.
661  */
662 void tracing_snapshot_alloc(void)
663 {
664         int ret;
665
666         ret = tracing_alloc_snapshot();
667         if (ret < 0)
668                 return;
669
670         tracing_snapshot();
671 }
672 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
673 #else
674 void tracing_snapshot(void)
675 {
676         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
677 }
678 EXPORT_SYMBOL_GPL(tracing_snapshot);
679 int tracing_alloc_snapshot(void)
680 {
681         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
682         return -ENODEV;
683 }
684 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
685 void tracing_snapshot_alloc(void)
686 {
687         /* Give warning */
688         tracing_snapshot();
689 }
690 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
691 #endif /* CONFIG_TRACER_SNAPSHOT */
692
693 static void tracer_tracing_off(struct trace_array *tr)
694 {
695         if (tr->trace_buffer.buffer)
696                 ring_buffer_record_off(tr->trace_buffer.buffer);
697         /*
698          * This flag is looked at when buffers haven't been allocated
699          * yet, or by some tracers (like irqsoff) that just want to
700          * know if the ring buffer has been disabled, but can handle
701          * the race where it gets disabled while we still do a record.
702          * As the check is in the fast path of the tracers, it is more
703          * important to be fast than accurate.
704          */
705         tr->buffer_disabled = 1;
706         /* Make the flag seen by readers */
707         smp_wmb();
708 }
709
710 /**
711  * tracing_off - turn off tracing buffers
712  *
713  * This function stops the tracing buffers from recording data.
714  * It does not disable any overhead the tracers themselves may
715  * be causing. This function simply causes all recording to
716  * the ring buffers to fail.
717  */
718 void tracing_off(void)
719 {
720         tracer_tracing_off(&global_trace);
721 }
722 EXPORT_SYMBOL_GPL(tracing_off);
723
724 void disable_trace_on_warning(void)
725 {
726         if (__disable_trace_on_warning)
727                 tracing_off();
728 }
729
730 /**
731  * tracer_tracing_is_on - show real state of ring buffer enabled
732  * @tr : the trace array to know if ring buffer is enabled
733  *
734  * Shows real state of the ring buffer if it is enabled or not.
735  */
736 static int tracer_tracing_is_on(struct trace_array *tr)
737 {
738         if (tr->trace_buffer.buffer)
739                 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
740         return !tr->buffer_disabled;
741 }
742
743 /**
744  * tracing_is_on - show state of ring buffers enabled
745  */
746 int tracing_is_on(void)
747 {
748         return tracer_tracing_is_on(&global_trace);
749 }
750 EXPORT_SYMBOL_GPL(tracing_is_on);
751
752 static int __init set_buf_size(char *str)
753 {
754         unsigned long buf_size;
755
756         if (!str)
757                 return 0;
758         buf_size = memparse(str, &str);
759         /* nr_entries can not be zero */
760         if (buf_size == 0)
761                 return 0;
762         trace_buf_size = buf_size;
763         return 1;
764 }
765 __setup("trace_buf_size=", set_buf_size);
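/*
 * Illustrative note (editor's sketch): because memparse() is used above,
 * the buffer size may be given with a K/M/G suffix, e.g.:
 *
 *        trace_buf_size=1M
 */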
766
767 static int __init set_tracing_thresh(char *str)
768 {
769         unsigned long threshold;
770         int ret;
771
772         if (!str)
773                 return 0;
774         ret = kstrtoul(str, 0, &threshold);
775         if (ret < 0)
776                 return 0;
777         tracing_thresh = threshold * 1000;
778         return 1;
779 }
780 __setup("tracing_thresh=", set_tracing_thresh);
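/*
 * Illustrative note (editor's sketch): the value is given in
 * microseconds and stored in nanoseconds (note the "* 1000" above).
 * It is typically used by the latency tracers to record only latencies
 * above the threshold, e.g.:
 *
 *        tracing_thresh=100
 */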
781
782 unsigned long nsecs_to_usecs(unsigned long nsecs)
783 {
784         return nsecs / 1000;
785 }
786
787 /* These must match the bit positions in trace_iterator_flags */
788 static const char *trace_options[] = {
789         "print-parent",
790         "sym-offset",
791         "sym-addr",
792         "verbose",
793         "raw",
794         "hex",
795         "bin",
796         "block",
797         "stacktrace",
798         "trace_printk",
799         "ftrace_preempt",
800         "branch",
801         "annotate",
802         "userstacktrace",
803         "sym-userobj",
804         "printk-msg-only",
805         "context-info",
806         "latency-format",
807         "sleep-time",
808         "graph-time",
809         "record-cmd",
810         "overwrite",
811         "disable_on_free",
812         "irq-info",
813         "markers",
814         "function-trace",
815         NULL
816 };
817
818 static struct {
819         u64 (*func)(void);
820         const char *name;
821         int in_ns;              /* is this clock in nanoseconds? */
822 } trace_clocks[] = {
823         { trace_clock_local,    "local",        1 },
824         { trace_clock_global,   "global",       1 },
825         { trace_clock_counter,  "counter",      0 },
826         { trace_clock_jiffies,  "uptime",       0 },
827         { trace_clock,          "perf",         1 },
828         ARCH_TRACE_CLOCKS
829 };
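/*
 * Illustrative note (editor's sketch): these names are what the
 * trace_clock file in the tracing directory (and the trace_clock=
 * boot parameter above) accepts, e.g.:
 *
 *        echo global > /sys/kernel/debug/tracing/trace_clock
 */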
830
831 /*
832  * trace_parser_get_init - gets the buffer for trace parser
833  */
834 int trace_parser_get_init(struct trace_parser *parser, int size)
835 {
836         memset(parser, 0, sizeof(*parser));
837
838         parser->buffer = kmalloc(size, GFP_KERNEL);
839         if (!parser->buffer)
840                 return 1;
841
842         parser->size = size;
843         return 0;
844 }
845
846 /*
847  * trace_parser_put - frees the buffer for trace parser
848  */
849 void trace_parser_put(struct trace_parser *parser)
850 {
851         kfree(parser->buffer);
852 }
853
854 /*
855  * trace_get_user - reads the user input string separated by space
856  * (matched by isspace(ch))
857  *
858  * For each string found the 'struct trace_parser' is updated,
859  * and the function returns.
860  *
861  * Returns number of bytes read.
862  *
863  * See kernel/trace/trace.h for 'struct trace_parser' details.
864  */
865 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
866         size_t cnt, loff_t *ppos)
867 {
868         char ch;
869         size_t read = 0;
870         ssize_t ret;
871
872         if (!*ppos)
873                 trace_parser_clear(parser);
874
875         ret = get_user(ch, ubuf++);
876         if (ret)
877                 goto out;
878
879         read++;
880         cnt--;
881
882         /*
883          * If the parser is not finished with the last write,
884          * continue reading the user input without skipping spaces.
885          */
886         if (!parser->cont) {
887                 /* skip white space */
888                 while (cnt && isspace(ch)) {
889                         ret = get_user(ch, ubuf++);
890                         if (ret)
891                                 goto out;
892                         read++;
893                         cnt--;
894                 }
895
896                 /* only spaces were written */
897                 if (isspace(ch)) {
898                         *ppos += read;
899                         ret = read;
900                         goto out;
901                 }
902
903                 parser->idx = 0;
904         }
905
906         /* read the non-space input */
907         while (cnt && !isspace(ch)) {
908                 if (parser->idx < parser->size - 1)
909                         parser->buffer[parser->idx++] = ch;
910                 else {
911                         ret = -EINVAL;
912                         goto out;
913                 }
914                 ret = get_user(ch, ubuf++);
915                 if (ret)
916                         goto out;
917                 read++;
918                 cnt--;
919         }
920
921         /* We either got finished input or we have to wait for another call. */
922         if (isspace(ch)) {
923                 parser->buffer[parser->idx] = 0;
924                 parser->cont = false;
925         } else if (parser->idx < parser->size - 1) {
926                 parser->cont = true;
927                 parser->buffer[parser->idx++] = ch;
928         } else {
929                 ret = -EINVAL;
930                 goto out;
931         }
932
933         *ppos += read;
934         ret = read;
935
936 out:
937         return ret;
938 }
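/*
 * Illustrative note (editor's sketch): for input such as "foo bar\n"
 * written to a file that uses this helper, the first call returns with
 * parser->buffer holding "foo" and the next one with "bar". A write
 * that ends in the middle of a token leaves parser->cont set, so the
 * following write continues the same token.
 */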
939
940 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
941 {
942         int len;
943
944         if (s->len <= s->readpos)
945                 return -EBUSY;
946
947         len = s->len - s->readpos;
948         if (cnt > len)
949                 cnt = len;
950         memcpy(buf, s->buffer + s->readpos, cnt);
951
952         s->readpos += cnt;
953         return cnt;
954 }
955
956 unsigned long __read_mostly     tracing_thresh;
957
958 #ifdef CONFIG_TRACER_MAX_TRACE
959 /*
960  * Copy the new maximum trace into the separate maximum-trace
961  * structure. (this way the maximum trace is permanently saved,
962  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
963  */
964 static void
965 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
966 {
967         struct trace_buffer *trace_buf = &tr->trace_buffer;
968         struct trace_buffer *max_buf = &tr->max_buffer;
969         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
970         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
971
972         max_buf->cpu = cpu;
973         max_buf->time_start = data->preempt_timestamp;
974
975         max_data->saved_latency = tr->max_latency;
976         max_data->critical_start = data->critical_start;
977         max_data->critical_end = data->critical_end;
978
979         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
980         max_data->pid = tsk->pid;
981         /*
982          * If tsk == current, then use current_uid(), as that does not use
983          * RCU. The irq tracer can be called out of RCU scope.
984          */
985         if (tsk == current)
986                 max_data->uid = current_uid();
987         else
988                 max_data->uid = task_uid(tsk);
989
990         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
991         max_data->policy = tsk->policy;
992         max_data->rt_priority = tsk->rt_priority;
993
994         /* record this task's comm */
995         tracing_record_cmdline(tsk);
996 }
997
998 /**
999  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1000  * @tr: tracer
1001  * @tsk: the task with the latency
1002  * @cpu: The cpu that initiated the trace.
1003  *
1004  * Flip the buffers between the @tr and the max_tr and record information
1005  * about which task was the cause of this latency.
1006  */
1007 void
1008 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1009 {
1010         struct ring_buffer *buf;
1011
1012         if (tr->stop_count)
1013                 return;
1014
1015         WARN_ON_ONCE(!irqs_disabled());
1016
1017         if (!tr->allocated_snapshot) {
1018                 /* Only the nop tracer should hit this when disabling */
1019                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1020                 return;
1021         }
1022
1023         arch_spin_lock(&tr->max_lock);
1024
1025         buf = tr->trace_buffer.buffer;
1026         tr->trace_buffer.buffer = tr->max_buffer.buffer;
1027         tr->max_buffer.buffer = buf;
1028
1029         __update_max_tr(tr, tsk, cpu);
1030         arch_spin_unlock(&tr->max_lock);
1031 }
1032
1033 /**
1034  * update_max_tr_single - only copy one trace over, and reset the rest
1035  * @tr: tracer
1036  * @tsk: task with the latency
1037  * @cpu: the cpu of the buffer to copy.
1038  *
1039  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1040  */
1041 void
1042 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1043 {
1044         int ret;
1045
1046         if (tr->stop_count)
1047                 return;
1048
1049         WARN_ON_ONCE(!irqs_disabled());
1050         if (!tr->allocated_snapshot) {
1051                 /* Only the nop tracer should hit this when disabling */
1052                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1053                 return;
1054         }
1055
1056         arch_spin_lock(&tr->max_lock);
1057
1058         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1059
1060         if (ret == -EBUSY) {
1061                 /*
1062                  * We failed to swap the buffer due to a commit taking
1063                  * place on this CPU. We fail to record, but we reset
1064                  * the max trace buffer (no one writes directly to it)
1065                  * and flag that it failed.
1066                  */
1067                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1068                         "Failed to swap buffers due to commit in progress\n");
1069         }
1070
1071         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1072
1073         __update_max_tr(tr, tsk, cpu);
1074         arch_spin_unlock(&tr->max_lock);
1075 }
1076 #endif /* CONFIG_TRACER_MAX_TRACE */
1077
1078 static int wait_on_pipe(struct trace_iterator *iter)
1079 {
1080         /* Iterators are static, they should be filled or empty */
1081         if (trace_buffer_iter(iter, iter->cpu_file))
1082                 return 0;
1083
1084         return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1085 }
1086
1087 #ifdef CONFIG_FTRACE_STARTUP_TEST
1088 static int run_tracer_selftest(struct tracer *type)
1089 {
1090         struct trace_array *tr = &global_trace;
1091         struct tracer *saved_tracer = tr->current_trace;
1092         int ret;
1093
1094         if (!type->selftest || tracing_selftest_disabled)
1095                 return 0;
1096
1097         /*
1098          * Run a selftest on this tracer.
1099          * Here we reset the trace buffer, and set the current
1100          * tracer to be this tracer. The tracer can then run some
1101          * internal tracing to verify that everything is in order.
1102          * If we fail, we do not register this tracer.
1103          */
1104         tracing_reset_online_cpus(&tr->trace_buffer);
1105
1106         tr->current_trace = type;
1107
1108 #ifdef CONFIG_TRACER_MAX_TRACE
1109         if (type->use_max_tr) {
1110                 /* If we expanded the buffers, make sure the max is expanded too */
1111                 if (ring_buffer_expanded)
1112                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1113                                            RING_BUFFER_ALL_CPUS);
1114                 tr->allocated_snapshot = true;
1115         }
1116 #endif
1117
1118         /* the test is responsible for initializing and enabling */
1119         pr_info("Testing tracer %s: ", type->name);
1120         ret = type->selftest(type, tr);
1121         /* the test is responsible for resetting too */
1122         tr->current_trace = saved_tracer;
1123         if (ret) {
1124                 printk(KERN_CONT "FAILED!\n");
1125                 /* Add the warning after printing 'FAILED' */
1126                 WARN_ON(1);
1127                 return -1;
1128         }
1129         /* Only reset on passing, to avoid touching corrupted buffers */
1130         tracing_reset_online_cpus(&tr->trace_buffer);
1131
1132 #ifdef CONFIG_TRACER_MAX_TRACE
1133         if (type->use_max_tr) {
1134                 tr->allocated_snapshot = false;
1135
1136                 /* Shrink the max buffer again */
1137                 if (ring_buffer_expanded)
1138                         ring_buffer_resize(tr->max_buffer.buffer, 1,
1139                                            RING_BUFFER_ALL_CPUS);
1140         }
1141 #endif
1142
1143         printk(KERN_CONT "PASSED\n");
1144         return 0;
1145 }
1146 #else
1147 static inline int run_tracer_selftest(struct tracer *type)
1148 {
1149         return 0;
1150 }
1151 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1152
1153 /**
1154  * register_tracer - register a tracer with the ftrace system.
1155  * @type - the plugin for the tracer
1156  *
1157  * Register a new plugin tracer.
1158  */
1159 int register_tracer(struct tracer *type)
1160 {
1161         struct tracer *t;
1162         int ret = 0;
1163
1164         if (!type->name) {
1165                 pr_info("Tracer must have a name\n");
1166                 return -1;
1167         }
1168
1169         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1170                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1171                 return -1;
1172         }
1173
1174         mutex_lock(&trace_types_lock);
1175
1176         tracing_selftest_running = true;
1177
1178         for (t = trace_types; t; t = t->next) {
1179                 if (strcmp(type->name, t->name) == 0) {
1180                         /* already found */
1181                         pr_info("Tracer %s already registered\n",
1182                                 type->name);
1183                         ret = -1;
1184                         goto out;
1185                 }
1186         }
1187
1188         if (!type->set_flag)
1189                 type->set_flag = &dummy_set_flag;
1190         if (!type->flags)
1191                 type->flags = &dummy_tracer_flags;
1192         else
1193                 if (!type->flags->opts)
1194                         type->flags->opts = dummy_tracer_opt;
1195
1196         ret = run_tracer_selftest(type);
1197         if (ret < 0)
1198                 goto out;
1199
1200         type->next = trace_types;
1201         trace_types = type;
1202
1203  out:
1204         tracing_selftest_running = false;
1205         mutex_unlock(&trace_types_lock);
1206
1207         if (ret || !default_bootup_tracer)
1208                 goto out_unlock;
1209
1210         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1211                 goto out_unlock;
1212
1213         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1214         /* Do we want this tracer to start on bootup? */
1215         tracing_set_tracer(&global_trace, type->name);
1216         default_bootup_tracer = NULL;
1217         /* disable other selftests, since this will break them. */
1218         tracing_selftest_disabled = true;
1219 #ifdef CONFIG_FTRACE_STARTUP_TEST
1220         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1221                type->name);
1222 #endif
1223
1224  out_unlock:
1225         return ret;
1226 }
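/*
 * Illustrative sketch (editor's addition): the minimal shape of a plugin
 * tracer as register_tracer() expects it. The names are hypothetical and
 * the callback signature assumes this kernel's struct tracer layout:
 *
 *        static int example_trace_init(struct trace_array *tr)
 *        {
 *                return 0;
 *        }
 *
 *        static struct tracer example_tracer __read_mostly = {
 *                .name   = "example",
 *                .init   = example_trace_init,
 *        };
 *
 *        static __init int init_example_tracer(void)
 *        {
 *                return register_tracer(&example_tracer);
 *        }
 *        core_initcall(init_example_tracer);
 */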
1227
1228 void tracing_reset(struct trace_buffer *buf, int cpu)
1229 {
1230         struct ring_buffer *buffer = buf->buffer;
1231
1232         if (!buffer)
1233                 return;
1234
1235         ring_buffer_record_disable(buffer);
1236
1237         /* Make sure all commits have finished */
1238         synchronize_sched();
1239         ring_buffer_reset_cpu(buffer, cpu);
1240
1241         ring_buffer_record_enable(buffer);
1242 }
1243
1244 void tracing_reset_online_cpus(struct trace_buffer *buf)
1245 {
1246         struct ring_buffer *buffer = buf->buffer;
1247         int cpu;
1248
1249         if (!buffer)
1250                 return;
1251
1252         ring_buffer_record_disable(buffer);
1253
1254         /* Make sure all commits have finished */
1255         synchronize_sched();
1256
1257         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1258
1259         for_each_online_cpu(cpu)
1260                 ring_buffer_reset_cpu(buffer, cpu);
1261
1262         ring_buffer_record_enable(buffer);
1263 }
1264
1265 /* Must have trace_types_lock held */
1266 void tracing_reset_all_online_cpus(void)
1267 {
1268         struct trace_array *tr;
1269
1270         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1271                 tracing_reset_online_cpus(&tr->trace_buffer);
1272 #ifdef CONFIG_TRACER_MAX_TRACE
1273                 tracing_reset_online_cpus(&tr->max_buffer);
1274 #endif
1275         }
1276 }
1277
1278 #define SAVED_CMDLINES_DEFAULT 128
1279 #define NO_CMDLINE_MAP UINT_MAX
1280 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1281 struct saved_cmdlines_buffer {
1282         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1283         unsigned *map_cmdline_to_pid;
1284         unsigned cmdline_num;
1285         int cmdline_idx;
1286         char *saved_cmdlines;
1287 };
1288 static struct saved_cmdlines_buffer *savedcmd;
1289
1290 /* temporarily disable recording */
1291 static atomic_t trace_record_cmdline_disabled __read_mostly;
1292
1293 static inline char *get_saved_cmdlines(int idx)
1294 {
1295         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1296 }
1297
1298 static inline void set_cmdline(int idx, const char *cmdline)
1299 {
1300         memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1301 }
1302
1303 static int allocate_cmdlines_buffer(unsigned int val,
1304                                     struct saved_cmdlines_buffer *s)
1305 {
1306         s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1307                                         GFP_KERNEL);
1308         if (!s->map_cmdline_to_pid)
1309                 return -ENOMEM;
1310
1311         s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1312         if (!s->saved_cmdlines) {
1313                 kfree(s->map_cmdline_to_pid);
1314                 return -ENOMEM;
1315         }
1316
1317         s->cmdline_idx = 0;
1318         s->cmdline_num = val;
1319         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1320                sizeof(s->map_pid_to_cmdline));
1321         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1322                val * sizeof(*s->map_cmdline_to_pid));
1323
1324         return 0;
1325 }
1326
1327 static int trace_create_savedcmd(void)
1328 {
1329         int ret;
1330
1331         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1332         if (!savedcmd)
1333                 return -ENOMEM;
1334
1335         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1336         if (ret < 0) {
1337                 kfree(savedcmd);
1338                 savedcmd = NULL;
1339                 return -ENOMEM;
1340         }
1341
1342         return 0;
1343 }
1344
1345 int is_tracing_stopped(void)
1346 {
1347         return global_trace.stop_count;
1348 }
1349
1350 /**
1351  * tracing_start - quick start of the tracer
1352  *
1353  * If tracing is enabled but was stopped by tracing_stop,
1354  * this will start the tracer back up.
1355  */
1356 void tracing_start(void)
1357 {
1358         struct ring_buffer *buffer;
1359         unsigned long flags;
1360
1361         if (tracing_disabled)
1362                 return;
1363
1364         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1365         if (--global_trace.stop_count) {
1366                 if (global_trace.stop_count < 0) {
1367                         /* Someone screwed up their debugging */
1368                         WARN_ON_ONCE(1);
1369                         global_trace.stop_count = 0;
1370                 }
1371                 goto out;
1372         }
1373
1374         /* Prevent the buffers from switching */
1375         arch_spin_lock(&global_trace.max_lock);
1376
1377         buffer = global_trace.trace_buffer.buffer;
1378         if (buffer)
1379                 ring_buffer_record_enable(buffer);
1380
1381 #ifdef CONFIG_TRACER_MAX_TRACE
1382         buffer = global_trace.max_buffer.buffer;
1383         if (buffer)
1384                 ring_buffer_record_enable(buffer);
1385 #endif
1386
1387         arch_spin_unlock(&global_trace.max_lock);
1388
1389  out:
1390         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1391 }
1392
1393 static void tracing_start_tr(struct trace_array *tr)
1394 {
1395         struct ring_buffer *buffer;
1396         unsigned long flags;
1397
1398         if (tracing_disabled)
1399                 return;
1400
1401         /* If global, we need to also start the max tracer */
1402         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1403                 return tracing_start();
1404
1405         raw_spin_lock_irqsave(&tr->start_lock, flags);
1406
1407         if (--tr->stop_count) {
1408                 if (tr->stop_count < 0) {
1409                         /* Someone screwed up their debugging */
1410                         WARN_ON_ONCE(1);
1411                         tr->stop_count = 0;
1412                 }
1413                 goto out;
1414         }
1415
1416         buffer = tr->trace_buffer.buffer;
1417         if (buffer)
1418                 ring_buffer_record_enable(buffer);
1419
1420  out:
1421         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1422 }
1423
1424 /**
1425  * tracing_stop - quick stop of the tracer
1426  *
1427  * Light weight way to stop tracing. Use in conjunction with
1428  * tracing_start.
1429  */
1430 void tracing_stop(void)
1431 {
1432         struct ring_buffer *buffer;
1433         unsigned long flags;
1434
1435         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1436         if (global_trace.stop_count++)
1437                 goto out;
1438
1439         /* Prevent the buffers from switching */
1440         arch_spin_lock(&global_trace.max_lock);
1441
1442         buffer = global_trace.trace_buffer.buffer;
1443         if (buffer)
1444                 ring_buffer_record_disable(buffer);
1445
1446 #ifdef CONFIG_TRACER_MAX_TRACE
1447         buffer = global_trace.max_buffer.buffer;
1448         if (buffer)
1449                 ring_buffer_record_disable(buffer);
1450 #endif
1451
1452         arch_spin_unlock(&global_trace.max_lock);
1453
1454  out:
1455         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1456 }
1457
1458 static void tracing_stop_tr(struct trace_array *tr)
1459 {
1460         struct ring_buffer *buffer;
1461         unsigned long flags;
1462
1463         /* If global, we need to also stop the max tracer */
1464         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1465                 return tracing_stop();
1466
1467         raw_spin_lock_irqsave(&tr->start_lock, flags);
1468         if (tr->stop_count++)
1469                 goto out;
1470
1471         buffer = tr->trace_buffer.buffer;
1472         if (buffer)
1473                 ring_buffer_record_disable(buffer);
1474
1475  out:
1476         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1477 }
1478
1479 void trace_stop_cmdline_recording(void);
1480
1481 static int trace_save_cmdline(struct task_struct *tsk)
1482 {
1483         unsigned pid, idx;
1484
1485         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1486                 return 0;
1487
1488         /*
1489          * It's not the end of the world if we don't get
1490          * the lock, but we also don't want to spin
1491          * nor do we want to disable interrupts,
1492          * so if we miss here, then better luck next time.
1493          */
1494         if (!arch_spin_trylock(&trace_cmdline_lock))
1495                 return 0;
1496
1497         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1498         if (idx == NO_CMDLINE_MAP) {
1499                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1500
1501                 /*
1502                  * Check whether the cmdline buffer at idx has a pid
1503                  * mapped. We are going to overwrite that entry so we
1504                  * need to clear the map_pid_to_cmdline. Otherwise we
1505                  * would read the new comm for the old pid.
1506                  */
1507                 pid = savedcmd->map_cmdline_to_pid[idx];
1508                 if (pid != NO_CMDLINE_MAP)
1509                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1510
1511                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1512                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1513
1514                 savedcmd->cmdline_idx = idx;
1515         }
1516
1517         set_cmdline(idx, tsk->comm);
1518
1519         arch_spin_unlock(&trace_cmdline_lock);
1520
1521         return 1;
1522 }
1523
1524 static void __trace_find_cmdline(int pid, char comm[])
1525 {
1526         unsigned map;
1527
1528         if (!pid) {
1529                 strcpy(comm, "<idle>");
1530                 return;
1531         }
1532
1533         if (WARN_ON_ONCE(pid < 0)) {
1534                 strcpy(comm, "<XXX>");
1535                 return;
1536         }
1537
1538         if (pid > PID_MAX_DEFAULT) {
1539                 strcpy(comm, "<...>");
1540                 return;
1541         }
1542
1543         map = savedcmd->map_pid_to_cmdline[pid];
1544         if (map != NO_CMDLINE_MAP)
1545                 strcpy(comm, get_saved_cmdlines(map));
1546         else
1547                 strcpy(comm, "<...>");
1548 }
1549
1550 void trace_find_cmdline(int pid, char comm[])
1551 {
1552         preempt_disable();
1553         arch_spin_lock(&trace_cmdline_lock);
1554
1555         __trace_find_cmdline(pid, comm);
1556
1557         arch_spin_unlock(&trace_cmdline_lock);
1558         preempt_enable();
1559 }
1560
1561 void tracing_record_cmdline(struct task_struct *tsk)
1562 {
1563         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1564                 return;
1565
1566         if (!__this_cpu_read(trace_cmdline_save))
1567                 return;
1568
1569         if (trace_save_cmdline(tsk))
1570                 __this_cpu_write(trace_cmdline_save, false);
1571 }
1572
1573 void
1574 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1575                              int pc)
1576 {
1577         struct task_struct *tsk = current;
1578
1579         entry->preempt_count            = pc & 0xff;
1580         entry->pid                      = (tsk) ? tsk->pid : 0;
1581         entry->flags =
1582 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1583                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1584 #else
1585                 TRACE_FLAG_IRQS_NOSUPPORT |
1586 #endif
1587                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1588                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1589                 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1590                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1591 }
1592 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1593
1594 struct ring_buffer_event *
1595 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1596                           int type,
1597                           unsigned long len,
1598                           unsigned long flags, int pc)
1599 {
1600         struct ring_buffer_event *event;
1601
1602         event = ring_buffer_lock_reserve(buffer, len);
1603         if (event != NULL) {
1604                 struct trace_entry *ent = ring_buffer_event_data(event);
1605
1606                 tracing_generic_entry_update(ent, flags, pc);
1607                 ent->type = type;
1608         }
1609
1610         return event;
1611 }
1612
1613 void
1614 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1615 {
1616         __this_cpu_write(trace_cmdline_save, true);
1617         ring_buffer_unlock_commit(buffer, event);
1618 }
1619
1620 static inline void
1621 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1622                              struct ring_buffer_event *event,
1623                              unsigned long flags, int pc)
1624 {
1625         __buffer_unlock_commit(buffer, event);
1626
1627         ftrace_trace_stack(buffer, flags, 6, pc);
1628         ftrace_trace_userstack(buffer, flags, pc);
1629 }
1630
1631 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1632                                 struct ring_buffer_event *event,
1633                                 unsigned long flags, int pc)
1634 {
1635         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1636 }
1637 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1638
1639 static struct ring_buffer *temp_buffer;
1640
1641 struct ring_buffer_event *
1642 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1643                           struct ftrace_event_file *ftrace_file,
1644                           int type, unsigned long len,
1645                           unsigned long flags, int pc)
1646 {
1647         struct ring_buffer_event *entry;
1648
1649         *current_rb = ftrace_file->tr->trace_buffer.buffer;
1650         entry = trace_buffer_lock_reserve(*current_rb,
1651                                          type, len, flags, pc);
1652         /*
1653          * If tracing is off, but we have triggers enabled,
1654          * we still need to look at the event data. Use the temp_buffer
1655          * to store the trace event for the trigger to use. It's recursion
1656          * safe and will not be recorded anywhere.
1657          */
1658         if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1659                 *current_rb = temp_buffer;
1660                 entry = trace_buffer_lock_reserve(*current_rb,
1661                                                   type, len, flags, pc);
1662         }
1663         return entry;
1664 }
1665 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1666
1667 struct ring_buffer_event *
1668 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1669                                   int type, unsigned long len,
1670                                   unsigned long flags, int pc)
1671 {
1672         *current_rb = global_trace.trace_buffer.buffer;
1673         return trace_buffer_lock_reserve(*current_rb,
1674                                          type, len, flags, pc);
1675 }
1676 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1677
1678 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1679                                         struct ring_buffer_event *event,
1680                                         unsigned long flags, int pc)
1681 {
1682         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1683 }
1684 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1685
1686 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1687                                      struct ring_buffer_event *event,
1688                                      unsigned long flags, int pc,
1689                                      struct pt_regs *regs)
1690 {
1691         __buffer_unlock_commit(buffer, event);
1692
1693         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1694         ftrace_trace_userstack(buffer, flags, pc);
1695 }
1696 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1697
1698 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1699                                          struct ring_buffer_event *event)
1700 {
1701         ring_buffer_discard_commit(buffer, event);
1702 }
1703 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1704
1705 void
1706 trace_function(struct trace_array *tr,
1707                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1708                int pc)
1709 {
1710         struct ftrace_event_call *call = &event_function;
1711         struct ring_buffer *buffer = tr->trace_buffer.buffer;
1712         struct ring_buffer_event *event;
1713         struct ftrace_entry *entry;
1714
1715         /* If we are reading the ring buffer, don't trace */
1716         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1717                 return;
1718
1719         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1720                                           flags, pc);
1721         if (!event)
1722                 return;
1723         entry   = ring_buffer_event_data(event);
1724         entry->ip                       = ip;
1725         entry->parent_ip                = parent_ip;
1726
1727         if (!call_filter_check_discard(call, entry, buffer, event))
1728                 __buffer_unlock_commit(buffer, event);
1729 }
1730
1731 #ifdef CONFIG_STACKTRACE
1732
1733 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1734 struct ftrace_stack {
1735         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1736 };
1737
1738 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1739 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1740
1741 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1742                                  unsigned long flags,
1743                                  int skip, int pc, struct pt_regs *regs)
1744 {
1745         struct ftrace_event_call *call = &event_kernel_stack;
1746         struct ring_buffer_event *event;
1747         struct stack_entry *entry;
1748         struct stack_trace trace;
1749         int use_stack;
1750         int size = FTRACE_STACK_ENTRIES;
1751
1752         trace.nr_entries        = 0;
1753         trace.skip              = skip;
1754
1755         /*
1756          * Since events can happen in NMIs, there's no safe way to
1757          * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
1758          * or NMI comes in, it will just have to use the default-sized
1759          * stack in the event itself (FTRACE_STACK_ENTRIES entries).
1760          */
1761         preempt_disable_notrace();
1762
1763         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1764         /*
1765          * We don't need any atomic variables, just a barrier.
1766          * If an interrupt comes in, we don't care, because it would
1767          * have exited and put the counter back to what we want.
1768          * We just need a barrier to keep gcc from moving things
1769          * around.
1770          */
1771         barrier();
1772         if (use_stack == 1) {
1773                 trace.entries           = this_cpu_ptr(ftrace_stack.calls);
1774                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1775
1776                 if (regs)
1777                         save_stack_trace_regs(regs, &trace);
1778                 else
1779                         save_stack_trace(&trace);
1780
1781                 if (trace.nr_entries > size)
1782                         size = trace.nr_entries;
1783         } else
1784                 /* From now on, use_stack is a boolean */
1785                 use_stack = 0;
1786
1787         size *= sizeof(unsigned long);
1788
1789         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1790                                           sizeof(*entry) + size, flags, pc);
1791         if (!event)
1792                 goto out;
1793         entry = ring_buffer_event_data(event);
1794
1795         memset(&entry->caller, 0, size);
1796
1797         if (use_stack)
1798                 memcpy(&entry->caller, trace.entries,
1799                        trace.nr_entries * sizeof(unsigned long));
1800         else {
1801                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1802                 trace.entries           = entry->caller;
1803                 if (regs)
1804                         save_stack_trace_regs(regs, &trace);
1805                 else
1806                         save_stack_trace(&trace);
1807         }
1808
1809         entry->size = trace.nr_entries;
1810
1811         if (!call_filter_check_discard(call, entry, buffer, event))
1812                 __buffer_unlock_commit(buffer, event);
1813
1814  out:
1815         /* Again, don't let gcc optimize things here */
1816         barrier();
1817         __this_cpu_dec(ftrace_stack_reserve);
1818         preempt_enable_notrace();
1819
1820 }
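
/*
 * A minimal sketch of the reservation idea used above, with hypothetical
 * names (illustration only, not kernel code): a per-cpu nesting counter
 * plus a compiler barrier lets the outermost caller claim the large
 * per-cpu scratch stack, while a nested interrupt/NMI caller falls back
 * to the small buffer inside the event itself.
 *
 *	depth = __this_cpu_inc_return(stack_reserve);
 *	barrier();		// keep the compiler from reordering
 *	if (depth == 1)
 *		save_into_percpu_scratch();
 *	else
 *		save_into_event_buffer();
 *	barrier();
 *	__this_cpu_dec(stack_reserve);
 */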
1821
1822 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1823                              int skip, int pc, struct pt_regs *regs)
1824 {
1825         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1826                 return;
1827
1828         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1829 }
1830
1831 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1832                         int skip, int pc)
1833 {
1834         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1835                 return;
1836
1837         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1838 }
1839
1840 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1841                    int pc)
1842 {
1843         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1844 }
1845
1846 /**
1847  * trace_dump_stack - record a stack back trace in the trace buffer
1848  * @skip: Number of functions to skip (helper handlers)
1849  */
1850 void trace_dump_stack(int skip)
1851 {
1852         unsigned long flags;
1853
1854         if (tracing_disabled || tracing_selftest_running)
1855                 return;
1856
1857         local_save_flags(flags);
1858
1859         /*
1860          * Skip 3 more frames; that seems to get us to the caller of
1861          * this function.
1862          */
1863         skip += 3;
1864         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1865                              flags, skip, preempt_count(), NULL);
1866 }
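
/*
 * Usage sketch (hypothetical caller, not part of this file): built-in
 * kernel code that wants a backtrace recorded in the ring buffer rather
 * than printed to the console can call:
 *
 *	if (suspicious_condition)
 *		trace_dump_stack(0);	// 0: skip no extra frames
 *
 * The stack then shows up in the "trace" file alongside other events.
 */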
1867
1868 static DEFINE_PER_CPU(int, user_stack_count);
1869
1870 void
1871 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1872 {
1873         struct ftrace_event_call *call = &event_user_stack;
1874         struct ring_buffer_event *event;
1875         struct userstack_entry *entry;
1876         struct stack_trace trace;
1877
1878         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1879                 return;
1880
1881         /*
1882          * NMIs cannot handle page faults, even with fixups.
1883          * Saving the user stack can (and often does) fault.
1884          */
1885         if (unlikely(in_nmi()))
1886                 return;
1887
1888         /*
1889          * prevent recursion, since the user stack tracing may
1890          * trigger other kernel events.
1891          */
1892         preempt_disable();
1893         if (__this_cpu_read(user_stack_count))
1894                 goto out;
1895
1896         __this_cpu_inc(user_stack_count);
1897
1898         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1899                                           sizeof(*entry), flags, pc);
1900         if (!event)
1901                 goto out_drop_count;
1902         entry   = ring_buffer_event_data(event);
1903
1904         entry->tgid             = current->tgid;
1905         memset(&entry->caller, 0, sizeof(entry->caller));
1906
1907         trace.nr_entries        = 0;
1908         trace.max_entries       = FTRACE_STACK_ENTRIES;
1909         trace.skip              = 0;
1910         trace.entries           = entry->caller;
1911
1912         save_stack_trace_user(&trace);
1913         if (!call_filter_check_discard(call, entry, buffer, event))
1914                 __buffer_unlock_commit(buffer, event);
1915
1916  out_drop_count:
1917         __this_cpu_dec(user_stack_count);
1918  out:
1919         preempt_enable();
1920 }
1921
1922 #ifdef UNUSED
1923 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1924 {
1925         ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1926 }
1927 #endif /* UNUSED */
1928
1929 #endif /* CONFIG_STACKTRACE */
1930
1931 /* created for use with alloc_percpu */
1932 struct trace_buffer_struct {
1933         char buffer[TRACE_BUF_SIZE];
1934 };
1935
1936 static struct trace_buffer_struct *trace_percpu_buffer;
1937 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1938 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1939 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1940
1941 /*
1942  * The buffer used depends on the context. There is a per-cpu
1943  * buffer for normal context, softirq context, hard irq context and
1944  * NMI context. This allows for lockless recording.
1945  *
1946  * Note: if the buffers failed to be allocated, then this returns NULL.
1947  */
1948 static char *get_trace_buf(void)
1949 {
1950         struct trace_buffer_struct *percpu_buffer;
1951
1952         /*
1953          * If we have allocated per cpu buffers, then we do not
1954          * need to do any locking.
1955          */
1956         if (in_nmi())
1957                 percpu_buffer = trace_percpu_nmi_buffer;
1958         else if (in_irq())
1959                 percpu_buffer = trace_percpu_irq_buffer;
1960         else if (in_softirq())
1961                 percpu_buffer = trace_percpu_sirq_buffer;
1962         else
1963                 percpu_buffer = trace_percpu_buffer;
1964
1965         if (!percpu_buffer)
1966                 return NULL;
1967
1968         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1969 }
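
/*
 * Usage sketch: trace_vbprintk() and __trace_array_vprintk() below use
 * this buffer. Because each context (normal/softirq/irq/NMI) gets its
 * own per-cpu buffer, the pattern needs no locking as long as
 * preemption stays disabled while the buffer is in use:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf)
 *		len = vsnprintf(buf, TRACE_BUF_SIZE, fmt, args);
 *	preempt_enable_notrace();
 */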
1970
1971 static int alloc_percpu_trace_buffer(void)
1972 {
1973         struct trace_buffer_struct *buffers;
1974         struct trace_buffer_struct *sirq_buffers;
1975         struct trace_buffer_struct *irq_buffers;
1976         struct trace_buffer_struct *nmi_buffers;
1977
1978         buffers = alloc_percpu(struct trace_buffer_struct);
1979         if (!buffers)
1980                 goto err_warn;
1981
1982         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1983         if (!sirq_buffers)
1984                 goto err_sirq;
1985
1986         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1987         if (!irq_buffers)
1988                 goto err_irq;
1989
1990         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1991         if (!nmi_buffers)
1992                 goto err_nmi;
1993
1994         trace_percpu_buffer = buffers;
1995         trace_percpu_sirq_buffer = sirq_buffers;
1996         trace_percpu_irq_buffer = irq_buffers;
1997         trace_percpu_nmi_buffer = nmi_buffers;
1998
1999         return 0;
2000
2001  err_nmi:
2002         free_percpu(irq_buffers);
2003  err_irq:
2004         free_percpu(sirq_buffers);
2005  err_sirq:
2006         free_percpu(buffers);
2007  err_warn:
2008         WARN(1, "Could not allocate percpu trace_printk buffer");
2009         return -ENOMEM;
2010 }
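
/*
 * Note on the error path above: each failed alloc_percpu() jumps to a
 * label that frees only what was already allocated, in reverse order.
 * A minimal sketch of the idiom with hypothetical names:
 *
 *	a = alloc_percpu(struct foo);
 *	if (!a)
 *		goto err;
 *	b = alloc_percpu(struct foo);
 *	if (!b)
 *		goto err_free_a;
 *	return 0;
 * err_free_a:
 *	free_percpu(a);
 * err:
 *	return -ENOMEM;
 */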
2011
2012 static int buffers_allocated;
2013
2014 void trace_printk_init_buffers(void)
2015 {
2016         if (buffers_allocated)
2017                 return;
2018
2019         if (alloc_percpu_trace_buffer())
2020                 return;
2021
2022         /* trace_printk() is for debug use only. Don't use it in production. */
2023
2024         pr_warning("\n**********************************************************\n");
2025         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2026         pr_warning("**                                                      **\n");
2027         pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2028         pr_warning("**                                                      **\n");
2029         pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2030         pr_warning("** unsafe for production use.                           **\n");
2031         pr_warning("**                                                      **\n");
2032         pr_warning("** If you see this message and you are not debugging    **\n");
2033         pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2034         pr_warning("**                                                      **\n");
2035         pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2036         pr_warning("**********************************************************\n");
2037
2038         /* Expand the buffers to set size */
2039         tracing_update_buffers();
2040
2041         buffers_allocated = 1;
2042
2043         /*
2044          * trace_printk_init_buffers() can be called by modules.
2045          * If that happens, then we need to start cmdline recording
2046          * directly here. If global_trace.trace_buffer.buffer is already
2047          * allocated here, then this was called by module code.
2048          */
2049         if (global_trace.trace_buffer.buffer)
2050                 tracing_start_cmdline_record();
2051 }
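
/*
 * Usage sketch (hypothetical debug code, not part of this file): the
 * buffers above back trace_printk(), which writes to the ring buffer
 * instead of the console and is much cheaper than printk() in hot
 * paths:
 *
 *	trace_printk("processing item=%p count=%d\n", item, count);
 *
 * The output is read back through the "trace" or "trace_pipe" files.
 */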
2052
2053 void trace_printk_start_comm(void)
2054 {
2055         /* Start tracing comms if trace printk is set */
2056         if (!buffers_allocated)
2057                 return;
2058         tracing_start_cmdline_record();
2059 }
2060
2061 static void trace_printk_start_stop_comm(int enabled)
2062 {
2063         if (!buffers_allocated)
2064                 return;
2065
2066         if (enabled)
2067                 tracing_start_cmdline_record();
2068         else
2069                 tracing_stop_cmdline_record();
2070 }
2071
2072 /**
2073  * trace_vbprintk - write binary msg to tracing buffer
2074  * @ip: caller address; @fmt: binary printk format; @args: arguments for @fmt
2075  */
2076 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2077 {
2078         struct ftrace_event_call *call = &event_bprint;
2079         struct ring_buffer_event *event;
2080         struct ring_buffer *buffer;
2081         struct trace_array *tr = &global_trace;
2082         struct bprint_entry *entry;
2083         unsigned long flags;
2084         char *tbuffer;
2085         int len = 0, size, pc;
2086
2087         if (unlikely(tracing_selftest_running || tracing_disabled))
2088                 return 0;
2089
2090         /* Don't pollute graph traces with trace_vprintk internals */
2091         pause_graph_tracing();
2092
2093         pc = preempt_count();
2094         preempt_disable_notrace();
2095
2096         tbuffer = get_trace_buf();
2097         if (!tbuffer) {
2098                 len = 0;
2099                 goto out;
2100         }
2101
2102         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2103
2104         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2105                 goto out;
2106
2107         local_save_flags(flags);
2108         size = sizeof(*entry) + sizeof(u32) * len;
2109         buffer = tr->trace_buffer.buffer;
2110         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2111                                           flags, pc);
2112         if (!event)
2113                 goto out;
2114         entry = ring_buffer_event_data(event);
2115         entry->ip                       = ip;
2116         entry->fmt                      = fmt;
2117
2118         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2119         if (!call_filter_check_discard(call, entry, buffer, event)) {
2120                 __buffer_unlock_commit(buffer, event);
2121                 ftrace_trace_stack(buffer, flags, 6, pc);
2122         }
2123
2124 out:
2125         preempt_enable_notrace();
2126         unpause_graph_tracing();
2127
2128         return len;
2129 }
2130 EXPORT_SYMBOL_GPL(trace_vbprintk);
2131
2132 static int
2133 __trace_array_vprintk(struct ring_buffer *buffer,
2134                       unsigned long ip, const char *fmt, va_list args)
2135 {
2136         struct ftrace_event_call *call = &event_print;
2137         struct ring_buffer_event *event;
2138         int len = 0, size, pc;
2139         struct print_entry *entry;
2140         unsigned long flags;
2141         char *tbuffer;
2142
2143         if (tracing_disabled || tracing_selftest_running)
2144                 return 0;
2145
2146         /* Don't pollute graph traces with trace_vprintk internals */
2147         pause_graph_tracing();
2148
2149         pc = preempt_count();
2150         preempt_disable_notrace();
2151
2153         tbuffer = get_trace_buf();
2154         if (!tbuffer) {
2155                 len = 0;
2156                 goto out;
2157         }
2158
2159         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2160         if (len > TRACE_BUF_SIZE)
2161                 goto out;
2162
2163         local_save_flags(flags);
2164         size = sizeof(*entry) + len + 1;
2165         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2166                                           flags, pc);
2167         if (!event)
2168                 goto out;
2169         entry = ring_buffer_event_data(event);
2170         entry->ip = ip;
2171
2172         memcpy(&entry->buf, tbuffer, len);
2173         entry->buf[len] = '\0';
2174         if (!call_filter_check_discard(call, entry, buffer, event)) {
2175                 __buffer_unlock_commit(buffer, event);
2176                 ftrace_trace_stack(buffer, flags, 6, pc);
2177         }
2178  out:
2179         preempt_enable_notrace();
2180         unpause_graph_tracing();
2181
2182         return len;
2183 }
2184
2185 int trace_array_vprintk(struct trace_array *tr,
2186                         unsigned long ip, const char *fmt, va_list args)
2187 {
2188         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2189 }
2190
2191 int trace_array_printk(struct trace_array *tr,
2192                        unsigned long ip, const char *fmt, ...)
2193 {
2194         int ret;
2195         va_list ap;
2196
2197         if (!(trace_flags & TRACE_ITER_PRINTK))
2198                 return 0;
2199
2200         va_start(ap, fmt);
2201         ret = trace_array_vprintk(tr, ip, fmt, ap);
2202         va_end(ap);
2203         return ret;
2204 }
2205
2206 int trace_array_printk_buf(struct ring_buffer *buffer,
2207                            unsigned long ip, const char *fmt, ...)
2208 {
2209         int ret;
2210         va_list ap;
2211
2212         if (!(trace_flags & TRACE_ITER_PRINTK))
2213                 return 0;
2214
2215         va_start(ap, fmt);
2216         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2217         va_end(ap);
2218         return ret;
2219 }
2220
2221 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2222 {
2223         return trace_array_vprintk(&global_trace, ip, fmt, args);
2224 }
2225 EXPORT_SYMBOL_GPL(trace_vprintk);
2226
2227 static void trace_iterator_increment(struct trace_iterator *iter)
2228 {
2229         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2230
2231         iter->idx++;
2232         if (buf_iter)
2233                 ring_buffer_read(buf_iter, NULL);
2234 }
2235
2236 static struct trace_entry *
2237 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2238                 unsigned long *lost_events)
2239 {
2240         struct ring_buffer_event *event;
2241         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2242
2243         if (buf_iter)
2244                 event = ring_buffer_iter_peek(buf_iter, ts);
2245         else
2246                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2247                                          lost_events);
2248
2249         if (event) {
2250                 iter->ent_size = ring_buffer_event_length(event);
2251                 return ring_buffer_event_data(event);
2252         }
2253         iter->ent_size = 0;
2254         return NULL;
2255 }
2256
2257 static struct trace_entry *
2258 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2259                   unsigned long *missing_events, u64 *ent_ts)
2260 {
2261         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2262         struct trace_entry *ent, *next = NULL;
2263         unsigned long lost_events = 0, next_lost = 0;
2264         int cpu_file = iter->cpu_file;
2265         u64 next_ts = 0, ts;
2266         int next_cpu = -1;
2267         int next_size = 0;
2268         int cpu;
2269
2270         /*
2271          * If we are in a per_cpu trace file, don't bother iterating over
2272          * all cpus; just peek at the one cpu directly.
2273          */
2274         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2275                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2276                         return NULL;
2277                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2278                 if (ent_cpu)
2279                         *ent_cpu = cpu_file;
2280
2281                 return ent;
2282         }
2283
2284         for_each_tracing_cpu(cpu) {
2285
2286                 if (ring_buffer_empty_cpu(buffer, cpu))
2287                         continue;
2288
2289                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2290
2291                 /*
2292                  * Pick the entry with the smallest timestamp:
2293                  */
2294                 if (ent && (!next || ts < next_ts)) {
2295                         next = ent;
2296                         next_cpu = cpu;
2297                         next_ts = ts;
2298                         next_lost = lost_events;
2299                         next_size = iter->ent_size;
2300                 }
2301         }
2302
2303         iter->ent_size = next_size;
2304
2305         if (ent_cpu)
2306                 *ent_cpu = next_cpu;
2307
2308         if (ent_ts)
2309                 *ent_ts = next_ts;
2310
2311         if (missing_events)
2312                 *missing_events = next_lost;
2313
2314         return next;
2315 }
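
/*
 * The loop above is effectively a k-way merge keyed on the timestamp:
 * each per-cpu buffer is already ordered in time, so peeking at every
 * cpu and taking the smallest ts yields a globally ordered stream.
 * Selection step, sketched with simplified locals:
 *
 *	next = NULL;
 *	for_each_tracing_cpu(cpu) {
 *		ent = peek_next_entry(iter, cpu, &ts, &lost);
 *		if (ent && (!next || ts < next_ts)) {
 *			next = ent;
 *			next_ts = ts;
 *		}
 *	}
 */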
2316
2317 /* Find the next real entry, without updating the iterator itself */
2318 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2319                                           int *ent_cpu, u64 *ent_ts)
2320 {
2321         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2322 }
2323
2324 /* Find the next real entry, and increment the iterator to the next entry */
2325 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2326 {
2327         iter->ent = __find_next_entry(iter, &iter->cpu,
2328                                       &iter->lost_events, &iter->ts);
2329
2330         if (iter->ent)
2331                 trace_iterator_increment(iter);
2332
2333         return iter->ent ? iter : NULL;
2334 }
2335
2336 static void trace_consume(struct trace_iterator *iter)
2337 {
2338         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2339                             &iter->lost_events);
2340 }
2341
2342 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2343 {
2344         struct trace_iterator *iter = m->private;
2345         int i = (int)*pos;
2346         void *ent;
2347
2348         WARN_ON_ONCE(iter->leftover);
2349
2350         (*pos)++;
2351
2352         /* can't go backwards */
2353         if (iter->idx > i)
2354                 return NULL;
2355
2356         if (iter->idx < 0)
2357                 ent = trace_find_next_entry_inc(iter);
2358         else
2359                 ent = iter;
2360
2361         while (ent && iter->idx < i)
2362                 ent = trace_find_next_entry_inc(iter);
2363
2364         iter->pos = *pos;
2365
2366         return ent;
2367 }
2368
2369 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2370 {
2371         struct ring_buffer_event *event;
2372         struct ring_buffer_iter *buf_iter;
2373         unsigned long entries = 0;
2374         u64 ts;
2375
2376         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2377
2378         buf_iter = trace_buffer_iter(iter, cpu);
2379         if (!buf_iter)
2380                 return;
2381
2382         ring_buffer_iter_reset(buf_iter);
2383
2384         /*
2385          * With the max latency tracers, a reset may never have taken
2386          * place on a cpu. This is evident when the timestamp is before
2387          * the start of the buffer.
2388          */
2389         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2390                 if (ts >= iter->trace_buffer->time_start)
2391                         break;
2392                 entries++;
2393                 ring_buffer_read(buf_iter, NULL);
2394         }
2395
2396         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2397 }
2398
2399 /*
2400  * The current tracer is copied to avoid taking a global lock
2401  * all around.
2402  */
2403 static void *s_start(struct seq_file *m, loff_t *pos)
2404 {
2405         struct trace_iterator *iter = m->private;
2406         struct trace_array *tr = iter->tr;
2407         int cpu_file = iter->cpu_file;
2408         void *p = NULL;
2409         loff_t l = 0;
2410         int cpu;
2411
2412         /*
2413          * Copy the tracer to avoid using a global lock all around.
2414          * iter->trace is a copy of current_trace; the name pointer
2415          * may be compared instead of using strcmp(), as iter->trace->name
2416          * will point to the same string as current_trace->name.
2417          */
2418         mutex_lock(&trace_types_lock);
2419         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2420                 *iter->trace = *tr->current_trace;
2421         mutex_unlock(&trace_types_lock);
2422
2423 #ifdef CONFIG_TRACER_MAX_TRACE
2424         if (iter->snapshot && iter->trace->use_max_tr)
2425                 return ERR_PTR(-EBUSY);
2426 #endif
2427
2428         if (!iter->snapshot)
2429                 atomic_inc(&trace_record_cmdline_disabled);
2430
2431         if (*pos != iter->pos) {
2432                 iter->ent = NULL;
2433                 iter->cpu = 0;
2434                 iter->idx = -1;
2435
2436                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2437                         for_each_tracing_cpu(cpu)
2438                                 tracing_iter_reset(iter, cpu);
2439                 } else
2440                         tracing_iter_reset(iter, cpu_file);
2441
2442                 iter->leftover = 0;
2443                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2444                         ;
2445
2446         } else {
2447                 /*
2448                  * If we overflowed the seq_file before, then we want
2449                  * to just reuse the trace_seq buffer again.
2450                  */
2451                 if (iter->leftover)
2452                         p = iter;
2453                 else {
2454                         l = *pos - 1;
2455                         p = s_next(m, p, &l);
2456                 }
2457         }
2458
2459         trace_event_read_lock();
2460         trace_access_lock(cpu_file);
2461         return p;
2462 }
2463
2464 static void s_stop(struct seq_file *m, void *p)
2465 {
2466         struct trace_iterator *iter = m->private;
2467
2468 #ifdef CONFIG_TRACER_MAX_TRACE
2469         if (iter->snapshot && iter->trace->use_max_tr)
2470                 return;
2471 #endif
2472
2473         if (!iter->snapshot)
2474                 atomic_dec(&trace_record_cmdline_disabled);
2475
2476         trace_access_unlock(iter->cpu_file);
2477         trace_event_read_unlock();
2478 }
2479
2480 static void
2481 get_total_entries(struct trace_buffer *buf,
2482                   unsigned long *total, unsigned long *entries)
2483 {
2484         unsigned long count;
2485         int cpu;
2486
2487         *total = 0;
2488         *entries = 0;
2489
2490         for_each_tracing_cpu(cpu) {
2491                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2492                 /*
2493                  * If this buffer has skipped entries, then we hold all
2494                  * entries for the trace and we need to ignore the
2495                  * ones before the time stamp.
2496                  */
2497                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2498                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2499                         /* total is the same as the entries */
2500                         *total += count;
2501                 } else
2502                         *total += count +
2503                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2504                 *entries += count;
2505         }
2506 }
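
/*
 * Worked example (assumed numbers): if a cpu buffer currently holds
 * 1000 entries and the ring buffer reports 200 overruns (overwritten
 * events), then *entries gains 1000 while *total gains 1200. A buffer
 * with skipped_entries set adds its adjusted count to both, since
 * nothing was overwritten there.
 */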
2507
2508 static void print_lat_help_header(struct seq_file *m)
2509 {
2510         seq_puts(m, "#                  _------=> CPU#            \n");
2511         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2512         seq_puts(m, "#                | / _----=> need-resched    \n");
2513         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2514         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2515         seq_puts(m, "#                |||| /     delay             \n");
2516         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2517         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2518 }
2519
2520 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2521 {
2522         unsigned long total;
2523         unsigned long entries;
2524
2525         get_total_entries(buf, &total, &entries);
2526         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2527                    entries, total, num_online_cpus());
2528         seq_puts(m, "#\n");
2529 }
2530
2531 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2532 {
2533         print_event_info(buf, m);
2534         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2535         seq_puts(m, "#              | |       |          |         |\n");
2536 }
2537
2538 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2539 {
2540         print_event_info(buf, m);
2541         seq_puts(m, "#                              _-----=> irqs-off\n");
2542         seq_puts(m, "#                             / _----=> need-resched\n");
2543         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2544         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2545         seq_puts(m, "#                            ||| /     delay\n");
2546         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2547         seq_puts(m, "#              | |       |   ||||       |         |\n");
2548 }
2549
2550 void
2551 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2552 {
2553         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2554         struct trace_buffer *buf = iter->trace_buffer;
2555         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2556         struct tracer *type = iter->trace;
2557         unsigned long entries;
2558         unsigned long total;
2559         const char *name = type->name;
2560
2562
2563         get_total_entries(buf, &total, &entries);
2564
2565         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2566                    name, UTS_RELEASE);
2567         seq_puts(m, "# -----------------------------------"
2568                  "---------------------------------\n");
2569         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2570                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2571                    nsecs_to_usecs(data->saved_latency),
2572                    entries,
2573                    total,
2574                    buf->cpu,
2575 #if defined(CONFIG_PREEMPT_NONE)
2576                    "server",
2577 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2578                    "desktop",
2579 #elif defined(CONFIG_PREEMPT)
2580                    "preempt",
2581 #else
2582                    "unknown",
2583 #endif
2584                    /* These are reserved for later use */
2585                    0, 0, 0, 0);
2586 #ifdef CONFIG_SMP
2587         seq_printf(m, " #P:%d)\n", num_online_cpus());
2588 #else
2589         seq_puts(m, ")\n");
2590 #endif
2591         seq_puts(m, "#    -----------------\n");
2592         seq_printf(m, "#    | task: %.16s-%d "
2593                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2594                    data->comm, data->pid,
2595                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2596                    data->policy, data->rt_priority);
2597         seq_puts(m, "#    -----------------\n");
2598
2599         if (data->critical_start) {
2600                 seq_puts(m, "#  => started at: ");
2601                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2602                 trace_print_seq(m, &iter->seq);
2603                 seq_puts(m, "\n#  => ended at:   ");
2604                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2605                 trace_print_seq(m, &iter->seq);
2606                 seq_puts(m, "\n#\n");
2607         }
2608
2609         seq_puts(m, "#\n");
2610 }
2611
2612 static void test_cpu_buff_start(struct trace_iterator *iter)
2613 {
2614         struct trace_seq *s = &iter->seq;
2615
2616         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2617                 return;
2618
2619         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2620                 return;
2621
2622         if (cpumask_test_cpu(iter->cpu, iter->started))
2623                 return;
2624
2625         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2626                 return;
2627
2628         cpumask_set_cpu(iter->cpu, iter->started);
2629
2630         /* Don't print started cpu buffer for the first entry of the trace */
2631         if (iter->idx > 1)
2632                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2633                                 iter->cpu);
2634 }
2635
2636 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2637 {
2638         struct trace_seq *s = &iter->seq;
2639         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2640         struct trace_entry *entry;
2641         struct trace_event *event;
2642
2643         entry = iter->ent;
2644
2645         test_cpu_buff_start(iter);
2646
2647         event = ftrace_find_event(entry->type);
2648
2649         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2650                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2651                         if (!trace_print_lat_context(iter))
2652                                 goto partial;
2653                 } else {
2654                         if (!trace_print_context(iter))
2655                                 goto partial;
2656                 }
2657         }
2658
2659         if (event)
2660                 return event->funcs->trace(iter, sym_flags, event);
2661
2662         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2663                 goto partial;
2664
2665         return TRACE_TYPE_HANDLED;
2666 partial:
2667         return TRACE_TYPE_PARTIAL_LINE;
2668 }
2669
2670 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2671 {
2672         struct trace_seq *s = &iter->seq;
2673         struct trace_entry *entry;
2674         struct trace_event *event;
2675
2676         entry = iter->ent;
2677
2678         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2679                 if (!trace_seq_printf(s, "%d %d %llu ",
2680                                       entry->pid, iter->cpu, iter->ts))
2681                         goto partial;
2682         }
2683
2684         event = ftrace_find_event(entry->type);
2685         if (event)
2686                 return event->funcs->raw(iter, 0, event);
2687
2688         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2689                 goto partial;
2690
2691         return TRACE_TYPE_HANDLED;
2692 partial:
2693         return TRACE_TYPE_PARTIAL_LINE;
2694 }
2695
2696 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2697 {
2698         struct trace_seq *s = &iter->seq;
2699         unsigned char newline = '\n';
2700         struct trace_entry *entry;
2701         struct trace_event *event;
2702
2703         entry = iter->ent;
2704
2705         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2706                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2707                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2708                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2709         }
2710
2711         event = ftrace_find_event(entry->type);
2712         if (event) {
2713                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2714                 if (ret != TRACE_TYPE_HANDLED)
2715                         return ret;
2716         }
2717
2718         SEQ_PUT_FIELD_RET(s, newline);
2719
2720         return TRACE_TYPE_HANDLED;
2721 }
2722
2723 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2724 {
2725         struct trace_seq *s = &iter->seq;
2726         struct trace_entry *entry;
2727         struct trace_event *event;
2728
2729         entry = iter->ent;
2730
2731         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2732                 SEQ_PUT_FIELD_RET(s, entry->pid);
2733                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2734                 SEQ_PUT_FIELD_RET(s, iter->ts);
2735         }
2736
2737         event = ftrace_find_event(entry->type);
2738         return event ? event->funcs->binary(iter, 0, event) :
2739                 TRACE_TYPE_HANDLED;
2740 }
2741
2742 int trace_empty(struct trace_iterator *iter)
2743 {
2744         struct ring_buffer_iter *buf_iter;
2745         int cpu;
2746
2747         /* If we are looking at one CPU buffer, only check that one */
2748         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2749                 cpu = iter->cpu_file;
2750                 buf_iter = trace_buffer_iter(iter, cpu);
2751                 if (buf_iter) {
2752                         if (!ring_buffer_iter_empty(buf_iter))
2753                                 return 0;
2754                 } else {
2755                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2756                                 return 0;
2757                 }
2758                 return 1;
2759         }
2760
2761         for_each_tracing_cpu(cpu) {
2762                 buf_iter = trace_buffer_iter(iter, cpu);
2763                 if (buf_iter) {
2764                         if (!ring_buffer_iter_empty(buf_iter))
2765                                 return 0;
2766                 } else {
2767                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2768                                 return 0;
2769                 }
2770         }
2771
2772         return 1;
2773 }
2774
2775 /*  Called with trace_event_read_lock() held. */
2776 enum print_line_t print_trace_line(struct trace_iterator *iter)
2777 {
2778         enum print_line_t ret;
2779
2780         if (iter->lost_events &&
2781             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2782                                  iter->cpu, iter->lost_events))
2783                 return TRACE_TYPE_PARTIAL_LINE;
2784
2785         if (iter->trace && iter->trace->print_line) {
2786                 ret = iter->trace->print_line(iter);
2787                 if (ret != TRACE_TYPE_UNHANDLED)
2788                         return ret;
2789         }
2790
2791         if (iter->ent->type == TRACE_BPUTS &&
2792                         trace_flags & TRACE_ITER_PRINTK &&
2793                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2794                 return trace_print_bputs_msg_only(iter);
2795
2796         if (iter->ent->type == TRACE_BPRINT &&
2797                         trace_flags & TRACE_ITER_PRINTK &&
2798                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2799                 return trace_print_bprintk_msg_only(iter);
2800
2801         if (iter->ent->type == TRACE_PRINT &&
2802                         trace_flags & TRACE_ITER_PRINTK &&
2803                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2804                 return trace_print_printk_msg_only(iter);
2805
2806         if (trace_flags & TRACE_ITER_BIN)
2807                 return print_bin_fmt(iter);
2808
2809         if (trace_flags & TRACE_ITER_HEX)
2810                 return print_hex_fmt(iter);
2811
2812         if (trace_flags & TRACE_ITER_RAW)
2813                 return print_raw_fmt(iter);
2814
2815         return print_trace_fmt(iter);
2816 }
2817
2818 void trace_latency_header(struct seq_file *m)
2819 {
2820         struct trace_iterator *iter = m->private;
2821
2822         /* print nothing if the buffers are empty */
2823         if (trace_empty(iter))
2824                 return;
2825
2826         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2827                 print_trace_header(m, iter);
2828
2829         if (!(trace_flags & TRACE_ITER_VERBOSE))
2830                 print_lat_help_header(m);
2831 }
2832
2833 void trace_default_header(struct seq_file *m)
2834 {
2835         struct trace_iterator *iter = m->private;
2836
2837         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2838                 return;
2839
2840         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2841                 /* print nothing if the buffers are empty */
2842                 if (trace_empty(iter))
2843                         return;
2844                 print_trace_header(m, iter);
2845                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2846                         print_lat_help_header(m);
2847         } else {
2848                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2849                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2850                                 print_func_help_header_irq(iter->trace_buffer, m);
2851                         else
2852                                 print_func_help_header(iter->trace_buffer, m);
2853                 }
2854         }
2855 }
2856
2857 static void test_ftrace_alive(struct seq_file *m)
2858 {
2859         if (!ftrace_is_dead())
2860                 return;
2861         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2862         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2863 }
2864
2865 #ifdef CONFIG_TRACER_MAX_TRACE
2866 static void show_snapshot_main_help(struct seq_file *m)
2867 {
2868         seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2869         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2870         seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2871         seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2872         seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2873         seq_printf(m, "#                       is not a '0' or '1')\n");
2874 }
2875
2876 static void show_snapshot_percpu_help(struct seq_file *m)
2877 {
2878         seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2879 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2880         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2881         seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2882 #else
2883         seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2884         seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2885 #endif
2886         seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2887         seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2888         seq_printf(m, "#                       is not a '0' or '1')\n");
2889 }
2890
2891 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2892 {
2893         if (iter->tr->allocated_snapshot)
2894                 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2895         else
2896                 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2897
2898         seq_printf(m, "# Snapshot commands:\n");
2899         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2900                 show_snapshot_main_help(m);
2901         else
2902                 show_snapshot_percpu_help(m);
2903 }
2904 #else
2905 /* Should never be called */
2906 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2907 #endif
2908
2909 static int s_show(struct seq_file *m, void *v)
2910 {
2911         struct trace_iterator *iter = v;
2912         int ret;
2913
2914         if (iter->ent == NULL) {
2915                 if (iter->tr) {
2916                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2917                         seq_puts(m, "#\n");
2918                         test_ftrace_alive(m);
2919                 }
2920                 if (iter->snapshot && trace_empty(iter))
2921                         print_snapshot_help(m, iter);
2922                 else if (iter->trace && iter->trace->print_header)
2923                         iter->trace->print_header(m);
2924                 else
2925                         trace_default_header(m);
2926
2927         } else if (iter->leftover) {
2928                 /*
2929                  * If we filled the seq_file buffer earlier, we
2930                  * want to just show it now.
2931                  */
2932                 ret = trace_print_seq(m, &iter->seq);
2933
2934                 /* ret should this time be zero, but you never know */
2935                 iter->leftover = ret;
2936
2937         } else {
2938                 print_trace_line(iter);
2939                 ret = trace_print_seq(m, &iter->seq);
2940                 /*
2941                  * If we overflow the seq_file buffer, then it will
2942                  * ask us for this data again at start up.
2943                  * Use that instead.
2944                  *  ret is 0 if seq_file write succeeded.
2945                  *        -1 otherwise.
2946                  */
2947                 iter->leftover = ret;
2948         }
2949
2950         return 0;
2951 }
2952
2953 /*
2954  * Should be used after trace_array_get(), trace_types_lock
2955  * ensures that i_cdev was already initialized.
2956  */
2957 static inline int tracing_get_cpu(struct inode *inode)
2958 {
2959         if (inode->i_cdev) /* See trace_create_cpu_file() */
2960                 return (long)inode->i_cdev - 1;
2961         return RING_BUFFER_ALL_CPUS;
2962 }
2963
2964 static const struct seq_operations tracer_seq_ops = {
2965         .start          = s_start,
2966         .next           = s_next,
2967         .stop           = s_stop,
2968         .show           = s_show,
2969 };
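
/*
 * Reminder of the seq_file contract these callbacks implement. For each
 * read() the seq_file core roughly does (simplified sketch, not the
 * real implementation):
 *
 *	p = ops->start(m, &pos);
 *	while (p && !err) {
 *		err = ops->show(m, p);
 *		p = ops->next(m, p, &pos);
 *	}
 *	ops->stop(m, p);
 */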
2970
2971 static struct trace_iterator *
2972 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2973 {
2974         struct trace_array *tr = inode->i_private;
2975         struct trace_iterator *iter;
2976         int cpu;
2977
2978         if (tracing_disabled)
2979                 return ERR_PTR(-ENODEV);
2980
2981         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2982         if (!iter)
2983                 return ERR_PTR(-ENOMEM);
2984
2985         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2986                                     GFP_KERNEL);
2987         if (!iter->buffer_iter)
2988                 goto release;
2989
2990         /*
2991          * We make a copy of the current tracer to avoid concurrent
2992          * changes on it while we are reading.
2993          */
2994         mutex_lock(&trace_types_lock);
2995         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2996         if (!iter->trace)
2997                 goto fail;
2998
2999         *iter->trace = *tr->current_trace;
3000
3001         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3002                 goto fail;
3003
3004         iter->tr = tr;
3005
3006 #ifdef CONFIG_TRACER_MAX_TRACE
3007         /* Currently only the top directory has a snapshot */
3008         if (tr->current_trace->print_max || snapshot)
3009                 iter->trace_buffer = &tr->max_buffer;
3010         else
3011 #endif
3012                 iter->trace_buffer = &tr->trace_buffer;
3013         iter->snapshot = snapshot;
3014         iter->pos = -1;
3015         iter->cpu_file = tracing_get_cpu(inode);
3016         mutex_init(&iter->mutex);
3017
3018         /* Notify the tracer early; before we stop tracing. */
3019         if (iter->trace && iter->trace->open)
3020                 iter->trace->open(iter);
3021
3022         /* Annotate start of buffers if we had overruns */
3023         if (ring_buffer_overruns(iter->trace_buffer->buffer))
3024                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3025
3026         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3027         if (trace_clocks[tr->clock_id].in_ns)
3028                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3029
3030         /* stop the trace while dumping if we are not opening "snapshot" */
3031         if (!iter->snapshot)
3032                 tracing_stop_tr(tr);
3033
3034         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3035                 for_each_tracing_cpu(cpu) {
3036                         iter->buffer_iter[cpu] =
3037                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3038                 }
3039                 ring_buffer_read_prepare_sync();
3040                 for_each_tracing_cpu(cpu) {
3041                         ring_buffer_read_start(iter->buffer_iter[cpu]);
3042                         tracing_iter_reset(iter, cpu);
3043                 }
3044         } else {
3045                 cpu = iter->cpu_file;
3046                 iter->buffer_iter[cpu] =
3047                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3048                 ring_buffer_read_prepare_sync();
3049                 ring_buffer_read_start(iter->buffer_iter[cpu]);
3050                 tracing_iter_reset(iter, cpu);
3051         }
3052
3053         mutex_unlock(&trace_types_lock);
3054
3055         return iter;
3056
3057  fail:
3058         mutex_unlock(&trace_types_lock);
3059         kfree(iter->trace);
3060         kfree(iter->buffer_iter);
3061 release:
3062         seq_release_private(inode, file);
3063         return ERR_PTR(-ENOMEM);
3064 }
3065
3066 int tracing_open_generic(struct inode *inode, struct file *filp)
3067 {
3068         if (tracing_disabled)
3069                 return -ENODEV;
3070
3071         filp->private_data = inode->i_private;
3072         return 0;
3073 }
3074
3075 bool tracing_is_disabled(void)
3076 {
3077         return (tracing_disabled) ? true : false;
3078 }
3079
3080 /*
3081  * Open and update trace_array ref count.
3082  * Must have the current trace_array passed to it.
3083  */
3084 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3085 {
3086         struct trace_array *tr = inode->i_private;
3087
3088         if (tracing_disabled)
3089                 return -ENODEV;
3090
3091         if (trace_array_get(tr) < 0)
3092                 return -ENODEV;
3093
3094         filp->private_data = inode->i_private;
3095
3096         return 0;
3097 }
3098
3099 static int tracing_release(struct inode *inode, struct file *file)
3100 {
3101         struct trace_array *tr = inode->i_private;
3102         struct seq_file *m = file->private_data;
3103         struct trace_iterator *iter;
3104         int cpu;
3105
3106         if (!(file->f_mode & FMODE_READ)) {
3107                 trace_array_put(tr);
3108                 return 0;
3109         }
3110
3111         /* Writes do not use seq_file */
3112         iter = m->private;
3113         mutex_lock(&trace_types_lock);
3114
3115         for_each_tracing_cpu(cpu) {
3116                 if (iter->buffer_iter[cpu])
3117                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3118         }
3119
3120         if (iter->trace && iter->trace->close)
3121                 iter->trace->close(iter);
3122
3123         if (!iter->snapshot)
3124                 /* reenable tracing if it was previously enabled */
3125                 tracing_start_tr(tr);
3126
3127         __trace_array_put(tr);
3128
3129         mutex_unlock(&trace_types_lock);
3130
3131         mutex_destroy(&iter->mutex);
3132         free_cpumask_var(iter->started);
3133         kfree(iter->trace);
3134         kfree(iter->buffer_iter);
3135         seq_release_private(inode, file);
3136
3137         return 0;
3138 }
3139
3140 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3141 {
3142         struct trace_array *tr = inode->i_private;
3143
3144         trace_array_put(tr);
3145         return 0;
3146 }
3147
3148 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3149 {
3150         struct trace_array *tr = inode->i_private;
3151
3152         trace_array_put(tr);
3153
3154         return single_release(inode, file);
3155 }
3156
3157 static int tracing_open(struct inode *inode, struct file *file)
3158 {
3159         struct trace_array *tr = inode->i_private;
3160         struct trace_iterator *iter;
3161         int ret = 0;
3162
3163         if (trace_array_get(tr) < 0)
3164                 return -ENODEV;
3165
3166         /* If this file was open for write, then erase contents */
3167         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3168                 int cpu = tracing_get_cpu(inode);
3169
3170                 if (cpu == RING_BUFFER_ALL_CPUS)
3171                         tracing_reset_online_cpus(&tr->trace_buffer);
3172                 else
3173                         tracing_reset(&tr->trace_buffer, cpu);
3174         }
3175
3176         if (file->f_mode & FMODE_READ) {
3177                 iter = __tracing_open(inode, file, false);
3178                 if (IS_ERR(iter))
3179                         ret = PTR_ERR(iter);
3180                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3181                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
3182         }
3183
3184         if (ret < 0)
3185                 trace_array_put(tr);
3186
3187         return ret;
3188 }
3189
3190 /*
3191  * Some tracers are not suitable for instance buffers.
3192  * A tracer is always available for the global array (toplevel)
3193  * or if it explicitly states that it is.
3194  */
3195 static bool
3196 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3197 {
3198         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3199 }
3200
3201 /* Find the next tracer that this trace array may use */
3202 static struct tracer *
3203 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3204 {
3205         while (t && !trace_ok_for_array(t, tr))
3206                 t = t->next;
3207
3208         return t;
3209 }
3210
3211 static void *
3212 t_next(struct seq_file *m, void *v, loff_t *pos)
3213 {
3214         struct trace_array *tr = m->private;
3215         struct tracer *t = v;
3216
3217         (*pos)++;
3218
3219         if (t)
3220                 t = get_tracer_for_array(tr, t->next);
3221
3222         return t;
3223 }
3224
3225 static void *t_start(struct seq_file *m, loff_t *pos)
3226 {
3227         struct trace_array *tr = m->private;
3228         struct tracer *t;
3229         loff_t l = 0;
3230
3231         mutex_lock(&trace_types_lock);
3232
3233         t = get_tracer_for_array(tr, trace_types);
3234         for (; t && l < *pos; t = t_next(m, t, &l))
3235                 ;
3236
3237         return t;
3238 }
3239
3240 static void t_stop(struct seq_file *m, void *p)
3241 {
3242         mutex_unlock(&trace_types_lock);
3243 }
3244
3245 static int t_show(struct seq_file *m, void *v)
3246 {
3247         struct tracer *t = v;
3248
3249         if (!t)
3250                 return 0;
3251
3252         seq_printf(m, "%s", t->name);
3253         if (t->next)
3254                 seq_putc(m, ' ');
3255         else
3256                 seq_putc(m, '\n');
3257
3258         return 0;
3259 }
3260
3261 static const struct seq_operations show_traces_seq_ops = {
3262         .start          = t_start,
3263         .next           = t_next,
3264         .stop           = t_stop,
3265         .show           = t_show,
3266 };
3267
3268 static int show_traces_open(struct inode *inode, struct file *file)
3269 {
3270         struct trace_array *tr = inode->i_private;
3271         struct seq_file *m;
3272         int ret;
3273
3274         if (tracing_disabled)
3275                 return -ENODEV;
3276
3277         ret = seq_open(file, &show_traces_seq_ops);
3278         if (ret)
3279                 return ret;
3280
3281         m = file->private_data;
3282         m->private = tr;
3283
3284         return 0;
3285 }
3286
3287 static ssize_t
3288 tracing_write_stub(struct file *filp, const char __user *ubuf,
3289                    size_t count, loff_t *ppos)
3290 {
3291         return count;
3292 }
3293
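/*
 * Seek helper shared by several tracing files: opens that include
 * FMODE_READ are backed by a seq_file, so the seek is handed to
 * seq_lseek(); write-only opens have no seq_file and simply reset
 * the file position to zero.
 */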
3294 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3295 {
3296         int ret;
3297
3298         if (file->f_mode & FMODE_READ)
3299                 ret = seq_lseek(file, offset, whence);
3300         else
3301                 file->f_pos = ret = 0;
3302
3303         return ret;
3304 }
3305
3306 static const struct file_operations tracing_fops = {
3307         .open           = tracing_open,
3308         .read           = seq_read,
3309         .write          = tracing_write_stub,
3310         .llseek         = tracing_lseek,
3311         .release        = tracing_release,
3312 };
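/*
 * Illustrative use of the interface backed by tracing_fops, assuming it
 * is the "trace" file described in the readme text further below:
 *
 *   # cat trace        : dump the static contents of the buffer
 *   # echo > trace     : opening with O_TRUNC resets the buffer, as
 *                        handled in tracing_open() above
 */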
3313
3314 static const struct file_operations show_traces_fops = {
3315         .open           = show_traces_open,
3316         .read           = seq_read,
3317         .release        = seq_release,
3318         .llseek         = seq_lseek,
3319 };
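/*
 * Illustrative use of the file backed by show_traces_fops (assumed to be
 * "available_tracers" from the readme below). t_show() prints every
 * registered tracer name separated by spaces, e.g.:
 *
 *   # cat available_tracers
 *   function_graph function nop
 *
 * The exact list depends on which tracers are configured in and, for
 * instance buffers, on each tracer's allow_instances setting.
 */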
3320
3321 /*
3322  * The tracer itself will not take this lock, but still we want
3323  * to provide a consistent cpumask to user-space:
3324  */
3325 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3326
3327 /*
3328  * Temporary storage for the character representation of the
3329  * CPU bitmask (and one more byte for the newline):
3330  */
3331 static char mask_str[NR_CPUS + 1];
3332
3333 static ssize_t
3334 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3335                      size_t count, loff_t *ppos)
3336 {
3337         struct trace_array *tr = file_inode(filp)->i_private;
3338         int len;
3339
3340         mutex_lock(&tracing_cpumask_update_lock);
3341
3342         len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3343         if (count - len < 2) {
3344                 count = -EINVAL;
3345                 goto out_err;
3346         }
3347         len += sprintf(mask_str + len, "\n");
3348         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3349
3350 out_err:
3351         mutex_unlock(&tracing_cpumask_update_lock);
3352
3353         return count;
3354 }
3355
3356 static ssize_t
3357 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3358                       size_t count, loff_t *ppos)
3359 {
3360         struct trace_array *tr = file_inode(filp)->i_private;
3361         cpumask_var_t tracing_cpumask_new;
3362         int err, cpu;
3363
3364         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3365                 return -ENOMEM;
3366
3367         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3368         if (err)
3369                 goto err_unlock;
3370
3371         mutex_lock(&tracing_cpumask_update_lock);
3372
3373         local_irq_disable();
3374         arch_spin_lock(&tr->max_lock);
3375         for_each_tracing_cpu(cpu) {
3376                 /*
3377                  * Increase/decrease the disabled counter if we are
3378                  * about to flip a bit in the cpumask:
3379                  */
3380                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3381                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3382                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3383                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3384                 }
3385                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3386                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3387                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3388                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3389                 }
3390         }
3391         arch_spin_unlock(&tr->max_lock);
3392         local_irq_enable();
3393
3394         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3395
3396         mutex_unlock(&tracing_cpumask_update_lock);
3397         free_cpumask_var(tracing_cpumask_new);
3398
3399         return count;
3400
3401 err_unlock:
3402         free_cpumask_var(tracing_cpumask_new);
3403
3404         return err;
3405 }
3406
3407 static const struct file_operations tracing_cpumask_fops = {
3408         .open           = tracing_open_generic_tr,
3409         .read           = tracing_cpumask_read,
3410         .write          = tracing_cpumask_write,
3411         .release        = tracing_release_generic_tr,
3412         .llseek         = generic_file_llseek,
3413 };
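/*
 * Illustrative use of the file backed by tracing_cpumask_fops (assumed
 * to be "tracing_cpumask" from the readme below). The write side parses
 * a hex CPU mask via cpumask_parse_user(), so for example:
 *
 *   # echo 3 > tracing_cpumask
 *
 * would limit tracing to CPUs 0 and 1, disabling recording in the
 * per-cpu ring buffers of any CPU that was cleared from the mask.
 */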
3414
3415 static int tracing_trace_options_show(struct seq_file *m, void *v)
3416 {
3417         struct tracer_opt *trace_opts;
3418         struct trace_array *tr = m->private;
3419         u32 tracer_flags;
3420         int i;
3421
3422         mutex_lock(&trace_types_lock);
3423         tracer_flags = tr->current_trace->flags->val;
3424         trace_opts = tr->current_trace->flags->opts;
3425
3426         for (i = 0; trace_options[i]; i++) {
3427                 if (trace_flags & (1 << i))
3428                         seq_printf(m, "%s\n", trace_options[i]);
3429                 else
3430                         seq_printf(m, "no%s\n", trace_options[i]);
3431         }
3432
3433         for (i = 0; trace_opts[i].name; i++) {
3434                 if (tracer_flags & trace_opts[i].bit)
3435                         seq_printf(m, "%s\n", trace_opts[i].name);
3436                 else
3437                         seq_printf(m, "no%s\n", trace_opts[i].name);
3438         }
3439         mutex_unlock(&trace_types_lock);
3440
3441         return 0;
3442 }
3443
3444 static int __set_tracer_option(struct trace_array *tr,
3445                                struct tracer_flags *tracer_flags,
3446                                struct tracer_opt *opts, int neg)
3447 {
3448         struct tracer *trace = tr->current_trace;
3449         int ret;
3450
3451         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3452         if (ret)
3453                 return ret;
3454
3455         if (neg)
3456                 tracer_flags->val &= ~opts->bit;
3457         else
3458                 tracer_flags->val |= opts->bit;
3459         return 0;
3460 }
3461
3462 /* Try to assign a tracer specific option */
3463 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3464 {
3465         struct tracer *trace = tr->current_trace;
3466         struct tracer_flags *tracer_flags = trace->flags;
3467         struct tracer_opt *opts = NULL;
3468         int i;
3469
3470         for (i = 0; tracer_flags->opts[i].name; i++) {
3471                 opts = &tracer_flags->opts[i];
3472
3473                 if (strcmp(cmp, opts->name) == 0)
3474                         return __set_tracer_option(tr, trace->flags, opts, neg);
3475         }
3476
3477         return -EINVAL;
3478 }
3479
3480 /* Some tracers require overwrite to stay enabled */
3481 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3482 {
3483         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3484                 return -1;
3485
3486         return 0;
3487 }
3488
3489 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3490 {
3491         /* do nothing if flag is already set */
3492         if (!!(trace_flags & mask) == !!enabled)
3493                 return 0;
3494
3495         /* Give the tracer a chance to approve the change */
3496         if (tr->current_trace->flag_changed)
3497                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3498                         return -EINVAL;
3499
3500         if (enabled)
3501                 trace_flags |= mask;
3502         else
3503                 trace_flags &= ~mask;
3504
3505         if (mask == TRACE_ITER_RECORD_CMD)
3506                 trace_event_enable_cmd_record(enabled);
3507
3508         if (mask == TRACE_ITER_OVERWRITE) {
3509                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3510 #ifdef CONFIG_TRACER_MAX_TRACE
3511                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3512 #endif
3513         }
3514
3515         if (mask == TRACE_ITER_PRINTK)
3516                 trace_printk_start_stop_comm(enabled);
3517
3518         return 0;
3519 }
3520
3521 static int trace_set_options(struct trace_array *tr, char *option)
3522 {
3523         char *cmp;
3524         int neg = 0;
3525         int ret = -ENODEV;
3526         int i;
3527
3528         cmp = strstrip(option);
3529
3530         if (strncmp(cmp, "no", 2) == 0) {
3531                 neg = 1;
3532                 cmp += 2;
3533         }
3534
3535         mutex_lock(&trace_types_lock);
3536
3537         for (i = 0; trace_options[i]; i++) {
3538                 if (strcmp(cmp, trace_options[i]) == 0) {
3539                         ret = set_tracer_flag(tr, 1 << i, !neg);
3540                         break;
3541                 }
3542         }
3543
3544         /* If no option could be set, test the specific tracer options */
3545         if (!trace_options[i])
3546                 ret = set_tracer_option(tr, cmp, neg);
3547
3548         mutex_unlock(&trace_types_lock);
3549
3550         return ret;
3551 }
3552
3553 static ssize_t
3554 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3555                         size_t cnt, loff_t *ppos)
3556 {
3557         struct seq_file *m = filp->private_data;
3558         struct trace_array *tr = m->private;
3559         char buf[64];
3560         int ret;
3561
3562         if (cnt >= sizeof(buf))
3563                 return -EINVAL;
3564
3565         if (copy_from_user(&buf, ubuf, cnt))
3566                 return -EFAULT;
3567
3568         buf[cnt] = 0;
3569
3570         ret = trace_set_options(tr, buf);
3571         if (ret < 0)
3572                 return ret;
3573
3574         *ppos += cnt;
3575
3576         return cnt;
3577 }
3578
3579 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3580 {
3581         struct trace_array *tr = inode->i_private;
3582         int ret;
3583
3584         if (tracing_disabled)
3585                 return -ENODEV;
3586
3587         if (trace_array_get(tr) < 0)
3588                 return -ENODEV;
3589
3590         ret = single_open(file, tracing_trace_options_show, inode->i_private);
3591         if (ret < 0)
3592                 trace_array_put(tr);
3593
3594         return ret;
3595 }
3596
3597 static const struct file_operations tracing_iter_fops = {
3598         .open           = tracing_trace_options_open,
3599         .read           = seq_read,
3600         .llseek         = seq_lseek,
3601         .release        = tracing_single_release_tr,
3602         .write          = tracing_trace_options_write,
3603 };
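/*
 * Illustrative use of the "trace_options" interface backed by
 * tracing_iter_fops above. Core flags come from trace_options[] and
 * per-tracer flags from the current tracer; prefixing a name with "no"
 * clears it:
 *
 *   # cat trace_options              : list each flag, "no"-prefixed if clear
 *   # echo nooverwrite > trace_options
 *
 * ("overwrite" is only an example name; the available names depend on
 * trace_options[] and on the current tracer's flag table)
 */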
3604
3605 static const char readme_msg[] =
3606         "tracing mini-HOWTO:\n\n"
3607         "# echo 0 > tracing_on : quick way to disable tracing\n"
3608         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3609         " Important files:\n"
3610         "  trace\t\t\t- The static contents of the buffer\n"
3611         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3612         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3613         "  current_tracer\t- function and latency tracers\n"
3614         "  available_tracers\t- list of configured tracers for current_tracer\n"
3615         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3616         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3617         "  trace_clock\t\t- change the clock used to order events\n"
3618         "       local:   Per cpu clock but may not be synced across CPUs\n"
3619         "      global:   Synced across CPUs but slows tracing down.\n"
3620         "     counter:   Not a clock, but just an increment\n"
3621         "      uptime:   Jiffy counter from time of boot\n"
3622         "        perf:   Same clock that perf events use\n"
3623 #ifdef CONFIG_X86_64
3624         "     x86-tsc:   TSC cycle counter\n"
3625 #endif
3626         "\n  trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
3627         "  tracing_cpumask\t- Limit which CPUs to trace\n"
3628         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3629         "\t\t\t  Remove sub-buffer with rmdir\n"
3630         "  trace_options\t\t- Set format or modify how tracing happens\n"
3631         "\t\t\t  Disable an option by adding the prefix 'no' to the\n"
3632         "\t\t\t  option name\n"
3633         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3634 #ifdef CONFIG_DYNAMIC_FTRACE
3635         "\n  available_filter_functions - list of functions that can be filtered on\n"
3636         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
3637         "\t\t\t  functions\n"
3638         "\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3639         "\t     modules: Can select a group via module\n"
3640         "\t      Format: :mod:<module-name>\n"
3641         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3642         "\t    triggers: a command to perform when function is hit\n"
3643         "\t      Format: <function>:<trigger>[:count]\n"
3644         "\t     trigger: traceon, traceoff\n"
3645         "\t\t      enable_event:<system>:<event>\n"
3646         "\t\t      disable_event:<system>:<event>\n"
3647 #ifdef CONFIG_STACKTRACE
3648         "\t\t      stacktrace\n"
3649 #endif
3650 #ifdef CONFIG_TRACER_SNAPSHOT
3651         "\t\t      snapshot\n"
3652 #endif
3653         "\t\t      dump\n"
3654         "\t\t      cpudump\n"
3655         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3656         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3657         "\t     The first one will disable tracing every time do_fault is hit\n"
3658         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3659         "\t       The first time do_trap is hit and it disables tracing, the\n"
3660         "\t       counter will decrement to 2. If tracing is already disabled,\n"
3661         "\t       the counter will not decrement. It only decrements when the\n"
3662         "\t       trigger did work\n"
3663         "\t     To remove trigger without count:\n"
3664         "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3665         "\t     To remove trigger with a count:\n"
3666         "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3667         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3668         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3669         "\t    modules: Can select a group via module command :mod:\n"
3670         "\t    Does not accept triggers\n"
3671 #endif /* CONFIG_DYNAMIC_FTRACE */
3672 #ifdef CONFIG_FUNCTION_TRACER
3673         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3674         "\t\t    (function)\n"
3675 #endif
3676 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3677         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3678         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3679         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3680 #endif
3681 #ifdef CONFIG_TRACER_SNAPSHOT
3682         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3683         "\t\t\t  snapshot buffer. Read the contents for more\n"
3684         "\t\t\t  information\n"
3685 #endif
3686 #ifdef CONFIG_STACK_TRACER
3687         "  stack_trace\t\t- Shows the max stack trace when active\n"
3688         "  stack_max_size\t- Shows current max stack size that was traced\n"
3689         "\t\t\t  Write into this file to reset the max size (trigger a\n"
3690         "\t\t\t  new trace)\n"
3691 #ifdef CONFIG_DYNAMIC_FTRACE
3692         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3693         "\t\t\t  traces\n"
3694 #endif
3695 #endif /* CONFIG_STACK_TRACER */
3696         "  events/\t\t- Directory containing all trace event subsystems:\n"
3697         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3698         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
3699         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3700         "\t\t\t  events\n"
3701         "      filter\t\t- If set, only events passing filter are traced\n"
3702         "  events/<system>/<event>/\t- Directory containing control files for\n"
3703         "\t\t\t  <event>:\n"
3704         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3705         "      filter\t\t- If set, only events passing filter are traced\n"
3706         "      trigger\t\t- If set, a command to perform when event is hit\n"
3707         "\t    Format: <trigger>[:count][if <filter>]\n"
3708         "\t   trigger: traceon, traceoff\n"
3709         "\t            enable_event:<system>:<event>\n"
3710         "\t            disable_event:<system>:<event>\n"
3711 #ifdef CONFIG_STACKTRACE
3712         "\t\t    stacktrace\n"
3713 #endif
3714 #ifdef CONFIG_TRACER_SNAPSHOT
3715         "\t\t    snapshot\n"
3716 #endif
3717         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3718         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3719         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3720         "\t                  events/block/block_unplug/trigger\n"
3721         "\t   The first disables tracing every time block_unplug is hit.\n"
3722         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3723         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3724         "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3725         "\t   Like function triggers, the counter is only decremented if it\n"
3726         "\t    enabled or disabled tracing.\n"
3727         "\t   To remove a trigger without a count:\n"
3728         "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3729         "\t   To remove a trigger with a count:\n"
3730         "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3731         "\t   Filters can be ignored when removing a trigger.\n"
3732 ;
3733
3734 static ssize_t
3735 tracing_readme_read(struct file *filp, char __user *ubuf,
3736                        size_t cnt, loff_t *ppos)
3737 {
3738         return simple_read_from_buffer(ubuf, cnt, ppos,
3739                                         readme_msg, strlen(readme_msg));
3740 }
3741
3742 static const struct file_operations tracing_readme_fops = {
3743         .open           = tracing_open_generic,
3744         .read           = tracing_readme_read,
3745         .llseek         = generic_file_llseek,
3746 };
3747
3748 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3749 {
3750         unsigned int *ptr = v;
3751
3752         if (*pos || m->count)
3753                 ptr++;
3754
3755         (*pos)++;
3756
3757         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3758              ptr++) {
3759                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3760                         continue;
3761
3762                 return ptr;
3763         }
3764
3765         return NULL;
3766 }
3767
3768 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3769 {
3770         void *v;
3771         loff_t l = 0;
3772
3773         preempt_disable();
3774         arch_spin_lock(&trace_cmdline_lock);
3775
3776         v = &savedcmd->map_cmdline_to_pid[0];
3777         while (l <= *pos) {
3778                 v = saved_cmdlines_next(m, v, &l);
3779                 if (!v)
3780                         return NULL;
3781         }
3782
3783         return v;
3784 }
3785
3786 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3787 {
3788         arch_spin_unlock(&trace_cmdline_lock);
3789         preempt_enable();
3790 }
3791
3792 static int saved_cmdlines_show(struct seq_file *m, void *v)
3793 {
3794         char buf[TASK_COMM_LEN];
3795         unsigned int *pid = v;
3796
3797         __trace_find_cmdline(*pid, buf);
3798         seq_printf(m, "%d %s\n", *pid, buf);
3799         return 0;
3800 }
3801
3802 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3803         .start          = saved_cmdlines_start,
3804         .next           = saved_cmdlines_next,
3805         .stop           = saved_cmdlines_stop,
3806         .show           = saved_cmdlines_show,
3807 };
3808
3809 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3810 {
3811         if (tracing_disabled)
3812                 return -ENODEV;
3813
3814         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3815 }
3816
3817 static const struct file_operations tracing_saved_cmdlines_fops = {
3818         .open           = tracing_saved_cmdlines_open,
3819         .read           = seq_read,
3820         .llseek         = seq_lseek,
3821         .release        = seq_release,
3822 };
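/*
 * Illustrative read of the file backed by tracing_saved_cmdlines_fops
 * (assumed to be "saved_cmdlines"): each line is a "<pid> <comm>" pair
 * from the cmdline cache, e.g.:
 *
 *   # cat saved_cmdlines
 *   <pid> <comm>
 *   ...
 */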
3823
3824 static ssize_t
3825 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3826                                  size_t cnt, loff_t *ppos)
3827 {
3828         char buf[64];
3829         int r;
3830
3831         arch_spin_lock(&trace_cmdline_lock);
3832         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3833         arch_spin_unlock(&trace_cmdline_lock);
3834
3835         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3836 }
3837
3838 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3839 {
3840         kfree(s->saved_cmdlines);
3841         kfree(s->map_cmdline_to_pid);
3842         kfree(s);
3843 }
3844
3845 static int tracing_resize_saved_cmdlines(unsigned int val)
3846 {
3847         struct saved_cmdlines_buffer *s, *savedcmd_temp;
3848
3849         s = kmalloc(sizeof(*s), GFP_KERNEL);
3850         if (!s)
3851                 return -ENOMEM;
3852
3853         if (allocate_cmdlines_buffer(val, s) < 0) {
3854                 kfree(s);
3855                 return -ENOMEM;
3856         }
3857
3858         arch_spin_lock(&trace_cmdline_lock);
3859         savedcmd_temp = savedcmd;
3860         savedcmd = s;
3861         arch_spin_unlock(&trace_cmdline_lock);
3862         free_saved_cmdlines_buffer(savedcmd_temp);
3863
3864         return 0;
3865 }
3866
3867 static ssize_t
3868 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3869                                   size_t cnt, loff_t *ppos)
3870 {
3871         unsigned long val;
3872         int ret;
3873
3874         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3875         if (ret)
3876                 return ret;
3877
3878         /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3879         if (!val || val > PID_MAX_DEFAULT)
3880                 return -EINVAL;
3881
3882         ret = tracing_resize_saved_cmdlines((unsigned int)val);
3883         if (ret < 0)
3884                 return ret;
3885
3886         *ppos += cnt;
3887
3888         return cnt;
3889 }
3890
3891 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3892         .open           = tracing_open_generic,
3893         .read           = tracing_saved_cmdlines_size_read,
3894         .write          = tracing_saved_cmdlines_size_write,
3895 };
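/*
 * Illustrative use of "saved_cmdlines_size" (mentioned in the readme
 * above):
 *
 *   # cat saved_cmdlines_size       : current number of cached entries
 *   # echo 1024 > saved_cmdlines_size
 *
 * The write resizes the cache via tracing_resize_saved_cmdlines(); the
 * value must be at least 1 and no larger than PID_MAX_DEFAULT.
 */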
3896
3897 static ssize_t
3898 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3899                        size_t cnt, loff_t *ppos)
3900 {
3901         struct trace_array *tr = filp->private_data;
3902         char buf[MAX_TRACER_SIZE+2];
3903         int r;
3904
3905         mutex_lock(&trace_types_lock);
3906         r = sprintf(buf, "%s\n", tr->current_trace->name);
3907         mutex_unlock(&trace_types_lock);
3908
3909         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3910 }
3911
3912 int tracer_init(struct tracer *t, struct trace_array *tr)
3913 {
3914         tracing_reset_online_cpus(&tr->trace_buffer);
3915         return t->init(tr);
3916 }
3917
3918 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3919 {
3920         int cpu;
3921
3922         for_each_tracing_cpu(cpu)
3923                 per_cpu_ptr(buf->data, cpu)->entries = val;
3924 }
3925
3926 #ifdef CONFIG_TRACER_MAX_TRACE
3927 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3928 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3929                                         struct trace_buffer *size_buf, int cpu_id)
3930 {
3931         int cpu, ret = 0;
3932
3933         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3934                 for_each_tracing_cpu(cpu) {
3935                         ret = ring_buffer_resize(trace_buf->buffer,
3936                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3937                         if (ret < 0)
3938                                 break;
3939                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3940                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3941                 }
3942         } else {
3943                 ret = ring_buffer_resize(trace_buf->buffer,
3944                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3945                 if (ret == 0)
3946                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3947                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3948         }
3949
3950         return ret;
3951 }
3952 #endif /* CONFIG_TRACER_MAX_TRACE */
3953
3954 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3955                                         unsigned long size, int cpu)
3956 {
3957         int ret;
3958
3959         /*
3960          * If kernel or user changes the size of the ring buffer
3961          * we use the size that was given, and we can forget about
3962          * expanding it later.
3963          */
3964         ring_buffer_expanded = true;
3965
3966         /* May be called before buffers are initialized */
3967         if (!tr->trace_buffer.buffer)
3968                 return 0;
3969
3970         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3971         if (ret < 0)
3972                 return ret;
3973
3974 #ifdef CONFIG_TRACER_MAX_TRACE
3975         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3976             !tr->current_trace->use_max_tr)
3977                 goto out;
3978
3979         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3980         if (ret < 0) {
3981                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3982                                                      &tr->trace_buffer, cpu);
3983                 if (r < 0) {
3984                         /*
3985                          * AARGH! We are left with a max buffer of a
3986                          * different size!
3987                          * The max buffer is our "snapshot" buffer.
3988                          * When a tracer needs a snapshot (one of the
3989                          * latency tracers), it swaps the max buffer
3990                          * with the saved snapshot. We succeeded in
3991                          * updating the size of the main buffer, but failed
3992                          * to update the size of the max buffer. Then, when
3993                          * we tried to reset the main buffer to its original
3994                          * size, we failed there too. This is very unlikely
3995                          * to happen, but if it does, warn and kill all
3996                          * tracing.
3997                          */
3998                         WARN_ON(1);
3999                         tracing_disabled = 1;
4000                 }
4001                 return ret;
4002         }
4003
4004         if (cpu == RING_BUFFER_ALL_CPUS)
4005                 set_buffer_entries(&tr->max_buffer, size);
4006         else
4007                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4008
4009  out:
4010 #endif /* CONFIG_TRACER_MAX_TRACE */
4011
4012         if (cpu == RING_BUFFER_ALL_CPUS)
4013                 set_buffer_entries(&tr->trace_buffer, size);
4014         else
4015                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4016
4017         return ret;
4018 }
4019
4020 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4021                                           unsigned long size, int cpu_id)
4022 {
4023         int ret = size;
4024
4025         mutex_lock(&trace_types_lock);
4026
4027         if (cpu_id != RING_BUFFER_ALL_CPUS) {
4028                 /* make sure this cpu is enabled in the mask */
4029                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4030                         ret = -EINVAL;
4031                         goto out;
4032                 }
4033         }
4034
4035         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4036         if (ret < 0)
4037                 ret = -ENOMEM;
4038
4039 out:
4040         mutex_unlock(&trace_types_lock);
4041
4042         return ret;
4043 }
4044
4045
4046 /**
4047  * tracing_update_buffers - used by tracing facility to expand ring buffers
4048  *
4049  * To save memory on systems where tracing is configured in but never
4050  * used, the ring buffers are initially set to a minimum size. Once a
4051  * user starts to use the tracing facility, they need to grow to their
4052  * default size.
4053  *
4054  * This function is to be called when a tracer is about to be used.
4055  */
4056 int tracing_update_buffers(void)
4057 {
4058         int ret = 0;
4059
4060         mutex_lock(&trace_types_lock);
4061         if (!ring_buffer_expanded)
4062                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4063                                                 RING_BUFFER_ALL_CPUS);
4064         mutex_unlock(&trace_types_lock);
4065
4066         return ret;
4067 }
4068
4069 struct trace_option_dentry;
4070
4071 static struct trace_option_dentry *
4072 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4073
4074 static void
4075 destroy_trace_option_files(struct trace_option_dentry *topts);
4076
4077 /*
4078  * Used to clear out the tracer before deletion of an instance.
4079  * Must have trace_types_lock held.
4080  */
4081 static void tracing_set_nop(struct trace_array *tr)
4082 {
4083         if (tr->current_trace == &nop_trace)
4084                 return;
4085
4086         tr->current_trace->enabled--;
4087
4088         if (tr->current_trace->reset)
4089                 tr->current_trace->reset(tr);
4090
4091         tr->current_trace = &nop_trace;
4092 }
4093
4094 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4095 {
4096         static struct trace_option_dentry *topts;
4097         struct tracer *t;
4098 #ifdef CONFIG_TRACER_MAX_TRACE
4099         bool had_max_tr;
4100 #endif
4101         int ret = 0;
4102
4103         mutex_lock(&trace_types_lock);
4104
4105         if (!ring_buffer_expanded) {
4106                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4107                                                 RING_BUFFER_ALL_CPUS);
4108                 if (ret < 0)
4109                         goto out;
4110                 ret = 0;
4111         }
4112
4113         for (t = trace_types; t; t = t->next) {
4114                 if (strcmp(t->name, buf) == 0)
4115                         break;
4116         }
4117         if (!t) {
4118                 ret = -EINVAL;
4119                 goto out;
4120         }
4121         if (t == tr->current_trace)
4122                 goto out;
4123
4124         /* Some tracers are only allowed for the top level buffer */
4125         if (!trace_ok_for_array(t, tr)) {
4126                 ret = -EINVAL;
4127                 goto out;
4128         }
4129
4130         trace_branch_disable();
4131
4132         tr->current_trace->enabled--;
4133
4134         if (tr->current_trace->reset)
4135                 tr->current_trace->reset(tr);
4136
4137         /* Current trace needs to be nop_trace before synchronize_sched */
4138         tr->current_trace = &nop_trace;
4139
4140 #ifdef CONFIG_TRACER_MAX_TRACE
4141         had_max_tr = tr->allocated_snapshot;
4142
4143         if (had_max_tr && !t->use_max_tr) {
4144                 /*
4145                  * We need to make sure that update_max_tr sees that
4146                  * current_trace changed to nop_trace to keep it from
4147                  * swapping the buffers after we resize it.
4148                  * update_max_tr is called with interrupts disabled,
4149                  * so a synchronize_sched() is sufficient.
4150                  */
4151                 synchronize_sched();
4152                 free_snapshot(tr);
4153         }
4154 #endif
4155         /* Currently, only the top instance has options */
4156         if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4157                 destroy_trace_option_files(topts);
4158                 topts = create_trace_option_files(tr, t);
4159         }
4160
4161 #ifdef CONFIG_TRACER_MAX_TRACE
4162         if (t->use_max_tr && !had_max_tr) {
4163                 ret = alloc_snapshot(tr);
4164                 if (ret < 0)
4165                         goto out;
4166         }
4167 #endif
4168
4169         if (t->init) {
4170                 ret = tracer_init(t, tr);
4171                 if (ret)
4172                         goto out;
4173         }
4174
4175         tr->current_trace = t;
4176         tr->current_trace->enabled++;
4177         trace_branch_enable(tr);
4178  out:
4179         mutex_unlock(&trace_types_lock);
4180
4181         return ret;
4182 }
4183
4184 static ssize_t
4185 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4186                         size_t cnt, loff_t *ppos)
4187 {
4188         struct trace_array *tr = filp->private_data;
4189         char buf[MAX_TRACER_SIZE+1];
4190         int i;
4191         size_t ret;
4192         int err;
4193
4194         ret = cnt;
4195
4196         if (cnt > MAX_TRACER_SIZE)
4197                 cnt = MAX_TRACER_SIZE;
4198
4199         if (copy_from_user(&buf, ubuf, cnt))
4200                 return -EFAULT;
4201
4202         buf[cnt] = 0;
4203
4204         /* strip ending whitespace. */
4205         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4206                 buf[i] = 0;
4207
4208         err = tracing_set_tracer(tr, buf);
4209         if (err)
4210                 return err;
4211
4212         *ppos += ret;
4213
4214         return ret;
4215 }
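/*
 * Illustrative use of the "current_tracer" interface backed by the read
 * and write handlers above:
 *
 *   # cat current_tracer            : prints the active tracer, e.g. "nop"
 *   # echo function > current_tracer
 *
 * ("function" is only present with CONFIG_FUNCTION_TRACER; echoing a
 * name that is not registered makes tracing_set_tracer() fail with
 * -EINVAL)
 */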
4216
4217 static ssize_t
4218 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4219                    size_t cnt, loff_t *ppos)
4220 {
4221         char buf[64];
4222         int r;
4223
4224         r = snprintf(buf, sizeof(buf), "%ld\n",
4225                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4226         if (r > sizeof(buf))
4227                 r = sizeof(buf);
4228         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4229 }
4230
4231 static ssize_t
4232 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4233                     size_t cnt, loff_t *ppos)
4234 {
4235         unsigned long val;
4236         int ret;
4237
4238         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4239         if (ret)
4240                 return ret;
4241
4242         *ptr = val * 1000;
4243
4244         return cnt;
4245 }
4246
4247 static ssize_t
4248 tracing_thresh_read(struct file *filp, char __user *ubuf,
4249                     size_t cnt, loff_t *ppos)
4250 {
4251         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4252 }
4253
4254 static ssize_t
4255 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4256                      size_t cnt, loff_t *ppos)
4257 {
4258         struct trace_array *tr = filp->private_data;
4259         int ret;
4260
4261         mutex_lock(&trace_types_lock);
4262         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4263         if (ret < 0)
4264                 goto out;
4265
4266         if (tr->current_trace->update_thresh) {
4267                 ret = tr->current_trace->update_thresh(tr);
4268                 if (ret < 0)
4269                         goto out;
4270         }
4271
4272         ret = cnt;
4273 out:
4274         mutex_unlock(&trace_types_lock);
4275
4276         return ret;
4277 }
4278
4279 static ssize_t
4280 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4281                      size_t cnt, loff_t *ppos)
4282 {
4283         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4284 }
4285
4286 static ssize_t
4287 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4288                       size_t cnt, loff_t *ppos)
4289 {
4290         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4291 }
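/*
 * The two pairs of handlers above expose latency values in microseconds:
 * reads convert the stored nanosecond value with nsecs_to_usecs() and
 * writes multiply the user's value by 1000. Assuming they back the
 * "tracing_thresh" and "tracing_max_latency" files, typical use would
 * look like:
 *
 *   # echo 100 > tracing_thresh        : threshold of 100 usecs
 *   # echo 0 > tracing_max_latency     : reset the recorded max latency
 */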
4292
4293 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4294 {
4295         struct trace_array *tr = inode->i_private;
4296         struct trace_iterator *iter;
4297         int ret = 0;
4298
4299         if (tracing_disabled)
4300                 return -ENODEV;
4301
4302         if (trace_array_get(tr) < 0)
4303                 return -ENODEV;
4304
4305         mutex_lock(&trace_types_lock);
4306
4307         /* create a buffer to store the information to pass to userspace */
4308         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4309         if (!iter) {
4310                 ret = -ENOMEM;
4311                 __trace_array_put(tr);
4312                 goto out;
4313         }
4314
4315         /*
4316          * We make a copy of the current tracer to avoid concurrent
4317          * changes on it while we are reading.
4318          */
4319         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4320         if (!iter->trace) {
4321                 ret = -ENOMEM;
4322                 goto fail;
4323         }
4324         *iter->trace = *tr->current_trace;
4325
4326         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4327                 ret = -ENOMEM;
4328                 goto fail;
4329         }
4330
4331         /* trace pipe does not show start of buffer */
4332         cpumask_setall(iter->started);
4333
4334         if (trace_flags & TRACE_ITER_LATENCY_FMT)
4335                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4336
4337         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4338         if (trace_clocks[tr->clock_id].in_ns)
4339                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4340
4341         iter->tr = tr;
4342         iter->trace_buffer = &tr->trace_buffer;
4343         iter->cpu_file = tracing_get_cpu(inode);
4344         mutex_init(&iter->mutex);
4345         filp->private_data = iter;
4346
4347         if (iter->trace->pipe_open)
4348                 iter->trace->pipe_open(iter);
4349
4350         nonseekable_open(inode, filp);
4351 out:
4352         mutex_unlock(&trace_types_lock);
4353         return ret;
4354
4355 fail:
4356         kfree(iter->trace);
4357         kfree(iter);
4358         __trace_array_put(tr);
4359         mutex_unlock(&trace_types_lock);
4360         return ret;
4361 }
4362
4363 static int tracing_release_pipe(struct inode *inode, struct file *file)
4364 {
4365         struct trace_iterator *iter = file->private_data;
4366         struct trace_array *tr = inode->i_private;
4367
4368         mutex_lock(&trace_types_lock);
4369
4370         if (iter->trace->pipe_close)
4371                 iter->trace->pipe_close(iter);
4372
4373         mutex_unlock(&trace_types_lock);
4374
4375         free_cpumask_var(iter->started);
4376         mutex_destroy(&iter->mutex);
4377         kfree(iter->trace);
4378         kfree(iter);
4379
4380         trace_array_put(tr);
4381
4382         return 0;
4383 }
4384
4385 static unsigned int
4386 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4387 {
4388         /* Iterators are static; they should be either filled or empty */
4389         if (trace_buffer_iter(iter, iter->cpu_file))
4390                 return POLLIN | POLLRDNORM;
4391
4392         if (trace_flags & TRACE_ITER_BLOCK)
4393                 /*
4394                  * Always select as readable when in blocking mode
4395                  */
4396                 return POLLIN | POLLRDNORM;
4397         else
4398                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4399                                              filp, poll_table);
4400 }
4401
4402 static unsigned int
4403 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4404 {
4405         struct trace_iterator *iter = filp->private_data;
4406
4407         return trace_poll(iter, filp, poll_table);
4408 }
4409
4410 /* Must be called with trace_types_lock mutex held. */
4411 static int tracing_wait_pipe(struct file *filp)
4412 {
4413         struct trace_iterator *iter = filp->private_data;
4414         int ret;
4415
4416         while (trace_empty(iter)) {
4417
4418                 if ((filp->f_flags & O_NONBLOCK)) {
4419                         return -EAGAIN;
4420                 }
4421
4422                 /*
4423                  * We only give an EOF once we have read something and
4424                  * tracing has then been disabled; we keep blocking if
4425                  * tracing is disabled but nothing was read yet. This
4426                  * allows a user to cat this file, then enable tracing,
4427                  * and get an EOF only when tracing is disabled again.
4428                  *
4429                  * iter->pos will be 0 if we haven't read anything.
4430                  */
4431                 if (!tracing_is_on() && iter->pos)
4432                         break;
4433
4434                 mutex_unlock(&iter->mutex);
4435
4436                 ret = wait_on_pipe(iter);
4437
4438                 mutex_lock(&iter->mutex);
4439
4440                 if (ret)
4441                         return ret;
4442
4443                 if (signal_pending(current))
4444                         return -EINTR;
4445         }
4446
4447         return 1;
4448 }
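/*
 * Net effect for a reader of "trace_pipe" (the consuming read described
 * in the readme above): the read blocks while the buffer is empty unless
 * the file was opened O_NONBLOCK, and EOF is only returned once something
 * has been read and tracing has subsequently been turned off. So, for
 * example, "cat trace_pipe" can be started before tracing is enabled and
 * will simply wait for events to arrive.
 */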
4449
4450 /*
4451  * Consumer reader.
4452  */
4453 static ssize_t
4454 tracing_read_pipe(struct file *filp, char __user *ubuf,
4455                   size_t cnt, loff_t *ppos)
4456 {
4457         struct trace_iterator *iter = filp->private_data;
4458         struct trace_array *tr = iter->tr;
4459         ssize_t sret;
4460
4461         /* return any leftover data */
4462         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4463         if (sret != -EBUSY)
4464                 return sret;
4465
4466         trace_seq_init(&iter->seq);
4467
4468         /* copy the tracer to avoid using a global lock all around */
4469         mutex_lock(&trace_types_lock);
4470         if (unlikely(iter->trace->name != tr->current_trace->name))
4471                 *iter->trace = *tr->current_trace;
4472         mutex_unlock(&trace_types_lock);
4473
4474         /*
4475          * Avoid more than one consumer on a single file descriptor.
4476          * This is just a matter of trace coherency; the ring buffer itself
4477          * is protected.
4478          */
4479         mutex_lock(&iter->mutex);
4480         if (iter->trace->read) {
4481                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4482                 if (sret)
4483                         goto out;
4484         }
4485
4486 waitagain:
4487         sret = tracing_wait_pipe(filp);
4488         if (sret <= 0)
4489                 goto out;
4490
4491         /* stop when tracing is finished */
4492         if (trace_empty(iter)) {
4493                 sret = 0;
4494                 goto out;
4495         }
4496
4497         if (cnt >= PAGE_SIZE)
4498                 cnt = PAGE_SIZE - 1;
4499
4500         /* reset all but tr, trace, and overruns */
4501         memset(&iter->seq, 0,
4502                sizeof(struct trace_iterator) -
4503                offsetof(struct trace_iterator, seq));
4504         cpumask_clear(iter->started);
4505         iter->pos = -1;
4506
4507         trace_event_read_lock();
4508         trace_access_lock(iter->cpu_file);
4509         while (trace_find_next_entry_inc(iter) != NULL) {
4510                 enum print_line_t ret;
4511                 int len = iter->seq.len;
4512
4513                 ret = print_trace_line(iter);
4514                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4515                         /* don't print partial lines */
4516                         iter->seq.len = len;
4517                         break;
4518                 }
4519                 if (ret != TRACE_TYPE_NO_CONSUME)
4520                         trace_consume(iter);
4521
4522                 if (iter->seq.len >= cnt)
4523                         break;
4524
4525                 /*
4526                  * Setting the full flag means we reached the trace_seq buffer
4527                  * size and we should have left via the partial output condition
4528                  * above. One of the trace_seq_* functions is not being used properly.
4529                  */
4530                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4531                           iter->ent->type);
4532         }
4533         trace_access_unlock(iter->cpu_file);
4534         trace_event_read_unlock();
4535
4536         /* Now copy what we have to the user */
4537         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4538         if (iter->seq.readpos >= iter->seq.len)
4539                 trace_seq_init(&iter->seq);
4540
4541         /*
4542          * If there was nothing to send to user, in spite of consuming trace
4543          * entries, go back to wait for more entries.
4544          */
4545         if (sret == -EBUSY)
4546                 goto waitagain;
4547
4548 out:
4549         mutex_unlock(&iter->mutex);
4550
4551         return sret;
4552 }
4553
4554 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4555                                      unsigned int idx)
4556 {
4557         __free_page(spd->pages[idx]);
4558 }
4559
4560 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4561         .can_merge              = 0,
4562         .confirm                = generic_pipe_buf_confirm,
4563         .release                = generic_pipe_buf_release,
4564         .steal                  = generic_pipe_buf_steal,
4565         .get                    = generic_pipe_buf_get,
4566 };
4567
4568 static size_t
4569 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4570 {
4571         size_t count;
4572         int ret;
4573
4574         /* Seq buffer is page-sized, exactly what we need. */
4575         for (;;) {
4576                 count = iter->seq.len;
4577                 ret = print_trace_line(iter);
4578                 count = iter->seq.len - count;
4579                 if (rem < count) {
4580                         rem = 0;
4581                         iter->seq.len -= count;
4582                         break;
4583                 }
4584                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4585                         iter->seq.len -= count;
4586                         break;
4587                 }
4588
4589                 if (ret != TRACE_TYPE_NO_CONSUME)
4590                         trace_consume(iter);
4591                 rem -= count;
4592                 if (!trace_find_next_entry_inc(iter))   {
4593                         rem = 0;
4594                         iter->ent = NULL;
4595                         break;
4596                 }
4597         }
4598
4599         return rem;
4600 }
4601
4602 static ssize_t tracing_splice_read_pipe(struct file *filp,
4603                                         loff_t *ppos,
4604                                         struct pipe_inode_info *pipe,
4605                                         size_t len,
4606                                         unsigned int flags)
4607 {
4608         struct page *pages_def[PIPE_DEF_BUFFERS];
4609         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4610         struct trace_iterator *iter = filp->private_data;
4611         struct splice_pipe_desc spd = {
4612                 .pages          = pages_def,
4613                 .partial        = partial_def,
4614                 .nr_pages       = 0, /* This gets updated below. */
4615                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4616                 .flags          = flags,
4617                 .ops            = &tracing_pipe_buf_ops,
4618                 .spd_release    = tracing_spd_release_pipe,
4619         };
4620         struct trace_array *tr = iter->tr;
4621         ssize_t ret;
4622         size_t rem;
4623         unsigned int i;
4624
4625         if (splice_grow_spd(pipe, &spd))
4626                 return -ENOMEM;
4627
4628         /* copy the tracer to avoid using a global lock all around */
4629         mutex_lock(&trace_types_lock);
4630         if (unlikely(iter->trace->name != tr->current_trace->name))
4631                 *iter->trace = *tr->current_trace;
4632         mutex_unlock(&trace_types_lock);
4633
4634         mutex_lock(&iter->mutex);
4635
4636         if (iter->trace->splice_read) {
4637                 ret = iter->trace->splice_read(iter, filp,
4638                                                ppos, pipe, len, flags);
4639                 if (ret)
4640                         goto out_err;
4641         }
4642
4643         ret = tracing_wait_pipe(filp);
4644         if (ret <= 0)
4645                 goto out_err;
4646
4647         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4648                 ret = -EFAULT;
4649                 goto out_err;
4650         }
4651
4652         trace_event_read_lock();
4653         trace_access_lock(iter->cpu_file);
4654
4655         /* Fill as many pages as possible. */
4656         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4657                 spd.pages[i] = alloc_page(GFP_KERNEL);
4658                 if (!spd.pages[i])
4659                         break;
4660
4661                 rem = tracing_fill_pipe_page(rem, iter);
4662
4663                 /* Copy the data into the page, so we can start over. */
4664                 ret = trace_seq_to_buffer(&iter->seq,
4665                                           page_address(spd.pages[i]),
4666                                           iter->seq.len);
4667                 if (ret < 0) {
4668                         __free_page(spd.pages[i]);
4669                         break;
4670                 }
4671                 spd.partial[i].offset = 0;
4672                 spd.partial[i].len = iter->seq.len;
4673
4674                 trace_seq_init(&iter->seq);
4675         }
4676
4677         trace_access_unlock(iter->cpu_file);
4678         trace_event_read_unlock();
4679         mutex_unlock(&iter->mutex);
4680
4681         spd.nr_pages = i;
4682
4683         ret = splice_to_pipe(pipe, &spd);
4684 out:
4685         splice_shrink_spd(&spd);
4686         return ret;
4687
4688 out_err:
4689         mutex_unlock(&iter->mutex);
4690         goto out;
4691 }
4692
4693 static ssize_t
4694 tracing_entries_read(struct file *filp, char __user *ubuf,
4695                      size_t cnt, loff_t *ppos)
4696 {
4697         struct inode *inode = file_inode(filp);
4698         struct trace_array *tr = inode->i_private;
4699         int cpu = tracing_get_cpu(inode);
4700         char buf[64];
4701         int r = 0;
4702         ssize_t ret;
4703
4704         mutex_lock(&trace_types_lock);
4705
4706         if (cpu == RING_BUFFER_ALL_CPUS) {
4707                 int cpu, buf_size_same;
4708                 unsigned long size;
4709
4710                 size = 0;
4711                 buf_size_same = 1;
4712                 /* check if all cpu sizes are the same */
4713                 for_each_tracing_cpu(cpu) {
4714                         /* fill in the size from first enabled cpu */
4715                         if (size == 0)
4716                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4717                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4718                                 buf_size_same = 0;
4719                                 break;
4720                         }
4721                 }
4722
4723                 if (buf_size_same) {
4724                         if (!ring_buffer_expanded)
4725                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4726                                             size >> 10,
4727                                             trace_buf_size >> 10);
4728                         else
4729                                 r = sprintf(buf, "%lu\n", size >> 10);
4730                 } else
4731                         r = sprintf(buf, "X\n");
4732         } else
4733                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4734
4735         mutex_unlock(&trace_types_lock);
4736
4737         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4738         return ret;
4739 }
4740
4741 static ssize_t
4742 tracing_entries_write(struct file *filp, const char __user *ubuf,
4743                       size_t cnt, loff_t *ppos)
4744 {
4745         struct inode *inode = file_inode(filp);
4746         struct trace_array *tr = inode->i_private;
4747         unsigned long val;
4748         int ret;
4749
4750         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4751         if (ret)
4752                 return ret;
4753
4754         /* must have at least 1 entry */
4755         if (!val)
4756                 return -EINVAL;
4757
4758         /* value is in KB */
4759         val <<= 10;
4760         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4761         if (ret < 0)
4762                 return ret;
4763
4764         *ppos += cnt;
4765
4766         return cnt;
4767 }
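/*
 * Illustrative use of the per-cpu buffer size interface backed by the
 * handlers above (assumed to be "buffer_size_kb" from the readme): the
 * value is interpreted in KB and applied either to a single CPU or to
 * all CPUs, depending on which file was written:
 *
 *   # echo 4096 > buffer_size_kb                   : 4 MB per CPU
 *   # echo 1024 > per_cpu/cpu0/buffer_size_kb      : only CPU 0 (path
 *                                                    assumed for illustration)
 */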
4768
4769 static ssize_t
4770 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4771                                 size_t cnt, loff_t *ppos)
4772 {
4773         struct trace_array *tr = filp->private_data;
4774         char buf[64];
4775         int r, cpu;
4776         unsigned long size = 0, expanded_size = 0;
4777
4778         mutex_lock(&trace_types_lock);
4779         for_each_tracing_cpu(cpu) {
4780                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4781                 if (!ring_buffer_expanded)
4782                         expanded_size += trace_buf_size >> 10;
4783         }
4784         if (ring_buffer_expanded)
4785                 r = sprintf(buf, "%lu\n", size);
4786         else
4787                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4788         mutex_unlock(&trace_types_lock);
4789
4790         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4791 }
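/*
 * Illustrative read of "buffer_total_size_kb" (see the readme above):
 * the value is the sum of all per-cpu buffer sizes in KB, with the
 * post-expansion total shown in parentheses while the ring buffer is
 * still at its boot-time minimum size:
 *
 *   # cat buffer_total_size_kb
 *   <total> (expanded: <total after first use>)
 */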
4792
4793 static ssize_t
4794 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4795                           size_t cnt, loff_t *ppos)
4796 {
4797         /*
4798          * There is no need to read what the user has written; this function
4799          * only exists so that "echo" does not fail when writing to the file.
4800          */
4801
4802         *ppos += cnt;
4803
4804         return cnt;
4805 }
4806
4807 static int
4808 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4809 {
4810         struct trace_array *tr = inode->i_private;
4811
4812         /* disable tracing? */
4813         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4814                 tracer_tracing_off(tr);
4815         /* resize the ring buffer to 0 */
4816         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4817
4818         trace_array_put(tr);
4819
4820         return 0;
4821 }
4822
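/*
 * Write handler for the "trace_marker" file.  The user data is copied
 * straight into a TRACE_PRINT event in the ring buffer: the user pages
 * are pinned with get_user_pages_fast(), mapped with kmap_atomic(), and
 * memcpy'd into the reserved event, adding a trailing newline if one is
 * missing.
 *
 * Illustrative usage, assuming the usual debugfs mount point:
 *
 *   echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 */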
4823 static ssize_t
4824 tracing_mark_write(struct file *filp, const char __user *ubuf,
4825                                         size_t cnt, loff_t *fpos)
4826 {
4827         unsigned long addr = (unsigned long)ubuf;
4828         struct trace_array *tr = filp->private_data;
4829         struct ring_buffer_event *event;
4830         struct ring_buffer *buffer;
4831         struct print_entry *entry;
4832         unsigned long irq_flags;
4833         struct page *pages[2];
4834         void *map_page[2];
4835         int nr_pages = 1;
4836         ssize_t written;
4837         int offset;
4838         int size;
4839         int len;
4840         int ret;
4841         int i;
4842
4843         if (tracing_disabled)
4844                 return -EINVAL;
4845
4846         if (!(trace_flags & TRACE_ITER_MARKERS))
4847                 return -EINVAL;
4848
4849         if (cnt > TRACE_BUF_SIZE)
4850                 cnt = TRACE_BUF_SIZE;
4851
4852         /*
4853          * Userspace is injecting traces into the kernel trace buffer.
4854          * We want to be as non-intrusive as possible.
4855          * To do so, we do not want to allocate any special buffers
4856          * or take any locks, but instead write the userspace data
4857          * straight into the ring buffer.
4858          *
4859          * First we need to pin the userspace buffer into memory.
4860          * It most likely already is, because userspace just referenced
4861          * it, but there is no guarantee. By using get_user_pages_fast()
4862          * and kmap_atomic()/kunmap_atomic() we can get access to the
4863          * pages directly. We then write the data directly into the
4864          * ring buffer.
4865          */
4866         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4867
4868         /* check if we cross pages */
4869         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4870                 nr_pages = 2;
4871
4872         offset = addr & (PAGE_SIZE - 1);
4873         addr &= PAGE_MASK;
4874
4875         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4876         if (ret < nr_pages) {
4877                 while (--ret >= 0)
4878                         put_page(pages[ret]);
4879                 written = -EFAULT;
4880                 goto out;
4881         }
4882
4883         for (i = 0; i < nr_pages; i++)
4884                 map_page[i] = kmap_atomic(pages[i]);
4885
4886         local_save_flags(irq_flags);
4887         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4888         buffer = tr->trace_buffer.buffer;
4889         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4890                                           irq_flags, preempt_count());
4891         if (!event) {
4892                 /* Ring buffer disabled, return as if not open for write */
4893                 written = -EBADF;
4894                 goto out_unlock;
4895         }
4896
4897         entry = ring_buffer_event_data(event);
4898         entry->ip = _THIS_IP_;
4899
4900         if (nr_pages == 2) {
4901                 len = PAGE_SIZE - offset;
4902                 memcpy(&entry->buf, map_page[0] + offset, len);
4903                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4904         } else
4905                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4906
4907         if (entry->buf[cnt - 1] != '\n') {
4908                 entry->buf[cnt] = '\n';
4909                 entry->buf[cnt + 1] = '\0';
4910         } else
4911                 entry->buf[cnt] = '\0';
4912
4913         __buffer_unlock_commit(buffer, event);
4914
4915         written = cnt;
4916
4917         *fpos += written;
4918
4919  out_unlock:
4920         for (i = 0; i < nr_pages; i++) {
4921                 kunmap_atomic(map_page[i]);
4922                 put_page(pages[i]);
4923         }
4924  out:
4925         return written;
4926 }
4927
4928 static int tracing_clock_show(struct seq_file *m, void *v)
4929 {
4930         struct trace_array *tr = m->private;
4931         int i;
4932
4933         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4934                 seq_printf(m,
4935                         "%s%s%s%s", i ? " " : "",
4936                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4937                         i == tr->clock_id ? "]" : "");
4938         seq_putc(m, '\n');
4939
4940         return 0;
4941 }
4942
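/*
 * Select the trace clock named by @clockstr from the trace_clocks[]
 * table.  On success the ring buffer clock is switched and the buffers
 * are reset (including the max/snapshot buffer when it is configured in),
 * since timestamps taken with different clocks are not comparable.
 *
 * Illustrative usage via the "trace_clock" file, assuming the usual
 * debugfs mount point:
 *
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */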
4943 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4944 {
4945         int i;
4946
4947         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4948                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4949                         break;
4950         }
4951         if (i == ARRAY_SIZE(trace_clocks))
4952                 return -EINVAL;
4953
4954         mutex_lock(&trace_types_lock);
4955
4956         tr->clock_id = i;
4957
4958         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4959
4960         /*
4961          * New clock may not be consistent with the previous clock.
4962          * Reset the buffer so that it doesn't have incomparable timestamps.
4963          */
4964         tracing_reset_online_cpus(&tr->trace_buffer);
4965
4966 #ifdef CONFIG_TRACER_MAX_TRACE
4967         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4968                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4969         tracing_reset_online_cpus(&tr->max_buffer);
4970 #endif
4971
4972         mutex_unlock(&trace_types_lock);
4973
4974         return 0;
4975 }
4976
4977 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4978                                    size_t cnt, loff_t *fpos)
4979 {
4980         struct seq_file *m = filp->private_data;
4981         struct trace_array *tr = m->private;
4982         char buf[64];
4983         const char *clockstr;
4984         int ret;
4985
4986         if (cnt >= sizeof(buf))
4987                 return -EINVAL;
4988
4989         if (copy_from_user(&buf, ubuf, cnt))
4990                 return -EFAULT;
4991
4992         buf[cnt] = 0;
4993
4994         clockstr = strstrip(buf);
4995
4996         ret = tracing_set_clock(tr, clockstr);
4997         if (ret)
4998                 return ret;
4999
5000         *fpos += cnt;
5001
5002         return cnt;
5003 }
5004
5005 static int tracing_clock_open(struct inode *inode, struct file *file)
5006 {
5007         struct trace_array *tr = inode->i_private;
5008         int ret;
5009
5010         if (tracing_disabled)
5011                 return -ENODEV;
5012
5013         if (trace_array_get(tr))
5014                 return -ENODEV;
5015
5016         ret = single_open(file, tracing_clock_show, inode->i_private);
5017         if (ret < 0)
5018                 trace_array_put(tr);
5019
5020         return ret;
5021 }
5022
5023 struct ftrace_buffer_info {
5024         struct trace_iterator   iter;
5025         void                    *spare;
5026         unsigned int            read;
5027 };
5028
5029 #ifdef CONFIG_TRACER_SNAPSHOT
5030 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5031 {
5032         struct trace_array *tr = inode->i_private;
5033         struct trace_iterator *iter;
5034         struct seq_file *m;
5035         int ret = 0;
5036
5037         if (trace_array_get(tr) < 0)
5038                 return -ENODEV;
5039
5040         if (file->f_mode & FMODE_READ) {
5041                 iter = __tracing_open(inode, file, true);
5042                 if (IS_ERR(iter))
5043                         ret = PTR_ERR(iter);
5044         } else {
5045                 /* Writes still need the seq_file to hold the private data */
5046                 ret = -ENOMEM;
5047                 m = kzalloc(sizeof(*m), GFP_KERNEL);
5048                 if (!m)
5049                         goto out;
5050                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5051                 if (!iter) {
5052                         kfree(m);
5053                         goto out;
5054                 }
5055                 ret = 0;
5056
5057                 iter->tr = tr;
5058                 iter->trace_buffer = &tr->max_buffer;
5059                 iter->cpu_file = tracing_get_cpu(inode);
5060                 m->private = iter;
5061                 file->private_data = m;
5062         }
5063 out:
5064         if (ret < 0)
5065                 trace_array_put(tr);
5066
5067         return ret;
5068 }
5069
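/*
 * Write handler for the "snapshot" file.  The value written selects the
 * action, mirroring the switch statement below:
 *
 *   0      - free the snapshot buffer (only valid on the all-CPU file)
 *   1      - allocate the snapshot buffer if needed and swap it with the
 *            live buffer (per-CPU swap only if the ring buffer allows it)
 *   other  - clear the snapshot buffer without taking a new snapshot
 */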
5070 static ssize_t
5071 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5072                        loff_t *ppos)
5073 {
5074         struct seq_file *m = filp->private_data;
5075         struct trace_iterator *iter = m->private;
5076         struct trace_array *tr = iter->tr;
5077         unsigned long val;
5078         int ret;
5079
5080         ret = tracing_update_buffers();
5081         if (ret < 0)
5082                 return ret;
5083
5084         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5085         if (ret)
5086                 return ret;
5087
5088         mutex_lock(&trace_types_lock);
5089
5090         if (tr->current_trace->use_max_tr) {
5091                 ret = -EBUSY;
5092                 goto out;
5093         }
5094
5095         switch (val) {
5096         case 0:
5097                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5098                         ret = -EINVAL;
5099                         break;
5100                 }
5101                 if (tr->allocated_snapshot)
5102                         free_snapshot(tr);
5103                 break;
5104         case 1:
5105 /* Only allow per-cpu swap if the ring buffer supports it */
5106 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5107                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5108                         ret = -EINVAL;
5109                         break;
5110                 }
5111 #endif
5112                 if (!tr->allocated_snapshot) {
5113                         ret = alloc_snapshot(tr);
5114                         if (ret < 0)
5115                                 break;
5116                 }
5117                 local_irq_disable();
5118                 /* Now, we're going to swap */
5119                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5120                         update_max_tr(tr, current, smp_processor_id());
5121                 else
5122                         update_max_tr_single(tr, current, iter->cpu_file);
5123                 local_irq_enable();
5124                 break;
5125         default:
5126                 if (tr->allocated_snapshot) {
5127                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5128                                 tracing_reset_online_cpus(&tr->max_buffer);
5129                         else
5130                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
5131                 }
5132                 break;
5133         }
5134
5135         if (ret >= 0) {
5136                 *ppos += cnt;
5137                 ret = cnt;
5138         }
5139 out:
5140         mutex_unlock(&trace_types_lock);
5141         return ret;
5142 }
5143
5144 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5145 {
5146         struct seq_file *m = file->private_data;
5147         int ret;
5148
5149         ret = tracing_release(inode, file);
5150
5151         if (file->f_mode & FMODE_READ)
5152                 return ret;
5153
5154         /* If write only, the seq_file is just a stub */
5155         if (m)
5156                 kfree(m->private);
5157         kfree(m);
5158
5159         return 0;
5160 }
5161
5162 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5163 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5164                                     size_t count, loff_t *ppos);
5165 static int tracing_buffers_release(struct inode *inode, struct file *file);
5166 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5167                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5168
5169 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5170 {
5171         struct ftrace_buffer_info *info;
5172         int ret;
5173
5174         ret = tracing_buffers_open(inode, filp);
5175         if (ret < 0)
5176                 return ret;
5177
5178         info = filp->private_data;
5179
5180         if (info->iter.trace->use_max_tr) {
5181                 tracing_buffers_release(inode, filp);
5182                 return -EBUSY;
5183         }
5184
5185         info->iter.snapshot = true;
5186         info->iter.trace_buffer = &info->iter.tr->max_buffer;
5187
5188         return ret;
5189 }
5190
5191 #endif /* CONFIG_TRACER_SNAPSHOT */
5192
5193
5194 static const struct file_operations tracing_thresh_fops = {
5195         .open           = tracing_open_generic,
5196         .read           = tracing_thresh_read,
5197         .write          = tracing_thresh_write,
5198         .llseek         = generic_file_llseek,
5199 };
5200
5201 static const struct file_operations tracing_max_lat_fops = {
5202         .open           = tracing_open_generic,
5203         .read           = tracing_max_lat_read,
5204         .write          = tracing_max_lat_write,
5205         .llseek         = generic_file_llseek,
5206 };
5207
5208 static const struct file_operations set_tracer_fops = {
5209         .open           = tracing_open_generic,
5210         .read           = tracing_set_trace_read,
5211         .write          = tracing_set_trace_write,
5212         .llseek         = generic_file_llseek,
5213 };
5214
5215 static const struct file_operations tracing_pipe_fops = {
5216         .open           = tracing_open_pipe,
5217         .poll           = tracing_poll_pipe,
5218         .read           = tracing_read_pipe,
5219         .splice_read    = tracing_splice_read_pipe,
5220         .release        = tracing_release_pipe,
5221         .llseek         = no_llseek,
5222 };
5223
5224 static const struct file_operations tracing_entries_fops = {
5225         .open           = tracing_open_generic_tr,
5226         .read           = tracing_entries_read,
5227         .write          = tracing_entries_write,
5228         .llseek         = generic_file_llseek,
5229         .release        = tracing_release_generic_tr,
5230 };
5231
5232 static const struct file_operations tracing_total_entries_fops = {
5233         .open           = tracing_open_generic_tr,
5234         .read           = tracing_total_entries_read,
5235         .llseek         = generic_file_llseek,
5236         .release        = tracing_release_generic_tr,
5237 };
5238
5239 static const struct file_operations tracing_free_buffer_fops = {
5240         .open           = tracing_open_generic_tr,
5241         .write          = tracing_free_buffer_write,
5242         .release        = tracing_free_buffer_release,
5243 };
5244
5245 static const struct file_operations tracing_mark_fops = {
5246         .open           = tracing_open_generic_tr,
5247         .write          = tracing_mark_write,
5248         .llseek         = generic_file_llseek,
5249         .release        = tracing_release_generic_tr,
5250 };
5251
5252 static const struct file_operations trace_clock_fops = {
5253         .open           = tracing_clock_open,
5254         .read           = seq_read,
5255         .llseek         = seq_lseek,
5256         .release        = tracing_single_release_tr,
5257         .write          = tracing_clock_write,
5258 };
5259
5260 #ifdef CONFIG_TRACER_SNAPSHOT
5261 static const struct file_operations snapshot_fops = {
5262         .open           = tracing_snapshot_open,
5263         .read           = seq_read,
5264         .write          = tracing_snapshot_write,
5265         .llseek         = tracing_lseek,
5266         .release        = tracing_snapshot_release,
5267 };
5268
5269 static const struct file_operations snapshot_raw_fops = {
5270         .open           = snapshot_raw_open,
5271         .read           = tracing_buffers_read,
5272         .release        = tracing_buffers_release,
5273         .splice_read    = tracing_buffers_splice_read,
5274         .llseek         = no_llseek,
5275 };
5276
5277 #endif /* CONFIG_TRACER_SNAPSHOT */
5278
5279 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5280 {
5281         struct trace_array *tr = inode->i_private;
5282         struct ftrace_buffer_info *info;
5283         int ret;
5284
5285         if (tracing_disabled)
5286                 return -ENODEV;
5287
5288         if (trace_array_get(tr) < 0)
5289                 return -ENODEV;
5290
5291         info = kzalloc(sizeof(*info), GFP_KERNEL);
5292         if (!info) {
5293                 trace_array_put(tr);
5294                 return -ENOMEM;
5295         }
5296
5297         mutex_lock(&trace_types_lock);
5298
5299         info->iter.tr           = tr;
5300         info->iter.cpu_file     = tracing_get_cpu(inode);
5301         info->iter.trace        = tr->current_trace;
5302         info->iter.trace_buffer = &tr->trace_buffer;
5303         info->spare             = NULL;
5304         /* Force reading ring buffer for first read */
5305         info->read              = (unsigned int)-1;
5306
5307         filp->private_data = info;
5308
5309         mutex_unlock(&trace_types_lock);
5310
5311         ret = nonseekable_open(inode, filp);
5312         if (ret < 0)
5313                 trace_array_put(tr);
5314
5315         return ret;
5316 }
5317
5318 static unsigned int
5319 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5320 {
5321         struct ftrace_buffer_info *info = filp->private_data;
5322         struct trace_iterator *iter = &info->iter;
5323
5324         return trace_poll(iter, filp, poll_table);
5325 }
5326
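/*
 * Read handler for the "trace_pipe_raw" file.  Data is pulled a page at
 * a time into a spare ring-buffer page; partially consumed pages are
 * tracked in info->read so the next read continues where the previous
 * one left off.  Blocks while the buffer is empty unless O_NONBLOCK is
 * set.
 */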
5327 static ssize_t
5328 tracing_buffers_read(struct file *filp, char __user *ubuf,
5329                      size_t count, loff_t *ppos)
5330 {
5331         struct ftrace_buffer_info *info = filp->private_data;
5332         struct trace_iterator *iter = &info->iter;
5333         ssize_t ret;
5334         ssize_t size;
5335
5336         if (!count)
5337                 return 0;
5338
5339         mutex_lock(&trace_types_lock);
5340
5341 #ifdef CONFIG_TRACER_MAX_TRACE
5342         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5343                 size = -EBUSY;
5344                 goto out_unlock;
5345         }
5346 #endif
5347
5348         if (!info->spare)
5349                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5350                                                           iter->cpu_file);
5351         size = -ENOMEM;
5352         if (!info->spare)
5353                 goto out_unlock;
5354
5355         /* Do we have previous read data to read? */
5356         if (info->read < PAGE_SIZE)
5357                 goto read;
5358
5359  again:
5360         trace_access_lock(iter->cpu_file);
5361         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5362                                     &info->spare,
5363                                     count,
5364                                     iter->cpu_file, 0);
5365         trace_access_unlock(iter->cpu_file);
5366
5367         if (ret < 0) {
5368                 if (trace_empty(iter)) {
5369                         if ((filp->f_flags & O_NONBLOCK)) {
5370                                 size = -EAGAIN;
5371                                 goto out_unlock;
5372                         }
5373                         mutex_unlock(&trace_types_lock);
5374                         ret = wait_on_pipe(iter);
5375                         mutex_lock(&trace_types_lock);
5376                         if (ret) {
5377                                 size = ret;
5378                                 goto out_unlock;
5379                         }
5380                         if (signal_pending(current)) {
5381                                 size = -EINTR;
5382                                 goto out_unlock;
5383                         }
5384                         goto again;
5385                 }
5386                 size = 0;
5387                 goto out_unlock;
5388         }
5389
5390         info->read = 0;
5391  read:
5392         size = PAGE_SIZE - info->read;
5393         if (size > count)
5394                 size = count;
5395
5396         ret = copy_to_user(ubuf, info->spare + info->read, size);
5397         if (ret == size) {
5398                 size = -EFAULT;
5399                 goto out_unlock;
5400         }
5401         size -= ret;
5402
5403         *ppos += size;
5404         info->read += size;
5405
5406  out_unlock:
5407         mutex_unlock(&trace_types_lock);
5408
5409         return size;
5410 }
5411
5412 static int tracing_buffers_release(struct inode *inode, struct file *file)
5413 {
5414         struct ftrace_buffer_info *info = file->private_data;
5415         struct trace_iterator *iter = &info->iter;
5416
5417         mutex_lock(&trace_types_lock);
5418
5419         __trace_array_put(iter->tr);
5420
5421         if (info->spare)
5422                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5423         kfree(info);
5424
5425         mutex_unlock(&trace_types_lock);
5426
5427         return 0;
5428 }
5429
5430 struct buffer_ref {
5431         struct ring_buffer      *buffer;
5432         void                    *page;
5433         int                     ref;
5434 };
5435
5436 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5437                                     struct pipe_buffer *buf)
5438 {
5439         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5440
5441         if (--ref->ref)
5442                 return;
5443
5444         ring_buffer_free_read_page(ref->buffer, ref->page);
5445         kfree(ref);
5446         buf->private = 0;
5447 }
5448
5449 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5450                                 struct pipe_buffer *buf)
5451 {
5452         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5453
5454         ref->ref++;
5455 }
5456
5457 /* Pipe buffer operations for a buffer. */
5458 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5459         .can_merge              = 0,
5460         .confirm                = generic_pipe_buf_confirm,
5461         .release                = buffer_pipe_buf_release,
5462         .steal                  = generic_pipe_buf_steal,
5463         .get                    = buffer_pipe_buf_get,
5464 };
5465
5466 /*
5467  * Callback from splice_to_pipe(), used to release pages at the end
5468  * of the spd in case we errored out while filling the pipe.
5469  */
5470 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5471 {
5472         struct buffer_ref *ref =
5473                 (struct buffer_ref *)spd->partial[i].private;
5474
5475         if (--ref->ref)
5476                 return;
5477
5478         ring_buffer_free_read_page(ref->buffer, ref->page);
5479         kfree(ref);
5480         spd->partial[i].private = 0;
5481 }
5482
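/*
 * Splice handler for the "trace_pipe_raw" file.  Whole ring-buffer pages
 * are handed to the pipe without copying: each page is pulled out with
 * ring_buffer_read_page(), wrapped in a buffer_ref for reference
 * counting, and freed back to the ring buffer once the pipe is done with
 * it.  *ppos must be page aligned and len is truncated to whole pages.
 */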
5483 static ssize_t
5484 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5485                             struct pipe_inode_info *pipe, size_t len,
5486                             unsigned int flags)
5487 {
5488         struct ftrace_buffer_info *info = file->private_data;
5489         struct trace_iterator *iter = &info->iter;
5490         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5491         struct page *pages_def[PIPE_DEF_BUFFERS];
5492         struct splice_pipe_desc spd = {
5493                 .pages          = pages_def,
5494                 .partial        = partial_def,
5495                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5496                 .flags          = flags,
5497                 .ops            = &buffer_pipe_buf_ops,
5498                 .spd_release    = buffer_spd_release,
5499         };
5500         struct buffer_ref *ref;
5501         int entries, size, i;
5502         ssize_t ret;
5503
5504         mutex_lock(&trace_types_lock);
5505
5506 #ifdef CONFIG_TRACER_MAX_TRACE
5507         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5508                 ret = -EBUSY;
5509                 goto out;
5510         }
5511 #endif
5512
5513         if (splice_grow_spd(pipe, &spd)) {
5514                 ret = -ENOMEM;
5515                 goto out;
5516         }
5517
5518         if (*ppos & (PAGE_SIZE - 1)) {
5519                 ret = -EINVAL;
5520                 goto out;
5521         }
5522
5523         if (len & (PAGE_SIZE - 1)) {
5524                 if (len < PAGE_SIZE) {
5525                         ret = -EINVAL;
5526                         goto out;
5527                 }
5528                 len &= PAGE_MASK;
5529         }
5530
5531  again:
5532         trace_access_lock(iter->cpu_file);
5533         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5534
5535         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5536                 struct page *page;
5537                 int r;
5538
5539                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5540                 if (!ref)
5541                         break;
5542
5543                 ref->ref = 1;
5544                 ref->buffer = iter->trace_buffer->buffer;
5545                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5546                 if (!ref->page) {
5547                         kfree(ref);
5548                         break;
5549                 }
5550
5551                 r = ring_buffer_read_page(ref->buffer, &ref->page,
5552                                           len, iter->cpu_file, 1);
5553                 if (r < 0) {
5554                         ring_buffer_free_read_page(ref->buffer, ref->page);
5555                         kfree(ref);
5556                         break;
5557                 }
5558
5559                 /*
5560                  * Zero out any leftover data; this page is going
5561                  * to user land.
5562                  */
5563                 size = ring_buffer_page_len(ref->page);
5564                 if (size < PAGE_SIZE)
5565                         memset(ref->page + size, 0, PAGE_SIZE - size);
5566
5567                 page = virt_to_page(ref->page);
5568
5569                 spd.pages[i] = page;
5570                 spd.partial[i].len = PAGE_SIZE;
5571                 spd.partial[i].offset = 0;
5572                 spd.partial[i].private = (unsigned long)ref;
5573                 spd.nr_pages++;
5574                 *ppos += PAGE_SIZE;
5575
5576                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5577         }
5578
5579         trace_access_unlock(iter->cpu_file);
5580         spd.nr_pages = i;
5581
5582         /* did we read anything? */
5583         if (!spd.nr_pages) {
5584                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5585                         ret = -EAGAIN;
5586                         goto out;
5587                 }
5588                 mutex_unlock(&trace_types_lock);
5589                 ret = wait_on_pipe(iter);
5590                 mutex_lock(&trace_types_lock);
5591                 if (ret)
5592                         goto out;
5593                 if (signal_pending(current)) {
5594                         ret = -EINTR;
5595                         goto out;
5596                 }
5597                 goto again;
5598         }
5599
5600         ret = splice_to_pipe(pipe, &spd);
5601         splice_shrink_spd(&spd);
5602 out:
5603         mutex_unlock(&trace_types_lock);
5604
5605         return ret;
5606 }
5607
5608 static const struct file_operations tracing_buffers_fops = {
5609         .open           = tracing_buffers_open,
5610         .read           = tracing_buffers_read,
5611         .poll           = tracing_buffers_poll,
5612         .release        = tracing_buffers_release,
5613         .splice_read    = tracing_buffers_splice_read,
5614         .llseek         = no_llseek,
5615 };
5616
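/*
 * Read handler for the per_cpu/cpuN/stats file.  Reports the entry,
 * overrun, byte and dropped/read event counts of that CPU's buffer, plus
 * the oldest-event and current timestamps (printed as seconds.usecs when
 * the selected trace clock counts in nanoseconds, raw otherwise).
 */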
5617 static ssize_t
5618 tracing_stats_read(struct file *filp, char __user *ubuf,
5619                    size_t count, loff_t *ppos)
5620 {
5621         struct inode *inode = file_inode(filp);
5622         struct trace_array *tr = inode->i_private;
5623         struct trace_buffer *trace_buf = &tr->trace_buffer;
5624         int cpu = tracing_get_cpu(inode);
5625         struct trace_seq *s;
5626         unsigned long cnt;
5627         unsigned long long t;
5628         unsigned long usec_rem;
5629
5630         s = kmalloc(sizeof(*s), GFP_KERNEL);
5631         if (!s)
5632                 return -ENOMEM;
5633
5634         trace_seq_init(s);
5635
5636         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5637         trace_seq_printf(s, "entries: %ld\n", cnt);
5638
5639         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5640         trace_seq_printf(s, "overrun: %ld\n", cnt);
5641
5642         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5643         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5644
5645         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5646         trace_seq_printf(s, "bytes: %ld\n", cnt);
5647
5648         if (trace_clocks[tr->clock_id].in_ns) {
5649                 /* local or global for trace_clock */
5650                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5651                 usec_rem = do_div(t, USEC_PER_SEC);
5652                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5653                                                                 t, usec_rem);
5654
5655                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5656                 usec_rem = do_div(t, USEC_PER_SEC);
5657                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5658         } else {
5659                 /* counter or tsc mode for trace_clock */
5660                 trace_seq_printf(s, "oldest event ts: %llu\n",
5661                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5662
5663                 trace_seq_printf(s, "now ts: %llu\n",
5664                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5665         }
5666
5667         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5668         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5669
5670         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5671         trace_seq_printf(s, "read events: %ld\n", cnt);
5672
5673         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5674
5675         kfree(s);
5676
5677         return count;
5678 }
5679
5680 static const struct file_operations tracing_stats_fops = {
5681         .open           = tracing_open_generic_tr,
5682         .read           = tracing_stats_read,
5683         .llseek         = generic_file_llseek,
5684         .release        = tracing_release_generic_tr,
5685 };
5686
5687 #ifdef CONFIG_DYNAMIC_FTRACE
5688
5689 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5690 {
5691         return 0;
5692 }
5693
5694 static ssize_t
5695 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5696                   size_t cnt, loff_t *ppos)
5697 {
5698         static char ftrace_dyn_info_buffer[1024];
5699         static DEFINE_MUTEX(dyn_info_mutex);
5700         unsigned long *p = filp->private_data;
5701         char *buf = ftrace_dyn_info_buffer;
5702         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5703         int r;
5704
5705         mutex_lock(&dyn_info_mutex);
5706         r = sprintf(buf, "%ld ", *p);
5707
5708         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5709         buf[r++] = '\n';
5710
5711         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5712
5713         mutex_unlock(&dyn_info_mutex);
5714
5715         return r;
5716 }
5717
5718 static const struct file_operations tracing_dyn_info_fops = {
5719         .open           = tracing_open_generic,
5720         .read           = tracing_read_dyn_info,
5721         .llseek         = generic_file_llseek,
5722 };
5723 #endif /* CONFIG_DYNAMIC_FTRACE */
5724
5725 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5726 static void
5727 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5728 {
5729         tracing_snapshot();
5730 }
5731
5732 static void
5733 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5734 {
5735         unsigned long *count = (unsigned long *)data;
5736
5737         if (!*count)
5738                 return;
5739
5740         if (*count != -1)
5741                 (*count)--;
5742
5743         tracing_snapshot();
5744 }
5745
5746 static int
5747 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5748                       struct ftrace_probe_ops *ops, void *data)
5749 {
5750         long count = (long)data;
5751
5752         seq_printf(m, "%ps:", (void *)ip);
5753
5754         seq_printf(m, "snapshot");
5755
5756         if (count == -1)
5757                 seq_printf(m, ":unlimited\n");
5758         else
5759                 seq_printf(m, ":count=%ld\n", count);
5760
5761         return 0;
5762 }
5763
5764 static struct ftrace_probe_ops snapshot_probe_ops = {
5765         .func                   = ftrace_snapshot,
5766         .print                  = ftrace_snapshot_print,
5767 };
5768
5769 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5770         .func                   = ftrace_count_snapshot,
5771         .print                  = ftrace_snapshot_print,
5772 };
5773
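/*
 * Handler for the "snapshot" command of set_ftrace_filter.  The accepted
 * syntax is roughly:
 *
 *   <function>:snapshot          - snapshot every time <function> is hit
 *   <function>:snapshot:<count>  - only snapshot for the first <count> hits
 *   !<function>:snapshot         - remove a previously registered probe
 *
 * Registering the probe also makes sure the snapshot buffer is allocated.
 */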
5774 static int
5775 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5776                                char *glob, char *cmd, char *param, int enable)
5777 {
5778         struct ftrace_probe_ops *ops;
5779         void *count = (void *)-1;
5780         char *number;
5781         int ret;
5782
5783         /* hash funcs only work with set_ftrace_filter */
5784         if (!enable)
5785                 return -EINVAL;
5786
5787         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5788
5789         if (glob[0] == '!') {
5790                 unregister_ftrace_function_probe_func(glob+1, ops);
5791                 return 0;
5792         }
5793
5794         if (!param)
5795                 goto out_reg;
5796
5797         number = strsep(&param, ":");
5798
5799         if (!strlen(number))
5800                 goto out_reg;
5801
5802         /*
5803          * We use the callback data field (which is a pointer)
5804          * as our counter.
5805          */
5806         ret = kstrtoul(number, 0, (unsigned long *)&count);
5807         if (ret)
5808                 return ret;
5809
5810  out_reg:
5811         ret = register_ftrace_function_probe(glob, ops, count);
5812
5813         if (ret >= 0)
5814                 alloc_snapshot(&global_trace);
5815
5816         return ret < 0 ? ret : 0;
5817 }
5818
5819 static struct ftrace_func_command ftrace_snapshot_cmd = {
5820         .name                   = "snapshot",
5821         .func                   = ftrace_trace_snapshot_callback,
5822 };
5823
5824 static __init int register_snapshot_cmd(void)
5825 {
5826         return register_ftrace_command(&ftrace_snapshot_cmd);
5827 }
5828 #else
5829 static inline __init int register_snapshot_cmd(void) { return 0; }
5830 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5831
5832 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5833 {
5834         if (tr->dir)
5835                 return tr->dir;
5836
5837         if (!debugfs_initialized())
5838                 return NULL;
5839
5840         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5841                 tr->dir = debugfs_create_dir("tracing", NULL);
5842
5843         if (!tr->dir)
5844                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5845
5846         return tr->dir;
5847 }
5848
5849 struct dentry *tracing_init_dentry(void)
5850 {
5851         return tracing_init_dentry_tr(&global_trace);
5852 }
5853
5854 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5855 {
5856         struct dentry *d_tracer;
5857
5858         if (tr->percpu_dir)
5859                 return tr->percpu_dir;
5860
5861         d_tracer = tracing_init_dentry_tr(tr);
5862         if (!d_tracer)
5863                 return NULL;
5864
5865         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5866
5867         WARN_ONCE(!tr->percpu_dir,
5868                   "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5869
5870         return tr->percpu_dir;
5871 }
5872
5873 static struct dentry *
5874 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5875                       void *data, long cpu, const struct file_operations *fops)
5876 {
5877         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5878
5879         if (ret) /* See tracing_get_cpu() */
5880                 ret->d_inode->i_cdev = (void *)(cpu + 1);
5881         return ret;
5882 }
5883
5884 static void
5885 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5886 {
5887         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5888         struct dentry *d_cpu;
5889         char cpu_dir[30]; /* 30 characters should be more than enough */
5890
5891         if (!d_percpu)
5892                 return;
5893
5894         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5895         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5896         if (!d_cpu) {
5897                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5898                 return;
5899         }
5900
5901         /* per cpu trace_pipe */
5902         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5903                                 tr, cpu, &tracing_pipe_fops);
5904
5905         /* per cpu trace */
5906         trace_create_cpu_file("trace", 0644, d_cpu,
5907                                 tr, cpu, &tracing_fops);
5908
5909         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5910                                 tr, cpu, &tracing_buffers_fops);
5911
5912         trace_create_cpu_file("stats", 0444, d_cpu,
5913                                 tr, cpu, &tracing_stats_fops);
5914
5915         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5916                                 tr, cpu, &tracing_entries_fops);
5917
5918 #ifdef CONFIG_TRACER_SNAPSHOT
5919         trace_create_cpu_file("snapshot", 0644, d_cpu,
5920                                 tr, cpu, &snapshot_fops);
5921
5922         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5923                                 tr, cpu, &snapshot_raw_fops);
5924 #endif
5925 }
5926
5927 #ifdef CONFIG_FTRACE_SELFTEST
5928 /* Let selftest have access to static functions in this file */
5929 #include "trace_selftest.c"
5930 #endif
5931
5932 struct trace_option_dentry {
5933         struct tracer_opt               *opt;
5934         struct tracer_flags             *flags;
5935         struct trace_array              *tr;
5936         struct dentry                   *entry;
5937 };
5938
5939 static ssize_t
5940 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5941                         loff_t *ppos)
5942 {
5943         struct trace_option_dentry *topt = filp->private_data;
5944         char *buf;
5945
5946         if (topt->flags->val & topt->opt->bit)
5947                 buf = "1\n";
5948         else
5949                 buf = "0\n";
5950
5951         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5952 }
5953
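/*
 * Write handler for the per-tracer option files under options/.  Only
 * "0" and "1" are accepted; the corresponding tracer_opt bit is flipped
 * through __set_tracer_option() when the value actually changes.
 */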
5954 static ssize_t
5955 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5956                          loff_t *ppos)
5957 {
5958         struct trace_option_dentry *topt = filp->private_data;
5959         unsigned long val;
5960         int ret;
5961
5962         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5963         if (ret)
5964                 return ret;
5965
5966         if (val != 0 && val != 1)
5967                 return -EINVAL;
5968
5969         if (!!(topt->flags->val & topt->opt->bit) != val) {
5970                 mutex_lock(&trace_types_lock);
5971                 ret = __set_tracer_option(topt->tr, topt->flags,
5972                                           topt->opt, !val);
5973                 mutex_unlock(&trace_types_lock);
5974                 if (ret)
5975                         return ret;
5976         }
5977
5978         *ppos += cnt;
5979
5980         return cnt;
5981 }
5982
5983
5984 static const struct file_operations trace_options_fops = {
5985         .open = tracing_open_generic,
5986         .read = trace_options_read,
5987         .write = trace_options_write,
5988         .llseek = generic_file_llseek,
5989 };
5990
5991 static ssize_t
5992 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5993                         loff_t *ppos)
5994 {
5995         long index = (long)filp->private_data;
5996         char *buf;
5997
5998         if (trace_flags & (1 << index))
5999                 buf = "1\n";
6000         else
6001                 buf = "0\n";
6002
6003         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6004 }
6005
6006 static ssize_t
6007 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6008                          loff_t *ppos)
6009 {
6010         struct trace_array *tr = &global_trace;
6011         long index = (long)filp->private_data;
6012         unsigned long val;
6013         int ret;
6014
6015         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6016         if (ret)
6017                 return ret;
6018
6019         if (val != 0 && val != 1)
6020                 return -EINVAL;
6021
6022         mutex_lock(&trace_types_lock);
6023         ret = set_tracer_flag(tr, 1 << index, val);
6024         mutex_unlock(&trace_types_lock);
6025
6026         if (ret < 0)
6027                 return ret;
6028
6029         *ppos += cnt;
6030
6031         return cnt;
6032 }
6033
6034 static const struct file_operations trace_options_core_fops = {
6035         .open = tracing_open_generic,
6036         .read = trace_options_core_read,
6037         .write = trace_options_core_write,
6038         .llseek = generic_file_llseek,
6039 };
6040
6041 struct dentry *trace_create_file(const char *name,
6042                                  umode_t mode,
6043                                  struct dentry *parent,
6044                                  void *data,
6045                                  const struct file_operations *fops)
6046 {
6047         struct dentry *ret;
6048
6049         ret = debugfs_create_file(name, mode, parent, data, fops);
6050         if (!ret)
6051                 pr_warning("Could not create debugfs '%s' entry\n", name);
6052
6053         return ret;
6054 }
6055
6056
6057 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6058 {
6059         struct dentry *d_tracer;
6060
6061         if (tr->options)
6062                 return tr->options;
6063
6064         d_tracer = tracing_init_dentry_tr(tr);
6065         if (!d_tracer)
6066                 return NULL;
6067
6068         tr->options = debugfs_create_dir("options", d_tracer);
6069         if (!tr->options) {
6070                 pr_warning("Could not create debugfs directory 'options'\n");
6071                 return NULL;
6072         }
6073
6074         return tr->options;
6075 }
6076
6077 static void
6078 create_trace_option_file(struct trace_array *tr,
6079                          struct trace_option_dentry *topt,
6080                          struct tracer_flags *flags,
6081                          struct tracer_opt *opt)
6082 {
6083         struct dentry *t_options;
6084
6085         t_options = trace_options_init_dentry(tr);
6086         if (!t_options)
6087                 return;
6088
6089         topt->flags = flags;
6090         topt->opt = opt;
6091         topt->tr = tr;
6092
6093         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6094                                     &trace_options_fops);
6095
6096 }
6097
6098 static struct trace_option_dentry *
6099 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6100 {
6101         struct trace_option_dentry *topts;
6102         struct tracer_flags *flags;
6103         struct tracer_opt *opts;
6104         int cnt;
6105
6106         if (!tracer)
6107                 return NULL;
6108
6109         flags = tracer->flags;
6110
6111         if (!flags || !flags->opts)
6112                 return NULL;
6113
6114         opts = flags->opts;
6115
6116         for (cnt = 0; opts[cnt].name; cnt++)
6117                 ;
6118
6119         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6120         if (!topts)
6121                 return NULL;
6122
6123         for (cnt = 0; opts[cnt].name; cnt++)
6124                 create_trace_option_file(tr, &topts[cnt], flags,
6125                                          &opts[cnt]);
6126
6127         return topts;
6128 }
6129
6130 static void
6131 destroy_trace_option_files(struct trace_option_dentry *topts)
6132 {
6133         int cnt;
6134
6135         if (!topts)
6136                 return;
6137
6138         for (cnt = 0; topts[cnt].opt; cnt++)
6139                 debugfs_remove(topts[cnt].entry);
6140
6141         kfree(topts);
6142 }
6143
6144 static struct dentry *
6145 create_trace_option_core_file(struct trace_array *tr,
6146                               const char *option, long index)
6147 {
6148         struct dentry *t_options;
6149
6150         t_options = trace_options_init_dentry(tr);
6151         if (!t_options)
6152                 return NULL;
6153
6154         return trace_create_file(option, 0644, t_options, (void *)index,
6155                                     &trace_options_core_fops);
6156 }
6157
6158 static __init void create_trace_options_dir(struct trace_array *tr)
6159 {
6160         struct dentry *t_options;
6161         int i;
6162
6163         t_options = trace_options_init_dentry(tr);
6164         if (!t_options)
6165                 return;
6166
6167         for (i = 0; trace_options[i]; i++)
6168                 create_trace_option_core_file(tr, trace_options[i], i);
6169 }
6170
6171 static ssize_t
6172 rb_simple_read(struct file *filp, char __user *ubuf,
6173                size_t cnt, loff_t *ppos)
6174 {
6175         struct trace_array *tr = filp->private_data;
6176         char buf[64];
6177         int r;
6178
6179         r = tracer_tracing_is_on(tr);
6180         r = sprintf(buf, "%d\n", r);
6181
6182         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6183 }
6184
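/*
 * Write handler for the "tracing_on" file.  A non-zero value turns the
 * ring buffer back on and calls the current tracer's ->start() hook;
 * zero turns the ring buffer off and calls ->stop().
 */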
6185 static ssize_t
6186 rb_simple_write(struct file *filp, const char __user *ubuf,
6187                 size_t cnt, loff_t *ppos)
6188 {
6189         struct trace_array *tr = filp->private_data;
6190         struct ring_buffer *buffer = tr->trace_buffer.buffer;
6191         unsigned long val;
6192         int ret;
6193
6194         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6195         if (ret)
6196                 return ret;
6197
6198         if (buffer) {
6199                 mutex_lock(&trace_types_lock);
6200                 if (val) {
6201                         tracer_tracing_on(tr);
6202                         if (tr->current_trace->start)
6203                                 tr->current_trace->start(tr);
6204                 } else {
6205                         tracer_tracing_off(tr);
6206                         if (tr->current_trace->stop)
6207                                 tr->current_trace->stop(tr);
6208                 }
6209                 mutex_unlock(&trace_types_lock);
6210         }
6211
6212         (*ppos)++;
6213
6214         return cnt;
6215 }
6216
6217 static const struct file_operations rb_simple_fops = {
6218         .open           = tracing_open_generic_tr,
6219         .read           = rb_simple_read,
6220         .write          = rb_simple_write,
6221         .release        = tracing_release_generic_tr,
6222         .llseek         = default_llseek,
6223 };
6224
6225 struct dentry *trace_instance_dir;
6226
6227 static void
6228 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6229
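/*
 * Allocate one trace_buffer for @tr: the ring buffer itself (honoring
 * the "overwrite" trace option) and the per-CPU trace_array_cpu data.
 * Anything allocated here is freed again on failure.
 */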
6230 static int
6231 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6232 {
6233         enum ring_buffer_flags rb_flags;
6234
6235         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6236
6237         buf->tr = tr;
6238
6239         buf->buffer = ring_buffer_alloc(size, rb_flags);
6240         if (!buf->buffer)
6241                 return -ENOMEM;
6242
6243         buf->data = alloc_percpu(struct trace_array_cpu);
6244         if (!buf->data) {
6245                 ring_buffer_free(buf->buffer);
6246                 return -ENOMEM;
6247         }
6248
6249         /* Allocate the first page for all buffers */
6250         set_buffer_entries(&tr->trace_buffer,
6251                            ring_buffer_size(tr->trace_buffer.buffer, 0));
6252
6253         return 0;
6254 }
6255
6256 static int allocate_trace_buffers(struct trace_array *tr, int size)
6257 {
6258         int ret;
6259
6260         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6261         if (ret)
6262                 return ret;
6263
6264 #ifdef CONFIG_TRACER_MAX_TRACE
6265         ret = allocate_trace_buffer(tr, &tr->max_buffer,
6266                                     allocate_snapshot ? size : 1);
6267         if (WARN_ON(ret)) {
6268                 ring_buffer_free(tr->trace_buffer.buffer);
6269                 free_percpu(tr->trace_buffer.data);
6270                 return -ENOMEM;
6271         }
6272         tr->allocated_snapshot = allocate_snapshot;
6273
6274         /*
6275          * Only the top level trace array gets its snapshot allocated
6276          * from the kernel command line.
6277          */
6278         allocate_snapshot = false;
6279 #endif
6280         return 0;
6281 }
6282
6283 static void free_trace_buffer(struct trace_buffer *buf)
6284 {
6285         if (buf->buffer) {
6286                 ring_buffer_free(buf->buffer);
6287                 buf->buffer = NULL;
6288                 free_percpu(buf->data);
6289                 buf->data = NULL;
6290         }
6291 }
6292
6293 static void free_trace_buffers(struct trace_array *tr)
6294 {
6295         if (!tr)
6296                 return;
6297
6298         free_trace_buffer(&tr->trace_buffer);
6299
6300 #ifdef CONFIG_TRACER_MAX_TRACE
6301         free_trace_buffer(&tr->max_buffer);
6302 #endif
6303 }
6304
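/*
 * Create a new trace instance named @name: a trace_array with its own
 * ring buffers, event directory and debugfs tree, added to the
 * ftrace_trace_arrays list.  This backs "mkdir" in the instances
 * directory, e.g. (assuming the usual debugfs mount point):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 */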
6305 static int new_instance_create(const char *name)
6306 {
6307         struct trace_array *tr;
6308         int ret;
6309
6310         mutex_lock(&trace_types_lock);
6311
6312         ret = -EEXIST;
6313         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6314                 if (tr->name && strcmp(tr->name, name) == 0)
6315                         goto out_unlock;
6316         }
6317
6318         ret = -ENOMEM;
6319         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6320         if (!tr)
6321                 goto out_unlock;
6322
6323         tr->name = kstrdup(name, GFP_KERNEL);
6324         if (!tr->name)
6325                 goto out_free_tr;
6326
6327         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6328                 goto out_free_tr;
6329
6330         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6331
6332         raw_spin_lock_init(&tr->start_lock);
6333
6334         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6335
6336         tr->current_trace = &nop_trace;
6337
6338         INIT_LIST_HEAD(&tr->systems);
6339         INIT_LIST_HEAD(&tr->events);
6340
6341         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6342                 goto out_free_tr;
6343
6344         tr->dir = debugfs_create_dir(name, trace_instance_dir);
6345         if (!tr->dir)
6346                 goto out_free_tr;
6347
6348         ret = event_trace_add_tracer(tr->dir, tr);
6349         if (ret) {
6350                 debugfs_remove_recursive(tr->dir);
6351                 goto out_free_tr;
6352         }
6353
6354         init_tracer_debugfs(tr, tr->dir);
6355
6356         list_add(&tr->list, &ftrace_trace_arrays);
6357
6358         mutex_unlock(&trace_types_lock);
6359
6360         return 0;
6361
6362  out_free_tr:
6363         free_trace_buffers(tr);
6364         free_cpumask_var(tr->tracing_cpumask);
6365         kfree(tr->name);
6366         kfree(tr);
6367
6368  out_unlock:
6369         mutex_unlock(&trace_types_lock);
6370
6371         return ret;
6372
6373 }
6374
6375 static int instance_delete(const char *name)
6376 {
6377         struct trace_array *tr;
6378         int found = 0;
6379         int ret;
6380
6381         mutex_lock(&trace_types_lock);
6382
6383         ret = -ENODEV;
6384         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6385                 if (tr->name && strcmp(tr->name, name) == 0) {
6386                         found = 1;
6387                         break;
6388                 }
6389         }
6390         if (!found)
6391                 goto out_unlock;
6392
6393         ret = -EBUSY;
6394         if (tr->ref)
6395                 goto out_unlock;
6396
6397         list_del(&tr->list);
6398
6399         tracing_set_nop(tr);
6400         event_trace_del_tracer(tr);
6401         ftrace_destroy_function_files(tr);
6402         debugfs_remove_recursive(tr->dir);
6403         free_trace_buffers(tr);
6404
6405         kfree(tr->name);
6406         kfree(tr);
6407
6408         ret = 0;
6409
6410  out_unlock:
6411         mutex_unlock(&trace_types_lock);
6412
6413         return ret;
6414 }
6415
6416 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6417 {
6418         struct dentry *parent;
6419         int ret;
6420
6421         /* Paranoid: Make sure the parent is the "instances" directory */
6422         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6423         if (WARN_ON_ONCE(parent != trace_instance_dir))
6424                 return -ENOENT;
6425
6426         /*
6427          * The inode mutex is locked, but debugfs_create_dir() will also
6428          * take the mutex. As the instances directory can not be destroyed
6429          * or changed in any other way, it is safe to unlock it and let
6430          * the creation proceed. If two users try to make the same dir at
6431          * the same time, then new_instance_create() will determine the
6432          * winner.
6433          */
6434         mutex_unlock(&inode->i_mutex);
6435
6436         ret = new_instance_create(dentry->d_iname);
6437
6438         mutex_lock(&inode->i_mutex);
6439
6440         return ret;
6441 }
6442
6443 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6444 {
6445         struct dentry *parent;
6446         int ret;
6447
6448         /* Paranoid: Make sure the parent is the "instances" directory */
6449         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6450         if (WARN_ON_ONCE(parent != trace_instance_dir))
6451                 return -ENOENT;
6452
6453         /* The caller did a dget() on dentry */
6454         mutex_unlock(&dentry->d_inode->i_mutex);
6455
6456         /*
6457          * The inode mutex is locked, but debugfs_remove_recursive() will
6458          * also take the mutex. As the instances directory can not be
6459          * destroyed or changed in any other way, it is safe to unlock it
6460          * and let the removal proceed. If two users try to remove the same
6461          * dir at the same time, then instance_delete() will determine the
6462          * winner.
6463          */
6464         mutex_unlock(&inode->i_mutex);
6465
6466         ret = instance_delete(dentry->d_iname);
6467
6468         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6469         mutex_lock(&dentry->d_inode->i_mutex);
6470
6471         return ret;
6472 }
6473
6474 static const struct inode_operations instance_dir_inode_operations = {
6475         .lookup         = simple_lookup,
6476         .mkdir          = instance_mkdir,
6477         .rmdir          = instance_rmdir,
6478 };
6479
6480 static __init void create_trace_instances(struct dentry *d_tracer)
6481 {
6482         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6483         if (WARN_ON(!trace_instance_dir))
6484                 return;
6485
6486         /* Hijack the dir inode operations, to allow mkdir */
6487         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6488 }
6489
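/*
 * Create the per-instance control files under @d_tracer. This is called
 * both for the top-level tracing directory (global_trace) and for every
 * instance directory created through instance_mkdir().
 */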
6490 static void
6491 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6492 {
6493         int cpu;
6494
6495         trace_create_file("available_tracers", 0444, d_tracer,
6496                         tr, &show_traces_fops);
6497
6498         trace_create_file("current_tracer", 0644, d_tracer,
6499                         tr, &set_tracer_fops);
6500
6501         trace_create_file("tracing_cpumask", 0644, d_tracer,
6502                           tr, &tracing_cpumask_fops);
6503
6504         trace_create_file("trace_options", 0644, d_tracer,
6505                           tr, &tracing_iter_fops);
6506
6507         trace_create_file("trace", 0644, d_tracer,
6508                           tr, &tracing_fops);
6509
6510         trace_create_file("trace_pipe", 0444, d_tracer,
6511                           tr, &tracing_pipe_fops);
6512
6513         trace_create_file("buffer_size_kb", 0644, d_tracer,
6514                           tr, &tracing_entries_fops);
6515
6516         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6517                           tr, &tracing_total_entries_fops);
6518
6519         trace_create_file("free_buffer", 0200, d_tracer,
6520                           tr, &tracing_free_buffer_fops);
6521
6522         trace_create_file("trace_marker", 0220, d_tracer,
6523                           tr, &tracing_mark_fops);
6524
6525         trace_create_file("trace_clock", 0644, d_tracer, tr,
6526                           &trace_clock_fops);
6527
6528         trace_create_file("tracing_on", 0644, d_tracer,
6529                           tr, &rb_simple_fops);
6530
6531 #ifdef CONFIG_TRACER_MAX_TRACE
6532         trace_create_file("tracing_max_latency", 0644, d_tracer,
6533                         &tr->max_latency, &tracing_max_lat_fops);
6534 #endif
6535
6536         if (ftrace_create_function_files(tr, d_tracer))
6537                 WARN(1, "Could not allocate function filter files");
6538
6539 #ifdef CONFIG_TRACER_SNAPSHOT
6540         trace_create_file("snapshot", 0644, d_tracer,
6541                           tr, &snapshot_fops);
6542 #endif
6543
6544         for_each_tracing_cpu(cpu)
6545                 tracing_init_debugfs_percpu(tr, cpu);
6546
6547 }
6548
6549 static __init int tracer_init_debugfs(void)
6550 {
6551         struct dentry *d_tracer;
6552
6553         trace_access_lock_init();
6554
6555         d_tracer = tracing_init_dentry();
6556         if (!d_tracer)
6557                 return 0;
6558
6559         init_tracer_debugfs(&global_trace, d_tracer);
6560
6561         trace_create_file("tracing_thresh", 0644, d_tracer,
6562                         &global_trace, &tracing_thresh_fops);
6563
6564         trace_create_file("README", 0444, d_tracer,
6565                         NULL, &tracing_readme_fops);
6566
6567         trace_create_file("saved_cmdlines", 0444, d_tracer,
6568                         NULL, &tracing_saved_cmdlines_fops);
6569
6570         trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6571                           NULL, &tracing_saved_cmdlines_size_fops);
6572
6573 #ifdef CONFIG_DYNAMIC_FTRACE
6574         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6575                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6576 #endif
6577
6578         create_trace_instances(d_tracer);
6579
6580         create_trace_options_dir(&global_trace);
6581
6582         return 0;
6583 }
6584
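/*
 * Panic and die notifiers: if the user enabled ftrace_dump_on_oops, dump
 * the ftrace ring buffer to the console before the machine goes down, so
 * the trace leading up to the crash is not lost.
 */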
6585 static int trace_panic_handler(struct notifier_block *this,
6586                                unsigned long event, void *unused)
6587 {
6588         if (ftrace_dump_on_oops)
6589                 ftrace_dump(ftrace_dump_on_oops);
6590         return NOTIFY_OK;
6591 }
6592
6593 static struct notifier_block trace_panic_notifier = {
6594         .notifier_call  = trace_panic_handler,
6595         .next           = NULL,
6596         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6597 };
6598
6599 static int trace_die_handler(struct notifier_block *self,
6600                              unsigned long val,
6601                              void *data)
6602 {
6603         switch (val) {
6604         case DIE_OOPS:
6605                 if (ftrace_dump_on_oops)
6606                         ftrace_dump(ftrace_dump_on_oops);
6607                 break;
6608         default:
6609                 break;
6610         }
6611         return NOTIFY_OK;
6612 }
6613
6614 static struct notifier_block trace_die_notifier = {
6615         .notifier_call = trace_die_handler,
6616         .priority = 200
6617 };
6618
6619 /*
6620  * printk is limited to a max of 1024 characters; we really don't need it that big.
6621  * Nothing should be printing 1000 characters in one line anyway.
6622  */
6623 #define TRACE_MAX_PRINT         1000
6624
6625 /*
6626  * Define here KERN_TRACE so that we have one place to modify
6627  * it if we decide to change what log level the ftrace dump
6628  * should be at.
6629  */
6630 #define KERN_TRACE              KERN_EMERG
6631
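/*
 * Flush the contents of a trace_seq to the console at KERN_TRACE level,
 * clamped to TRACE_MAX_PRINT bytes, and re-initialize it for the next
 * line. Used by ftrace_dump() below.
 */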
6632 void
6633 trace_printk_seq(struct trace_seq *s)
6634 {
6635         /* Probably should print a warning here. */
6636         if (s->len >= TRACE_MAX_PRINT)
6637                 s->len = TRACE_MAX_PRINT;
6638
6639         /* Should already be NUL-terminated, but we are paranoid. */
6640         s->buffer[s->len] = 0;
6641
6642         printk(KERN_TRACE "%s", s->buffer);
6643
6644         trace_seq_init(s);
6645 }
6646
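/*
 * Set up an iterator over the global trace buffer, covering all CPUs,
 * for dumping outside of the normal debugfs read paths.
 */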
6647 void trace_init_global_iter(struct trace_iterator *iter)
6648 {
6649         iter->tr = &global_trace;
6650         iter->trace = iter->tr->current_trace;
6651         iter->cpu_file = RING_BUFFER_ALL_CPUS;
6652         iter->trace_buffer = &global_trace.trace_buffer;
6653
6654         if (iter->trace && iter->trace->open)
6655                 iter->trace->open(iter);
6656
6657         /* Annotate start of buffers if we had overruns */
6658         if (ring_buffer_overruns(iter->trace_buffer->buffer))
6659                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6660
6661         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6662         if (trace_clocks[iter->tr->clock_id].in_ns)
6663                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6664 }
6665
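/*
 * ftrace_dump - print the contents of the ring buffer to the console
 * @oops_dump_mode: DUMP_ALL dumps every CPU's buffer, DUMP_ORIG only the
 *                  buffer of the CPU that called us, DUMP_NONE skips the
 *                  dump entirely.
 *
 * Tracing is turned off before dumping (and stays off afterwards, see the
 * comment below) and only one dumper may run at a time.
 */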
6666 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6667 {
6668         /* use static because iter can be a bit big for the stack */
6669         static struct trace_iterator iter;
6670         static atomic_t dump_running;
6671         unsigned int old_userobj;
6672         unsigned long flags;
6673         int cnt = 0, cpu;
6674
6675         /* Only allow one dump user at a time. */
6676         if (atomic_inc_return(&dump_running) != 1) {
6677                 atomic_dec(&dump_running);
6678                 return;
6679         }
6680
6681         /*
6682          * Always turn off tracing when we dump.
6683          * We don't need to show trace output of what happens
6684          * between multiple crashes.
6685          *
6686          * If the user does a sysrq-z, then they can re-enable
6687          * tracing with echo 1 > tracing_on.
6688          */
6689         tracing_off();
6690
6691         local_irq_save(flags);
6692
6693         /* Simulate the iterator */
6694         trace_init_global_iter(&iter);
6695
6696         for_each_tracing_cpu(cpu) {
6697                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6698         }
6699
6700         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6701
6702         /* don't look at user memory in panic mode */
6703         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6704
6705         switch (oops_dump_mode) {
6706         case DUMP_ALL:
6707                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6708                 break;
6709         case DUMP_ORIG:
6710                 iter.cpu_file = raw_smp_processor_id();
6711                 break;
6712         case DUMP_NONE:
6713                 goto out_enable;
6714         default:
6715                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6716                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6717         }
6718
6719         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6720
6721         /* Did function tracer already get disabled? */
6722         if (ftrace_is_dead()) {
6723                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6724                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6725         }
6726
6727         /*
6728          * We need to stop all tracing on all CPUs to read
6729          * the next buffer. This is a bit expensive, but is
6730          * not done often. We fill all that we can read,
6731          * and then release the locks again.
6732          */
6733
6734         while (!trace_empty(&iter)) {
6735
6736                 if (!cnt)
6737                         printk(KERN_TRACE "---------------------------------\n");
6738
6739                 cnt++;
6740
6741                 /* reset all but tr, trace, and overruns */
6742                 memset(&iter.seq, 0,
6743                        sizeof(struct trace_iterator) -
6744                        offsetof(struct trace_iterator, seq));
6745                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6746                 iter.pos = -1;
6747
6748                 if (trace_find_next_entry_inc(&iter) != NULL) {
6749                         int ret;
6750
6751                         ret = print_trace_line(&iter);
6752                         if (ret != TRACE_TYPE_NO_CONSUME)
6753                                 trace_consume(&iter);
6754                 }
6755                 touch_nmi_watchdog();
6756
6757                 trace_printk_seq(&iter.seq);
6758         }
6759
6760         if (!cnt)
6761                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6762         else
6763                 printk(KERN_TRACE "---------------------------------\n");
6764
6765  out_enable:
6766         trace_flags |= old_userobj;
6767
6768         for_each_tracing_cpu(cpu) {
6769                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6770         }
6771         atomic_dec(&dump_running);
6772         local_irq_restore(flags);
6773 }
6774 EXPORT_SYMBOL_GPL(ftrace_dump);
6775
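/*
 * Early boot setup: allocate the tracing cpumasks, the temporary buffer
 * used by event triggers, the saved-cmdlines buffer and the global trace
 * buffers, then register the nop tracer and the panic/die notifiers.
 * Runs at early_initcall time, well before tracer_init_debugfs() creates
 * the debugfs files.
 */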
6776 __init static int tracer_alloc_buffers(void)
6777 {
6778         int ring_buf_size;
6779         int ret = -ENOMEM;
6780
6781
6782         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6783                 goto out;
6784
6785         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6786                 goto out_free_buffer_mask;
6787
6788         /* Only allocate trace_printk buffers if a trace_printk exists */
6789         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6790                 /* Must be called before global_trace.buffer is allocated */
6791                 trace_printk_init_buffers();
6792
6793         /* To save memory, keep the ring buffer size to its minimum */
6794         if (ring_buffer_expanded)
6795                 ring_buf_size = trace_buf_size;
6796         else
6797                 ring_buf_size = 1;
6798
6799         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6800         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6801
6802         raw_spin_lock_init(&global_trace.start_lock);
6803
6804         /* Used for event triggers */
6805         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6806         if (!temp_buffer)
6807                 goto out_free_cpumask;
6808
6809         if (trace_create_savedcmd() < 0)
6810                 goto out_free_temp_buffer;
6811
6812         /* TODO: make the number of buffers hot pluggable with CPUs */
6813         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6814                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6815                 WARN_ON(1);
6816                 goto out_free_savedcmd;
6817         }
6818
6819         if (global_trace.buffer_disabled)
6820                 tracing_off();
6821
6822         if (trace_boot_clock) {
6823                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6824                 if (ret < 0)
6825                         pr_warning("Trace clock %s not defined, going back to default\n",
6826                                    trace_boot_clock);
6827         }
6828
6829         /*
6830          * register_tracer() might reference current_trace, so it
6831          * needs to be set before we register anything. This is
6832          * just a bootstrap of current_trace anyway.
6833          */
6834         global_trace.current_trace = &nop_trace;
6835
6836         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6837
6838         ftrace_init_global_array_ops(&global_trace);
6839
6840         register_tracer(&nop_trace);
6841
6842         /* All seems OK, enable tracing */
6843         tracing_disabled = 0;
6844
6845         atomic_notifier_chain_register(&panic_notifier_list,
6846                                        &trace_panic_notifier);
6847
6848         register_die_notifier(&trace_die_notifier);
6849
6850         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6851
6852         INIT_LIST_HEAD(&global_trace.systems);
6853         INIT_LIST_HEAD(&global_trace.events);
6854         list_add(&global_trace.list, &ftrace_trace_arrays);
6855
6856         while (trace_boot_options) {
6857                 char *option;
6858
6859                 option = strsep(&trace_boot_options, ",");
6860                 trace_set_options(&global_trace, option);
6861         }
6862
6863         register_snapshot_cmd();
6864
6865         return 0;
6866
6867 out_free_savedcmd:
6868         free_saved_cmdlines_buffer(savedcmd);
6869 out_free_temp_buffer:
6870         ring_buffer_free(temp_buffer);
6871 out_free_cpumask:
6872         free_cpumask_var(global_trace.tracing_cpumask);
6873 out_free_buffer_mask:
6874         free_cpumask_var(tracing_buffer_mask);
6875 out:
6876         return ret;
6877 }
6878
6879 __init static int clear_boot_tracer(void)
6880 {
6881         /*
6882          * The default bootup tracer name lives in an init section.
6883          * This function is called at late_initcall time. If the boot
6884          * tracer was never registered, clear the pointer out, to prevent
6885          * a later registration from accessing the init memory that is
6886          * about to be freed.
6887          */
6888         if (!default_bootup_tracer)
6889                 return 0;
6890
6891         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6892                default_bootup_tracer);
6893         default_bootup_tracer = NULL;
6894
6895         return 0;
6896 }
6897
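/*
 * Boot ordering: the ring buffers are allocated first (early_initcall),
 * the debugfs control files appear once the filesystem initcalls have
 * run (fs_initcall), and finally any stale pointer into soon-to-be-freed
 * init memory is dropped (late_initcall).
 */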
6898 early_initcall(tracer_alloc_buffers);
6899 fs_initcall(tracer_init_debugfs);
6900 late_initcall(clear_boot_tracer);