ftrace: Add context level recursion bit checking
[firefly-linux-kernel-4.4.55.git] kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond)                    \
44         ({                                      \
45                 int ___r = cond;                \
46                 if (WARN_ON(___r))              \
47                         ftrace_kill();          \
48                 ___r;                           \
49         })
50
51 #define FTRACE_WARN_ON_ONCE(cond)               \
52         ({                                      \
53                 int ___r = cond;                \
54                 if (WARN_ON_ONCE(___r))         \
55                         ftrace_kill();          \
56                 ___r;                           \
57         })
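/*
 * Editorial note: both macros above are GNU statement expressions, so they
 * evaluate to the tested condition and can sit directly inside an if().
 * A minimal usage sketch (the surrounding check is hypothetical):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return -EINVAL;	// WARN fired and ftrace_kill() already ran
 */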
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
67 static struct ftrace_ops ftrace_list_end __read_mostly = {
68         .func           = ftrace_stub,
69         .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
70 };
71
72 /* ftrace_enabled is the switch that turns ftrace on or off */
73 int ftrace_enabled __read_mostly;
74 static int last_ftrace_enabled;
75
76 /* Quick disabling of function tracer. */
77 int function_trace_stop __read_mostly;
78
79 /* Current function tracing op */
80 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
81
82 /* List for set_ftrace_pid's pids. */
83 LIST_HEAD(ftrace_pids);
84 struct ftrace_pid {
85         struct list_head list;
86         struct pid *pid;
87 };
88
89 /*
90  * ftrace_disabled is set when an anomaly is discovered.
91  * ftrace_disabled is much stronger than ftrace_enabled.
92  */
93 static int ftrace_disabled __read_mostly;
94
95 static DEFINE_MUTEX(ftrace_lock);
96
97 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
98 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
99 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
100 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
101 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
102 static struct ftrace_ops global_ops;
103 static struct ftrace_ops control_ops;
104
105 #if ARCH_SUPPORTS_FTRACE_OPS
106 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
107                                  struct ftrace_ops *op, struct pt_regs *regs);
108 #else
109 /* See comment below, where ftrace_ops_list_func is defined */
110 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
111 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
112 #endif
113
114 /*
115  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
116  * can use rcu_dereference_raw() is that elements removed from this list
117  * are simply leaked, so there is no need to interact with a grace-period
118  * mechanism.  The rcu_dereference_raw() calls are needed to handle
119  * concurrent insertions into the ftrace_global_list.
120  *
121  * Silly Alpha and silly pointer-speculation compiler optimizations!
122  */
123 #define do_for_each_ftrace_op(op, list)                 \
124         op = rcu_dereference_raw(list);                 \
125         do
126
127 /*
128  * Optimized for just a single item in the list (as that is the normal case).
129  */
130 #define while_for_each_ftrace_op(op)                            \
131         while (likely(op = rcu_dereference_raw((op)->next)) &&  \
132                unlikely((op) != &ftrace_list_end))
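/*
 * Editorial sketch: the macro pair above is used like a normal loop, e.g.
 * (mirroring ftrace_global_list_func() below):
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 *
 * The list head is dereferenced once before the body runs, so the common
 * single-entry list executes the body exactly once. With a non-empty list
 * the ftrace_list_end sentinel terminates iteration without being called;
 * on an empty list the body runs on the sentinel, whose ftrace_stub
 * callback makes that harmless.
 */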
133
134 /**
135  * ftrace_nr_registered_ops - return number of ops registered
136  *
137  * Returns the number of ftrace_ops registered and tracing functions
138  */
139 int ftrace_nr_registered_ops(void)
140 {
141         struct ftrace_ops *ops;
142         int cnt = 0;
143
144         mutex_lock(&ftrace_lock);
145
146         for (ops = ftrace_ops_list;
147              ops != &ftrace_list_end; ops = ops->next)
148                 cnt++;
149
150         mutex_unlock(&ftrace_lock);
151
152         return cnt;
153 }
154
155 static void
156 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
157                         struct ftrace_ops *op, struct pt_regs *regs)
158 {
159         int bit;
160
161         if (in_interrupt()) {
162                 if (in_nmi())
163                         bit = TRACE_GLOBAL_NMI_BIT;
164
165                 else if (in_irq())
166                         bit = TRACE_GLOBAL_IRQ_BIT;
167                 else
168                         bit = TRACE_GLOBAL_SIRQ_BIT;
169         } else
170                 bit = TRACE_GLOBAL_BIT;
171
172         if (unlikely(trace_recursion_test(bit)))
173                 return;
174
175         trace_recursion_set(bit);
176         do_for_each_ftrace_op(op, ftrace_global_list) {
177                 op->func(ip, parent_ip, op, regs);
178         } while_for_each_ftrace_op(op);
179         trace_recursion_clear(bit);
180 }
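/*
 * Editorial note on the per-context recursion bits used above: a single
 * recursion flag would make an interrupt that arrives while tracing look
 * like recursion, silently dropping its events. One bit per context
 * (normal, softirq, irq, NMI) blocks only true same-context recursion.
 * A standalone sketch of the idea, with illustrative names (the kernel's
 * trace_recursion_*() helpers operate on a per-task field instead):
 *
 *	static unsigned long recursion;
 *
 *	static inline int ctx_enter(int bit)
 *	{
 *		if (test_and_set_bit(bit, &recursion))
 *			return -1;	// same-context reentry: bail out
 *		return bit;
 *	}
 *
 *	static inline void ctx_exit(int bit)
 *	{
 *		clear_bit(bit, &recursion);
 *	}
 */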
181
182 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
183                             struct ftrace_ops *op, struct pt_regs *regs)
184 {
185         if (!test_tsk_trace_trace(current))
186                 return;
187
188         ftrace_pid_function(ip, parent_ip, op, regs);
189 }
190
191 static void set_ftrace_pid_function(ftrace_func_t func)
192 {
193         /* do not set ftrace_pid_function to itself! */
194         if (func != ftrace_pid_func)
195                 ftrace_pid_function = func;
196 }
197
198 /**
199  * clear_ftrace_function - reset the ftrace function
200  *
201  * This NULLs the ftrace function and in essence stops
202  * tracing. There may be a short lag before all CPUs stop calling it.
203  */
204 void clear_ftrace_function(void)
205 {
206         ftrace_trace_function = ftrace_stub;
207         ftrace_pid_function = ftrace_stub;
208 }
209
210 static void control_ops_disable_all(struct ftrace_ops *ops)
211 {
212         int cpu;
213
214         for_each_possible_cpu(cpu)
215                 *per_cpu_ptr(ops->disabled, cpu) = 1;
216 }
217
218 static int control_ops_alloc(struct ftrace_ops *ops)
219 {
220         int __percpu *disabled;
221
222         disabled = alloc_percpu(int);
223         if (!disabled)
224                 return -ENOMEM;
225
226         ops->disabled = disabled;
227         control_ops_disable_all(ops);
228         return 0;
229 }
230
231 static void control_ops_free(struct ftrace_ops *ops)
232 {
233         free_percpu(ops->disabled);
234 }
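/*
 * Editorial sketch of the per-cpu "disabled" lifecycle above, assuming a
 * hypothetical caller registering a control ops:
 *
 *	struct ftrace_ops ops = { .func = my_cb, .flags = FTRACE_OPS_FL_CONTROL };
 *
 *	if (control_ops_alloc(&ops))	// alloc_percpu() + start disabled
 *		return -ENOMEM;
 *	...				// per-cpu enable/disable while live
 *	control_ops_free(&ops);		// free_percpu() once no CPU uses it
 */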
235
236 static void update_global_ops(void)
237 {
238         ftrace_func_t func;
239
240         /*
241          * If there's only one function registered, then call that
242          * function directly. Otherwise, we need to iterate over the
243          * registered callers.
244          */
245         if (ftrace_global_list == &ftrace_list_end ||
246             ftrace_global_list->next == &ftrace_list_end) {
247                 func = ftrace_global_list->func;
248                 /*
249                  * As we are calling the function directly,
250                  * if it does not have recursion protection,
251                  * the recursion-safe flag on global_ops needs
252                  * to be updated accordingly.
253                  */
254                 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
255                         global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
256                 else
257                         global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
258         } else {
259                 func = ftrace_global_list_func;
260                 /* The list has its own recursion protection. */
261                 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
262         }
263
264
265         /* If we filter on pids, update to use the pid function */
266         if (!list_empty(&ftrace_pids)) {
267                 set_ftrace_pid_function(func);
268                 func = ftrace_pid_func;
269         }
270
271         global_ops.func = func;
272 }
273
274 static void update_ftrace_function(void)
275 {
276         ftrace_func_t func;
277
278         update_global_ops();
279
280         /*
281          * If we are at the end of the list and this ops is
282          * recursion safe and not dynamic and the arch supports passing ops,
283          * then have the mcount trampoline call the function directly.
284          */
285         if (ftrace_ops_list == &ftrace_list_end ||
286             (ftrace_ops_list->next == &ftrace_list_end &&
287              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
288              (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
289              !FTRACE_FORCE_LIST_FUNC)) {
290                 /* Set the ftrace_ops that the arch callback uses */
291                 if (ftrace_ops_list == &global_ops)
292                         function_trace_op = ftrace_global_list;
293                 else
294                         function_trace_op = ftrace_ops_list;
295                 func = ftrace_ops_list->func;
296         } else {
297                 /* Just use the default ftrace_ops */
298                 function_trace_op = &ftrace_list_end;
299                 func = ftrace_ops_list_func;
300         }
301
302         ftrace_trace_function = func;
303 }
304
305 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
306 {
307         ops->next = *list;
308         /*
309          * We are entering ops into the list but another
310          * CPU might be walking that list. We need to make sure
311          * the ops->next pointer is valid before another CPU sees
312          * the ops pointer included into the list.
313          */
314         rcu_assign_pointer(*list, ops);
315 }
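/*
 * Editorial note: add_ftrace_ops() is the publish half of RCU's
 * publish/subscribe pattern; the lockless readers subscribe with
 * rcu_dereference_raw() in do_for_each_ftrace_op(). Schematically:
 *
 *	writer:	ops->next = *list;		// fully initialize first
 *		rcu_assign_pointer(*list, ops);	// barrier, then publish
 *	reader:	op = rcu_dereference_raw(list);	// sees old or new head,
 *		...				// never a half-built ops
 */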
316
317 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
318 {
319         struct ftrace_ops **p;
320
321         /*
322          * If we are removing the last function, then simply point
323          * to the ftrace_stub.
324          */
325         if (*list == ops && ops->next == &ftrace_list_end) {
326                 *list = &ftrace_list_end;
327                 return 0;
328         }
329
330         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
331                 if (*p == ops)
332                         break;
333
334         if (*p != ops)
335                 return -1;
336
337         *p = (*p)->next;
338         return 0;
339 }
340
341 static void add_ftrace_list_ops(struct ftrace_ops **list,
342                                 struct ftrace_ops *main_ops,
343                                 struct ftrace_ops *ops)
344 {
345         int first = *list == &ftrace_list_end;
346         add_ftrace_ops(list, ops);
347         if (first)
348                 add_ftrace_ops(&ftrace_ops_list, main_ops);
349 }
350
351 static int remove_ftrace_list_ops(struct ftrace_ops **list,
352                                   struct ftrace_ops *main_ops,
353                                   struct ftrace_ops *ops)
354 {
355         int ret = remove_ftrace_ops(list, ops);
356         if (!ret && *list == &ftrace_list_end)
357                 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
358         return ret;
359 }
360
361 static int __register_ftrace_function(struct ftrace_ops *ops)
362 {
363         if (unlikely(ftrace_disabled))
364                 return -ENODEV;
365
366         if (FTRACE_WARN_ON(ops == &global_ops))
367                 return -EINVAL;
368
369         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
370                 return -EBUSY;
371
372         /* We don't support both control and global flags set. */
373         if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
374                 return -EINVAL;
375
376 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
377         /*
378          * If the ftrace_ops specifies SAVE_REGS, then it can only be used
379          * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
380          * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
381          */
382         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
383             !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
384                 return -EINVAL;
385
386         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
387                 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
388 #endif
389
390         if (!core_kernel_data((unsigned long)ops))
391                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
392
393         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
394                 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
395                 ops->flags |= FTRACE_OPS_FL_ENABLED;
396         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
397                 if (control_ops_alloc(ops))
398                         return -ENOMEM;
399                 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
400         } else
401                 add_ftrace_ops(&ftrace_ops_list, ops);
402
403         if (ftrace_enabled)
404                 update_ftrace_function();
405
406         return 0;
407 }
408
409 static int __unregister_ftrace_function(struct ftrace_ops *ops)
410 {
411         int ret;
412
413         if (ftrace_disabled)
414                 return -ENODEV;
415
416         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
417                 return -EBUSY;
418
419         if (FTRACE_WARN_ON(ops == &global_ops))
420                 return -EINVAL;
421
422         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
423                 ret = remove_ftrace_list_ops(&ftrace_global_list,
424                                              &global_ops, ops);
425                 if (!ret)
426                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
427         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
428                 ret = remove_ftrace_list_ops(&ftrace_control_list,
429                                              &control_ops, ops);
430                 if (!ret) {
431                         /*
432                          * The ftrace_ops is now removed from the list,
433                          * so there'll be no new users. We must ensure
434                          * all current users are done before we free
435                          * the control data.
436                          */
437                         synchronize_sched();
438                         control_ops_free(ops);
439                 }
440         } else
441                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
442
443         if (ret < 0)
444                 return ret;
445
446         if (ftrace_enabled)
447                 update_ftrace_function();
448
449         /*
450          * Dynamic ops may be freed, so we must make sure that all
451          * callers are done before leaving this function.
452          */
453         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
454                 synchronize_sched();
455
456         return 0;
457 }
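/*
 * Editorial sketch of the expected lifecycle through the two functions
 * above, via the public wrappers defined later in this file (the callback
 * body is illustrative):
 *
 *	static void my_cb(unsigned long ip, unsigned long parent_ip,
 *			  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called for every traced function while registered
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_cb,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);	// may synchronize_sched()
 */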
458
459 static void ftrace_update_pid_func(void)
460 {
461         /* Only do something if we are tracing something */
462         if (ftrace_trace_function == ftrace_stub)
463                 return;
464
465         update_ftrace_function();
466 }
467
468 #ifdef CONFIG_FUNCTION_PROFILER
469 struct ftrace_profile {
470         struct hlist_node               node;
471         unsigned long                   ip;
472         unsigned long                   counter;
473 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
474         unsigned long long              time;
475         unsigned long long              time_squared;
476 #endif
477 };
478
479 struct ftrace_profile_page {
480         struct ftrace_profile_page      *next;
481         unsigned long                   index;
482         struct ftrace_profile           records[];
483 };
484
485 struct ftrace_profile_stat {
486         atomic_t                        disabled;
487         struct hlist_head               *hash;
488         struct ftrace_profile_page      *pages;
489         struct ftrace_profile_page      *start;
490         struct tracer_stat              stat;
491 };
492
493 #define PROFILE_RECORDS_SIZE                                            \
494         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
495
496 #define PROFILES_PER_PAGE                                       \
497         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
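/*
 * Editorial example: on a typical 64-bit build with 4 KiB pages and
 * CONFIG_FUNCTION_GRAPH_TRACER, struct ftrace_profile is 48 bytes
 * (16-byte hlist_node + ip + counter + time + time_squared) and the
 * page header is 16 bytes, so PROFILES_PER_PAGE = (4096 - 16) / 48
 * = 85 records per page. Exact figures depend on arch and config.
 */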
498
499 static int ftrace_profile_bits __read_mostly;
500 static int ftrace_profile_enabled __read_mostly;
501
502 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
503 static DEFINE_MUTEX(ftrace_profile_lock);
504
505 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
506
507 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
508
509 static void *
510 function_stat_next(void *v, int idx)
511 {
512         struct ftrace_profile *rec = v;
513         struct ftrace_profile_page *pg;
514
515         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
516
517  again:
518         if (idx != 0)
519                 rec++;
520
521         if ((void *)rec >= (void *)&pg->records[pg->index]) {
522                 pg = pg->next;
523                 if (!pg)
524                         return NULL;
525                 rec = &pg->records[0];
526                 if (!rec->counter)
527                         goto again;
528         }
529
530         return rec;
531 }
532
533 static void *function_stat_start(struct tracer_stat *trace)
534 {
535         struct ftrace_profile_stat *stat =
536                 container_of(trace, struct ftrace_profile_stat, stat);
537
538         if (!stat || !stat->start)
539                 return NULL;
540
541         return function_stat_next(&stat->start->records[0], 0);
542 }
543
544 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
545 /* function graph compares on total time */
546 static int function_stat_cmp(void *p1, void *p2)
547 {
548         struct ftrace_profile *a = p1;
549         struct ftrace_profile *b = p2;
550
551         if (a->time < b->time)
552                 return -1;
553         if (a->time > b->time)
554                 return 1;
555         else
556                 return 0;
557 }
558 #else
559 /* without function graph, compare against hit counts */
560 static int function_stat_cmp(void *p1, void *p2)
561 {
562         struct ftrace_profile *a = p1;
563         struct ftrace_profile *b = p2;
564
565         if (a->counter < b->counter)
566                 return -1;
567         if (a->counter > b->counter)
568                 return 1;
569         else
570                 return 0;
571 }
572 #endif
573
574 static int function_stat_headers(struct seq_file *m)
575 {
576 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
577         seq_printf(m, "  Function                               "
578                    "Hit    Time            Avg             s^2\n"
579                       "  --------                               "
580                    "---    ----            ---             ---\n");
581 #else
582         seq_printf(m, "  Function                               Hit\n"
583                       "  --------                               ---\n");
584 #endif
585         return 0;
586 }
587
588 static int function_stat_show(struct seq_file *m, void *v)
589 {
590         struct ftrace_profile *rec = v;
591         char str[KSYM_SYMBOL_LEN];
592         int ret = 0;
593 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
594         static struct trace_seq s;
595         unsigned long long avg;
596         unsigned long long stddev;
597 #endif
598         mutex_lock(&ftrace_profile_lock);
599
600         /* we raced with function_profile_reset() */
601         if (unlikely(rec->counter == 0)) {
602                 ret = -EBUSY;
603                 goto out;
604         }
605
606         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
607         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
608
609 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
610         seq_printf(m, "    ");
611         avg = rec->time;
612         do_div(avg, rec->counter);
613
614         /* Sample variance (s^2) */
615         if (rec->counter <= 1)
616                 stddev = 0;
617         else {
618                 stddev = rec->time_squared - rec->counter * avg * avg;
619                 /*
620                  * Divide only 1000 for ns^2 -> us^2 conversion.
621                  * trace_print_graph_duration will divide 1000 again.
622                  */
623                 do_div(stddev, (rec->counter - 1) * 1000);
624         }
625
626         trace_seq_init(&s);
627         trace_print_graph_duration(rec->time, &s);
628         trace_seq_puts(&s, "    ");
629         trace_print_graph_duration(avg, &s);
630         trace_seq_puts(&s, "    ");
631         trace_print_graph_duration(stddev, &s);
632         trace_print_seq(m, &s);
633 #endif
634         seq_putc(m, '\n');
635 out:
636         mutex_unlock(&ftrace_profile_lock);
637
638         return ret;
639 }
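/*
 * Editorial note: the "s^2" column above is the sample variance of the
 * per-call times, computed with the shortcut
 *
 *	s^2 = (sum(t_i^2) - n * avg^2) / (n - 1)
 *
 * The do_div() by (n - 1) * 1000 pre-scales the ns^2 value once, so the
 * second /1000 inside trace_print_graph_duration() yields us^2.
 */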
640
641 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
642 {
643         struct ftrace_profile_page *pg;
644
645         pg = stat->pages = stat->start;
646
647         while (pg) {
648                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
649                 pg->index = 0;
650                 pg = pg->next;
651         }
652
653         memset(stat->hash, 0,
654                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
655 }
656
657 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
658 {
659         struct ftrace_profile_page *pg;
660         int functions;
661         int pages;
662         int i;
663
664         /* If we already allocated, do nothing */
665         if (stat->pages)
666                 return 0;
667
668         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
669         if (!stat->pages)
670                 return -ENOMEM;
671
672 #ifdef CONFIG_DYNAMIC_FTRACE
673         functions = ftrace_update_tot_cnt;
674 #else
675         /*
676          * We do not know the number of functions that exist because
677          * dynamic tracing is what counts them. With past experience
678          * we have around 20K functions. That should be more than enough.
679          * It is highly unlikely we will execute every function in
680          * the kernel.
681          */
682         functions = 20000;
683 #endif
684
685         pg = stat->start = stat->pages;
686
687         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
688
689         for (i = 0; i < pages; i++) {
690                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
691                 if (!pg->next)
692                         goto out_free;
693                 pg = pg->next;
694         }
695
696         return 0;
697
698  out_free:
699         pg = stat->start;
700         while (pg) {
701                 unsigned long tmp = (unsigned long)pg;
702
703                 pg = pg->next;
704                 free_page(tmp);
705         }
706
707         free_page((unsigned long)stat->pages);
708         stat->pages = NULL;
709         stat->start = NULL;
710
711         return -ENOMEM;
712 }
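/*
 * Editorial example, continuing the arithmetic from PROFILES_PER_PAGE:
 * with the 20000-function estimate and 85 records per page,
 * DIV_ROUND_UP(20000, 85) = 236 extra pages, i.e. roughly 944 KiB of
 * profile records per CPU on a 64-bit, 4 KiB-page build. Illustrative
 * figures only; CONFIG_DYNAMIC_FTRACE uses the real function count.
 */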
713
714 static int ftrace_profile_init_cpu(int cpu)
715 {
716         struct ftrace_profile_stat *stat;
717         int size;
718
719         stat = &per_cpu(ftrace_profile_stats, cpu);
720
721         if (stat->hash) {
722                 /* If the profile is already created, simply reset it */
723                 ftrace_profile_reset(stat);
724                 return 0;
725         }
726
727         /*
728          * We are profiling all functions, but usually only a few thousand
729          * functions are hit. We'll make a hash of 1024 items.
730          */
731         size = FTRACE_PROFILE_HASH_SIZE;
732
733         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
734
735         if (!stat->hash)
736                 return -ENOMEM;
737
738         if (!ftrace_profile_bits) {
739                 size--;
740
741                 for (; size; size >>= 1)
742                         ftrace_profile_bits++;
743         }
744
745         /* Preallocate the function profiling pages */
746         if (ftrace_profile_pages_init(stat) < 0) {
747                 kfree(stat->hash);
748                 stat->hash = NULL;
749                 return -ENOMEM;
750         }
751
752         return 0;
753 }
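/*
 * Editorial note: the loop in ftrace_profile_init_cpu() derives log2 of
 * the hash size. With FTRACE_PROFILE_HASH_SIZE = 1024, size-- yields
 * 1023 (0x3ff), which takes ten right shifts to reach zero, so
 * ftrace_profile_bits becomes 10, the value hash_long() needs below.
 */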
754
755 static int ftrace_profile_init(void)
756 {
757         int cpu;
758         int ret = 0;
759
760         for_each_online_cpu(cpu) {
761                 ret = ftrace_profile_init_cpu(cpu);
762                 if (ret)
763                         break;
764         }
765
766         return ret;
767 }
768
769 /* interrupts must be disabled */
770 static struct ftrace_profile *
771 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
772 {
773         struct ftrace_profile *rec;
774         struct hlist_head *hhd;
775         struct hlist_node *n;
776         unsigned long key;
777
778         key = hash_long(ip, ftrace_profile_bits);
779         hhd = &stat->hash[key];
780
781         if (hlist_empty(hhd))
782                 return NULL;
783
784         hlist_for_each_entry_rcu(rec, n, hhd, node) {
785                 if (rec->ip == ip)
786                         return rec;
787         }
788
789         return NULL;
790 }
791
792 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
793                                struct ftrace_profile *rec)
794 {
795         unsigned long key;
796
797         key = hash_long(rec->ip, ftrace_profile_bits);
798         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
799 }
800
801 /*
802  * The memory is already allocated; this simply finds a new record to use.
803  */
804 static struct ftrace_profile *
805 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
806 {
807         struct ftrace_profile *rec = NULL;
808
809         /* prevent recursion (from NMIs) */
810         if (atomic_inc_return(&stat->disabled) != 1)
811                 goto out;
812
813         /*
814          * Try to find the function again since an NMI
815          * could have added it
816          */
817         rec = ftrace_find_profiled_func(stat, ip);
818         if (rec)
819                 goto out;
820
821         if (stat->pages->index == PROFILES_PER_PAGE) {
822                 if (!stat->pages->next)
823                         goto out;
824                 stat->pages = stat->pages->next;
825         }
826
827         rec = &stat->pages->records[stat->pages->index++];
828         rec->ip = ip;
829         ftrace_add_profile(stat, rec);
830
831  out:
832         atomic_dec(&stat->disabled);
833
834         return rec;
835 }
836
837 static void
838 function_profile_call(unsigned long ip, unsigned long parent_ip,
839                       struct ftrace_ops *ops, struct pt_regs *regs)
840 {
841         struct ftrace_profile_stat *stat;
842         struct ftrace_profile *rec;
843         unsigned long flags;
844
845         if (!ftrace_profile_enabled)
846                 return;
847
848         local_irq_save(flags);
849
850         stat = &__get_cpu_var(ftrace_profile_stats);
851         if (!stat->hash || !ftrace_profile_enabled)
852                 goto out;
853
854         rec = ftrace_find_profiled_func(stat, ip);
855         if (!rec) {
856                 rec = ftrace_profile_alloc(stat, ip);
857                 if (!rec)
858                         goto out;
859         }
860
861         rec->counter++;
862  out:
863         local_irq_restore(flags);
864 }
865
866 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
867 static int profile_graph_entry(struct ftrace_graph_ent *trace)
868 {
869         function_profile_call(trace->func, 0, NULL, NULL);
870         return 1;
871 }
872
873 static void profile_graph_return(struct ftrace_graph_ret *trace)
874 {
875         struct ftrace_profile_stat *stat;
876         unsigned long long calltime;
877         struct ftrace_profile *rec;
878         unsigned long flags;
879
880         local_irq_save(flags);
881         stat = &__get_cpu_var(ftrace_profile_stats);
882         if (!stat->hash || !ftrace_profile_enabled)
883                 goto out;
884
885         /* If the calltime was zeroed, ignore it */
886         if (!trace->calltime)
887                 goto out;
888
889         calltime = trace->rettime - trace->calltime;
890
891         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
892                 int index;
893
894                 index = trace->depth;
895
896                 /* Append this call time to the parent time to subtract */
897                 if (index)
898                         current->ret_stack[index - 1].subtime += calltime;
899
900                 if (current->ret_stack[index].subtime < calltime)
901                         calltime -= current->ret_stack[index].subtime;
902                 else
903                         calltime = 0;
904         }
905
906         rec = ftrace_find_profiled_func(stat, trace->func);
907         if (rec) {
908                 rec->time += calltime;
909                 rec->time_squared += calltime * calltime;
910         }
911
912  out:
913         local_irq_restore(flags);
914 }
915
916 static int register_ftrace_profiler(void)
917 {
918         return register_ftrace_graph(&profile_graph_return,
919                                      &profile_graph_entry);
920 }
921
922 static void unregister_ftrace_profiler(void)
923 {
924         unregister_ftrace_graph();
925 }
926 #else
927 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
928         .func           = function_profile_call,
929         .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
930 };
931
932 static int register_ftrace_profiler(void)
933 {
934         return register_ftrace_function(&ftrace_profile_ops);
935 }
936
937 static void unregister_ftrace_profiler(void)
938 {
939         unregister_ftrace_function(&ftrace_profile_ops);
940 }
941 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
942
943 static ssize_t
944 ftrace_profile_write(struct file *filp, const char __user *ubuf,
945                      size_t cnt, loff_t *ppos)
946 {
947         unsigned long val;
948         int ret;
949
950         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
951         if (ret)
952                 return ret;
953
954         val = !!val;
955
956         mutex_lock(&ftrace_profile_lock);
957         if (ftrace_profile_enabled ^ val) {
958                 if (val) {
959                         ret = ftrace_profile_init();
960                         if (ret < 0) {
961                                 cnt = ret;
962                                 goto out;
963                         }
964
965                         ret = register_ftrace_profiler();
966                         if (ret < 0) {
967                                 cnt = ret;
968                                 goto out;
969                         }
970                         ftrace_profile_enabled = 1;
971                 } else {
972                         ftrace_profile_enabled = 0;
973                         /*
974                          * unregister_ftrace_profiler calls stop_machine
975                          * so this acts like an synchronize_sched.
976                          */
977                         unregister_ftrace_profiler();
978                 }
979         }
980  out:
981         mutex_unlock(&ftrace_profile_lock);
982
983         *ppos += cnt;
984
985         return cnt;
986 }
987
988 static ssize_t
989 ftrace_profile_read(struct file *filp, char __user *ubuf,
990                      size_t cnt, loff_t *ppos)
991 {
992         char buf[64];           /* big enough to hold a number */
993         int r;
994
995         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
996         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
997 }
998
999 static const struct file_operations ftrace_profile_fops = {
1000         .open           = tracing_open_generic,
1001         .read           = ftrace_profile_read,
1002         .write          = ftrace_profile_write,
1003         .llseek         = default_llseek,
1004 };
1005
1006 /* used to initialize the real stat files */
1007 static struct tracer_stat function_stats __initdata = {
1008         .name           = "functions",
1009         .stat_start     = function_stat_start,
1010         .stat_next      = function_stat_next,
1011         .stat_cmp       = function_stat_cmp,
1012         .stat_headers   = function_stat_headers,
1013         .stat_show      = function_stat_show
1014 };
1015
1016 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1017 {
1018         struct ftrace_profile_stat *stat;
1019         struct dentry *entry;
1020         char *name;
1021         int ret;
1022         int cpu;
1023
1024         for_each_possible_cpu(cpu) {
1025                 stat = &per_cpu(ftrace_profile_stats, cpu);
1026
1027                 /* allocate enough for function name + cpu number */
1028                 name = kmalloc(32, GFP_KERNEL);
1029                 if (!name) {
1030                         /*
1031                          * The files created are permanent; if something fails
1032                          * here we deliberately do not free the memory.
1033                          */
1034                         WARN(1,
1035                              "Could not allocate stat file for cpu %d\n",
1036                              cpu);
1037                         return;
1038                 }
1039                 stat->stat = function_stats;
1040                 snprintf(name, 32, "function%d", cpu);
1041                 stat->stat.name = name;
1042                 ret = register_stat_tracer(&stat->stat);
1043                 if (ret) {
1044                         WARN(1,
1045                              "Could not register function stat for cpu %d\n",
1046                              cpu);
1047                         kfree(name);
1048                         return;
1049                 }
1050         }
1051
1052         entry = debugfs_create_file("function_profile_enabled", 0644,
1053                                     d_tracer, NULL, &ftrace_profile_fops);
1054         if (!entry)
1055                 pr_warning("Could not create debugfs "
1056                            "'function_profile_enabled' entry\n");
1057 }
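/*
 * Editorial usage note for the files created above: with debugfs mounted
 * at /sys/kernel/debug, the profiler is toggled and read back with
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0   # per-cpu stats
 */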
1058
1059 #else /* CONFIG_FUNCTION_PROFILER */
1060 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1061 {
1062 }
1063 #endif /* CONFIG_FUNCTION_PROFILER */
1064
1065 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1066
1067 #ifdef CONFIG_DYNAMIC_FTRACE
1068
1069 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1070 # error Dynamic ftrace depends on MCOUNT_RECORD
1071 #endif
1072
1073 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1074
1075 struct ftrace_func_probe {
1076         struct hlist_node       node;
1077         struct ftrace_probe_ops *ops;
1078         unsigned long           flags;
1079         unsigned long           ip;
1080         void                    *data;
1081         struct rcu_head         rcu;
1082 };
1083
1084 struct ftrace_func_entry {
1085         struct hlist_node hlist;
1086         unsigned long ip;
1087 };
1088
1089 struct ftrace_hash {
1090         unsigned long           size_bits;
1091         struct hlist_head       *buckets;
1092         unsigned long           count;
1093         struct rcu_head         rcu;
1094 };
1095
1096 /*
1097  * We make these constant because no one should touch them,
1098  * but they are used as the default "empty hash", to avoid allocating
1099  * it all the time. These live in a read-only section so that if
1100  * anyone does try to modify them, it will cause an exception.
1101  */
1102 static const struct hlist_head empty_buckets[1];
1103 static const struct ftrace_hash empty_hash = {
1104         .buckets = (struct hlist_head *)empty_buckets,
1105 };
1106 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
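/*
 * Editorial note: EMPTY_HASH is a shared immutable sentinel, so writers
 * must swap in a freshly allocated hash rather than modify it in place,
 * e.g. (illustrative):
 *
 *	if (ops->filter_hash == EMPTY_HASH)
 *		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *
 * free_ftrace_hash() below also checks for EMPTY_HASH and never frees it.
 */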
1107
1108 static struct ftrace_ops global_ops = {
1109         .func                   = ftrace_stub,
1110         .notrace_hash           = EMPTY_HASH,
1111         .filter_hash            = EMPTY_HASH,
1112         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
1113 };
1114
1115 static DEFINE_MUTEX(ftrace_regex_lock);
1116
1117 struct ftrace_page {
1118         struct ftrace_page      *next;
1119         struct dyn_ftrace       *records;
1120         int                     index;
1121         int                     size;
1122 };
1123
1124 static struct ftrace_page *ftrace_new_pgs;
1125
1126 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1127 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1128
1129 /* estimate from running different kernels */
1130 #define NR_TO_INIT              10000
1131
1132 static struct ftrace_page       *ftrace_pages_start;
1133 static struct ftrace_page       *ftrace_pages;
1134
1135 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1136 {
1137         return !hash || !hash->count;
1138 }
1139
1140 static struct ftrace_func_entry *
1141 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1142 {
1143         unsigned long key;
1144         struct ftrace_func_entry *entry;
1145         struct hlist_head *hhd;
1146         struct hlist_node *n;
1147
1148         if (ftrace_hash_empty(hash))
1149                 return NULL;
1150
1151         if (hash->size_bits > 0)
1152                 key = hash_long(ip, hash->size_bits);
1153         else
1154                 key = 0;
1155
1156         hhd = &hash->buckets[key];
1157
1158         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1159                 if (entry->ip == ip)
1160                         return entry;
1161         }
1162         return NULL;
1163 }
1164
1165 static void __add_hash_entry(struct ftrace_hash *hash,
1166                              struct ftrace_func_entry *entry)
1167 {
1168         struct hlist_head *hhd;
1169         unsigned long key;
1170
1171         if (hash->size_bits)
1172                 key = hash_long(entry->ip, hash->size_bits);
1173         else
1174                 key = 0;
1175
1176         hhd = &hash->buckets[key];
1177         hlist_add_head(&entry->hlist, hhd);
1178         hash->count++;
1179 }
1180
1181 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1182 {
1183         struct ftrace_func_entry *entry;
1184
1185         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1186         if (!entry)
1187                 return -ENOMEM;
1188
1189         entry->ip = ip;
1190         __add_hash_entry(hash, entry);
1191
1192         return 0;
1193 }
1194
1195 static void
1196 free_hash_entry(struct ftrace_hash *hash,
1197                   struct ftrace_func_entry *entry)
1198 {
1199         hlist_del(&entry->hlist);
1200         kfree(entry);
1201         hash->count--;
1202 }
1203
1204 static void
1205 remove_hash_entry(struct ftrace_hash *hash,
1206                   struct ftrace_func_entry *entry)
1207 {
1208         hlist_del(&entry->hlist);
1209         hash->count--;
1210 }
1211
1212 static void ftrace_hash_clear(struct ftrace_hash *hash)
1213 {
1214         struct hlist_head *hhd;
1215         struct hlist_node *tp, *tn;
1216         struct ftrace_func_entry *entry;
1217         int size = 1 << hash->size_bits;
1218         int i;
1219
1220         if (!hash->count)
1221                 return;
1222
1223         for (i = 0; i < size; i++) {
1224                 hhd = &hash->buckets[i];
1225                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1226                         free_hash_entry(hash, entry);
1227         }
1228         FTRACE_WARN_ON(hash->count);
1229 }
1230
1231 static void free_ftrace_hash(struct ftrace_hash *hash)
1232 {
1233         if (!hash || hash == EMPTY_HASH)
1234                 return;
1235         ftrace_hash_clear(hash);
1236         kfree(hash->buckets);
1237         kfree(hash);
1238 }
1239
1240 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1241 {
1242         struct ftrace_hash *hash;
1243
1244         hash = container_of(rcu, struct ftrace_hash, rcu);
1245         free_ftrace_hash(hash);
1246 }
1247
1248 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1249 {
1250         if (!hash || hash == EMPTY_HASH)
1251                 return;
1252         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1253 }
1254
1255 void ftrace_free_filter(struct ftrace_ops *ops)
1256 {
1257         free_ftrace_hash(ops->filter_hash);
1258         free_ftrace_hash(ops->notrace_hash);
1259 }
1260
1261 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1262 {
1263         struct ftrace_hash *hash;
1264         int size;
1265
1266         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1267         if (!hash)
1268                 return NULL;
1269
1270         size = 1 << size_bits;
1271         hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1272
1273         if (!hash->buckets) {
1274                 kfree(hash);
1275                 return NULL;
1276         }
1277
1278         hash->size_bits = size_bits;
1279
1280         return hash;
1281 }
1282
1283 static struct ftrace_hash *
1284 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1285 {
1286         struct ftrace_func_entry *entry;
1287         struct ftrace_hash *new_hash;
1288         struct hlist_node *tp;
1289         int size;
1290         int ret;
1291         int i;
1292
1293         new_hash = alloc_ftrace_hash(size_bits);
1294         if (!new_hash)
1295                 return NULL;
1296
1297         /* Empty hash? */
1298         if (ftrace_hash_empty(hash))
1299                 return new_hash;
1300
1301         size = 1 << hash->size_bits;
1302         for (i = 0; i < size; i++) {
1303                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1304                         ret = add_hash_entry(new_hash, entry->ip);
1305                         if (ret < 0)
1306                                 goto free_hash;
1307                 }
1308         }
1309
1310         FTRACE_WARN_ON(new_hash->count != hash->count);
1311
1312         return new_hash;
1313
1314  free_hash:
1315         free_ftrace_hash(new_hash);
1316         return NULL;
1317 }
1318
1319 static void
1320 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1321 static void
1322 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1323
1324 static int
1325 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1326                  struct ftrace_hash **dst, struct ftrace_hash *src)
1327 {
1328         struct ftrace_func_entry *entry;
1329         struct hlist_node *tp, *tn;
1330         struct hlist_head *hhd;
1331         struct ftrace_hash *old_hash;
1332         struct ftrace_hash *new_hash;
1333         unsigned long key;
1334         int size = src->count;
1335         int bits = 0;
1336         int ret;
1337         int i;
1338
1339         /*
1340          * Remove the current set, update the hash and add
1341          * them back.
1342          */
1343         ftrace_hash_rec_disable(ops, enable);
1344
1345         /*
1346          * If the new source is empty, just free dst and assign it
1347          * the empty_hash.
1348          */
1349         if (!src->count) {
1350                 free_ftrace_hash_rcu(*dst);
1351                 rcu_assign_pointer(*dst, EMPTY_HASH);
1352                 /* still need to update the function records */
1353                 ret = 0;
1354                 goto out;
1355         }
1356
1357         /*
1358          * Make the hash size about half the number of entries found
1359          */
1360         for (size /= 2; size; size >>= 1)
1361                 bits++;
1362
1363         /* Don't allocate too much */
1364         if (bits > FTRACE_HASH_MAX_BITS)
1365                 bits = FTRACE_HASH_MAX_BITS;
1366
1367         ret = -ENOMEM;
1368         new_hash = alloc_ftrace_hash(bits);
1369         if (!new_hash)
1370                 goto out;
1371
1372         size = 1 << src->size_bits;
1373         for (i = 0; i < size; i++) {
1374                 hhd = &src->buckets[i];
1375                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1376                         if (bits > 0)
1377                                 key = hash_long(entry->ip, bits);
1378                         else
1379                                 key = 0;
1380                         remove_hash_entry(src, entry);
1381                         __add_hash_entry(new_hash, entry);
1382                 }
1383         }
1384
1385         old_hash = *dst;
1386         rcu_assign_pointer(*dst, new_hash);
1387         free_ftrace_hash_rcu(old_hash);
1388
1389         ret = 0;
1390  out:
1391         /*
1392          * Enable regardless of ret:
1393          *  On success, we enable the new hash.
1394          *  On failure, we re-enable the original hash.
1395          */
1396         ftrace_hash_rec_enable(ops, enable);
1397
1398         return ret;
1399 }
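/*
 * Editorial example of the sizing loop above: with src->count = 100,
 * size starts at 50 and needs six right shifts to reach zero, so
 * bits = 6 and the new hash gets 64 buckets, roughly half as many
 * buckets as entries, capped at FTRACE_HASH_MAX_BITS (12).
 */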
1400
1401 /*
1402  * Test the hashes for this ops to see if we want to call
1403  * the ops->func or not.
1404  *
1405  * It's a match if the ip is in the ops->filter_hash or
1406  * the filter_hash does not exist or is empty,
1407  *  AND
1408  * the ip is not in the ops->notrace_hash.
1409  *
1410  * This needs to be called with preemption disabled as
1411  * the hashes are freed with call_rcu_sched().
1412  */
1413 static int
1414 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1415 {
1416         struct ftrace_hash *filter_hash;
1417         struct ftrace_hash *notrace_hash;
1418         int ret;
1419
1420         filter_hash = rcu_dereference_raw(ops->filter_hash);
1421         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1422
1423         if ((ftrace_hash_empty(filter_hash) ||
1424              ftrace_lookup_ip(filter_hash, ip)) &&
1425             (ftrace_hash_empty(notrace_hash) ||
1426              !ftrace_lookup_ip(notrace_hash, ip)))
1427                 ret = 1;
1428         else
1429                 ret = 0;
1430
1431         return ret;
1432 }
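/*
 * Editorial summary of the predicate above (F = filter_hash,
 * N = notrace_hash; an empty F matches every ip):
 *
 *	F empty or ip in F,  ip not in N  -> call ops->func
 *	F empty or ip in F,  ip in N      -> skip
 *	F non-empty, ip not in F          -> skip
 */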
1433
1434 /*
1435  * This is a double for loop. Do not use 'break' to break out of it;
1436  * you must use a goto.
1437  */
1438 #define do_for_each_ftrace_rec(pg, rec)                                 \
1439         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1440                 int _____i;                                             \
1441                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1442                         rec = &pg->records[_____i];
1443
1444 #define while_for_each_ftrace_rec()             \
1445                 }                               \
1446         }
1447
1448
1449 static int ftrace_cmp_recs(const void *a, const void *b)
1450 {
1451         const struct dyn_ftrace *key = a;
1452         const struct dyn_ftrace *rec = b;
1453
1454         if (key->flags < rec->ip)
1455                 return -1;
1456         if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1457                 return 1;
1458         return 0;
1459 }
1460
1461 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1462 {
1463         struct ftrace_page *pg;
1464         struct dyn_ftrace *rec;
1465         struct dyn_ftrace key;
1466
1467         key.ip = start;
1468         key.flags = end;        /* overload flags, as it is unsigned long */
1469
1470         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1471                 if (end < pg->records[0].ip ||
1472                     start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1473                         continue;
1474                 rec = bsearch(&key, pg->records, pg->index,
1475                               sizeof(struct dyn_ftrace),
1476                               ftrace_cmp_recs);
1477                 if (rec)
1478                         return rec->ip;
1479         }
1480
1481         return 0;
1482 }
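/*
 * Editorial note: the bsearch key above overloads ->flags as the end of
 * the searched range (see ftrace_cmp_recs()). A record compares equal
 * when [key->ip, key->flags] overlaps [rec->ip, rec->ip + MCOUNT_INSN_SIZE),
 * which lets exact lookups (start == end) and range reservations share
 * one comparator.
 */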
1483
1484 /**
1485  * ftrace_location - return true if the ip given is a traced location
1486  * @ip: the instruction pointer to check
1487  *
1488  * Returns rec->ip if the given @ip points to an ftrace location.
1489  * That is, the instruction that is either a NOP or call to
1490  * the function tracer. It checks the ftrace internal tables to
1491  * determine if the address belongs or not.
1492  */
1493 unsigned long ftrace_location(unsigned long ip)
1494 {
1495         return ftrace_location_range(ip, ip);
1496 }
1497
1498 /**
1499  * ftrace_text_reserved - return true if range contains an ftrace location
1500  * @start: start of range to search
1501  * @end: end of range to search (inclusive). @end points to the last byte to check.
1502  *
1503  * Returns 1 if the range from @start to @end contains an ftrace location.
1504  * That is, the instruction that is either a NOP or call to
1505  * the function tracer. It checks the ftrace internal tables to
1506  * determine if the address belongs or not.
1507  */
1508 int ftrace_text_reserved(void *start, void *end)
1509 {
1510         unsigned long ret;
1511
1512         ret = ftrace_location_range((unsigned long)start,
1513                                     (unsigned long)end);
1514
1515         return (int)!!ret;
1516 }
1517
1518 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1519                                      int filter_hash,
1520                                      bool inc)
1521 {
1522         struct ftrace_hash *hash;
1523         struct ftrace_hash *other_hash;
1524         struct ftrace_page *pg;
1525         struct dyn_ftrace *rec;
1526         int count = 0;
1527         int all = 0;
1528
1529         /* Only update if the ops has been registered */
1530         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1531                 return;
1532
1533         /*
1534          * In the filter_hash case:
1535          *   If the count is zero, we update all records.
1536          *   Otherwise we just update the items in the hash.
1537          *
1538          * In the notrace_hash case:
1539          *   We enable the update in the hash.
1540          *   As disabling notrace means enabling the tracing,
1541          *   and enabling notrace means disabling, the inc variable
1542          *   gets inverted.
1543          */
1544         if (filter_hash) {
1545                 hash = ops->filter_hash;
1546                 other_hash = ops->notrace_hash;
1547                 if (ftrace_hash_empty(hash))
1548                         all = 1;
1549         } else {
1550                 inc = !inc;
1551                 hash = ops->notrace_hash;
1552                 other_hash = ops->filter_hash;
1553                 /*
1554                  * If the notrace hash has no items,
1555                  * then there's nothing to do.
1556                  */
1557                 if (ftrace_hash_empty(hash))
1558                         return;
1559         }
1560
1561         do_for_each_ftrace_rec(pg, rec) {
1562                 int in_other_hash = 0;
1563                 int in_hash = 0;
1564                 int match = 0;
1565
1566                 if (all) {
1567                         /*
1568                          * Only the filter_hash affects all records.
1569                          * Update if the record is not in the notrace hash.
1570                          */
1571                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1572                                 match = 1;
1573                 } else {
1574                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1575                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1576
1577                         /*
1578                          * Match if in this hash and the other hash permits it.
1579                          */
1580                         if (filter_hash && in_hash && !in_other_hash)
1581                                 match = 1;
1582                         else if (!filter_hash && in_hash &&
1583                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1584                                 match = 1;
1585                 }
1586                 if (!match)
1587                         continue;
1588
1589                 if (inc) {
1590                         rec->flags++;
1591                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1592                                 return;
1593                         /*
1594                          * If any ops wants regs saved for this function
1595                          * then all ops will get saved regs.
1596                          */
1597                         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1598                                 rec->flags |= FTRACE_FL_REGS;
1599                 } else {
1600                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1601                                 return;
1602                         rec->flags--;
1603                 }
1604                 count++;
1605                 /* Shortcut, if we handled all records, we are done. */
1606                 if (!all && count == hash->count)
1607                         return;
1608         } while_for_each_ftrace_rec();
1609 }
1610
1611 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1612                                     int filter_hash)
1613 {
1614         __ftrace_hash_rec_update(ops, filter_hash, 0);
1615 }
1616
1617 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1618                                    int filter_hash)
1619 {
1620         __ftrace_hash_rec_update(ops, filter_hash, 1);
1621 }
1622
1623 static void print_ip_ins(const char *fmt, unsigned char *p)
1624 {
1625         int i;
1626
1627         printk(KERN_CONT "%s", fmt);
1628
1629         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1630                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1631 }
1632
1633 /**
1634  * ftrace_bug - report and shutdown function tracer
1635  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1636  * @ip: The address that failed
1637  *
1638  * The arch code that enables or disables the function tracing
1639  * can call ftrace_bug() when it has detected a problem in
1640  * modifying the code. @failed should be one of either:
1641  * EFAULT - if the problem happens on reading the @ip address
1642  * EINVAL - if what is read at @ip is not what was expected
1643  * EPERM - if the problem happens on writing to the @ip address
1644  */
1645 void ftrace_bug(int failed, unsigned long ip)
1646 {
1647         switch (failed) {
1648         case -EFAULT:
1649                 FTRACE_WARN_ON_ONCE(1);
1650                 pr_info("ftrace faulted on modifying ");
1651                 print_ip_sym(ip);
1652                 break;
1653         case -EINVAL:
1654                 FTRACE_WARN_ON_ONCE(1);
1655                 pr_info("ftrace failed to modify ");
1656                 print_ip_sym(ip);
1657                 print_ip_ins(" actual: ", (unsigned char *)ip);
1658                 printk(KERN_CONT "\n");
1659                 break;
1660         case -EPERM:
1661                 FTRACE_WARN_ON_ONCE(1);
1662                 pr_info("ftrace faulted on writing ");
1663                 print_ip_sym(ip);
1664                 break;
1665         default:
1666                 FTRACE_WARN_ON_ONCE(1);
1667                 pr_info("ftrace faulted on unknown error ");
1668                 print_ip_sym(ip);
1669         }
1670 }
1671
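/*
 * Illustrative sketch (not part of the original file): arch code that
 * patches a call site would typically report a failure like this; the
 * helper name my_arch_modify_code() is hypothetical.
 *
 *	ret = my_arch_modify_code(rec->ip, old_code, new_code);
 *	if (ret) {
 *		ftrace_bug(ret, rec->ip);
 *		return ret;
 *	}
 */
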
1672 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1673 {
1674         unsigned long flag = 0UL;
1675
1676         /*
1677          * If we are updating calls:
1678          *
1679          *   If the record has a ref count, then we need to enable it
1680          *   because someone is using it.
1681          *
1682          *   Otherwise we make sure it's disabled.
1683          *
1684          * If we are disabling calls, then disable all records that
1685          * are enabled.
1686          */
1687         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1688                 flag = FTRACE_FL_ENABLED;
1689
1690         /*
1691          * If enabling and the REGS flag does not match REGS_EN, then
1692          * do not ignore this record. Set flag to fail the compare against
1693          * ENABLED.
1694          */
1695         if (flag &&
1696             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1697                 flag |= FTRACE_FL_REGS;
1698
1699         /* If the state of this record hasn't changed, then do nothing */
1700         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1701                 return FTRACE_UPDATE_IGNORE;
1702
1703         if (flag) {
1704                 /* Save off if rec is being enabled (for return value) */
1705                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1706
1707                 if (update) {
1708                         rec->flags |= FTRACE_FL_ENABLED;
1709                         if (flag & FTRACE_FL_REGS) {
1710                                 if (rec->flags & FTRACE_FL_REGS)
1711                                         rec->flags |= FTRACE_FL_REGS_EN;
1712                                 else
1713                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1714                         }
1715                 }
1716
1717                 /*
1718                  * If this record is being updated from a nop, then
1719                  *   return UPDATE_MAKE_CALL.
1720                  * Otherwise, if the EN flag is set, then return
1721                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1722                  *   from the non-save regs, to a save regs function.
1723                  * Otherwise,
1724                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1725                  *   from the save regs, to a non-save regs function.
1726                  */
1727                 if (flag & FTRACE_FL_ENABLED)
1728                         return FTRACE_UPDATE_MAKE_CALL;
1729                 else if (rec->flags & FTRACE_FL_REGS_EN)
1730                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1731                 else
1732                         return FTRACE_UPDATE_MODIFY_CALL;
1733         }
1734
1735         if (update) {
1736                 /* If there's no more users, clear all flags */
1737                 if (!(rec->flags & ~FTRACE_FL_MASK))
1738                         rec->flags = 0;
1739                 else
1740                         /* Just disable the record (keep REGS state) */
1741                         rec->flags &= ~FTRACE_FL_ENABLED;
1742         }
1743
1744         return FTRACE_UPDATE_MAKE_NOP;
1745 }
1746
1747 /**
1748  * ftrace_update_record, set whether a record is tracing or not
1749  * @rec: the record to update
1750  * @enable: set to 1 if the record is tracing, zero to force disable
1751  *
1752  * The records that represent all functions that can be traced need
1753  * to be updated when tracing has been enabled.
1754  */
1755 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1756 {
1757         return ftrace_check_record(rec, enable, 1);
1758 }
1759
1760 /**
1761  * ftrace_test_record, check if the record has been enabled or not
1762  * @rec: the record to test
1763  * @enable: set to 1 to check if enabled, 0 if it is disabled
1764  *
1765  * The arch code may need to test if a record is already set to
1766  * tracing to determine how to modify the function code that it
1767  * represents.
1768  */
1769 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1770 {
1771         return ftrace_check_record(rec, enable, 0);
1772 }
1773
1774 static int
1775 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1776 {
1777         unsigned long ftrace_old_addr;
1778         unsigned long ftrace_addr;
1779         int ret;
1780
1781         ret = ftrace_update_record(rec, enable);
1782
1783         if (rec->flags & FTRACE_FL_REGS)
1784                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1785         else
1786                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1787
1788         switch (ret) {
1789         case FTRACE_UPDATE_IGNORE:
1790                 return 0;
1791
1792         case FTRACE_UPDATE_MAKE_CALL:
1793                 return ftrace_make_call(rec, ftrace_addr);
1794
1795         case FTRACE_UPDATE_MAKE_NOP:
1796                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1797
1798         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1799         case FTRACE_UPDATE_MODIFY_CALL:
1800                 if (rec->flags & FTRACE_FL_REGS)
1801                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1802                 else
1803                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1804
1805                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1806         }
1807
1808         return -1; /* unknown ftrace bug */
1809 }
1810
1811 void __weak ftrace_replace_code(int enable)
1812 {
1813         struct dyn_ftrace *rec;
1814         struct ftrace_page *pg;
1815         int failed;
1816
1817         if (unlikely(ftrace_disabled))
1818                 return;
1819
1820         do_for_each_ftrace_rec(pg, rec) {
1821                 failed = __ftrace_replace_code(rec, enable);
1822                 if (failed) {
1823                         ftrace_bug(failed, rec->ip);
1824                         /* Stop processing */
1825                         return;
1826                 }
1827         } while_for_each_ftrace_rec();
1828 }
1829
1830 struct ftrace_rec_iter {
1831         struct ftrace_page      *pg;
1832         int                     index;
1833 };
1834
1835 /**
1836  * ftrace_rec_iter_start, start up iterating over traced functions
1837  *
1838  * Returns an iterator handle that is used to iterate over all
1839  * the records that represent address locations where functions
1840  * are traced.
1841  *
1842  * May return NULL if no records are available.
1843  */
1844 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1845 {
1846         /*
1847          * We only use a single iterator.
1848          * Protected by the ftrace_lock mutex.
1849          */
1850         static struct ftrace_rec_iter ftrace_rec_iter;
1851         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1852
1853         iter->pg = ftrace_pages_start;
1854         iter->index = 0;
1855
1856         /* Could have empty pages */
1857         while (iter->pg && !iter->pg->index)
1858                 iter->pg = iter->pg->next;
1859
1860         if (!iter->pg)
1861                 return NULL;
1862
1863         return iter;
1864 }
1865
1866 /**
1867  * ftrace_rec_iter_next, get the next record to process.
1868  * @iter: The handle to the iterator.
1869  *
1870  * Returns the next iterator after the given iterator @iter.
1871  */
1872 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1873 {
1874         iter->index++;
1875
1876         if (iter->index >= iter->pg->index) {
1877                 iter->pg = iter->pg->next;
1878                 iter->index = 0;
1879
1880                 /* Could have empty pages */
1881                 while (iter->pg && !iter->pg->index)
1882                         iter->pg = iter->pg->next;
1883         }
1884
1885         if (!iter->pg)
1886                 return NULL;
1887
1888         return iter;
1889 }
1890
1891 /**
1892  * ftrace_rec_iter_record, get the record at the iterator location
1893  * @iter: The current iterator location
1894  *
1895  * Returns the record that the current @iter is at.
1896  */
1897 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1898 {
1899         return &iter->pg->records[iter->index];
1900 }
1901
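/*
 * A minimal usage sketch for the three iterator calls above (the
 * caller is hypothetical, e.g. arch init code walking all records;
 * ftrace_lock must be held, as the single static iterator relies on it):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// act on rec->ip here
 *	}
 */
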
1902 static int
1903 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1904 {
1905         unsigned long ip;
1906         int ret;
1907
1908         ip = rec->ip;
1909
1910         if (unlikely(ftrace_disabled))
1911                 return 0;
1912
1913         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1914         if (ret) {
1915                 ftrace_bug(ret, ip);
1916                 return 0;
1917         }
1918         return 1;
1919 }
1920
1921 /*
1922  * archs can override this function if they must do something
1923  * before the modifying code is performed.
1924  */
1925 int __weak ftrace_arch_code_modify_prepare(void)
1926 {
1927         return 0;
1928 }
1929
1930 /*
1931  * archs can override this function if they must do something
1932  * after the modifying code is performed.
1933  */
1934 int __weak ftrace_arch_code_modify_post_process(void)
1935 {
1936         return 0;
1937 }
1938
1939 void ftrace_modify_all_code(int command)
1940 {
1941         if (command & FTRACE_UPDATE_CALLS)
1942                 ftrace_replace_code(1);
1943         else if (command & FTRACE_DISABLE_CALLS)
1944                 ftrace_replace_code(0);
1945
1946         if (command & FTRACE_UPDATE_TRACE_FUNC)
1947                 ftrace_update_ftrace_func(ftrace_trace_function);
1948
1949         if (command & FTRACE_START_FUNC_RET)
1950                 ftrace_enable_ftrace_graph_caller();
1951         else if (command & FTRACE_STOP_FUNC_RET)
1952                 ftrace_disable_ftrace_graph_caller();
1953 }
1954
1955 static int __ftrace_modify_code(void *data)
1956 {
1957         int *command = data;
1958
1959         ftrace_modify_all_code(*command);
1960
1961         return 0;
1962 }
1963
1964 /**
1965  * ftrace_run_stop_machine, go back to the stop machine method
1966  * @command: The command to tell ftrace what to do
1967  *
1968  * If an arch needs to fall back to the stop machine method, then
1969  * it can call this function.
1970  */
1971 void ftrace_run_stop_machine(int command)
1972 {
1973         stop_machine(__ftrace_modify_code, &command, NULL);
1974 }
1975
1976 /**
1977  * arch_ftrace_update_code, modify the code to trace or not trace
1978  * @command: The command that needs to be done
1979  *
1980  * Archs can override this function if they do not need to
1981  * run stop_machine() to modify code.
1982  */
1983 void __weak arch_ftrace_update_code(int command)
1984 {
1985         ftrace_run_stop_machine(command);
1986 }
1987
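/*
 * Hedged sketch of an arch override (assuming the arch can patch text
 * safely without stop_machine(); the pre/post helpers are hypothetical):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		my_arch_text_poke_prepare();
 *		ftrace_modify_all_code(command);
 *		my_arch_text_poke_finish();
 *	}
 */
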
1988 static void ftrace_run_update_code(int command)
1989 {
1990         int ret;
1991
1992         ret = ftrace_arch_code_modify_prepare();
1993         FTRACE_WARN_ON(ret);
1994         if (ret)
1995                 return;
1996         /*
1997          * Do not call function tracer while we update the code.
1998          * We are in stop machine.
1999          */
2000         function_trace_stop++;
2001
2002         /*
2003          * By default we use stop_machine() to modify the code.
2004          * But archs can do whatever they want as long as it
2005          * is safe. The stop_machine() is the safest, but also
2006          * produces the most overhead.
2007          */
2008         arch_ftrace_update_code(command);
2009
2010         function_trace_stop--;
2011
2012         ret = ftrace_arch_code_modify_post_process();
2013         FTRACE_WARN_ON(ret);
2014 }
2015
2016 static ftrace_func_t saved_ftrace_func;
2017 static int ftrace_start_up;
2018 static int global_start_up;
2019
2020 static void ftrace_startup_enable(int command)
2021 {
2022         if (saved_ftrace_func != ftrace_trace_function) {
2023                 saved_ftrace_func = ftrace_trace_function;
2024                 command |= FTRACE_UPDATE_TRACE_FUNC;
2025         }
2026
2027         if (!command || !ftrace_enabled)
2028                 return;
2029
2030         ftrace_run_update_code(command);
2031 }
2032
2033 static int ftrace_startup(struct ftrace_ops *ops, int command)
2034 {
2035         bool hash_enable = true;
2036
2037         if (unlikely(ftrace_disabled))
2038                 return -ENODEV;
2039
2040         ftrace_start_up++;
2041         command |= FTRACE_UPDATE_CALLS;
2042
2043         /* ops marked global share the filter hashes */
2044         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2045                 ops = &global_ops;
2046                 /* Don't update hash if global is already set */
2047                 if (global_start_up)
2048                         hash_enable = false;
2049                 global_start_up++;
2050         }
2051
2052         ops->flags |= FTRACE_OPS_FL_ENABLED;
2053         if (hash_enable)
2054                 ftrace_hash_rec_enable(ops, 1);
2055
2056         ftrace_startup_enable(command);
2057
2058         return 0;
2059 }
2060
2061 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2062 {
2063         bool hash_disable = true;
2064
2065         if (unlikely(ftrace_disabled))
2066                 return;
2067
2068         ftrace_start_up--;
2069         /*
2070          * Just warn in case of an imbalance; no need to kill ftrace, it's not
2071          * critical, but the ftrace_call callers may never be nopped again after
2072          * further ftrace uses.
2073          */
2074         WARN_ON_ONCE(ftrace_start_up < 0);
2075
2076         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2077                 ops = &global_ops;
2078                 global_start_up--;
2079                 WARN_ON_ONCE(global_start_up < 0);
2080                 /* Don't update hash if global still has users */
2081                 if (global_start_up) {
2082                         WARN_ON_ONCE(!ftrace_start_up);
2083                         hash_disable = false;
2084                 }
2085         }
2086
2087         if (hash_disable)
2088                 ftrace_hash_rec_disable(ops, 1);
2089
2090         if (ops != &global_ops || !global_start_up)
2091                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2092
2093         command |= FTRACE_UPDATE_CALLS;
2094
2095         if (saved_ftrace_func != ftrace_trace_function) {
2096                 saved_ftrace_func = ftrace_trace_function;
2097                 command |= FTRACE_UPDATE_TRACE_FUNC;
2098         }
2099
2100         if (!command || !ftrace_enabled)
2101                 return;
2102
2103         ftrace_run_update_code(command);
2104 }
2105
2106 static void ftrace_startup_sysctl(void)
2107 {
2108         if (unlikely(ftrace_disabled))
2109                 return;
2110
2111         /* Force update next time */
2112         saved_ftrace_func = NULL;
2113         /* ftrace_start_up is true if we want ftrace running */
2114         if (ftrace_start_up)
2115                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2116 }
2117
2118 static void ftrace_shutdown_sysctl(void)
2119 {
2120         if (unlikely(ftrace_disabled))
2121                 return;
2122
2123         /* ftrace_start_up is true if ftrace is running */
2124         if (ftrace_start_up)
2125                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2126 }
2127
2128 static cycle_t          ftrace_update_time;
2129 static unsigned long    ftrace_update_cnt;
2130 unsigned long           ftrace_update_tot_cnt;
2131
2132 static int ops_traces_mod(struct ftrace_ops *ops)
2133 {
2134         struct ftrace_hash *hash;
2135
2136         hash = ops->filter_hash;
2137         return ftrace_hash_empty(hash);
2138 }
2139
2140 static int ftrace_update_code(struct module *mod)
2141 {
2142         struct ftrace_page *pg;
2143         struct dyn_ftrace *p;
2144         cycle_t start, stop;
2145         unsigned long ref = 0;
2146         int i;
2147
2148         /*
2149          * When adding a module, we need to check if tracers are
2150          * currently enabled and if they are set to trace all functions.
2151          * If they are, we need to enable the module functions as well
2152          * as update the reference counts for those function records.
2153          */
2154         if (mod) {
2155                 struct ftrace_ops *ops;
2156
2157                 for (ops = ftrace_ops_list;
2158                      ops != &ftrace_list_end; ops = ops->next) {
2159                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2160                             ops_traces_mod(ops))
2161                                 ref++;
2162                 }
2163         }
2164
2165         start = ftrace_now(raw_smp_processor_id());
2166         ftrace_update_cnt = 0;
2167
2168         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2169
2170                 for (i = 0; i < pg->index; i++) {
2171                         /* If something went wrong, bail without enabling anything */
2172                         if (unlikely(ftrace_disabled))
2173                                 return -1;
2174
2175                         p = &pg->records[i];
2176                         p->flags = ref;
2177
2178                         /*
2179                          * Do the initial record conversion from mcount jump
2180                          * to the NOP instructions.
2181                          */
2182                         if (!ftrace_code_disable(mod, p))
2183                                 break;
2184
2185                         ftrace_update_cnt++;
2186
2187                         /*
2188                          * If the tracing is enabled, go ahead and enable the record.
2189                          *
2190          * The reason not to enable the record immediately is the
2191          * inherent check of ftrace_make_nop/ftrace_make_call for
2192          * correct previous instructions.  Doing the NOP conversion
2193          * first puts the module into the correct state, thus
2194          * passing the ftrace_make_call check.
2195                          */
2196                         if (ftrace_start_up && ref) {
2197                                 int failed = __ftrace_replace_code(p, 1);
2198                                 if (failed)
2199                                         ftrace_bug(failed, p->ip);
2200                         }
2201                 }
2202         }
2203
2204         ftrace_new_pgs = NULL;
2205
2206         stop = ftrace_now(raw_smp_processor_id());
2207         ftrace_update_time = stop - start;
2208         ftrace_update_tot_cnt += ftrace_update_cnt;
2209
2210         return 0;
2211 }
2212
2213 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2214 {
2215         int order;
2216         int cnt;
2217
2218         if (WARN_ON(!count))
2219                 return -EINVAL;
2220
2221         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2222
2223         /*
2224          * We want to fill as much as possible. No more than a page
2225          * may be empty.
2226          */
2227         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2228                 order--;
2229
2230  again:
2231         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2232
2233         if (!pg->records) {
2234                 /* if we can't allocate this size, try something smaller */
2235                 if (!order)
2236                         return -ENOMEM;
2237                 order >>= 1;
2238                 goto again;
2239         }
2240
2241         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2242         pg->size = cnt;
2243
2244         if (cnt > count)
2245                 cnt = count;
2246
2247         return cnt;
2248 }
2249
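/*
 * Worked example (illustrative; assumes PAGE_SIZE == 4096 and
 * ENTRY_SIZE == 16, giving ENTRIES_PER_PAGE == 256): for count = 1000,
 * order = get_count_order(DIV_ROUND_UP(1000, 256)) = get_count_order(4) = 2,
 * so (4096 << 2) / 16 = 1024 slots fit. Since 1024 < 1000 + 256, the
 * order is kept, pg->size becomes 1024 and the returned cnt is clamped
 * to the requested 1000.
 */
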
2250 static struct ftrace_page *
2251 ftrace_allocate_pages(unsigned long num_to_init)
2252 {
2253         struct ftrace_page *start_pg;
2254         struct ftrace_page *pg;
2255         int order;
2256         int cnt;
2257
2258         if (!num_to_init)
2259                 return 0;
2260
2261         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2262         if (!pg)
2263                 return NULL;
2264
2265         /*
2266          * Try to allocate as much as possible in one contiguous
2267          * location that fills in all of the space. We want to
2268          * waste as little space as possible.
2269          */
2270         for (;;) {
2271                 cnt = ftrace_allocate_records(pg, num_to_init);
2272                 if (cnt < 0)
2273                         goto free_pages;
2274
2275                 num_to_init -= cnt;
2276                 if (!num_to_init)
2277                         break;
2278
2279                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2280                 if (!pg->next)
2281                         goto free_pages;
2282
2283                 pg = pg->next;
2284         }
2285
2286         return start_pg;
2287
2288  free_pages:
2289         while (start_pg) {
2290                 pg = start_pg;
2291                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2292                 free_pages((unsigned long)pg->records, order);
2293                 start_pg = pg->next;
2294                 kfree(pg);
2295         }
2296         pr_info("ftrace: FAILED to allocate memory for functions\n");
2297         return NULL;
2298 }
2299
2300 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2301 {
2302         int cnt;
2303
2304         if (!num_to_init) {
2305                 pr_info("ftrace: No functions to be traced?\n");
2306                 return -1;
2307         }
2308
2309         cnt = num_to_init / ENTRIES_PER_PAGE;
2310         pr_info("ftrace: allocating %ld entries in %d pages\n",
2311                 num_to_init, cnt + 1);
2312
2313         return 0;
2314 }
2315
2316 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2317
2318 struct ftrace_iterator {
2319         loff_t                          pos;
2320         loff_t                          func_pos;
2321         struct ftrace_page              *pg;
2322         struct dyn_ftrace               *func;
2323         struct ftrace_func_probe        *probe;
2324         struct trace_parser             parser;
2325         struct ftrace_hash              *hash;
2326         struct ftrace_ops               *ops;
2327         int                             hidx;
2328         int                             idx;
2329         unsigned                        flags;
2330 };
2331
2332 static void *
2333 t_hash_next(struct seq_file *m, loff_t *pos)
2334 {
2335         struct ftrace_iterator *iter = m->private;
2336         struct hlist_node *hnd = NULL;
2337         struct hlist_head *hhd;
2338
2339         (*pos)++;
2340         iter->pos = *pos;
2341
2342         if (iter->probe)
2343                 hnd = &iter->probe->node;
2344  retry:
2345         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2346                 return NULL;
2347
2348         hhd = &ftrace_func_hash[iter->hidx];
2349
2350         if (hlist_empty(hhd)) {
2351                 iter->hidx++;
2352                 hnd = NULL;
2353                 goto retry;
2354         }
2355
2356         if (!hnd)
2357                 hnd = hhd->first;
2358         else {
2359                 hnd = hnd->next;
2360                 if (!hnd) {
2361                         iter->hidx++;
2362                         goto retry;
2363                 }
2364         }
2365
2366         if (WARN_ON_ONCE(!hnd))
2367                 return NULL;
2368
2369         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2370
2371         return iter;
2372 }
2373
2374 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2375 {
2376         struct ftrace_iterator *iter = m->private;
2377         void *p = NULL;
2378         loff_t l;
2379
2380         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2381                 return NULL;
2382
2383         if (iter->func_pos > *pos)
2384                 return NULL;
2385
2386         iter->hidx = 0;
2387         for (l = 0; l <= (*pos - iter->func_pos); ) {
2388                 p = t_hash_next(m, &l);
2389                 if (!p)
2390                         break;
2391         }
2392         if (!p)
2393                 return NULL;
2394
2395         /* Only set this if we have an item */
2396         iter->flags |= FTRACE_ITER_HASH;
2397
2398         return iter;
2399 }
2400
2401 static int
2402 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2403 {
2404         struct ftrace_func_probe *rec;
2405
2406         rec = iter->probe;
2407         if (WARN_ON_ONCE(!rec))
2408                 return -EIO;
2409
2410         if (rec->ops->print)
2411                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2412
2413         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2414
2415         if (rec->data)
2416                 seq_printf(m, ":%p", rec->data);
2417         seq_putc(m, '\n');
2418
2419         return 0;
2420 }
2421
2422 static void *
2423 t_next(struct seq_file *m, void *v, loff_t *pos)
2424 {
2425         struct ftrace_iterator *iter = m->private;
2426         struct ftrace_ops *ops = iter->ops;
2427         struct dyn_ftrace *rec = NULL;
2428
2429         if (unlikely(ftrace_disabled))
2430                 return NULL;
2431
2432         if (iter->flags & FTRACE_ITER_HASH)
2433                 return t_hash_next(m, pos);
2434
2435         (*pos)++;
2436         iter->pos = iter->func_pos = *pos;
2437
2438         if (iter->flags & FTRACE_ITER_PRINTALL)
2439                 return t_hash_start(m, pos);
2440
2441  retry:
2442         if (iter->idx >= iter->pg->index) {
2443                 if (iter->pg->next) {
2444                         iter->pg = iter->pg->next;
2445                         iter->idx = 0;
2446                         goto retry;
2447                 }
2448         } else {
2449                 rec = &iter->pg->records[iter->idx++];
2450                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2451                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2452
2453                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2454                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2455
2456                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2457                      !(rec->flags & ~FTRACE_FL_MASK))) {
2458
2459                         rec = NULL;
2460                         goto retry;
2461                 }
2462         }
2463
2464         if (!rec)
2465                 return t_hash_start(m, pos);
2466
2467         iter->func = rec;
2468
2469         return iter;
2470 }
2471
2472 static void reset_iter_read(struct ftrace_iterator *iter)
2473 {
2474         iter->pos = 0;
2475         iter->func_pos = 0;
2476         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2477 }
2478
2479 static void *t_start(struct seq_file *m, loff_t *pos)
2480 {
2481         struct ftrace_iterator *iter = m->private;
2482         struct ftrace_ops *ops = iter->ops;
2483         void *p = NULL;
2484         loff_t l;
2485
2486         mutex_lock(&ftrace_lock);
2487
2488         if (unlikely(ftrace_disabled))
2489                 return NULL;
2490
2491         /*
2492          * If an lseek was done, then reset and start from the beginning.
2493          */
2494         if (*pos < iter->pos)
2495                 reset_iter_read(iter);
2496
2497         /*
2498          * For set_ftrace_filter reading, if we have the filter
2499          * off, we can short-circuit and just print out that all
2500          * functions are enabled.
2501          */
2502         if (iter->flags & FTRACE_ITER_FILTER &&
2503             ftrace_hash_empty(ops->filter_hash)) {
2504                 if (*pos > 0)
2505                         return t_hash_start(m, pos);
2506                 iter->flags |= FTRACE_ITER_PRINTALL;
2507                 /* reset in case of seek/pread */
2508                 iter->flags &= ~FTRACE_ITER_HASH;
2509                 return iter;
2510         }
2511
2512         if (iter->flags & FTRACE_ITER_HASH)
2513                 return t_hash_start(m, pos);
2514
2515         /*
2516          * Unfortunately, we need to restart at ftrace_pages_start
2517          * every time we let go of ftrace_lock. This is because
2518          * those pointers can change without the lock.
2519          */
2520         iter->pg = ftrace_pages_start;
2521         iter->idx = 0;
2522         for (l = 0; l <= *pos; ) {
2523                 p = t_next(m, p, &l);
2524                 if (!p)
2525                         break;
2526         }
2527
2528         if (!p)
2529                 return t_hash_start(m, pos);
2530
2531         return iter;
2532 }
2533
2534 static void t_stop(struct seq_file *m, void *p)
2535 {
2536         mutex_unlock(&ftrace_lock);
2537 }
2538
2539 static int t_show(struct seq_file *m, void *v)
2540 {
2541         struct ftrace_iterator *iter = m->private;
2542         struct dyn_ftrace *rec;
2543
2544         if (iter->flags & FTRACE_ITER_HASH)
2545                 return t_hash_show(m, iter);
2546
2547         if (iter->flags & FTRACE_ITER_PRINTALL) {
2548                 seq_printf(m, "#### all functions enabled ####\n");
2549                 return 0;
2550         }
2551
2552         rec = iter->func;
2553
2554         if (!rec)
2555                 return 0;
2556
2557         seq_printf(m, "%ps", (void *)rec->ip);
2558         if (iter->flags & FTRACE_ITER_ENABLED)
2559                 seq_printf(m, " (%ld)%s",
2560                            rec->flags & ~FTRACE_FL_MASK,
2561                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2562         seq_printf(m, "\n");
2563
2564         return 0;
2565 }
2566
2567 static const struct seq_operations show_ftrace_seq_ops = {
2568         .start = t_start,
2569         .next = t_next,
2570         .stop = t_stop,
2571         .show = t_show,
2572 };
2573
2574 static int
2575 ftrace_avail_open(struct inode *inode, struct file *file)
2576 {
2577         struct ftrace_iterator *iter;
2578
2579         if (unlikely(ftrace_disabled))
2580                 return -ENODEV;
2581
2582         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2583         if (iter) {
2584                 iter->pg = ftrace_pages_start;
2585                 iter->ops = &global_ops;
2586         }
2587
2588         return iter ? 0 : -ENOMEM;
2589 }
2590
2591 static int
2592 ftrace_enabled_open(struct inode *inode, struct file *file)
2593 {
2594         struct ftrace_iterator *iter;
2595
2596         if (unlikely(ftrace_disabled))
2597                 return -ENODEV;
2598
2599         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2600         if (iter) {
2601                 iter->pg = ftrace_pages_start;
2602                 iter->flags = FTRACE_ITER_ENABLED;
2603                 iter->ops = &global_ops;
2604         }
2605
2606         return iter ? 0 : -ENOMEM;
2607 }
2608
2609 static void ftrace_filter_reset(struct ftrace_hash *hash)
2610 {
2611         mutex_lock(&ftrace_lock);
2612         ftrace_hash_clear(hash);
2613         mutex_unlock(&ftrace_lock);
2614 }
2615
2616 /**
2617  * ftrace_regex_open - initialize function tracer filter files
2618  * @ops: The ftrace_ops that hold the hash filters
2619  * @flag: The type of filter to process
2620  * @inode: The inode, usually passed in to your open routine
2621  * @file: The file, usually passed in to your open routine
2622  *
2623  * ftrace_regex_open() initializes the filter files for the
2624  * @ops. Depending on @flag it may process the filter hash or
2625  * the notrace hash of @ops. With this called from the open
2626  * routine, you can use ftrace_filter_write() for the write
2627  * routine if @flag has FTRACE_ITER_FILTER set, or
2628  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2629  * ftrace_regex_lseek() should be used as the lseek routine, and
2630  * release must call ftrace_regex_release().
2631  */
2632 int
2633 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2634                   struct inode *inode, struct file *file)
2635 {
2636         struct ftrace_iterator *iter;
2637         struct ftrace_hash *hash;
2638         int ret = 0;
2639
2640         if (unlikely(ftrace_disabled))
2641                 return -ENODEV;
2642
2643         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2644         if (!iter)
2645                 return -ENOMEM;
2646
2647         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2648                 kfree(iter);
2649                 return -ENOMEM;
2650         }
2651
2652         if (flag & FTRACE_ITER_NOTRACE)
2653                 hash = ops->notrace_hash;
2654         else
2655                 hash = ops->filter_hash;
2656
2657         iter->ops = ops;
2658         iter->flags = flag;
2659
2660         if (file->f_mode & FMODE_WRITE) {
2661                 mutex_lock(&ftrace_lock);
2662                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2663                 mutex_unlock(&ftrace_lock);
2664
2665                 if (!iter->hash) {
2666                         trace_parser_put(&iter->parser);
2667                         kfree(iter);
2668                         return -ENOMEM;
2669                 }
2670         }
2671
2672         mutex_lock(&ftrace_regex_lock);
2673
2674         if ((file->f_mode & FMODE_WRITE) &&
2675             (file->f_flags & O_TRUNC))
2676                 ftrace_filter_reset(iter->hash);
2677
2678         if (file->f_mode & FMODE_READ) {
2679                 iter->pg = ftrace_pages_start;
2680
2681                 ret = seq_open(file, &show_ftrace_seq_ops);
2682                 if (!ret) {
2683                         struct seq_file *m = file->private_data;
2684                         m->private = iter;
2685                 } else {
2686                         /* Failed */
2687                         free_ftrace_hash(iter->hash);
2688                         trace_parser_put(&iter->parser);
2689                         kfree(iter);
2690                 }
2691         } else
2692                 file->private_data = iter;
2693         mutex_unlock(&ftrace_regex_lock);
2694
2695         return ret;
2696 }
2697
2698 static int
2699 ftrace_filter_open(struct inode *inode, struct file *file)
2700 {
2701         return ftrace_regex_open(&global_ops,
2702                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2703                         inode, file);
2704 }
2705
2706 static int
2707 ftrace_notrace_open(struct inode *inode, struct file *file)
2708 {
2709         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2710                                  inode, file);
2711 }
2712
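/*
 * Sketch of how the pieces above are wired into file_operations; this
 * mirrors how the filter files are hooked up later in this file, and
 * is shown here only to illustrate the ftrace_regex_open() contract
 * described above:
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = ftrace_regex_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */
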
2713 loff_t
2714 ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
2715 {
2716         loff_t ret;
2717
2718         if (file->f_mode & FMODE_READ)
2719                 ret = seq_lseek(file, offset, whence);
2720         else
2721                 file->f_pos = ret = 1;
2722
2723         return ret;
2724 }
2725
2726 static int ftrace_match(char *str, char *regex, int len, int type)
2727 {
2728         int matched = 0;
2729         int slen;
2730
2731         switch (type) {
2732         case MATCH_FULL:
2733                 if (strcmp(str, regex) == 0)
2734                         matched = 1;
2735                 break;
2736         case MATCH_FRONT_ONLY:
2737                 if (strncmp(str, regex, len) == 0)
2738                         matched = 1;
2739                 break;
2740         case MATCH_MIDDLE_ONLY:
2741                 if (strstr(str, regex))
2742                         matched = 1;
2743                 break;
2744         case MATCH_END_ONLY:
2745                 slen = strlen(str);
2746                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2747                         matched = 1;
2748                 break;
2749         }
2750
2751         return matched;
2752 }
2753
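/*
 * Examples of how filter_parse_regex() maps user globs onto the match
 * types handled above (the '*'s are stripped from the search string):
 *
 *	"schedule"	-> MATCH_FULL		exact match
 *	"sched_*"	-> MATCH_FRONT_ONLY	prefix match
 *	"*_lock"	-> MATCH_END_ONLY	suffix match
 *	"*sched*"	-> MATCH_MIDDLE_ONLY	substring match
 */
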
2754 static int
2755 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2756 {
2757         struct ftrace_func_entry *entry;
2758         int ret = 0;
2759
2760         entry = ftrace_lookup_ip(hash, rec->ip);
2761         if (not) {
2762                 /* Do nothing if it doesn't exist */
2763                 if (!entry)
2764                         return 0;
2765
2766                 free_hash_entry(hash, entry);
2767         } else {
2768                 /* Do nothing if it exists */
2769                 if (entry)
2770                         return 0;
2771
2772                 ret = add_hash_entry(hash, rec->ip);
2773         }
2774         return ret;
2775 }
2776
2777 static int
2778 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2779                     char *regex, int len, int type)
2780 {
2781         char str[KSYM_SYMBOL_LEN];
2782         char *modname;
2783
2784         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2785
2786         if (mod) {
2787                 /* module lookup requires matching the module */
2788                 if (!modname || strcmp(modname, mod))
2789                         return 0;
2790
2791                 /* blank search means to match all funcs in the mod */
2792                 if (!len)
2793                         return 1;
2794         }
2795
2796         return ftrace_match(str, regex, len, type);
2797 }
2798
2799 static int
2800 match_records(struct ftrace_hash *hash, char *buff,
2801               int len, char *mod, int not)
2802 {
2803         unsigned search_len = 0;
2804         struct ftrace_page *pg;
2805         struct dyn_ftrace *rec;
2806         int type = MATCH_FULL;
2807         char *search = buff;
2808         int found = 0;
2809         int ret;
2810
2811         if (len) {
2812                 type = filter_parse_regex(buff, len, &search, &not);
2813                 search_len = strlen(search);
2814         }
2815
2816         mutex_lock(&ftrace_lock);
2817
2818         if (unlikely(ftrace_disabled))
2819                 goto out_unlock;
2820
2821         do_for_each_ftrace_rec(pg, rec) {
2822                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2823                         ret = enter_record(hash, rec, not);
2824                         if (ret < 0) {
2825                                 found = ret;
2826                                 goto out_unlock;
2827                         }
2828                         found = 1;
2829                 }
2830         } while_for_each_ftrace_rec();
2831  out_unlock:
2832         mutex_unlock(&ftrace_lock);
2833
2834         return found;
2835 }
2836
2837 static int
2838 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2839 {
2840         return match_records(hash, buff, len, NULL, 0);
2841 }
2842
2843 static int
2844 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2845 {
2846         int not = 0;
2847
2848         /* blank or '*' mean the same */
2849         if (strcmp(buff, "*") == 0)
2850                 buff[0] = 0;
2851
2852         /* handle the case of 'don't filter this module' */
2853         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2854                 buff[0] = 0;
2855                 not = 1;
2856         }
2857
2858         return match_records(hash, buff, strlen(buff), mod, not);
2859 }
2860
2861 /*
2862  * We register the module command as a template to show others how
2863  * to register a command as well.
2864  */
2865
2866 static int
2867 ftrace_mod_callback(struct ftrace_hash *hash,
2868                     char *func, char *cmd, char *param, int enable)
2869 {
2870         char *mod;
2871         int ret = -EINVAL;
2872
2873         /*
2874          * cmd == 'mod' because we only registered this func
2875          * for the 'mod' ftrace_func_command.
2876          * But if you register one func with multiple commands,
2877          * you can tell which command was used by the cmd
2878          * parameter.
2879          */
2880
2881         /* we must have a module name */
2882         if (!param)
2883                 return ret;
2884
2885         mod = strsep(&param, ":");
2886         if (!strlen(mod))
2887                 return ret;
2888
2889         ret = ftrace_match_module_records(hash, func, mod);
2890         if (!ret)
2891                 ret = -EINVAL;
2892         if (ret < 0)
2893                 return ret;
2894
2895         return 0;
2896 }
2897
2898 static struct ftrace_func_command ftrace_mod_cmd = {
2899         .name                   = "mod",
2900         .func                   = ftrace_mod_callback,
2901 };
2902
2903 static int __init ftrace_mod_cmd_init(void)
2904 {
2905         return register_ftrace_command(&ftrace_mod_cmd);
2906 }
2907 core_initcall(ftrace_mod_cmd_init);
2908
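/*
 * Usage sketch: with the "mod" command registered above, a user can
 * restrict filtering to one module's functions, e.g.:
 *
 *	echo ':mod:ext4' > set_ftrace_filter
 *	echo 'ext4_*:mod:ext4' > set_ftrace_filter
 *
 * A new command follows the same template: fill in a
 * struct ftrace_func_command and call register_ftrace_command() from
 * an initcall, as ftrace_mod_cmd_init() does.
 */
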
2909 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2910                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2911 {
2912         struct ftrace_func_probe *entry;
2913         struct hlist_head *hhd;
2914         struct hlist_node *n;
2915         unsigned long key;
2916
2917         key = hash_long(ip, FTRACE_HASH_BITS);
2918
2919         hhd = &ftrace_func_hash[key];
2920
2921         if (hlist_empty(hhd))
2922                 return;
2923
2924         /*
2925          * Disable preemption for these calls to prevent an RCU grace
2926          * period. This syncs the hash iteration and freeing of items
2927          * on the hash. rcu_read_lock is too dangerous here.
2928          */
2929         preempt_disable_notrace();
2930         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2931                 if (entry->ip == ip)
2932                         entry->ops->func(ip, parent_ip, &entry->data);
2933         }
2934         preempt_enable_notrace();
2935 }
2936
2937 static struct ftrace_ops trace_probe_ops __read_mostly =
2938 {
2939         .func           = function_trace_probe_call,
2940 };
2941
2942 static int ftrace_probe_registered;
2943
2944 static void __enable_ftrace_function_probe(void)
2945 {
2946         int ret;
2947         int i;
2948
2949         if (ftrace_probe_registered)
2950                 return;
2951
2952         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2953                 struct hlist_head *hhd = &ftrace_func_hash[i];
2954                 if (hhd->first)
2955                         break;
2956         }
2957         /* Nothing registered? */
2958         if (i == FTRACE_FUNC_HASHSIZE)
2959                 return;
2960
2961         ret = __register_ftrace_function(&trace_probe_ops);
2962         if (!ret)
2963                 ret = ftrace_startup(&trace_probe_ops, 0);
2964
2965         ftrace_probe_registered = 1;
2966 }
2967
2968 static void __disable_ftrace_function_probe(void)
2969 {
2970         int ret;
2971         int i;
2972
2973         if (!ftrace_probe_registered)
2974                 return;
2975
2976         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2977                 struct hlist_head *hhd = &ftrace_func_hash[i];
2978                 if (hhd->first)
2979                         return;
2980         }
2981
2982         /* no more funcs left */
2983         ret = __unregister_ftrace_function(&trace_probe_ops);
2984         if (!ret)
2985                 ftrace_shutdown(&trace_probe_ops, 0);
2986
2987         ftrace_probe_registered = 0;
2988 }
2989
2990
2991 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2992 {
2993         struct ftrace_func_probe *entry =
2994                 container_of(rhp, struct ftrace_func_probe, rcu);
2995
2996         if (entry->ops->free)
2997                 entry->ops->free(&entry->data);
2998         kfree(entry);
2999 }
3000
3001
3002 int
3003 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3004                               void *data)
3005 {
3006         struct ftrace_func_probe *entry;
3007         struct ftrace_page *pg;
3008         struct dyn_ftrace *rec;
3009         int type, len, not;
3010         unsigned long key;
3011         int count = 0;
3012         char *search;
3013
3014         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3015         len = strlen(search);
3016
3017         /* we do not support '!' for function probes */
3018         if (WARN_ON(not))
3019                 return -EINVAL;
3020
3021         mutex_lock(&ftrace_lock);
3022
3023         if (unlikely(ftrace_disabled))
3024                 goto out_unlock;
3025
3026         do_for_each_ftrace_rec(pg, rec) {
3027
3028                 if (!ftrace_match_record(rec, NULL, search, len, type))
3029                         continue;
3030
3031                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3032                 if (!entry) {
3033                         /* If we did not process any, then return error */
3034                         if (!count)
3035                                 count = -ENOMEM;
3036                         goto out_unlock;
3037                 }
3038
3039                 count++;
3040
3041                 entry->data = data;
3042
3043                 /*
3044                  * The caller might want to do something special
3045                  * for each function we find. We call the callback
3046                  * to give the caller an opportunity to do so.
3047                  */
3048                 if (ops->callback) {
3049                         if (ops->callback(rec->ip, &entry->data) < 0) {
3050                                 /* caller does not like this func */
3051                                 kfree(entry);
3052                                 continue;
3053                         }
3054                 }
3055
3056                 entry->ops = ops;
3057                 entry->ip = rec->ip;
3058
3059                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3060                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3061
3062         } while_for_each_ftrace_rec();
3063         __enable_ftrace_function_probe();
3064
3065  out_unlock:
3066         mutex_unlock(&ftrace_lock);
3067
3068         return count;
3069 }
3070
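/*
 * A minimal sketch of registering a probe (the callback is
 * hypothetical; the glob follows the same rules as set_ftrace_filter):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		// called every time a matched function is hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	// ret is the number of functions matched, or a negative error
 */
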
3071 enum {
3072         PROBE_TEST_FUNC         = 1,
3073         PROBE_TEST_DATA         = 2
3074 };
3075
3076 static void
3077 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3078                                   void *data, int flags)
3079 {
3080         struct ftrace_func_probe *entry;
3081         struct hlist_node *n, *tmp;
3082         char str[KSYM_SYMBOL_LEN];
3083         int type = MATCH_FULL;
3084         int i, len = 0;
3085         char *search;
3086
3087         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3088                 glob = NULL;
3089         else if (glob) {
3090                 int not;
3091
3092                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3093                 len = strlen(search);
3094
3095                 /* we do not support '!' for function probes */
3096                 if (WARN_ON(not))
3097                         return;
3098         }
3099
3100         mutex_lock(&ftrace_lock);
3101         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3102                 struct hlist_head *hhd = &ftrace_func_hash[i];
3103
3104                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3105
3106                         /* break up if statements for readability */
3107                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3108                                 continue;
3109
3110                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3111                                 continue;
3112
3113                         /* do this last, since it is the most expensive */
3114                         if (glob) {
3115                                 kallsyms_lookup(entry->ip, NULL, NULL,
3116                                                 NULL, str);
3117                                 if (!ftrace_match(str, glob, len, type))
3118                                         continue;
3119                         }
3120
3121                         hlist_del(&entry->node);
3122                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3123                 }
3124         }
3125         __disable_ftrace_function_probe();
3126         mutex_unlock(&ftrace_lock);
3127 }
3128
3129 void
3130 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3131                                 void *data)
3132 {
3133         __unregister_ftrace_function_probe(glob, ops, data,
3134                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3135 }
3136
3137 void
3138 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3139 {
3140         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3141 }
3142
3143 void unregister_ftrace_function_probe_all(char *glob)
3144 {
3145         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3146 }
3147
3148 static LIST_HEAD(ftrace_commands);
3149 static DEFINE_MUTEX(ftrace_cmd_mutex);
3150
3151 int register_ftrace_command(struct ftrace_func_command *cmd)
3152 {
3153         struct ftrace_func_command *p;
3154         int ret = 0;
3155
3156         mutex_lock(&ftrace_cmd_mutex);
3157         list_for_each_entry(p, &ftrace_commands, list) {
3158                 if (strcmp(cmd->name, p->name) == 0) {
3159                         ret = -EBUSY;
3160                         goto out_unlock;
3161                 }
3162         }
3163         list_add(&cmd->list, &ftrace_commands);
3164  out_unlock:
3165         mutex_unlock(&ftrace_cmd_mutex);
3166
3167         return ret;
3168 }
3169
3170 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3171 {
3172         struct ftrace_func_command *p, *n;
3173         int ret = -ENODEV;
3174
3175         mutex_lock(&ftrace_cmd_mutex);
3176         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3177                 if (strcmp(cmd->name, p->name) == 0) {
3178                         ret = 0;
3179                         list_del_init(&p->list);
3180                         goto out_unlock;
3181                 }
3182         }
3183  out_unlock:
3184         mutex_unlock(&ftrace_cmd_mutex);
3185
3186         return ret;
3187 }
3188
3189 static int ftrace_process_regex(struct ftrace_hash *hash,
3190                                 char *buff, int len, int enable)
3191 {
3192         char *func, *command, *next = buff;
3193         struct ftrace_func_command *p;
3194         int ret = -EINVAL;
3195
3196         func = strsep(&next, ":");
3197
3198         if (!next) {
3199                 ret = ftrace_match_records(hash, func, len);
3200                 if (!ret)
3201                         ret = -EINVAL;
3202                 if (ret < 0)
3203                         return ret;
3204                 return 0;
3205         }
3206
3207         /* command found */
3208
3209         command = strsep(&next, ":");
3210
3211         mutex_lock(&ftrace_cmd_mutex);
3212         list_for_each_entry(p, &ftrace_commands, list) {
3213                 if (strcmp(p->name, command) == 0) {
3214                         ret = p->func(hash, func, command, next, enable);
3215                         goto out_unlock;
3216                 }
3217         }
3218  out_unlock:
3219         mutex_unlock(&ftrace_cmd_mutex);
3220
3221         return ret;
3222 }
3223
3224 static ssize_t
3225 ftrace_regex_write(struct file *file, const char __user *ubuf,
3226                    size_t cnt, loff_t *ppos, int enable)
3227 {
3228         struct ftrace_iterator *iter;
3229         struct trace_parser *parser;
3230         ssize_t ret, read;
3231
3232         if (!cnt)
3233                 return 0;
3234
3235         mutex_lock(&ftrace_regex_lock);
3236
3237         ret = -ENODEV;
3238         if (unlikely(ftrace_disabled))
3239                 goto out_unlock;
3240
3241         if (file->f_mode & FMODE_READ) {
3242                 struct seq_file *m = file->private_data;
3243                 iter = m->private;
3244         } else
3245                 iter = file->private_data;
3246
3247         parser = &iter->parser;
3248         read = trace_get_user(parser, ubuf, cnt, ppos);
3249
3250         if (read >= 0 && trace_parser_loaded(parser) &&
3251             !trace_parser_cont(parser)) {
3252                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3253                                            parser->idx, enable);
3254                 trace_parser_clear(parser);
3255                 if (ret)
3256                         goto out_unlock;
3257         }
3258
3259         ret = read;
3260 out_unlock:
3261         mutex_unlock(&ftrace_regex_lock);
3262
3263         return ret;
3264 }
3265
3266 ssize_t
3267 ftrace_filter_write(struct file *file, const char __user *ubuf,
3268                     size_t cnt, loff_t *ppos)
3269 {
3270         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3271 }
3272
3273 ssize_t
3274 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3275                      size_t cnt, loff_t *ppos)
3276 {
3277         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3278 }
3279
3280 static int
3281 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3282 {
3283         struct ftrace_func_entry *entry;
3284
3285         if (!ftrace_location(ip))
3286                 return -EINVAL;
3287
3288         if (remove) {
3289                 entry = ftrace_lookup_ip(hash, ip);
3290                 if (!entry)
3291                         return -ENOENT;
3292                 free_hash_entry(hash, entry);
3293                 return 0;
3294         }
3295
3296         return add_hash_entry(hash, ip);
3297 }
3298
3299 static int
3300 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3301                 unsigned long ip, int remove, int reset, int enable)
3302 {
3303         struct ftrace_hash **orig_hash;
3304         struct ftrace_hash *hash;
3305         int ret;
3306
3307         /* All global ops use the global ops filters */
3308         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3309                 ops = &global_ops;
3310
3311         if (unlikely(ftrace_disabled))
3312                 return -ENODEV;
3313
3314         if (enable)
3315                 orig_hash = &ops->filter_hash;
3316         else
3317                 orig_hash = &ops->notrace_hash;
3318
3319         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3320         if (!hash)
3321                 return -ENOMEM;
3322
3323         mutex_lock(&ftrace_regex_lock);
3324         if (reset)
3325                 ftrace_filter_reset(hash);
3326         if (buf && !ftrace_match_records(hash, buf, len)) {
3327                 ret = -EINVAL;
3328                 goto out_regex_unlock;
3329         }
3330         if (ip) {
3331                 ret = ftrace_match_addr(hash, ip, remove);
3332                 if (ret < 0)
3333                         goto out_regex_unlock;
3334         }
3335
3336         mutex_lock(&ftrace_lock);
3337         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3338         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3339             && ftrace_enabled)
3340                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3341
3342         mutex_unlock(&ftrace_lock);
3343
3344  out_regex_unlock:
3345         mutex_unlock(&ftrace_regex_lock);
3346
3347         free_ftrace_hash(hash);
3348         return ret;
3349 }
3350
3351 static int
3352 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3353                 int reset, int enable)
3354 {
3355         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3356 }
3357
3358 /**
3359  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3360  * @ops - the ops to set the filter with
3361  * @ip - the address to add to or remove from the filter.
3362  * @remove - non zero to remove the ip from the filter
3363  * @reset - non zero to reset all filters before applying this filter.
3364  *
3365  * Filters denote which functions should be enabled when tracing is enabled.
3366  * If @ip is 0, it fails to update the filter.
3367  */
3368 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3369                          int remove, int reset)
3370 {
3371         return ftrace_set_addr(ops, ip, remove, reset, 1);
3372 }
3373 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3374
3375 static int
3376 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3377                  int reset, int enable)
3378 {
3379         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3380 }
3381
3382 /**
3383  * ftrace_set_filter - set a function to filter on in ftrace
3384  * @ops - the ops to set the filter with
3385  * @buf - the string that holds the function filter text.
3386  * @len - the length of the string.
3387  * @reset - non zero to reset all filters before applying this filter.
3388  *
3389  * Filters denote which functions should be enabled when tracing is enabled.
3390  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3391  */
3392 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3393                        int len, int reset)
3394 {
3395         return ftrace_set_regex(ops, buf, len, reset, 1);
3396 }
3397 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3398
3399 /**
3400  * ftrace_set_notrace - set a function to not trace in ftrace
3401  * @ops - the ops to set the notrace filter with
3402  * @buf - the string that holds the function notrace text.
3403  * @len - the length of the string.
3404  * @reset - non zero to reset all filters before applying this filter.
3405  *
3406  * Notrace Filters denote which functions should not be enabled when tracing
3407  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3408  * for tracing.
3409  */
3410 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3411                         int len, int reset)
3412 {
3413         return ftrace_set_regex(ops, buf, len, reset, 0);
3414 }
3415 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
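
/*
 * Example (illustrative sketch): the filter and notrace hashes combine,
 * so a caller can trace a class of functions while excluding a few.
 * The ops "my_ops" is hypothetical; the patterns use the same globs as
 * set_ftrace_filter.
 *
 *        ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *        ftrace_set_notrace(&my_ops, "sched_fork", strlen("sched_fork"), 1);
 *        register_ftrace_function(&my_ops);
 */
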
3416 /**
3417  * ftrace_set_global_filter - set a function to filter on with global tracers
3418  * @buf - the string that holds the function filter text.
3419  * @len - the length of the string.
3420  * @reset - non zero to reset all filters before applying this filter.
3421  *
3422  * Filters denote which functions should be enabled when tracing is enabled.
3423  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3424  */
3426 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3427 {
3428         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3429 }
3430 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3431
3432 /**
3433  * ftrace_set_global_notrace - set a function to not trace with global tracers
3434  * @buf - the string that holds the function notrace text.
3435  * @len - the length of the string.
3436  * @reset - non zero to reset all filters before applying this filter.
3437  *
3438  * Notrace Filters denote which functions should not be enabled when tracing
3439  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3440  * for tracing.
3441  */
3443 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3444 {
3445         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3446 }
3447 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3448
3449 /*
3450  * command line interface to allow users to set filters on boot up.
3451  */
3452 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3453 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3454 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3455
3456 static int __init set_ftrace_notrace(char *str)
3457 {
3458         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3459         return 1;
3460 }
3461 __setup("ftrace_notrace=", set_ftrace_notrace);
3462
3463 static int __init set_ftrace_filter(char *str)
3464 {
3465         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3466         return 1;
3467 }
3468 __setup("ftrace_filter=", set_ftrace_filter);
3469
3470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3471 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3472 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3473
3474 static int __init set_graph_function(char *str)
3475 {
3476         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3477         return 1;
3478 }
3479 __setup("ftrace_graph_filter=", set_graph_function);
3480
3481 static void __init set_ftrace_early_graph(char *buf)
3482 {
3483         int ret;
3484         char *func;
3485
3486         while (buf) {
3487                 func = strsep(&buf, ",");
3488                 /* we allow only one expression at a time */
3489                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3490                                       func);
3491                 if (ret)
3492                         printk(KERN_DEBUG "ftrace: function %s not "
3493                                           "traceable\n", func);
3494         }
3495 }
3496 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3497
3498 void __init
3499 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3500 {
3501         char *func;
3502
3503         while (buf) {
3504                 func = strsep(&buf, ",");
3505                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3506         }
3507 }
3508
3509 static void __init set_ftrace_early_filters(void)
3510 {
3511         if (ftrace_filter_buf[0])
3512                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3513         if (ftrace_notrace_buf[0])
3514                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3515 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3516         if (ftrace_graph_buf[0])
3517                 set_ftrace_early_graph(ftrace_graph_buf);
3518 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3519 }
3520
3521 int ftrace_regex_release(struct inode *inode, struct file *file)
3522 {
3523         struct seq_file *m = (struct seq_file *)file->private_data;
3524         struct ftrace_iterator *iter;
3525         struct ftrace_hash **orig_hash;
3526         struct trace_parser *parser;
3527         int filter_hash;
3528         int ret;
3529
3530         mutex_lock(&ftrace_regex_lock);
3531         if (file->f_mode & FMODE_READ) {
3532                 iter = m->private;
3533
3534                 seq_release(inode, file);
3535         } else
3536                 iter = file->private_data;
3537
3538         parser = &iter->parser;
3539         if (trace_parser_loaded(parser)) {
3540                 parser->buffer[parser->idx] = 0;
3541                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3542         }
3543
3544         trace_parser_put(parser);
3545
3546         if (file->f_mode & FMODE_WRITE) {
3547                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3548
3549                 if (filter_hash)
3550                         orig_hash = &iter->ops->filter_hash;
3551                 else
3552                         orig_hash = &iter->ops->notrace_hash;
3553
3554                 mutex_lock(&ftrace_lock);
3555                 ret = ftrace_hash_move(iter->ops, filter_hash,
3556                                        orig_hash, iter->hash);
3557                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3558                     && ftrace_enabled)
3559                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3560
3561                 mutex_unlock(&ftrace_lock);
3562         }
3563         free_ftrace_hash(iter->hash);
3564         kfree(iter);
3565
3566         mutex_unlock(&ftrace_regex_lock);
3567         return 0;
3568 }
3569
3570 static const struct file_operations ftrace_avail_fops = {
3571         .open = ftrace_avail_open,
3572         .read = seq_read,
3573         .llseek = seq_lseek,
3574         .release = seq_release_private,
3575 };
3576
3577 static const struct file_operations ftrace_enabled_fops = {
3578         .open = ftrace_enabled_open,
3579         .read = seq_read,
3580         .llseek = seq_lseek,
3581         .release = seq_release_private,
3582 };
3583
3584 static const struct file_operations ftrace_filter_fops = {
3585         .open = ftrace_filter_open,
3586         .read = seq_read,
3587         .write = ftrace_filter_write,
3588         .llseek = ftrace_regex_lseek,
3589         .release = ftrace_regex_release,
3590 };
3591
3592 static const struct file_operations ftrace_notrace_fops = {
3593         .open = ftrace_notrace_open,
3594         .read = seq_read,
3595         .write = ftrace_notrace_write,
3596         .llseek = ftrace_regex_lseek,
3597         .release = ftrace_regex_release,
3598 };
3599
3600 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3601
3602 static DEFINE_MUTEX(graph_lock);
3603
3604 int ftrace_graph_count;
3605 int ftrace_graph_filter_enabled;
3606 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3607
3608 static void *
3609 __g_next(struct seq_file *m, loff_t *pos)
3610 {
3611         if (*pos >= ftrace_graph_count)
3612                 return NULL;
3613         return &ftrace_graph_funcs[*pos];
3614 }
3615
3616 static void *
3617 g_next(struct seq_file *m, void *v, loff_t *pos)
3618 {
3619         (*pos)++;
3620         return __g_next(m, pos);
3621 }
3622
3623 static void *g_start(struct seq_file *m, loff_t *pos)
3624 {
3625         mutex_lock(&graph_lock);
3626
3627         /* Nothing to show; tell g_show to print that all functions are enabled */
3628         if (!ftrace_graph_filter_enabled && !*pos)
3629                 return (void *)1;
3630
3631         return __g_next(m, pos);
3632 }
3633
3634 static void g_stop(struct seq_file *m, void *p)
3635 {
3636         mutex_unlock(&graph_lock);
3637 }
3638
3639 static int g_show(struct seq_file *m, void *v)
3640 {
3641         unsigned long *ptr = v;
3642
3643         if (!ptr)
3644                 return 0;
3645
3646         if (ptr == (unsigned long *)1) {
3647                 seq_printf(m, "#### all functions enabled ####\n");
3648                 return 0;
3649         }
3650
3651         seq_printf(m, "%ps\n", (void *)*ptr);
3652
3653         return 0;
3654 }
3655
3656 static const struct seq_operations ftrace_graph_seq_ops = {
3657         .start = g_start,
3658         .next = g_next,
3659         .stop = g_stop,
3660         .show = g_show,
3661 };
3662
3663 static int
3664 ftrace_graph_open(struct inode *inode, struct file *file)
3665 {
3666         int ret = 0;
3667
3668         if (unlikely(ftrace_disabled))
3669                 return -ENODEV;
3670
3671         mutex_lock(&graph_lock);
3672         if ((file->f_mode & FMODE_WRITE) &&
3673             (file->f_flags & O_TRUNC)) {
3674                 ftrace_graph_filter_enabled = 0;
3675                 ftrace_graph_count = 0;
3676                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3677         }
3678         mutex_unlock(&graph_lock);
3679
3680         if (file->f_mode & FMODE_READ)
3681                 ret = seq_open(file, &ftrace_graph_seq_ops);
3682
3683         return ret;
3684 }
3685
3686 static int
3687 ftrace_graph_release(struct inode *inode, struct file *file)
3688 {
3689         if (file->f_mode & FMODE_READ)
3690                 seq_release(inode, file);
3691         return 0;
3692 }
3693
3694 static int
3695 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3696 {
3697         struct dyn_ftrace *rec;
3698         struct ftrace_page *pg;
3699         int search_len;
3700         int fail = 1;
3701         int type, not;
3702         char *search;
3703         bool exists;
3704         int i;
3705
3706         /* decode regex */
3707         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3708         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3709                 return -EBUSY;
3710
3711         search_len = strlen(search);
3712
3713         mutex_lock(&ftrace_lock);
3714
3715         if (unlikely(ftrace_disabled)) {
3716                 mutex_unlock(&ftrace_lock);
3717                 return -ENODEV;
3718         }
3719
3720         do_for_each_ftrace_rec(pg, rec) {
3721
3722                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3723                         /* if it is in the array */
3724                         exists = false;
3725                         for (i = 0; i < *idx; i++) {
3726                                 if (array[i] == rec->ip) {
3727                                         exists = true;
3728                                         break;
3729                                 }
3730                         }
3731
3732                         if (!not) {
3733                                 fail = 0;
3734                                 if (!exists) {
3735                                         array[(*idx)++] = rec->ip;
3736                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3737                                                 goto out;
3738                                 }
3739                         } else {
3740                                 if (exists) {
3741                                         array[i] = array[--(*idx)];
3742                                         array[*idx] = 0;
3743                                         fail = 0;
3744                                 }
3745                         }
3746                 }
3747         } while_for_each_ftrace_rec();
3748 out:
3749         mutex_unlock(&ftrace_lock);
3750
3751         if (fail)
3752                 return -EINVAL;
3753
3754         ftrace_graph_filter_enabled = 1;
3755         return 0;
3756 }
3757
3758 static ssize_t
3759 ftrace_graph_write(struct file *file, const char __user *ubuf,
3760                    size_t cnt, loff_t *ppos)
3761 {
3762         struct trace_parser parser;
3763         ssize_t read, ret;
3764
3765         if (!cnt)
3766                 return 0;
3767
3768         mutex_lock(&graph_lock);
3769
3770         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3771                 ret = -ENOMEM;
3772                 goto out_unlock;
3773         }
3774
3775         read = trace_get_user(&parser, ubuf, cnt, ppos);
3776
3777         if (read >= 0 && trace_parser_loaded(&parser)) {
3778                 parser.buffer[parser.idx] = 0;
3779
3780                 /* we allow only one expression at a time */
3781                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3782                                         parser.buffer);
3783                 if (ret)
3784                         goto out_free;
3785         }
3786
3787         ret = read;
3788
3789 out_free:
3790         trace_parser_put(&parser);
3791 out_unlock:
3792         mutex_unlock(&graph_lock);
3793
3794         return ret;
3795 }
3796
3797 static const struct file_operations ftrace_graph_fops = {
3798         .open           = ftrace_graph_open,
3799         .read           = seq_read,
3800         .write          = ftrace_graph_write,
3801         .release        = ftrace_graph_release,
3802         .llseek         = seq_lseek,
3803 };
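
/*
 * Example (debugfs usage): restrict the function graph tracer to the
 * listed functions; expressions are parsed one at a time:
 *
 *        # echo do_page_fault > /sys/kernel/debug/tracing/set_graph_function
 *        # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */
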
3804 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3805
3806 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3807 {
3808
3809         trace_create_file("available_filter_functions", 0444,
3810                         d_tracer, NULL, &ftrace_avail_fops);
3811
3812         trace_create_file("enabled_functions", 0444,
3813                         d_tracer, NULL, &ftrace_enabled_fops);
3814
3815         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3816                         NULL, &ftrace_filter_fops);
3817
3818         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3819                                     NULL, &ftrace_notrace_fops);
3820
3821 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3822         trace_create_file("set_graph_function", 0444, d_tracer,
3823                                     NULL,
3824                                     &ftrace_graph_fops);
3825 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3826
3827         return 0;
3828 }
3829
3830 static int ftrace_cmp_ips(const void *a, const void *b)
3831 {
3832         const unsigned long *ipa = a;
3833         const unsigned long *ipb = b;
3834
3835         if (*ipa > *ipb)
3836                 return 1;
3837         if (*ipa < *ipb)
3838                 return -1;
3839         return 0;
3840 }
3841
3842 static void ftrace_swap_ips(void *a, void *b, int size)
3843 {
3844         unsigned long *ipa = a;
3845         unsigned long *ipb = b;
3846         unsigned long t;
3847
3848         t = *ipa;
3849         *ipa = *ipb;
3850         *ipb = t;
3851 }
3852
3853 static int ftrace_process_locs(struct module *mod,
3854                                unsigned long *start,
3855                                unsigned long *end)
3856 {
3857         struct ftrace_page *start_pg;
3858         struct ftrace_page *pg;
3859         struct dyn_ftrace *rec;
3860         unsigned long count;
3861         unsigned long *p;
3862         unsigned long addr;
3863         unsigned long flags = 0; /* Shut up gcc */
3864         int ret = -ENOMEM;
3865
3866         count = end - start;
3867
3868         if (!count)
3869                 return 0;
3870
3871         sort(start, count, sizeof(*start),
3872              ftrace_cmp_ips, ftrace_swap_ips);
3873
3874         start_pg = ftrace_allocate_pages(count);
3875         if (!start_pg)
3876                 return -ENOMEM;
3877
3878         mutex_lock(&ftrace_lock);
3879
3880         /*
3881          * The core kernel and each module need their own pages, as
3882          * modules will free them when they are removed.
3883          * Force a new page to be allocated for modules.
3884          */
3885         if (!mod) {
3886                 WARN_ON(ftrace_pages || ftrace_pages_start);
3887                 /* First initialization */
3888                 ftrace_pages = ftrace_pages_start = start_pg;
3889         } else {
3890                 if (!ftrace_pages)
3891                         goto out;
3892
3893                 if (WARN_ON(ftrace_pages->next)) {
3894                         /* Hmm, we have free pages? */
3895                         while (ftrace_pages->next)
3896                                 ftrace_pages = ftrace_pages->next;
3897                 }
3898
3899                 ftrace_pages->next = start_pg;
3900         }
3901
3902         p = start;
3903         pg = start_pg;
3904         while (p < end) {
3905                 addr = ftrace_call_adjust(*p++);
3906                 /*
3907                  * Some architecture linkers will pad between
3908                  * the different mcount_loc sections of different
3909                  * object files to satisfy alignments.
3910                  * Skip any NULL pointers.
3911                  */
3912                 if (!addr)
3913                         continue;
3914
3915                 if (pg->index == pg->size) {
3916                         /* We should have allocated enough */
3917                         if (WARN_ON(!pg->next))
3918                                 break;
3919                         pg = pg->next;
3920                 }
3921
3922                 rec = &pg->records[pg->index++];
3923                 rec->ip = addr;
3924         }
3925
3926         /* We should have used all pages */
3927         WARN_ON(pg->next);
3928
3929         /* Assign the last page to ftrace_pages */
3930         ftrace_pages = pg;
3931
3932         /* These new locations need to be initialized */
3933         ftrace_new_pgs = start_pg;
3934
3935         /*
3936          * We only need to disable interrupts on start up
3937          * because we are modifying code that an interrupt
3938          * may execute, and the modification is not atomic.
3939          * But for modules, nothing runs the code we modify
3940          * until we are finished with it, and there's no
3941          * reason to cause large interrupt latencies while we do it.
3942          */
3943         if (!mod)
3944                 local_irq_save(flags);
3945         ftrace_update_code(mod);
3946         if (!mod)
3947                 local_irq_restore(flags);
3948         ret = 0;
3949  out:
3950         mutex_unlock(&ftrace_lock);
3951
3952         return ret;
3953 }
3954
3955 #ifdef CONFIG_MODULES
3956
3957 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3958
3959 void ftrace_release_mod(struct module *mod)
3960 {
3961         struct dyn_ftrace *rec;
3962         struct ftrace_page **last_pg;
3963         struct ftrace_page *pg;
3964         int order;
3965
3966         mutex_lock(&ftrace_lock);
3967
3968         if (ftrace_disabled)
3969                 goto out_unlock;
3970
3971         /*
3972          * Each module has its own ftrace_pages, remove
3973          * them from the list.
3974          */
3975         last_pg = &ftrace_pages_start;
3976         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3977                 rec = &pg->records[0];
3978                 if (within_module_core(rec->ip, mod)) {
3979                         /*
3980                          * As core pages are first, the first
3981                          * page should never be a module page.
3982                          */
3983                         if (WARN_ON(pg == ftrace_pages_start))
3984                                 goto out_unlock;
3985
3986                         /* Check if we are deleting the last page */
3987                         if (pg == ftrace_pages)
3988                                 ftrace_pages = next_to_ftrace_page(last_pg);
3989
3990                         *last_pg = pg->next;
3991                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3992                         free_pages((unsigned long)pg->records, order);
3993                         kfree(pg);
3994                 } else
3995                         last_pg = &pg->next;
3996         }
3997  out_unlock:
3998         mutex_unlock(&ftrace_lock);
3999 }
4000
4001 static void ftrace_init_module(struct module *mod,
4002                                unsigned long *start, unsigned long *end)
4003 {
4004         if (ftrace_disabled || start == end)
4005                 return;
4006         ftrace_process_locs(mod, start, end);
4007 }
4008
4009 static int ftrace_module_notify(struct notifier_block *self,
4010                                 unsigned long val, void *data)
4011 {
4012         struct module *mod = data;
4013
4014         switch (val) {
4015         case MODULE_STATE_COMING:
4016                 ftrace_init_module(mod, mod->ftrace_callsites,
4017                                    mod->ftrace_callsites +
4018                                    mod->num_ftrace_callsites);
4019                 break;
4020         case MODULE_STATE_GOING:
4021                 ftrace_release_mod(mod);
4022                 break;
4023         }
4024
4025         return 0;
4026 }
4027 #else
4028 static int ftrace_module_notify(struct notifier_block *self,
4029                                 unsigned long val, void *data)
4030 {
4031         return 0;
4032 }
4033 #endif /* CONFIG_MODULES */
4034
4035 struct notifier_block ftrace_module_nb = {
4036         .notifier_call = ftrace_module_notify,
4037         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4038 };
4039
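/*
 * The linker gathers every object file's __mcount_loc section (the
 * addresses of the mcount call sites recorded at build time) into one
 * table delimited by these two symbols.
 */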
4040 extern unsigned long __start_mcount_loc[];
4041 extern unsigned long __stop_mcount_loc[];
4042
4043 void __init ftrace_init(void)
4044 {
4045         unsigned long count, addr, flags;
4046         int ret;
4047
4048         /* Keep the ftrace pointer to the stub */
4049         addr = (unsigned long)ftrace_stub;
4050
4051         local_irq_save(flags);
4052         ftrace_dyn_arch_init(&addr);
4053         local_irq_restore(flags);
4054
4055         /* ftrace_dyn_arch_init places the return code in addr */
4056         if (addr)
4057                 goto failed;
4058
4059         count = __stop_mcount_loc - __start_mcount_loc;
4060
4061         ret = ftrace_dyn_table_alloc(count);
4062         if (ret)
4063                 goto failed;
4064
4065         last_ftrace_enabled = ftrace_enabled = 1;
4066
4067         ret = ftrace_process_locs(NULL,
4068                                   __start_mcount_loc,
4069                                   __stop_mcount_loc);
4070
4071         ret = register_module_notifier(&ftrace_module_nb);
4072         if (ret)
4073                 pr_warning("Failed to register ftrace module notifier\n");
4074
4075         set_ftrace_early_filters();
4076
4077         return;
4078  failed:
4079         ftrace_disabled = 1;
4080 }
4081
4082 #else
4083
4084 static struct ftrace_ops global_ops = {
4085         .func                   = ftrace_stub,
4086         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
4087 };
4088
4089 static int __init ftrace_nodyn_init(void)
4090 {
4091         ftrace_enabled = 1;
4092         return 0;
4093 }
4094 core_initcall(ftrace_nodyn_init);
4095
4096 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4097 static inline void ftrace_startup_enable(int command) { }
4098 /* Keep as macros so we do not need to define the commands */
4099 # define ftrace_startup(ops, command)                   \
4100         ({                                              \
4101                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4102                 0;                                      \
4103         })
4104 # define ftrace_shutdown(ops, command)  do { } while (0)
4105 # define ftrace_startup_sysctl()        do { } while (0)
4106 # define ftrace_shutdown_sysctl()       do { } while (0)
4107
4108 static inline int
4109 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4110 {
4111         return 1;
4112 }
4113
4114 #endif /* CONFIG_DYNAMIC_FTRACE */
4115
4116 static void
4117 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4118                         struct ftrace_ops *op, struct pt_regs *regs)
4119 {
4120         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4121                 return;
4122
4123         /*
4124          * Some of the ops may be dynamically allocated,
4125          * they must be freed after a synchronize_sched().
4126          */
4127         preempt_disable_notrace();
4128         trace_recursion_set(TRACE_CONTROL_BIT);
4129         do_for_each_ftrace_op(op, ftrace_control_list) {
4130                 if (!ftrace_function_local_disabled(op) &&
4131                     ftrace_ops_test(op, ip))
4132                         op->func(ip, parent_ip, op, regs);
4133         } while_for_each_ftrace_op(op);
4134         trace_recursion_clear(TRACE_CONTROL_BIT);
4135         preempt_enable_notrace();
4136 }
4137
4138 static struct ftrace_ops control_ops = {
4139         .func = ftrace_ops_control_func,
4140         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
4141 };
4142
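/*
 * The list function below guards against recursion with one recursion
 * bit per context.  A callback can legitimately nest, for instance when
 * an interrupt arrives while the callback runs in process context, and
 * a single shared bit would wrongly flag that as recursion.  With
 * separate normal/softirq/irq/NMI bits, the test only trips when the
 * same context re-enters itself, which is true recursion.
 */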
4143 static inline void
4144 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4145                        struct ftrace_ops *ignored, struct pt_regs *regs)
4146 {
4147         struct ftrace_ops *op;
4148         unsigned int bit;
4149
4150         if (function_trace_stop)
4151                 return;
4152
4153         if (in_interrupt()) {
4154                 if (in_nmi())
4155                         bit = TRACE_INTERNAL_NMI_BIT;
4157                 else if (in_irq())
4158                         bit = TRACE_INTERNAL_IRQ_BIT;
4159                 else
4160                         bit = TRACE_INTERNAL_SIRQ_BIT;
4161         } else
4162                 bit = TRACE_INTERNAL_BIT;
4163
4164         if (unlikely(trace_recursion_test(bit)))
4165                 return;
4166
4167         trace_recursion_set(bit);
4168
4169         /*
4170          * Some of the ops may be dynamically allocated,
4171          * they must be freed after a synchronize_sched().
4172          */
4173         preempt_disable_notrace();
4174         do_for_each_ftrace_op(op, ftrace_ops_list) {
4175                 if (ftrace_ops_test(op, ip))
4176                         op->func(ip, parent_ip, op, regs);
4177         } while_for_each_ftrace_op(op);
4178         preempt_enable_notrace();
4179         trace_recursion_clear(bit);
4180 }
4181
4182 /*
4183  * Some archs only support passing ip and parent_ip. Even though
4184  * the list function ignores the op parameter, we do not want any
4185  * C side effects, where a function is called without the caller
4186  * sending a third parameter.
4187  * Archs are to support both the regs and ftrace_ops at the same time.
4188  * If they support ftrace_ops, it is assumed they support regs.
4189  * If callbacks want to use regs, they must either check for regs
4190  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4191  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4192  * An architecture can pass partial regs with ftrace_ops and still
4193  * set ARCH_SUPPORTS_FTRACE_OPS.
4194  */
4195 #if ARCH_SUPPORTS_FTRACE_OPS
4196 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4197                                  struct ftrace_ops *op, struct pt_regs *regs)
4198 {
4199         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4200 }
4201 #else
4202 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4203 {
4204         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4205 }
4206 #endif
4207
4208 static void clear_ftrace_swapper(void)
4209 {
4210         struct task_struct *p;
4211         int cpu;
4212
4213         get_online_cpus();
4214         for_each_online_cpu(cpu) {
4215                 p = idle_task(cpu);
4216                 clear_tsk_trace_trace(p);
4217         }
4218         put_online_cpus();
4219 }
4220
4221 static void set_ftrace_swapper(void)
4222 {
4223         struct task_struct *p;
4224         int cpu;
4225
4226         get_online_cpus();
4227         for_each_online_cpu(cpu) {
4228                 p = idle_task(cpu);
4229                 set_tsk_trace_trace(p);
4230         }
4231         put_online_cpus();
4232 }
4233
4234 static void clear_ftrace_pid(struct pid *pid)
4235 {
4236         struct task_struct *p;
4237
4238         rcu_read_lock();
4239         do_each_pid_task(pid, PIDTYPE_PID, p) {
4240                 clear_tsk_trace_trace(p);
4241         } while_each_pid_task(pid, PIDTYPE_PID, p);
4242         rcu_read_unlock();
4243
4244         put_pid(pid);
4245 }
4246
4247 static void set_ftrace_pid(struct pid *pid)
4248 {
4249         struct task_struct *p;
4250
4251         rcu_read_lock();
4252         do_each_pid_task(pid, PIDTYPE_PID, p) {
4253                 set_tsk_trace_trace(p);
4254         } while_each_pid_task(pid, PIDTYPE_PID, p);
4255         rcu_read_unlock();
4256 }
4257
4258 static void clear_ftrace_pid_task(struct pid *pid)
4259 {
4260         if (pid == ftrace_swapper_pid)
4261                 clear_ftrace_swapper();
4262         else
4263                 clear_ftrace_pid(pid);
4264 }
4265
4266 static void set_ftrace_pid_task(struct pid *pid)
4267 {
4268         if (pid == ftrace_swapper_pid)
4269                 set_ftrace_swapper();
4270         else
4271                 set_ftrace_pid(pid);
4272 }
4273
4274 static int ftrace_pid_add(int p)
4275 {
4276         struct pid *pid;
4277         struct ftrace_pid *fpid;
4278         int ret = -EINVAL;
4279
4280         mutex_lock(&ftrace_lock);
4281
4282         if (!p)
4283                 pid = ftrace_swapper_pid;
4284         else
4285                 pid = find_get_pid(p);
4286
4287         if (!pid)
4288                 goto out;
4289
4290         ret = 0;
4291
4292         list_for_each_entry(fpid, &ftrace_pids, list)
4293                 if (fpid->pid == pid)
4294                         goto out_put;
4295
4296         ret = -ENOMEM;
4297
4298         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4299         if (!fpid)
4300                 goto out_put;
4301
4302         list_add(&fpid->list, &ftrace_pids);
4303         fpid->pid = pid;
4304
4305         set_ftrace_pid_task(pid);
4306
4307         ftrace_update_pid_func();
4308         ftrace_startup_enable(0);
4309
4310         mutex_unlock(&ftrace_lock);
4311         return 0;
4312
4313 out_put:
4314         if (pid != ftrace_swapper_pid)
4315                 put_pid(pid);
4316
4317 out:
4318         mutex_unlock(&ftrace_lock);
4319         return ret;
4320 }
4321
4322 static void ftrace_pid_reset(void)
4323 {
4324         struct ftrace_pid *fpid, *safe;
4325
4326         mutex_lock(&ftrace_lock);
4327         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4328                 struct pid *pid = fpid->pid;
4329
4330                 clear_ftrace_pid_task(pid);
4331
4332                 list_del(&fpid->list);
4333                 kfree(fpid);
4334         }
4335
4336         ftrace_update_pid_func();
4337         ftrace_startup_enable(0);
4338
4339         mutex_unlock(&ftrace_lock);
4340 }
4341
4342 static void *fpid_start(struct seq_file *m, loff_t *pos)
4343 {
4344         mutex_lock(&ftrace_lock);
4345
4346         if (list_empty(&ftrace_pids) && (!*pos))
4347                 return (void *) 1;
4348
4349         return seq_list_start(&ftrace_pids, *pos);
4350 }
4351
4352 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4353 {
4354         if (v == (void *)1)
4355                 return NULL;
4356
4357         return seq_list_next(v, &ftrace_pids, pos);
4358 }
4359
4360 static void fpid_stop(struct seq_file *m, void *p)
4361 {
4362         mutex_unlock(&ftrace_lock);
4363 }
4364
4365 static int fpid_show(struct seq_file *m, void *v)
4366 {
4367         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4368
4369         if (v == (void *)1) {
4370                 seq_printf(m, "no pid\n");
4371                 return 0;
4372         }
4373
4374         if (fpid->pid == ftrace_swapper_pid)
4375                 seq_printf(m, "swapper tasks\n");
4376         else
4377                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4378
4379         return 0;
4380 }
4381
4382 static const struct seq_operations ftrace_pid_sops = {
4383         .start = fpid_start,
4384         .next = fpid_next,
4385         .stop = fpid_stop,
4386         .show = fpid_show,
4387 };
4388
4389 static int
4390 ftrace_pid_open(struct inode *inode, struct file *file)
4391 {
4392         int ret = 0;
4393
4394         if ((file->f_mode & FMODE_WRITE) &&
4395             (file->f_flags & O_TRUNC))
4396                 ftrace_pid_reset();
4397
4398         if (file->f_mode & FMODE_READ)
4399                 ret = seq_open(file, &ftrace_pid_sops);
4400
4401         return ret;
4402 }
4403
4404 static ssize_t
4405 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4406                    size_t cnt, loff_t *ppos)
4407 {
4408         char buf[64], *tmp;
4409         long val;
4410         int ret;
4411
4412         if (cnt >= sizeof(buf))
4413                 return -EINVAL;
4414
4415         if (copy_from_user(&buf, ubuf, cnt))
4416                 return -EFAULT;
4417
4418         buf[cnt] = 0;
4419
4420         /*
4421          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4422          * to clean the filter quietly.
4423          */
4424         tmp = strstrip(buf);
4425         if (strlen(tmp) == 0)
4426                 return 1;
4427
4428         ret = kstrtol(tmp, 10, &val);
4429         if (ret < 0)
4430                 return ret;
4431
4432         ret = ftrace_pid_add(val);
4433
4434         return ret ? ret : cnt;
4435 }
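
/*
 * Example (debugfs usage): limit function tracing to a single task:
 *
 *        # echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *
 * Echoing 0 selects the per-cpu idle (swapper) tasks, and an empty
 * write ("echo > set_ftrace_pid") clears the list quietly.
 */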
4436
4437 static int
4438 ftrace_pid_release(struct inode *inode, struct file *file)
4439 {
4440         if (file->f_mode & FMODE_READ)
4441                 seq_release(inode, file);
4442
4443         return 0;
4444 }
4445
4446 static const struct file_operations ftrace_pid_fops = {
4447         .open           = ftrace_pid_open,
4448         .write          = ftrace_pid_write,
4449         .read           = seq_read,
4450         .llseek         = seq_lseek,
4451         .release        = ftrace_pid_release,
4452 };
4453
4454 static __init int ftrace_init_debugfs(void)
4455 {
4456         struct dentry *d_tracer;
4457
4458         d_tracer = tracing_init_dentry();
4459         if (!d_tracer)
4460                 return 0;
4461
4462         ftrace_init_dyn_debugfs(d_tracer);
4463
4464         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4465                             NULL, &ftrace_pid_fops);
4466
4467         ftrace_profile_debugfs(d_tracer);
4468
4469         return 0;
4470 }
4471 fs_initcall(ftrace_init_debugfs);
4472
4473 /**
4474  * ftrace_kill - kill ftrace
4475  *
4476  * This function should be used by panic code. It stops ftrace
4477  * but in a not so nice way. It does not take any locks, so it is
4478  * safe to call even from atomic sections such as panic handlers.
4479  */
4480 void ftrace_kill(void)
4481 {
4482         ftrace_disabled = 1;
4483         ftrace_enabled = 0;
4484         clear_ftrace_function();
4485 }
4486
4487 /**
4488  * ftrace_is_dead - Test if ftrace is dead or not.
4489  */
4490 int ftrace_is_dead(void)
4491 {
4492         return ftrace_disabled;
4493 }
4494
4495 /**
4496  * register_ftrace_function - register a function for profiling
4497  * @ops - ops structure that holds the function for profiling.
4498  *
4499  * Register a callback function to be called from every traced
4500  * function in the kernel.
4501  *
4502  * Note: @ops->func and all the functions it calls must be labeled
4503  *       with "notrace", otherwise it will go into a
4504  *       recursive loop.
4505  */
4506 int register_ftrace_function(struct ftrace_ops *ops)
4507 {
4508         int ret = -1;
4509
4510         mutex_lock(&ftrace_lock);
4511
4512         ret = __register_ftrace_function(ops);
4513         if (!ret)
4514                 ret = ftrace_startup(ops, 0);
4515
4516         mutex_unlock(&ftrace_lock);
4517
4518         return ret;
4519 }
4520 EXPORT_SYMBOL_GPL(register_ftrace_function);
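
/*
 * Example (illustrative sketch): a minimal registration.  The callback
 * and ops names are hypothetical.  Unless an ops sets
 * FTRACE_OPS_FL_RECURSION_SAFE, it is called through the list function
 * above, which provides recursion protection for it.
 *
 *        static atomic_t my_hits;
 *
 *        static void notrace
 *        my_callback(unsigned long ip, unsigned long parent_ip,
 *                    struct ftrace_ops *op, struct pt_regs *regs)
 *        {
 *                atomic_inc(&my_hits);
 *        }
 *
 *        static struct ftrace_ops my_ops = {
 *                .func   = my_callback,
 *        };
 *
 *        register_ftrace_function(&my_ops);
 */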
4521
4522 /**
4523  * unregister_ftrace_function - unregister a function for profiling.
4524  * @ops - ops structure that holds the function to unregister
4525  *
4526  * Unregister a function that was added to be called by ftrace profiling.
4527  */
4528 int unregister_ftrace_function(struct ftrace_ops *ops)
4529 {
4530         int ret;
4531
4532         mutex_lock(&ftrace_lock);
4533         ret = __unregister_ftrace_function(ops);
4534         if (!ret)
4535                 ftrace_shutdown(ops, 0);
4536         mutex_unlock(&ftrace_lock);
4537
4538         return ret;
4539 }
4540 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4541
4542 int
4543 ftrace_enable_sysctl(struct ctl_table *table, int write,
4544                      void __user *buffer, size_t *lenp,
4545                      loff_t *ppos)
4546 {
4547         int ret = -ENODEV;
4548
4549         mutex_lock(&ftrace_lock);
4550
4551         if (unlikely(ftrace_disabled))
4552                 goto out;
4553
4554         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4555
4556         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4557                 goto out;
4558
4559         last_ftrace_enabled = !!ftrace_enabled;
4560
4561         if (ftrace_enabled) {
4562
4563                 ftrace_startup_sysctl();
4564
4565                 /* we are starting ftrace again */
4566                 if (ftrace_ops_list != &ftrace_list_end) {
4567                         if (ftrace_ops_list->next == &ftrace_list_end)
4568                                 ftrace_trace_function = ftrace_ops_list->func;
4569                         else
4570                                 ftrace_trace_function = ftrace_ops_list_func;
4571                 }
4572
4573         } else {
4574                 /* stopping ftrace calls (just send to ftrace_stub) */
4575                 ftrace_trace_function = ftrace_stub;
4576
4577                 ftrace_shutdown_sysctl();
4578         }
4579
4580  out:
4581         mutex_unlock(&ftrace_lock);
4582         return ret;
4583 }
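
/*
 * Example (sysctl usage): this handler backs kernel.ftrace_enabled:
 *
 *        # sysctl kernel.ftrace_enabled=0
 *        # echo 1 > /proc/sys/kernel/ftrace_enabled
 */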
4584
4585 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4586
4587 static int ftrace_graph_active;
4588 static struct notifier_block ftrace_suspend_notifier;
4589
4590 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4591 {
4592         return 0;
4593 }
4594
4595 /* The callbacks that hook a function */
4596 trace_func_graph_ret_t ftrace_graph_return =
4597                         (trace_func_graph_ret_t)ftrace_stub;
4598 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4599
4600 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4601 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4602 {
4603         int i;
4604         int ret = 0;
4605         unsigned long flags;
4606         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4607         struct task_struct *g, *t;
4608
4609         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4610                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4611                                         * sizeof(struct ftrace_ret_stack),
4612                                         GFP_KERNEL);
4613                 if (!ret_stack_list[i]) {
4614                         start = 0;
4615                         end = i;
4616                         ret = -ENOMEM;
4617                         goto free;
4618                 }
4619         }
4620
4621         read_lock_irqsave(&tasklist_lock, flags);
4622         do_each_thread(g, t) {
4623                 if (start == end) {
4624                         ret = -EAGAIN;
4625                         goto unlock;
4626                 }
4627
4628                 if (t->ret_stack == NULL) {
4629                         atomic_set(&t->tracing_graph_pause, 0);
4630                         atomic_set(&t->trace_overrun, 0);
4631                         t->curr_ret_stack = -1;
4632                         /* Make sure the tasks see the -1 first: */
4633                         smp_wmb();
4634                         t->ret_stack = ret_stack_list[start++];
4635                 }
4636         } while_each_thread(g, t);
4637
4638 unlock:
4639         read_unlock_irqrestore(&tasklist_lock, flags);
4640 free:
4641         for (i = start; i < end; i++)
4642                 kfree(ret_stack_list[i]);
4643         return ret;
4644 }
4645
4646 static void
4647 ftrace_graph_probe_sched_switch(void *ignore,
4648                         struct task_struct *prev, struct task_struct *next)
4649 {
4650         unsigned long long timestamp;
4651         int index;
4652
4653         /*
4654          * Does the user want to count the time a function was asleep?
4655          * If so, do not update the time stamps.
4656          */
4657         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4658                 return;
4659
4660         timestamp = trace_clock_local();
4661
4662         prev->ftrace_timestamp = timestamp;
4663
4664         /* only process tasks that we timestamped */
4665         if (!next->ftrace_timestamp)
4666                 return;
4667
4668         /*
4669          * Update all the counters in next to make up for the
4670          * time next was sleeping.
4671          */
4672         timestamp -= next->ftrace_timestamp;
4673
4674         for (index = next->curr_ret_stack; index >= 0; index--)
4675                 next->ret_stack[index].calltime += timestamp;
4676 }
4677
4678 /* Allocate a return stack for each task */
4679 static int start_graph_tracing(void)
4680 {
4681         struct ftrace_ret_stack **ret_stack_list;
4682         int ret, cpu;
4683
4684         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4685                                 sizeof(struct ftrace_ret_stack *),
4686                                 GFP_KERNEL);
4687
4688         if (!ret_stack_list)
4689                 return -ENOMEM;
4690
4691         /* The cpu_boot init_task->ret_stack will never be freed */
4692         for_each_online_cpu(cpu) {
4693                 if (!idle_task(cpu)->ret_stack)
4694                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4695         }
4696
4697         do {
4698                 ret = alloc_retstack_tasklist(ret_stack_list);
4699         } while (ret == -EAGAIN);
4700
4701         if (!ret) {
4702                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4703                 if (ret)
4704                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4705                                 " probe to kernel_sched_switch\n");
4706         }
4707
4708         kfree(ret_stack_list);
4709         return ret;
4710 }
4711
4712 /*
4713  * Hibernation protection.
4714  * The state of the current task is too unstable during
4715  * suspend/restore to disk. We want to protect against that.
4716  */
4717 static int
4718 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4719                                                         void *unused)
4720 {
4721         switch (state) {
4722         case PM_HIBERNATION_PREPARE:
4723                 pause_graph_tracing();
4724                 break;
4725
4726         case PM_POST_HIBERNATION:
4727                 unpause_graph_tracing();
4728                 break;
4729         }
4730         return NOTIFY_DONE;
4731 }
4732
4733 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4734                         trace_func_graph_ent_t entryfunc)
4735 {
4736         int ret = 0;
4737
4738         mutex_lock(&ftrace_lock);
4739
4740         /* we currently allow only one tracer registered at a time */
4741         if (ftrace_graph_active) {
4742                 ret = -EBUSY;
4743                 goto out;
4744         }
4745
4746         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4747         register_pm_notifier(&ftrace_suspend_notifier);
4748
4749         ftrace_graph_active++;
4750         ret = start_graph_tracing();
4751         if (ret) {
4752                 ftrace_graph_active--;
4753                 goto out;
4754         }
4755
4756         ftrace_graph_return = retfunc;
4757         ftrace_graph_entry = entryfunc;
4758
4759         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4760
4761 out:
4762         mutex_unlock(&ftrace_lock);
4763         return ret;
4764 }
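
/*
 * Example (illustrative sketch): hooking function entry and return.
 * Both callbacks are hypothetical; a nonzero return from the entry
 * handler tells the graph tracer to trace that call.
 *
 *        static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
 *        {
 *                return 1;
 *        }
 *
 *        static void notrace my_graph_return(struct ftrace_graph_ret *trace)
 *        {
 *        }
 *
 *        register_ftrace_graph(my_graph_return, my_graph_entry);
 */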
4765
4766 void unregister_ftrace_graph(void)
4767 {
4768         mutex_lock(&ftrace_lock);
4769
4770         if (unlikely(!ftrace_graph_active))
4771                 goto out;
4772
4773         ftrace_graph_active--;
4774         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4775         ftrace_graph_entry = ftrace_graph_entry_stub;
4776         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4777         unregister_pm_notifier(&ftrace_suspend_notifier);
4778         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4779
4780  out:
4781         mutex_unlock(&ftrace_lock);
4782 }
4783
4784 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4785
4786 static void
4787 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4788 {
4789         atomic_set(&t->tracing_graph_pause, 0);
4790         atomic_set(&t->trace_overrun, 0);
4791         t->ftrace_timestamp = 0;
4792         /* make curr_ret_stack visible before we add the ret_stack */
4793         smp_wmb();
4794         t->ret_stack = ret_stack;
4795 }
4796
4797 /*
4798  * Allocate a return stack for the idle task. May be the first
4799  * time through, or it may be done by CPU hotplug online.
4800  */
4801 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4802 {
4803         t->curr_ret_stack = -1;
4804         /*
4805          * The idle task has no parent, it either has its own
4806          * stack or no stack at all.
4807          */
4808         if (t->ret_stack)
4809                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4810
4811         if (ftrace_graph_active) {
4812                 struct ftrace_ret_stack *ret_stack;
4813
4814                 ret_stack = per_cpu(idle_ret_stack, cpu);
4815                 if (!ret_stack) {
4816                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4817                                             * sizeof(struct ftrace_ret_stack),
4818                                             GFP_KERNEL);
4819                         if (!ret_stack)
4820                                 return;
4821                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4822                 }
4823                 graph_init_task(t, ret_stack);
4824         }
4825 }
4826
4827 /* Allocate a return stack for newly created task */
4828 void ftrace_graph_init_task(struct task_struct *t)
4829 {
4830         /* Make sure we do not use the parent ret_stack */
4831         t->ret_stack = NULL;
4832         t->curr_ret_stack = -1;
4833
4834         if (ftrace_graph_active) {
4835                 struct ftrace_ret_stack *ret_stack;
4836
4837                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4838                                 * sizeof(struct ftrace_ret_stack),
4839                                 GFP_KERNEL);
4840                 if (!ret_stack)
4841                         return;
4842                 graph_init_task(t, ret_stack);
4843         }
4844 }
4845
4846 void ftrace_graph_exit_task(struct task_struct *t)
4847 {
4848         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4849
4850         t->ret_stack = NULL;
4851         /* NULL must become visible to IRQs before we free it: */
4852         barrier();
4853
4854         kfree(ret_stack);
4855 }
4856
4857 void ftrace_graph_stop(void)
4858 {
4859         ftrace_stop();
4860 }
4861 #endif