kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
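
/*
 * Illustrative example (not part of the original file): because the
 * macros above are statement expressions that evaluate to the tested
 * condition, they work both standalone and inside an if.  The "rec"
 * pointer here is hypothetical:
 *
 *      if (FTRACE_WARN_ON(!rec))       // warns and calls ftrace_kill()
 *              return -EINVAL;         // on a NULL rec
 */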

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)        \
        .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_notrace(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))

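/*
 * Illustrative sketch (not part of the original file) of the intended
 * use of the macro pair above; ftrace_global_list_func() below is a
 * real user:
 *
 *      do_for_each_ftrace_op(op, ftrace_global_list) {
 *              op->func(ip, parent_ip, op, regs);
 *      } while_for_each_ftrace_op(op);
 */
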
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->regex_lock);
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
{
        int bit;

        bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
        if (bit < 0)
                return;

        do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);

        trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a short lag before tracing actually stops.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
                /*
                 * As we are calling the function directly, if it does
                 * not have recursion protection, the function_trace_op
                 * needs to be updated accordingly.
                 */
                if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
                        global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
                else
                        global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
        } else {
                func = ftrace_global_list_func;
                /* The list has its own recursion protection. */
                global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
        }

        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
             (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
                        function_trace_op = ftrace_global_list;
                else
                        function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
                function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}
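
/*
 * Illustrative note (not part of the original file): the
 * rcu_assign_pointer() above pairs with the reader-side
 * rcu_dereference_raw_notrace() in do_for_each_ftrace_op().  Its
 * implied write barrier guarantees that a concurrent walker sees
 * either the old list head or the new ops with a fully initialized
 * ->next, never a half-published entry.
 */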

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it can only be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}
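
/*
 * Usage sketch (illustrative only; assumes the public wrappers
 * register_ftrace_function()/unregister_ftrace_function() declared in
 * <linux/ftrace.h>, which funnel into the helpers above):
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *                                struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              // hypothetical callback; runs for every traced function
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func  = my_trace_func,
 *              .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      register_ftrace_function(&my_ops);      // start receiving calls
 *      ...
 *      unregister_ftrace_function(&my_ops);    // no callbacks after this
 */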

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
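
/*
 * Back-of-the-envelope example (illustrative; assumes 4096-byte pages,
 * a 64-bit kernel and CONFIG_FUNCTION_GRAPH_TRACER, making struct
 * ftrace_profile roughly 48 bytes):
 *
 *      PROFILE_RECORDS_SIZE ~= 4096 - 16 = 4080
 *      PROFILES_PER_PAGE    ~= 4080 / 48 = 85
 *
 * so each page holds on the order of 85 profile records.
 */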

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;

        return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;

        return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide by only 1000 here for the ns^2 -> us^2
                 * conversion; trace_print_graph_duration() will
                 * divide by 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}
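
/*
 * Worked example for the statistics above (illustrative): with
 * n = rec->counter samples and avg = time / n, the value printed
 * as s^2 is the sample variance
 *
 *      s^2 = (time_squared - n * avg^2) / (n - 1)
 *
 * e.g. samples of 100, 200 and 300 ns give time = 600,
 * time_squared = 140000, avg = 200, so s^2 = (140000 - 120000) / 2
 * = 10000 ns^2 before the unit-conversion divides.
 */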

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}
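
/*
 * Sizing example (illustrative; assumes the 20000-function fallback
 * above plus the ~85 records per 4K page estimated earlier):
 *
 *      pages = DIV_ROUND_UP(20000, 85) = 236
 *
 * i.e. the profiler preallocates on the order of 236 pages (~944 KB)
 * per possible CPU the first time it is enabled.
 */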

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zeroed, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler() calls stop_machine()
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}
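
/*
 * Usage example (illustrative): the knob implemented above is driven
 * from debugfs, typically mounted at /sys/kernel/debug:
 *
 *      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *      cat /sys/kernel/debug/tracing/trace_stat/function0
 *      echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * where function0 is the per-CPU stat file registered by
 * ftrace_profile_debugfs() below.
 */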

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; if an
                         * allocation fails part way, we still do not
                         * free the memory already set up.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, whence);
        else
                file->f_pos = ret = 1;

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct list_head        free_list;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(global_ops)
};

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}
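
/*
 * Illustrative note (not part of the original file): size_bits is the
 * log2 of the bucket count, so
 *
 *      hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *
 * allocates 1 << 10 = 1024 buckets, while size_bits == 0 degenerates
 * into the single-bucket key = 0 case handled in ftrace_lookup_ip().
 */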

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}
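
/*
 * Sizing example for the bit loop above (illustrative): for
 * src->count = 100, size /= 2 starts at 50 and shifts
 * 50 -> 25 -> 12 -> 6 -> 3 -> 1, incrementing bits to 6, so the new
 * hash gets 1 << 6 = 64 buckets, roughly half the entry count,
 * capped at FTRACE_HASH_MAX_BITS (12, i.e. 4096 buckets).
 */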

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * There's a small race when adding ops: the ftrace handler
         * that wants regs may be called without them. We can not
         * allow that handler to be called if regs is NULL.
         */
        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
                return 0;
#endif

        filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
        notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}
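
/*
 * Truth table for the test above (illustrative summary):
 *
 *      filter_hash     notrace_hash    result
 *      empty           empty           trace (every function)
 *      contains ip     empty           trace
 *      lacks ip        -               do not trace
 *      empty/has ip    contains ip     do not trace (notrace wins)
 */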

/*
 * This is a double for loop. Do not use 'break' to break out of it,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
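
/*
 * Usage sketch (illustrative; __ftrace_hash_rec_update() below is a
 * real user).  The pair expands to nested for loops, so an early exit
 * must use a goto, never a break; "target_ip" is hypothetical:
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->ip == target_ip)
 *                      goto found;
 *      } while_for_each_ftrace_rec();
 */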

static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the @ip given points to an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all functions
                         * that are in the hash but not in the other hash.
                         *
                         * If filter_hash is not set, then we are decrementing.
                         * That means we match anything that is in the hash
                         * and also in the other_hash. That is, we need to turn
                         * off functions in the other hash because they are
                         * disabled by this hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
1654 void ftrace_bug(int failed, unsigned long ip)
1655 {
1656         switch (failed) {
1657         case -EFAULT:
1658                 FTRACE_WARN_ON_ONCE(1);
1659                 pr_info("ftrace faulted on modifying ");
1660                 print_ip_sym(ip);
1661                 break;
1662         case -EINVAL:
1663                 FTRACE_WARN_ON_ONCE(1);
1664                 pr_info("ftrace failed to modify ");
1665                 print_ip_sym(ip);
1666                 print_ip_ins(" actual: ", (unsigned char *)ip);
1667                 printk(KERN_CONT "\n");
1668                 break;
1669         case -EPERM:
1670                 FTRACE_WARN_ON_ONCE(1);
1671                 pr_info("ftrace faulted on writing ");
1672                 print_ip_sym(ip);
1673                 break;
1674         default:
1675                 FTRACE_WARN_ON_ONCE(1);
1676                 pr_info("ftrace faulted on unknown error ");
1677                 print_ip_sym(ip);
1678         }
1679 }
1680
1681 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1682 {
1683         unsigned long flag = 0UL;
1684
1685         /*
1686          * If we are updating calls:
1687          *
1688          *   If the record has a ref count, then we need to enable it
1689          *   because someone is using it.
1690          *
1691          *   Otherwise we make sure it's disabled.
1692          *
1693          * If we are disabling calls, then disable all records that
1694          * are enabled.
1695          */
1696         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1697                 flag = FTRACE_FL_ENABLED;
1698
1699         /*
1700          * If enabling and the REGS flag does not match the REGS_EN, then
1701          * do not ignore this record. Set flags to fail the compare against
1702          * ENABLED.
1703          */
1704         if (flag &&
1705             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1706                 flag |= FTRACE_FL_REGS;
1707
1708         /* If the state of this record hasn't changed, then do nothing */
1709         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1710                 return FTRACE_UPDATE_IGNORE;
1711
1712         if (flag) {
1713                 /* Save off if rec is being enabled (for return value) */
1714                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1715
1716                 if (update) {
1717                         rec->flags |= FTRACE_FL_ENABLED;
1718                         if (flag & FTRACE_FL_REGS) {
1719                                 if (rec->flags & FTRACE_FL_REGS)
1720                                         rec->flags |= FTRACE_FL_REGS_EN;
1721                                 else
1722                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1723                         }
1724                 }
1725
1726                 /*
1727                  * If this record is being updated from a nop, then
1728                  *   return UPDATE_MAKE_CALL.
1729                  * Otherwise, if the EN flag is set, then return
1730                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1731                  *   from the non-save regs to a save regs function.
1732                  * Otherwise,
1733                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1734                  *   from the save regs to a non-save regs function.
1735                  */
1736                 if (flag & FTRACE_FL_ENABLED)
1737                         return FTRACE_UPDATE_MAKE_CALL;
1738                 else if (rec->flags & FTRACE_FL_REGS_EN)
1739                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1740                 else
1741                         return FTRACE_UPDATE_MODIFY_CALL;
1742         }
1743
1744         if (update) {
1745                 /* If there's no more users, clear all flags */
1746                 if (!(rec->flags & ~FTRACE_FL_MASK))
1747                         rec->flags = 0;
1748                 else
1749                         /* Just disable the record (keep REGS state) */
1750                         rec->flags &= ~FTRACE_FL_ENABLED;
1751         }
1752
1753         return FTRACE_UPDATE_MAKE_NOP;
1754 }
1755
1756 /**
1757  * ftrace_update_record - update a record to reflect whether it is being traced
1758  * @rec: the record to update
1759  * @enable: set to 1 if the record is tracing, zero to force disable
1760  *
1761  * The records that represent all functions that can be traced need
1762  * to be updated when tracing has been enabled.
1763  */
1764 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1765 {
1766         return ftrace_check_record(rec, enable, 1);
1767 }
1768
1769 /**
1770  * ftrace_test_record - check if the record has been enabled or not
1771  * @rec: the record to test
1772  * @enable: set to 1 to check if enabled, 0 if it is disabled
1773  *
1774  * The arch code may need to test if a record is already set to
1775  * tracing to determine how to modify the function code that it
1776  * represents.
1777  */
1778 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1779 {
1780         return ftrace_check_record(rec, enable, 0);
1781 }
1782
1783 static int
1784 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1785 {
1786         unsigned long ftrace_old_addr;
1787         unsigned long ftrace_addr;
1788         int ret;
1789
1790         ret = ftrace_update_record(rec, enable);
1791
1792         if (rec->flags & FTRACE_FL_REGS)
1793                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1794         else
1795                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1796
1797         switch (ret) {
1798         case FTRACE_UPDATE_IGNORE:
1799                 return 0;
1800
1801         case FTRACE_UPDATE_MAKE_CALL:
1802                 return ftrace_make_call(rec, ftrace_addr);
1803
1804         case FTRACE_UPDATE_MAKE_NOP:
1805                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1806
1807         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1808         case FTRACE_UPDATE_MODIFY_CALL:
1809                 if (rec->flags & FTRACE_FL_REGS)
1810                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1811                 else
1812                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1813
1814                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1815         }
1816
1817         return -1; /* unknown ftrace bug */
1818 }
1819
1820 void __weak ftrace_replace_code(int enable)
1821 {
1822         struct dyn_ftrace *rec;
1823         struct ftrace_page *pg;
1824         int failed;
1825
1826         if (unlikely(ftrace_disabled))
1827                 return;
1828
1829         do_for_each_ftrace_rec(pg, rec) {
1830                 failed = __ftrace_replace_code(rec, enable);
1831                 if (failed) {
1832                         ftrace_bug(failed, rec->ip);
1833                         /* Stop processing */
1834                         return;
1835                 }
1836         } while_for_each_ftrace_rec();
1837 }
1838
1839 struct ftrace_rec_iter {
1840         struct ftrace_page      *pg;
1841         int                     index;
1842 };
1843
1844 /**
1845  * ftrace_rec_iter_start - start up iterating over traced functions
1846  *
1847  * Returns an iterator handle that is used to iterate over all
1848  * the records that represent address locations where functions
1849  * are traced.
1850  *
1851  * May return NULL if no records are available.
1852  */
1853 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1854 {
1855         /*
1856          * We only use a single iterator.
1857          * Protected by the ftrace_lock mutex.
1858          */
1859         static struct ftrace_rec_iter ftrace_rec_iter;
1860         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1861
1862         iter->pg = ftrace_pages_start;
1863         iter->index = 0;
1864
1865         /* Could have empty pages */
1866         while (iter->pg && !iter->pg->index)
1867                 iter->pg = iter->pg->next;
1868
1869         if (!iter->pg)
1870                 return NULL;
1871
1872         return iter;
1873 }
1874
1875 /**
1876  * ftrace_rec_iter_next - get the next record to process.
1877  * @iter: The handle to the iterator.
1878  *
1879  * Returns the next iterator after the given iterator @iter.
1880  */
1881 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1882 {
1883         iter->index++;
1884
1885         if (iter->index >= iter->pg->index) {
1886                 iter->pg = iter->pg->next;
1887                 iter->index = 0;
1888
1889                 /* Could have empty pages */
1890                 while (iter->pg && !iter->pg->index)
1891                         iter->pg = iter->pg->next;
1892         }
1893
1894         if (!iter->pg)
1895                 return NULL;
1896
1897         return iter;
1898 }
1899
1900 /**
1901  * ftrace_rec_iter_record - get the record at the iterator location
1902  * @iter: The current iterator location
1903  *
1904  * Returns the record that the current @iter is at.
1905  */
1906 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1907 {
1908         return &iter->pg->records[iter->index];
1909 }
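
/*
 * Typical use of the iterator by arch code (sketch; newer kernels wrap
 * this exact pattern in a for_ftrace_rec_iter() helper):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch or inspect rec->ip here ...
 *	}
 */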
1910
1911 static int
1912 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1913 {
1914         unsigned long ip;
1915         int ret;
1916
1917         ip = rec->ip;
1918
1919         if (unlikely(ftrace_disabled))
1920                 return 0;
1921
1922         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1923         if (ret) {
1924                 ftrace_bug(ret, ip);
1925                 return 0;
1926         }
1927         return 1;
1928 }
1929
1930 /*
1931  * archs can override this function if they must do something
1932  * before the modifying code is performed.
1933  */
1934 int __weak ftrace_arch_code_modify_prepare(void)
1935 {
1936         return 0;
1937 }
1938
1939 /*
1940  * archs can override this function if they must do something
1941  * after the modifying code is performed.
1942  */
1943 int __weak ftrace_arch_code_modify_post_process(void)
1944 {
1945         return 0;
1946 }
1947
1948 void ftrace_modify_all_code(int command)
1949 {
1950         if (command & FTRACE_UPDATE_CALLS)
1951                 ftrace_replace_code(1);
1952         else if (command & FTRACE_DISABLE_CALLS)
1953                 ftrace_replace_code(0);
1954
1955         if (command & FTRACE_UPDATE_TRACE_FUNC)
1956                 ftrace_update_ftrace_func(ftrace_trace_function);
1957
1958         if (command & FTRACE_START_FUNC_RET)
1959                 ftrace_enable_ftrace_graph_caller();
1960         else if (command & FTRACE_STOP_FUNC_RET)
1961                 ftrace_disable_ftrace_graph_caller();
1962 }
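
/*
 * For example, ftrace_startup() below reaches this function (via
 * ftrace_run_update_code()) with FTRACE_UPDATE_CALLS set, plus
 * FTRACE_UPDATE_TRACE_FUNC when the trace function has changed: the
 * selected call sites are patched first, then the function invoked by
 * the trampoline is swapped to the new ftrace_trace_function.
 */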
1963
1964 static int __ftrace_modify_code(void *data)
1965 {
1966         int *command = data;
1967
1968         ftrace_modify_all_code(*command);
1969
1970         return 0;
1971 }
1972
1973 /**
1974  * ftrace_run_stop_machine - go back to the stop machine method
1975  * @command: The command to tell ftrace what to do
1976  *
1977  * If an arch needs to fall back to the stop machine method, then
1978  * it can call this function.
1979  */
1980 void ftrace_run_stop_machine(int command)
1981 {
1982         stop_machine(__ftrace_modify_code, &command, NULL);
1983 }
1984
1985 /**
1986  * arch_ftrace_update_code - modify the code to trace or not trace
1987  * @command: The command that needs to be done
1988  *
1989  * Archs can override this function if they do not need to
1990  * run stop_machine() to modify code.
1991  */
1992 void __weak arch_ftrace_update_code(int command)
1993 {
1994         ftrace_run_stop_machine(command);
1995 }
1996
1997 static void ftrace_run_update_code(int command)
1998 {
1999         int ret;
2000
2001         ret = ftrace_arch_code_modify_prepare();
2002         FTRACE_WARN_ON(ret);
2003         if (ret)
2004                 return;
2005         /*
2006          * Do not call function tracer while we update the code.
2007          * We are in stop machine.
2008          */
2009         function_trace_stop++;
2010
2011         /*
2012          * By default we use stop_machine() to modify the code.
2013          * But archs can do whatever they want as long as it
2014          * is safe. The stop_machine() is the safest, but also
2015          * produces the most overhead.
2016          */
2017         arch_ftrace_update_code(command);
2018
2019         function_trace_stop--;
2020
2021         ret = ftrace_arch_code_modify_post_process();
2022         FTRACE_WARN_ON(ret);
2023 }
2024
2025 static ftrace_func_t saved_ftrace_func;
2026 static int ftrace_start_up;
2027 static int global_start_up;
2028
2029 static void ftrace_startup_enable(int command)
2030 {
2031         if (saved_ftrace_func != ftrace_trace_function) {
2032                 saved_ftrace_func = ftrace_trace_function;
2033                 command |= FTRACE_UPDATE_TRACE_FUNC;
2034         }
2035
2036         if (!command || !ftrace_enabled)
2037                 return;
2038
2039         ftrace_run_update_code(command);
2040 }
2041
2042 static int ftrace_startup(struct ftrace_ops *ops, int command)
2043 {
2044         bool hash_enable = true;
2045         int ret;
2046
2047         if (unlikely(ftrace_disabled))
2048                 return -ENODEV;
2049
2050         ret = __register_ftrace_function(ops);
2051         if (ret)
2052                 return ret;
2053
2054         ftrace_start_up++;
2055         command |= FTRACE_UPDATE_CALLS;
2056
2057         /* ops marked global share the filter hashes */
2058         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2059                 ops = &global_ops;
2060                 /* Don't update hash if global is already set */
2061                 if (global_start_up)
2062                         hash_enable = false;
2063                 global_start_up++;
2064         }
2065
2066         ops->flags |= FTRACE_OPS_FL_ENABLED;
2067         if (hash_enable)
2068                 ftrace_hash_rec_enable(ops, 1);
2069
2070         ftrace_startup_enable(command);
2071
2072         return 0;
2073 }
2074
2075 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2076 {
2077         bool hash_disable = true;
2078         int ret;
2079
2080         if (unlikely(ftrace_disabled))
2081                 return -ENODEV;
2082
2083         ret = __unregister_ftrace_function(ops);
2084         if (ret)
2085                 return ret;
2086
2087         ftrace_start_up--;
2088         /*
2089          * Just warn in case of imbalance; no need to kill ftrace. It's not
2090          * critical, but the ftrace_call callers may never be nopped again
2091          * after further ftrace uses.
2092          */
2093         WARN_ON_ONCE(ftrace_start_up < 0);
2094
2095         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2096                 ops = &global_ops;
2097                 global_start_up--;
2098                 WARN_ON_ONCE(global_start_up < 0);
2099                 /* Don't update hash if global still has users */
2100                 if (global_start_up) {
2101                         WARN_ON_ONCE(!ftrace_start_up);
2102                         hash_disable = false;
2103                 }
2104         }
2105
2106         if (hash_disable)
2107                 ftrace_hash_rec_disable(ops, 1);
2108
2109         if (ops != &global_ops || !global_start_up)
2110                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2111
2112         command |= FTRACE_UPDATE_CALLS;
2113
2114         if (saved_ftrace_func != ftrace_trace_function) {
2115                 saved_ftrace_func = ftrace_trace_function;
2116                 command |= FTRACE_UPDATE_TRACE_FUNC;
2117         }
2118
2119         if (!command || !ftrace_enabled)
2120                 return 0;
2121
2122         ftrace_run_update_code(command);
2123         return 0;
2124 }
2125
2126 static void ftrace_startup_sysctl(void)
2127 {
2128         if (unlikely(ftrace_disabled))
2129                 return;
2130
2131         /* Force update next time */
2132         saved_ftrace_func = NULL;
2133         /* ftrace_start_up is true if we want ftrace running */
2134         if (ftrace_start_up)
2135                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2136 }
2137
2138 static void ftrace_shutdown_sysctl(void)
2139 {
2140         if (unlikely(ftrace_disabled))
2141                 return;
2142
2143         /* ftrace_start_up is true if ftrace is running */
2144         if (ftrace_start_up)
2145                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2146 }
2147
2148 static cycle_t          ftrace_update_time;
2149 static unsigned long    ftrace_update_cnt;
2150 unsigned long           ftrace_update_tot_cnt;
2151
2152 static inline int ops_traces_mod(struct ftrace_ops *ops)
2153 {
2154         /*
2155          * An empty filter_hash means the ops traces the module by default.
2156          * But a non-empty notrace_hash requires testing individual module functions.
2157          */
2158         return ftrace_hash_empty(ops->filter_hash) &&
2159                 ftrace_hash_empty(ops->notrace_hash);
2160 }
2161
2162 /*
2163  * Check if the current ops references the record.
2164  *
2165  * If the ops traces all functions, then it was already accounted for.
2166  * If the ops does not trace the current record function, skip it.
2167  * If the ops ignores the function via notrace filter, skip it.
2168  */
2169 static inline bool
2170 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2171 {
2172         /* If ops isn't enabled, ignore it */
2173         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2174                 return 0;
2175
2176         /* If ops traces all mods, we already accounted for it */
2177         if (ops_traces_mod(ops))
2178                 return 0;
2179
2180         /* The function must be in the filter */
2181         if (!ftrace_hash_empty(ops->filter_hash) &&
2182             !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2183                 return 0;
2184
2185         /* If in notrace hash, we ignore it too */
2186         if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2187                 return 0;
2188
2189         return 1;
2190 }
2191
2192 static int referenced_filters(struct dyn_ftrace *rec)
2193 {
2194         struct ftrace_ops *ops;
2195         int cnt = 0;
2196
2197         for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2198                 if (ops_references_rec(ops, rec))
2199                     cnt++;
2200         }
2201
2202         return cnt;
2203 }
2204
2205 static int ftrace_update_code(struct module *mod)
2206 {
2207         struct ftrace_page *pg;
2208         struct dyn_ftrace *p;
2209         cycle_t start, stop;
2210         unsigned long ref = 0;
2211         bool test = false;
2212         int i;
2213
2214         /*
2215          * When adding a module, we need to check if tracers are
2216          * currently enabled and if they are set to trace all functions.
2217          * If they are, we need to enable the module functions as well
2218          * as update the reference counts for those function records.
2219          */
2220         if (mod) {
2221                 struct ftrace_ops *ops;
2222
2223                 for (ops = ftrace_ops_list;
2224                      ops != &ftrace_list_end; ops = ops->next) {
2225                         if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2226                                 if (ops_traces_mod(ops))
2227                                         ref++;
2228                                 else
2229                                         test = true;
2230                         }
2231                 }
2232         }
2233
2234         start = ftrace_now(raw_smp_processor_id());
2235         ftrace_update_cnt = 0;
2236
2237         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2238
2239                 for (i = 0; i < pg->index; i++) {
2240                         int cnt = ref;
2241
2242                         /* If something went wrong, bail without enabling anything */
2243                         if (unlikely(ftrace_disabled))
2244                                 return -1;
2245
2246                         p = &pg->records[i];
2247                         if (test)
2248                                 cnt += referenced_filters(p);
2249                         p->flags = cnt;
2250
2251                         /*
2252                          * Do the initial record conversion from mcount jump
2253                          * to the NOP instructions.
2254                          */
2255                         if (!ftrace_code_disable(mod, p))
2256                                 break;
2257
2258                         ftrace_update_cnt++;
2259
2260                         /*
2261                          * If the tracing is enabled, go ahead and enable the record.
2262                          *
2263                          * The reason not to enable the record immediately is the
2264                          * inherent check of ftrace_make_nop/ftrace_make_call for
2265                          * correct previous instructions.  Doing the NOP conversion
2266                          * first puts the module into the correct state, thus
2267                          * passing the ftrace_make_call check.
2268                          */
2269                         if (ftrace_start_up && cnt) {
2270                                 int failed = __ftrace_replace_code(p, 1);
2271                                 if (failed)
2272                                         ftrace_bug(failed, p->ip);
2273                         }
2274                 }
2275         }
2276
2277         ftrace_new_pgs = NULL;
2278
2279         stop = ftrace_now(raw_smp_processor_id());
2280         ftrace_update_time = stop - start;
2281         ftrace_update_tot_cnt += ftrace_update_cnt;
2282
2283         return 0;
2284 }
2285
2286 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2287 {
2288         int order;
2289         int cnt;
2290
2291         if (WARN_ON(!count))
2292                 return -EINVAL;
2293
2294         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2295
2296         /*
2297          * We want to fill as much as possible, leaving no more than
2298          * a single page's worth of entries unused.
2299          */
2300         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2301                 order--;
2302
2303  again:
2304         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2305
2306         if (!pg->records) {
2307                 /* if we can't allocate this size, try something smaller */
2308                 if (!order)
2309                         return -ENOMEM;
2310                 order >>= 1;
2311                 goto again;
2312         }
2313
2314         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2315         pg->size = cnt;
2316
2317         if (cnt > count)
2318                 cnt = count;
2319
2320         return cnt;
2321 }
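
/*
 * Worked example (illustrative sizes; ENTRY_SIZE depends on arch and
 * config): with PAGE_SIZE 4096 and ENTRY_SIZE 32, ENTRIES_PER_PAGE is
 * 128.  For count == 200, get_count_order(DIV_ROUND_UP(200, 128))
 * gives order 1, i.e. two pages holding 256 entries.  The while loop
 * above keeps order 1, since a single page (128 entries) could not
 * hold the request, and the function returns 200 with pg->size == 256.
 */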
2322
2323 static struct ftrace_page *
2324 ftrace_allocate_pages(unsigned long num_to_init)
2325 {
2326         struct ftrace_page *start_pg;
2327         struct ftrace_page *pg;
2328         int order;
2329         int cnt;
2330
2331         if (!num_to_init)
2332                 return NULL;
2333
2334         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2335         if (!pg)
2336                 return NULL;
2337
2338         /*
2339          * Try to allocate as much as possible in one contiguous
2340          * location that fills in all of the space. We want to
2341          * waste as little space as possible.
2342          */
2343         for (;;) {
2344                 cnt = ftrace_allocate_records(pg, num_to_init);
2345                 if (cnt < 0)
2346                         goto free_pages;
2347
2348                 num_to_init -= cnt;
2349                 if (!num_to_init)
2350                         break;
2351
2352                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2353                 if (!pg->next)
2354                         goto free_pages;
2355
2356                 pg = pg->next;
2357         }
2358
2359         return start_pg;
2360
2361  free_pages:
2362         while (start_pg) {
2363                 order = get_count_order(start_pg->size / ENTRIES_PER_PAGE);
2364                 free_pages((unsigned long)start_pg->records, order);
2365                 pg = start_pg->next;
2366                 kfree(start_pg);
2367                 start_pg = pg;
2368         }
2369         pr_info("ftrace: FAILED to allocate memory for functions\n");
2370         return NULL;
2371 }
2372
2373 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2374 {
2375         int cnt;
2376
2377         if (!num_to_init) {
2378                 pr_info("ftrace: No functions to be traced?\n");
2379                 return -1;
2380         }
2381
2382         cnt = num_to_init / ENTRIES_PER_PAGE;
2383         pr_info("ftrace: allocating %ld entries in %d pages\n",
2384                 num_to_init, cnt + 1);
2385
2386         return 0;
2387 }
2388
2389 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2390
2391 struct ftrace_iterator {
2392         loff_t                          pos;
2393         loff_t                          func_pos;
2394         struct ftrace_page              *pg;
2395         struct dyn_ftrace               *func;
2396         struct ftrace_func_probe        *probe;
2397         struct trace_parser             parser;
2398         struct ftrace_hash              *hash;
2399         struct ftrace_ops               *ops;
2400         int                             hidx;
2401         int                             idx;
2402         unsigned                        flags;
2403 };
2404
2405 static void *
2406 t_hash_next(struct seq_file *m, loff_t *pos)
2407 {
2408         struct ftrace_iterator *iter = m->private;
2409         struct hlist_node *hnd = NULL;
2410         struct hlist_head *hhd;
2411
2412         (*pos)++;
2413         iter->pos = *pos;
2414
2415         if (iter->probe)
2416                 hnd = &iter->probe->node;
2417  retry:
2418         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2419                 return NULL;
2420
2421         hhd = &ftrace_func_hash[iter->hidx];
2422
2423         if (hlist_empty(hhd)) {
2424                 iter->hidx++;
2425                 hnd = NULL;
2426                 goto retry;
2427         }
2428
2429         if (!hnd)
2430                 hnd = hhd->first;
2431         else {
2432                 hnd = hnd->next;
2433                 if (!hnd) {
2434                         iter->hidx++;
2435                         goto retry;
2436                 }
2437         }
2438
2439         if (WARN_ON_ONCE(!hnd))
2440                 return NULL;
2441
2442         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2443
2444         return iter;
2445 }
2446
2447 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2448 {
2449         struct ftrace_iterator *iter = m->private;
2450         void *p = NULL;
2451         loff_t l;
2452
2453         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2454                 return NULL;
2455
2456         if (iter->func_pos > *pos)
2457                 return NULL;
2458
2459         iter->hidx = 0;
2460         for (l = 0; l <= (*pos - iter->func_pos); ) {
2461                 p = t_hash_next(m, &l);
2462                 if (!p)
2463                         break;
2464         }
2465         if (!p)
2466                 return NULL;
2467
2468         /* Only set this if we have an item */
2469         iter->flags |= FTRACE_ITER_HASH;
2470
2471         return iter;
2472 }
2473
2474 static int
2475 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2476 {
2477         struct ftrace_func_probe *rec;
2478
2479         rec = iter->probe;
2480         if (WARN_ON_ONCE(!rec))
2481                 return -EIO;
2482
2483         if (rec->ops->print)
2484                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2485
2486         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2487
2488         if (rec->data)
2489                 seq_printf(m, ":%p", rec->data);
2490         seq_putc(m, '\n');
2491
2492         return 0;
2493 }
2494
2495 static void *
2496 t_next(struct seq_file *m, void *v, loff_t *pos)
2497 {
2498         struct ftrace_iterator *iter = m->private;
2499         struct ftrace_ops *ops = iter->ops;
2500         struct dyn_ftrace *rec = NULL;
2501
2502         if (unlikely(ftrace_disabled))
2503                 return NULL;
2504
2505         if (iter->flags & FTRACE_ITER_HASH)
2506                 return t_hash_next(m, pos);
2507
2508         (*pos)++;
2509         iter->pos = iter->func_pos = *pos;
2510
2511         if (iter->flags & FTRACE_ITER_PRINTALL)
2512                 return t_hash_start(m, pos);
2513
2514  retry:
2515         if (iter->idx >= iter->pg->index) {
2516                 if (iter->pg->next) {
2517                         iter->pg = iter->pg->next;
2518                         iter->idx = 0;
2519                         goto retry;
2520                 }
2521         } else {
2522                 rec = &iter->pg->records[iter->idx++];
2523                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2524                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2525
2526                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2527                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2528
2529                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2530                      !(rec->flags & FTRACE_FL_ENABLED))) {
2531
2532                         rec = NULL;
2533                         goto retry;
2534                 }
2535         }
2536
2537         if (!rec)
2538                 return t_hash_start(m, pos);
2539
2540         iter->func = rec;
2541
2542         return iter;
2543 }
2544
2545 static void reset_iter_read(struct ftrace_iterator *iter)
2546 {
2547         iter->pos = 0;
2548         iter->func_pos = 0;
2549         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2550 }
2551
2552 static void *t_start(struct seq_file *m, loff_t *pos)
2553 {
2554         struct ftrace_iterator *iter = m->private;
2555         struct ftrace_ops *ops = iter->ops;
2556         void *p = NULL;
2557         loff_t l;
2558
2559         mutex_lock(&ftrace_lock);
2560
2561         if (unlikely(ftrace_disabled))
2562                 return NULL;
2563
2564         /*
2565          * If an lseek was done, then reset and start from beginning.
2566          */
2567         if (*pos < iter->pos)
2568                 reset_iter_read(iter);
2569
2570         /*
2571          * For set_ftrace_filter reading, if we have the filter
2572          * off, we can shortcut and just print out that all
2573          * functions are enabled.
2574          */
2575         if (iter->flags & FTRACE_ITER_FILTER &&
2576             ftrace_hash_empty(ops->filter_hash)) {
2577                 if (*pos > 0)
2578                         return t_hash_start(m, pos);
2579                 iter->flags |= FTRACE_ITER_PRINTALL;
2580                 /* reset in case of seek/pread */
2581                 iter->flags &= ~FTRACE_ITER_HASH;
2582                 return iter;
2583         }
2584
2585         if (iter->flags & FTRACE_ITER_HASH)
2586                 return t_hash_start(m, pos);
2587
2588         /*
2589          * Unfortunately, we need to restart at ftrace_pages_start
2590          * every time we let go of the ftrace_lock mutex. This is because
2591          * those pointers can change without the lock.
2592          */
2593         iter->pg = ftrace_pages_start;
2594         iter->idx = 0;
2595         for (l = 0; l <= *pos; ) {
2596                 p = t_next(m, p, &l);
2597                 if (!p)
2598                         break;
2599         }
2600
2601         if (!p)
2602                 return t_hash_start(m, pos);
2603
2604         return iter;
2605 }
2606
2607 static void t_stop(struct seq_file *m, void *p)
2608 {
2609         mutex_unlock(&ftrace_lock);
2610 }
2611
2612 static int t_show(struct seq_file *m, void *v)
2613 {
2614         struct ftrace_iterator *iter = m->private;
2615         struct dyn_ftrace *rec;
2616
2617         if (iter->flags & FTRACE_ITER_HASH)
2618                 return t_hash_show(m, iter);
2619
2620         if (iter->flags & FTRACE_ITER_PRINTALL) {
2621                 seq_printf(m, "#### all functions enabled ####\n");
2622                 return 0;
2623         }
2624
2625         rec = iter->func;
2626
2627         if (!rec)
2628                 return 0;
2629
2630         seq_printf(m, "%ps", (void *)rec->ip);
2631         if (iter->flags & FTRACE_ITER_ENABLED)
2632                 seq_printf(m, " (%ld)%s",
2633                            rec->flags & ~FTRACE_FL_MASK,
2634                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2635         seq_printf(m, "\n");
2636
2637         return 0;
2638 }
2639
2640 static const struct seq_operations show_ftrace_seq_ops = {
2641         .start = t_start,
2642         .next = t_next,
2643         .stop = t_stop,
2644         .show = t_show,
2645 };
2646
2647 static int
2648 ftrace_avail_open(struct inode *inode, struct file *file)
2649 {
2650         struct ftrace_iterator *iter;
2651
2652         if (unlikely(ftrace_disabled))
2653                 return -ENODEV;
2654
2655         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2656         if (iter) {
2657                 iter->pg = ftrace_pages_start;
2658                 iter->ops = &global_ops;
2659         }
2660
2661         return iter ? 0 : -ENOMEM;
2662 }
2663
2664 static int
2665 ftrace_enabled_open(struct inode *inode, struct file *file)
2666 {
2667         struct ftrace_iterator *iter;
2668
2669         if (unlikely(ftrace_disabled))
2670                 return -ENODEV;
2671
2672         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2673         if (iter) {
2674                 iter->pg = ftrace_pages_start;
2675                 iter->flags = FTRACE_ITER_ENABLED;
2676                 iter->ops = &global_ops;
2677         }
2678
2679         return iter ? 0 : -ENOMEM;
2680 }
2681
2682 static void ftrace_filter_reset(struct ftrace_hash *hash)
2683 {
2684         mutex_lock(&ftrace_lock);
2685         ftrace_hash_clear(hash);
2686         mutex_unlock(&ftrace_lock);
2687 }
2688
2689 /**
2690  * ftrace_regex_open - initialize function tracer filter files
2691  * @ops: The ftrace_ops that hold the hash filters
2692  * @flag: The type of filter to process
2693  * @inode: The inode, usually passed in to your open routine
2694  * @file: The file, usually passed in to your open routine
2695  *
2696  * ftrace_regex_open() initializes the filter files for the
2697  * @ops. Depending on @flag it may process the filter hash or
2698  * the notrace hash of @ops. With this called from the open
2699  * routine, you can use ftrace_filter_write() for the write
2700  * routine if @flag has FTRACE_ITER_FILTER set, or
2701  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2702  * ftrace_filter_lseek() should be used as the lseek routine, and
2703  * release must call ftrace_regex_release().
2704  */
2705 int
2706 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2707                   struct inode *inode, struct file *file)
2708 {
2709         struct ftrace_iterator *iter;
2710         struct ftrace_hash *hash;
2711         int ret = 0;
2712
2713         ftrace_ops_init(ops);
2714
2715         if (unlikely(ftrace_disabled))
2716                 return -ENODEV;
2717
2718         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2719         if (!iter)
2720                 return -ENOMEM;
2721
2722         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2723                 kfree(iter);
2724                 return -ENOMEM;
2725         }
2726
2727         iter->ops = ops;
2728         iter->flags = flag;
2729
2730         mutex_lock(&ops->regex_lock);
2731
2732         if (flag & FTRACE_ITER_NOTRACE)
2733                 hash = ops->notrace_hash;
2734         else
2735                 hash = ops->filter_hash;
2736
2737         if (file->f_mode & FMODE_WRITE) {
2738                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2739                 if (!iter->hash) {
2740                         trace_parser_put(&iter->parser);
2741                         kfree(iter);
2742                         ret = -ENOMEM;
2743                         goto out_unlock;
2744                 }
2745         }
2746
2747         if ((file->f_mode & FMODE_WRITE) &&
2748             (file->f_flags & O_TRUNC))
2749                 ftrace_filter_reset(iter->hash);
2750
2751         if (file->f_mode & FMODE_READ) {
2752                 iter->pg = ftrace_pages_start;
2753
2754                 ret = seq_open(file, &show_ftrace_seq_ops);
2755                 if (!ret) {
2756                         struct seq_file *m = file->private_data;
2757                         m->private = iter;
2758                 } else {
2759                         /* Failed */
2760                         free_ftrace_hash(iter->hash);
2761                         trace_parser_put(&iter->parser);
2762                         kfree(iter);
2763                 }
2764         } else
2765                 file->private_data = iter;
2766
2767  out_unlock:
2768         mutex_unlock(&ops->regex_lock);
2769
2770         return ret;
2771 }
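
/*
 * Sketch of how these helpers are typically wired together; the ops
 * and fops names are illustrative, not defined in this file:
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = my_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = ftrace_filter_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */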
2772
2773 static int
2774 ftrace_filter_open(struct inode *inode, struct file *file)
2775 {
2776         return ftrace_regex_open(&global_ops,
2777                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2778                         inode, file);
2779 }
2780
2781 static int
2782 ftrace_notrace_open(struct inode *inode, struct file *file)
2783 {
2784         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2785                                  inode, file);
2786 }
2787
2788 static int ftrace_match(char *str, char *regex, int len, int type)
2789 {
2790         int matched = 0;
2791         int slen;
2792
2793         switch (type) {
2794         case MATCH_FULL:
2795                 if (strcmp(str, regex) == 0)
2796                         matched = 1;
2797                 break;
2798         case MATCH_FRONT_ONLY:
2799                 if (strncmp(str, regex, len) == 0)
2800                         matched = 1;
2801                 break;
2802         case MATCH_MIDDLE_ONLY:
2803                 if (strstr(str, regex))
2804                         matched = 1;
2805                 break;
2806         case MATCH_END_ONLY:
2807                 slen = strlen(str);
2808                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2809                         matched = 1;
2810                 break;
2811         }
2812
2813         return matched;
2814 }
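
/*
 * Informally, filter_parse_regex() maps user globs onto these types:
 * "foo" stays MATCH_FULL, "foo*" strips to "foo" with MATCH_FRONT_ONLY,
 * "*foo*" becomes MATCH_MIDDLE_ONLY, and "*foo" becomes MATCH_END_ONLY.
 */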
2815
2816 static int
2817 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2818 {
2819         struct ftrace_func_entry *entry;
2820         int ret = 0;
2821
2822         entry = ftrace_lookup_ip(hash, rec->ip);
2823         if (not) {
2824                 /* Do nothing if it doesn't exist */
2825                 if (!entry)
2826                         return 0;
2827
2828                 free_hash_entry(hash, entry);
2829         } else {
2830                 /* Do nothing if it exists */
2831                 if (entry)
2832                         return 0;
2833
2834                 ret = add_hash_entry(hash, rec->ip);
2835         }
2836         return ret;
2837 }
2838
2839 static int
2840 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2841                     char *regex, int len, int type)
2842 {
2843         char str[KSYM_SYMBOL_LEN];
2844         char *modname;
2845
2846         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2847
2848         if (mod) {
2849                 /* module lookup requires matching the module */
2850                 if (!modname || strcmp(modname, mod))
2851                         return 0;
2852
2853                 /* blank search means to match all funcs in the mod */
2854                 if (!len)
2855                         return 1;
2856         }
2857
2858         return ftrace_match(str, regex, len, type);
2859 }
2860
2861 static int
2862 match_records(struct ftrace_hash *hash, char *buff,
2863               int len, char *mod, int not)
2864 {
2865         unsigned search_len = 0;
2866         struct ftrace_page *pg;
2867         struct dyn_ftrace *rec;
2868         int type = MATCH_FULL;
2869         char *search = buff;
2870         int found = 0;
2871         int ret;
2872
2873         if (len) {
2874                 type = filter_parse_regex(buff, len, &search, &not);
2875                 search_len = strlen(search);
2876         }
2877
2878         mutex_lock(&ftrace_lock);
2879
2880         if (unlikely(ftrace_disabled))
2881                 goto out_unlock;
2882
2883         do_for_each_ftrace_rec(pg, rec) {
2884                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2885                         ret = enter_record(hash, rec, not);
2886                         if (ret < 0) {
2887                                 found = ret;
2888                                 goto out_unlock;
2889                         }
2890                         found = 1;
2891                 }
2892         } while_for_each_ftrace_rec();
2893  out_unlock:
2894         mutex_unlock(&ftrace_lock);
2895
2896         return found;
2897 }
2898
2899 static int
2900 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2901 {
2902         return match_records(hash, buff, len, NULL, 0);
2903 }
2904
2905 static int
2906 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2907 {
2908         int not = 0;
2909
2910         /* blank or '*' mean the same */
2911         if (strcmp(buff, "*") == 0)
2912                 buff[0] = 0;
2913
2914         /* handle the case of 'don't filter this module' */
2915         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2916                 buff[0] = 0;
2917                 not = 1;
2918         }
2919
2920         return match_records(hash, buff, strlen(buff), mod, not);
2921 }
2922
2923 /*
2924  * We register the module command as a template to show others how
2925  * to register a command as well.
2926  */
2927
2928 static int
2929 ftrace_mod_callback(struct ftrace_hash *hash,
2930                     char *func, char *cmd, char *param, int enable)
2931 {
2932         char *mod;
2933         int ret = -EINVAL;
2934
2935         /*
2936          * cmd == 'mod' because we only registered this func
2937          * for the 'mod' ftrace_func_command.
2938          * But if you register one func with multiple commands,
2939          * you can tell which command was used by the cmd
2940          * parameter.
2941          */
2942
2943         /* we must have a module name */
2944         if (!param)
2945                 return ret;
2946
2947         mod = strsep(&param, ":");
2948         if (!strlen(mod))
2949                 return ret;
2950
2951         ret = ftrace_match_module_records(hash, func, mod);
2952         if (!ret)
2953                 ret = -EINVAL;
2954         if (ret < 0)
2955                 return ret;
2956
2957         return 0;
2958 }
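
/*
 * Example use from user space (the module name is illustrative):
 *
 *	echo '*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which selects every traceable function in the ext3 module.
 */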
2959
2960 static struct ftrace_func_command ftrace_mod_cmd = {
2961         .name                   = "mod",
2962         .func                   = ftrace_mod_callback,
2963 };
2964
2965 static int __init ftrace_mod_cmd_init(void)
2966 {
2967         return register_ftrace_command(&ftrace_mod_cmd);
2968 }
2969 core_initcall(ftrace_mod_cmd_init);
2970
2971 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2972                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2973 {
2974         struct ftrace_func_probe *entry;
2975         struct hlist_head *hhd;
2976         unsigned long key;
2977
2978         key = hash_long(ip, FTRACE_HASH_BITS);
2979
2980         hhd = &ftrace_func_hash[key];
2981
2982         if (hlist_empty(hhd))
2983                 return;
2984
2985         /*
2986          * Disable preemption for these calls to prevent an RCU grace
2987          * period. This syncs the hash iteration and freeing of items
2988          * on the hash. rcu_read_lock is too dangerous here.
2989          */
2990         preempt_disable_notrace();
2991         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2992                 if (entry->ip == ip)
2993                         entry->ops->func(ip, parent_ip, &entry->data);
2994         }
2995         preempt_enable_notrace();
2996 }
2997
2998 static struct ftrace_ops trace_probe_ops __read_mostly =
2999 {
3000         .func           = function_trace_probe_call,
3001         .flags          = FTRACE_OPS_FL_INITIALIZED,
3002         INIT_REGEX_LOCK(trace_probe_ops)
3003 };
3004
3005 static int ftrace_probe_registered;
3006
3007 static void __enable_ftrace_function_probe(void)
3008 {
3009         int ret;
3010         int i;
3011
3012         if (ftrace_probe_registered) {
3013                 /* still need to update the function call sites */
3014                 if (ftrace_enabled)
3015                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3016                 return;
3017         }
3018
3019         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3020                 struct hlist_head *hhd = &ftrace_func_hash[i];
3021                 if (hhd->first)
3022                         break;
3023         }
3024         /* Nothing registered? */
3025         if (i == FTRACE_FUNC_HASHSIZE)
3026                 return;
3027
3028         ret = ftrace_startup(&trace_probe_ops, 0);
3029
3030         ftrace_probe_registered = 1;
3031 }
3032
3033 static void __disable_ftrace_function_probe(void)
3034 {
3035         int i;
3036
3037         if (!ftrace_probe_registered)
3038                 return;
3039
3040         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3041                 struct hlist_head *hhd = &ftrace_func_hash[i];
3042                 if (hhd->first)
3043                         return;
3044         }
3045
3046         /* no more funcs left */
3047         ftrace_shutdown(&trace_probe_ops, 0);
3048
3049         ftrace_probe_registered = 0;
3050 }
3051
3052
3053 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3054 {
3055         if (entry->ops->free)
3056                 entry->ops->free(entry->ops, entry->ip, &entry->data);
3057         kfree(entry);
3058 }
3059
3060 int
3061 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3062                               void *data)
3063 {
3064         struct ftrace_func_probe *entry;
3065         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3066         struct ftrace_hash *hash;
3067         struct ftrace_page *pg;
3068         struct dyn_ftrace *rec;
3069         int type, len, not;
3070         unsigned long key;
3071         int count = 0;
3072         char *search;
3073         int ret;
3074
3075         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3076         len = strlen(search);
3077
3078         /* we do not support '!' for function probes */
3079         if (WARN_ON(not))
3080                 return -EINVAL;
3081
3082         mutex_lock(&trace_probe_ops.regex_lock);
3083
3084         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3085         if (!hash) {
3086                 count = -ENOMEM;
3087                 goto out;
3088         }
3089
3090         if (unlikely(ftrace_disabled)) {
3091                 count = -ENODEV;
3092                 goto out;
3093         }
3094
3095         mutex_lock(&ftrace_lock);
3096
3097         do_for_each_ftrace_rec(pg, rec) {
3098
3099                 if (!ftrace_match_record(rec, NULL, search, len, type))
3100                         continue;
3101
3102                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3103                 if (!entry) {
3104                         /* If we did not process any, then return error */
3105                         if (!count)
3106                                 count = -ENOMEM;
3107                         goto out_unlock;
3108                 }
3109
3110                 count++;
3111
3112                 entry->data = data;
3113
3114                 /*
3115                  * The caller might want to do something special
3116                  * for each function we find. We call the callback
3117                  * to give the caller an opportunity to do so.
3118                  */
3119                 if (ops->init) {
3120                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3121                                 /* caller does not like this func */
3122                                 kfree(entry);
3123                                 continue;
3124                         }
3125                 }
3126
3127                 ret = enter_record(hash, rec, 0);
3128                 if (ret < 0) {
3129                         kfree(entry);
3130                         count = ret;
3131                         goto out_unlock;
3132                 }
3133
3134                 entry->ops = ops;
3135                 entry->ip = rec->ip;
3136
3137                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3138                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3139
3140         } while_for_each_ftrace_rec();
3141
3142         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3143         if (ret < 0)
3144                 count = ret;
3145
3146         __enable_ftrace_function_probe();
3147
3148  out_unlock:
3149         mutex_unlock(&ftrace_lock);
3150  out:
3151         mutex_unlock(&trace_probe_ops.regex_lock);
3152         free_ftrace_hash(hash);
3153
3154         return count;
3155 }
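
/*
 * Usage sketch (names illustrative): a caller supplies a
 * struct ftrace_probe_ops whose ->func matches the call made from
 * function_trace_probe_call() above, then attaches it to a glob:
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */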
3156
3157 enum {
3158         PROBE_TEST_FUNC         = 1,
3159         PROBE_TEST_DATA         = 2
3160 };
3161
3162 static void
3163 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3164                                   void *data, int flags)
3165 {
3166         struct ftrace_func_entry *rec_entry;
3167         struct ftrace_func_probe *entry;
3168         struct ftrace_func_probe *p;
3169         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3170         struct list_head free_list;
3171         struct ftrace_hash *hash;
3172         struct hlist_node *tmp;
3173         char str[KSYM_SYMBOL_LEN];
3174         int type = MATCH_FULL;
3175         int i, len = 0;
3176         char *search;
3177
3178         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3179                 glob = NULL;
3180         else if (glob) {
3181                 int not;
3182
3183                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3184                 len = strlen(search);
3185
3186                 /* we do not support '!' for function probes */
3187                 if (WARN_ON(not))
3188                         return;
3189         }
3190
3191         mutex_lock(&trace_probe_ops.regex_lock);
3192
3193         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3194         if (!hash)
3195                 /* Hmm, should report this somehow */
3196                 goto out_unlock;
3197
3198         INIT_LIST_HEAD(&free_list);
3199
3200         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3201                 struct hlist_head *hhd = &ftrace_func_hash[i];
3202
3203                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3204
3205                         /* break up if statements for readability */
3206                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3207                                 continue;
3208
3209                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3210                                 continue;
3211
3212                         /* do this last, since it is the most expensive */
3213                         if (glob) {
3214                                 kallsyms_lookup(entry->ip, NULL, NULL,
3215                                                 NULL, str);
3216                                 if (!ftrace_match(str, glob, len, type))
3217                                         continue;
3218                         }
3219
3220                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3221                         /* It is possible more than one entry had this ip */
3222                         if (rec_entry)
3223                                 free_hash_entry(hash, rec_entry);
3224
3225                         hlist_del_rcu(&entry->node);
3226                         list_add(&entry->free_list, &free_list);
3227                 }
3228         }
3229         mutex_lock(&ftrace_lock);
3230         __disable_ftrace_function_probe();
3231         /*
3232          * Remove after the disable is called. Otherwise, if the last
3233          * probe is removed, a null hash means *all enabled*.
3234          */
3235         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3236         synchronize_sched();
3237         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3238                 list_del(&entry->free_list);
3239                 ftrace_free_entry(entry);
3240         }
3241         mutex_unlock(&ftrace_lock);
3242
3243  out_unlock:
3244         mutex_unlock(&trace_probe_ops.regex_lock);
3245         free_ftrace_hash(hash);
3246 }
3247
3248 void
3249 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3250                                 void *data)
3251 {
3252         __unregister_ftrace_function_probe(glob, ops, data,
3253                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3254 }
3255
3256 void
3257 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3258 {
3259         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3260 }
3261
3262 void unregister_ftrace_function_probe_all(char *glob)
3263 {
3264         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3265 }
3266
3267 static LIST_HEAD(ftrace_commands);
3268 static DEFINE_MUTEX(ftrace_cmd_mutex);
3269
3270 int register_ftrace_command(struct ftrace_func_command *cmd)
3271 {
3272         struct ftrace_func_command *p;
3273         int ret = 0;
3274
3275         mutex_lock(&ftrace_cmd_mutex);
3276         list_for_each_entry(p, &ftrace_commands, list) {
3277                 if (strcmp(cmd->name, p->name) == 0) {
3278                         ret = -EBUSY;
3279                         goto out_unlock;
3280                 }
3281         }
3282         list_add(&cmd->list, &ftrace_commands);
3283  out_unlock:
3284         mutex_unlock(&ftrace_cmd_mutex);
3285
3286         return ret;
3287 }
3288
3289 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3290 {
3291         struct ftrace_func_command *p, *n;
3292         int ret = -ENODEV;
3293
3294         mutex_lock(&ftrace_cmd_mutex);
3295         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3296                 if (strcmp(cmd->name, p->name) == 0) {
3297                         ret = 0;
3298                         list_del_init(&p->list);
3299                         goto out_unlock;
3300                 }
3301         }
3302  out_unlock:
3303         mutex_unlock(&ftrace_cmd_mutex);
3304
3305         return ret;
3306 }
3307
3308 static int ftrace_process_regex(struct ftrace_hash *hash,
3309                                 char *buff, int len, int enable)
3310 {
3311         char *func, *command, *next = buff;
3312         struct ftrace_func_command *p;
3313         int ret = -EINVAL;
3314
3315         func = strsep(&next, ":");
3316
3317         if (!next) {
3318                 ret = ftrace_match_records(hash, func, len);
3319                 if (!ret)
3320                         ret = -EINVAL;
3321                 if (ret < 0)
3322                         return ret;
3323                 return 0;
3324         }
3325
3326         /* command found */
3327
3328         command = strsep(&next, ":");
3329
3330         mutex_lock(&ftrace_cmd_mutex);
3331         list_for_each_entry(p, &ftrace_commands, list) {
3332                 if (strcmp(p->name, command) == 0) {
3333                         ret = p->func(hash, func, command, next, enable);
3334                         goto out_unlock;
3335                 }
3336         }
3337  out_unlock:
3338         mutex_unlock(&ftrace_cmd_mutex);
3339
3340         return ret;
3341 }
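
/*
 * Parse example: writing "write*:mod:ext3" yields func == "write*",
 * command == "mod" and next == "ext3", which is dispatched to the
 * registered "mod" command (ftrace_mod_callback() above).
 */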
3342
3343 static ssize_t
3344 ftrace_regex_write(struct file *file, const char __user *ubuf,
3345                    size_t cnt, loff_t *ppos, int enable)
3346 {
3347         struct ftrace_iterator *iter;
3348         struct trace_parser *parser;
3349         ssize_t ret, read;
3350
3351         if (!cnt)
3352                 return 0;
3353
3354         if (file->f_mode & FMODE_READ) {
3355                 struct seq_file *m = file->private_data;
3356                 iter = m->private;
3357         } else
3358                 iter = file->private_data;
3359
3360         if (unlikely(ftrace_disabled))
3361                 return -ENODEV;
3362
3363         /* iter->hash is a local copy, so we don't need regex_lock */
3364
3365         parser = &iter->parser;
3366         read = trace_get_user(parser, ubuf, cnt, ppos);
3367
3368         if (read >= 0 && trace_parser_loaded(parser) &&
3369             !trace_parser_cont(parser)) {
3370                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3371                                            parser->idx, enable);
3372                 trace_parser_clear(parser);
3373                 if (ret < 0)
3374                         goto out;
3375         }
3376
3377         ret = read;
3378  out:
3379         return ret;
3380 }
3381
3382 ssize_t
3383 ftrace_filter_write(struct file *file, const char __user *ubuf,
3384                     size_t cnt, loff_t *ppos)
3385 {
3386         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3387 }
3388
3389 ssize_t
3390 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3391                      size_t cnt, loff_t *ppos)
3392 {
3393         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3394 }
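/*
 * Editorial note: ftrace_filter_write()/ftrace_notrace_write() back the
 * set_ftrace_filter and set_ftrace_notrace files, e.g. (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *      echo 'vfs_read' > /sys/kernel/debug/tracing/set_ftrace_filter
 *      echo '*spin_lock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *      echo 'kmalloc:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The last form invokes a registered ftrace_func_command ("traceoff" is
 * registered elsewhere in the tracer code).
 */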
3395
3396 static int
3397 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3398 {
3399         struct ftrace_func_entry *entry;
3400
3401         if (!ftrace_location(ip))
3402                 return -EINVAL;
3403
3404         if (remove) {
3405                 entry = ftrace_lookup_ip(hash, ip);
3406                 if (!entry)
3407                         return -ENOENT;
3408                 free_hash_entry(hash, entry);
3409                 return 0;
3410         }
3411
3412         return add_hash_entry(hash, ip);
3413 }
3414
3415 static int
3416 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3417                 unsigned long ip, int remove, int reset, int enable)
3418 {
3419         struct ftrace_hash **orig_hash;
3420         struct ftrace_hash *hash;
3421         int ret;
3422
3423         /* All global ops use the global ops filters */
3424         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3425                 ops = &global_ops;
3426
3427         if (unlikely(ftrace_disabled))
3428                 return -ENODEV;
3429
3430         mutex_lock(&ops->regex_lock);
3431
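        /*
         * Editorial note: filter updates are copy-on-write.  A private
         * copy of the current hash is built below, edited, and only
         * swapped in by ftrace_hash_move() under ftrace_lock, so readers
         * never see a half-edited hash.
         */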
3432         if (enable)
3433                 orig_hash = &ops->filter_hash;
3434         else
3435                 orig_hash = &ops->notrace_hash;
3436
3437         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3438         if (!hash) {
3439                 ret = -ENOMEM;
3440                 goto out_regex_unlock;
3441         }
3442
3443         if (reset)
3444                 ftrace_filter_reset(hash);
3445         if (buf && !ftrace_match_records(hash, buf, len)) {
3446                 ret = -EINVAL;
3447                 goto out_regex_unlock;
3448         }
3449         if (ip) {
3450                 ret = ftrace_match_addr(hash, ip, remove);
3451                 if (ret < 0)
3452                         goto out_regex_unlock;
3453         }
3454
3455         mutex_lock(&ftrace_lock);
3456         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3457         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3458             && ftrace_enabled)
3459                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3460
3461         mutex_unlock(&ftrace_lock);
3462
3463  out_regex_unlock:
3464         mutex_unlock(&ops->regex_lock);
3465
3466         free_ftrace_hash(hash);
3467         return ret;
3468 }
3469
3470 static int
3471 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3472                 int reset, int enable)
3473 {
3474         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3475 }
3476
3477 /**
3478  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3479  * @ops - the ops to set the filter with
3480  * @ip - the address to add to or remove from the filter.
3481  * @remove - non zero to remove the ip from the filter
3482  * @reset - non zero to reset all filters before applying this filter.
3483  *
3484  * Filters denote which functions should be enabled when tracing is enabled.
3485  * If @ip is zero, no address is added or removed; only @reset takes effect.
3486  */
3487 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3488                          int remove, int reset)
3489 {
3490         ftrace_ops_init(ops);
3491         return ftrace_set_addr(ops, ip, remove, reset, 1);
3492 }
3493 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
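/*
 * Editorial sketch (hypothetical client, not in the original file):
 * trace a single function by address.  kallsyms_lookup_name() and the
 * names my_ip_ops/my_ip_handler are illustrative only.
 */
static void notrace my_ip_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
{
        /* runs on entry to the filtered function; must not recurse or sleep */
}

static struct ftrace_ops my_ip_ops = {
        .func = my_ip_handler,
};

static int __init my_ip_filter_init(void)
{
        unsigned long ip = kallsyms_lookup_name("vfs_read");
        int ret;

        if (!ip)
                return -ENOENT;
        ret = ftrace_set_filter_ip(&my_ip_ops, ip, 0 /* add */, 1 /* reset */);
        if (ret)
                return ret;
        return register_ftrace_function(&my_ip_ops);
}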
3494
3495 static int
3496 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3497                  int reset, int enable)
3498 {
3499         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3500 }
3501
3502 /**
3503  * ftrace_set_filter - set a function to filter on in ftrace
3504  * @ops - the ops to set the filter with
3505  * @buf - the string that holds the function filter text.
3506  * @len - the length of the string.
3507  * @reset - non zero to reset all filters before applying this filter.
3508  *
3509  * Filters denote which functions should be enabled when tracing is enabled.
3510  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3511  */
3512 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3513                        int len, int reset)
3514 {
3515         ftrace_ops_init(ops);
3516         return ftrace_set_regex(ops, buf, len, reset, 1);
3517 }
3518 EXPORT_SYMBOL_GPL(ftrace_set_filter);
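/*
 * Editorial sketch: glob-based filtering for a hypothetical ops (an
 * initialized struct ftrace_ops such as my_ip_ops in the sketch above).
 * This matches every function whose name starts with "vfs_".
 */
static int __init my_glob_filter(struct ftrace_ops *my_ops)
{
        unsigned char buf[] = "vfs_*";

        return ftrace_set_filter(my_ops, buf, strlen((char *)buf),
                                 1 /* reset */);
}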
3519
3520 /**
3521  * ftrace_set_notrace - set a function to not trace in ftrace
3522  * @ops - the ops to set the notrace filter with
3523  * @buf - the string that holds the function notrace text.
3524  * @len - the length of the string.
3525  * @reset - non zero to reset all filters before applying this filter.
3526  *
3527  * Notrace Filters denote which functions should not be enabled when tracing
3528  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3529  * for tracing.
3530  */
3531 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3532                         int len, int reset)
3533 {
3534         ftrace_ops_init(ops);
3535         return ftrace_set_regex(ops, buf, len, reset, 0);
3536 }
3537 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3538 /**
3539  * ftrace_set_global_filter - set a function to filter on with the global ops
3540  *
3541  * @buf - the string that holds the function filter text.
3542  * @len - the length of the string.
3543  * @reset - non zero to reset all filters before applying this filter.
3544  *
3545  * Filters denote which functions should be enabled when tracing is enabled.
3546  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3547  */
3548 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3549 {
3550         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3551 }
3552 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3553
3554 /**
3555  * ftrace_set_global_notrace - set a function to not trace with the global ops
3556  *
3557  * @buf - the string that holds the function notrace text.
3558  * @len - the length of the string.
3559  * @reset - non zero to reset all filters before applying this filter.
3560  *
3561  * Notrace Filters denote which functions should not be enabled when tracing
3562  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3563  * for tracing.
3564  */
3565 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3566 {
3567         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3568 }
3569 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3570
3571 /*
3572  * command line interface to allow users to set filters on boot up.
3573  */
3574 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3575 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3576 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3577
3578 static int __init set_ftrace_notrace(char *str)
3579 {
3580         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3581         return 1;
3582 }
3583 __setup("ftrace_notrace=", set_ftrace_notrace);
3584
3585 static int __init set_ftrace_filter(char *str)
3586 {
3587         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3588         return 1;
3589 }
3590 __setup("ftrace_filter=", set_ftrace_filter);
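/*
 * Editorial example: the boot-time equivalents of the filter files, set
 * on the kernel command line before debugfs is available:
 *
 *      ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=rcu*
 *      ftrace_graph_filter=do_IRQ
 */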
3591
3592 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3593 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3594 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3595
3596 static int __init set_graph_function(char *str)
3597 {
3598         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3599         return 1;
3600 }
3601 __setup("ftrace_graph_filter=", set_graph_function);
3602
3603 static void __init set_ftrace_early_graph(char *buf)
3604 {
3605         int ret;
3606         char *func;
3607
3608         while (buf) {
3609                 func = strsep(&buf, ",");
3610                 /* we allow only one expression at a time */
3611                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3612                                       func);
3613                 if (ret)
3614                         printk(KERN_DEBUG "ftrace: function %s not "
3615                                           "traceable\n", func);
3616         }
3617 }
3618 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3619
3620 void __init
3621 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3622 {
3623         char *func;
3624
3625         ftrace_ops_init(ops);
3626
3627         while (buf) {
3628                 func = strsep(&buf, ",");
3629                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3630         }
3631 }
3632
3633 static void __init set_ftrace_early_filters(void)
3634 {
3635         if (ftrace_filter_buf[0])
3636                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3637         if (ftrace_notrace_buf[0])
3638                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3639 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3640         if (ftrace_graph_buf[0])
3641                 set_ftrace_early_graph(ftrace_graph_buf);
3642 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3643 }
3644
3645 int ftrace_regex_release(struct inode *inode, struct file *file)
3646 {
3647         struct seq_file *m = (struct seq_file *)file->private_data;
3648         struct ftrace_iterator *iter;
3649         struct ftrace_hash **orig_hash;
3650         struct trace_parser *parser;
3651         int filter_hash;
3652         int ret;
3653
3654         if (file->f_mode & FMODE_READ) {
3655                 iter = m->private;
3656                 seq_release(inode, file);
3657         } else
3658                 iter = file->private_data;
3659
3660         parser = &iter->parser;
3661         if (trace_parser_loaded(parser)) {
3662                 parser->buffer[parser->idx] = 0;
3663                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3664         }
3665
3666         trace_parser_put(parser);
3667
3668         mutex_lock(&iter->ops->regex_lock);
3669
3670         if (file->f_mode & FMODE_WRITE) {
3671                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3672
3673                 if (filter_hash)
3674                         orig_hash = &iter->ops->filter_hash;
3675                 else
3676                         orig_hash = &iter->ops->notrace_hash;
3677
3678                 mutex_lock(&ftrace_lock);
3679                 ret = ftrace_hash_move(iter->ops, filter_hash,
3680                                        orig_hash, iter->hash);
3681                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3682                     && ftrace_enabled)
3683                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3684
3685                 mutex_unlock(&ftrace_lock);
3686         }
3687
3688         mutex_unlock(&iter->ops->regex_lock);
3689         free_ftrace_hash(iter->hash);
3690         kfree(iter);
3691
3692         return 0;
3693 }
3694
3695 static const struct file_operations ftrace_avail_fops = {
3696         .open = ftrace_avail_open,
3697         .read = seq_read,
3698         .llseek = seq_lseek,
3699         .release = seq_release_private,
3700 };
3701
3702 static const struct file_operations ftrace_enabled_fops = {
3703         .open = ftrace_enabled_open,
3704         .read = seq_read,
3705         .llseek = seq_lseek,
3706         .release = seq_release_private,
3707 };
3708
3709 static const struct file_operations ftrace_filter_fops = {
3710         .open = ftrace_filter_open,
3711         .read = seq_read,
3712         .write = ftrace_filter_write,
3713         .llseek = ftrace_filter_lseek,
3714         .release = ftrace_regex_release,
3715 };
3716
3717 static const struct file_operations ftrace_notrace_fops = {
3718         .open = ftrace_notrace_open,
3719         .read = seq_read,
3720         .write = ftrace_notrace_write,
3721         .llseek = ftrace_filter_lseek,
3722         .release = ftrace_regex_release,
3723 };
3724
3725 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3726
3727 static DEFINE_MUTEX(graph_lock);
3728
3729 int ftrace_graph_count;
3730 int ftrace_graph_filter_enabled;
3731 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3732
3733 static void *
3734 __g_next(struct seq_file *m, loff_t *pos)
3735 {
3736         if (*pos >= ftrace_graph_count)
3737                 return NULL;
3738         return &ftrace_graph_funcs[*pos];
3739 }
3740
3741 static void *
3742 g_next(struct seq_file *m, void *v, loff_t *pos)
3743 {
3744         (*pos)++;
3745         return __g_next(m, pos);
3746 }
3747
3748 static void *g_start(struct seq_file *m, loff_t *pos)
3749 {
3750         mutex_lock(&graph_lock);
3751
3752         /* No entries; tell g_show to print that all functions are enabled */
3753         if (!ftrace_graph_filter_enabled && !*pos)
3754                 return (void *)1;
3755
3756         return __g_next(m, pos);
3757 }
3758
3759 static void g_stop(struct seq_file *m, void *p)
3760 {
3761         mutex_unlock(&graph_lock);
3762 }
3763
3764 static int g_show(struct seq_file *m, void *v)
3765 {
3766         unsigned long *ptr = v;
3767
3768         if (!ptr)
3769                 return 0;
3770
3771         if (ptr == (unsigned long *)1) {
3772                 seq_printf(m, "#### all functions enabled ####\n");
3773                 return 0;
3774         }
3775
3776         seq_printf(m, "%ps\n", (void *)*ptr);
3777
3778         return 0;
3779 }
3780
3781 static const struct seq_operations ftrace_graph_seq_ops = {
3782         .start = g_start,
3783         .next = g_next,
3784         .stop = g_stop,
3785         .show = g_show,
3786 };
3787
3788 static int
3789 ftrace_graph_open(struct inode *inode, struct file *file)
3790 {
3791         int ret = 0;
3792
3793         if (unlikely(ftrace_disabled))
3794                 return -ENODEV;
3795
3796         mutex_lock(&graph_lock);
3797         if ((file->f_mode & FMODE_WRITE) &&
3798             (file->f_flags & O_TRUNC)) {
3799                 ftrace_graph_filter_enabled = 0;
3800                 ftrace_graph_count = 0;
3801                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3802         }
3803         mutex_unlock(&graph_lock);
3804
3805         if (file->f_mode & FMODE_READ)
3806                 ret = seq_open(file, &ftrace_graph_seq_ops);
3807
3808         return ret;
3809 }
3810
3811 static int
3812 ftrace_graph_release(struct inode *inode, struct file *file)
3813 {
3814         if (file->f_mode & FMODE_READ)
3815                 seq_release(inode, file);
3816         return 0;
3817 }
3818
3819 static int
3820 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3821 {
3822         struct dyn_ftrace *rec;
3823         struct ftrace_page *pg;
3824         int search_len;
3825         int fail = 1;
3826         int type, not;
3827         char *search;
3828         bool exists;
3829         int i;
3830
3831         /* decode regex */
3832         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3833         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3834                 return -EBUSY;
3835
3836         search_len = strlen(search);
3837
3838         mutex_lock(&ftrace_lock);
3839
3840         if (unlikely(ftrace_disabled)) {
3841                 mutex_unlock(&ftrace_lock);
3842                 return -ENODEV;
3843         }
3844
3845         do_for_each_ftrace_rec(pg, rec) {
3846
3847                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3848                         /* if it is in the array */
3849                         exists = false;
3850                         for (i = 0; i < *idx; i++) {
3851                                 if (array[i] == rec->ip) {
3852                                         exists = true;
3853                                         break;
3854                                 }
3855                         }
3856
3857                         if (!not) {
3858                                 fail = 0;
3859                                 if (!exists) {
3860                                         array[(*idx)++] = rec->ip;
3861                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3862                                                 goto out;
3863                                 }
3864                         } else {
3865                                 if (exists) {
3866                                         array[i] = array[--(*idx)];
3867                                         array[*idx] = 0;
3868                                         fail = 0;
3869                                 }
3870                         }
3871                 }
3872         } while_for_each_ftrace_rec();
3873 out:
3874         mutex_unlock(&ftrace_lock);
3875
3876         if (fail)
3877                 return -EINVAL;
3878
3879         ftrace_graph_filter_enabled = !!(*idx);
3880
3881         return 0;
3882 }
3883
3884 static ssize_t
3885 ftrace_graph_write(struct file *file, const char __user *ubuf,
3886                    size_t cnt, loff_t *ppos)
3887 {
3888         struct trace_parser parser;
3889         ssize_t read, ret;
3890
3891         if (!cnt)
3892                 return 0;
3893
3894         mutex_lock(&graph_lock);
3895
3896         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3897                 ret = -ENOMEM;
3898                 goto out_unlock;
3899         }
3900
3901         read = trace_get_user(&parser, ubuf, cnt, ppos);
3902
3903         if (read >= 0 && trace_parser_loaded(&parser)) {
3904                 parser.buffer[parser.idx] = 0;
3905
3906                 /* we allow only one expression at a time */
3907                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3908                                         parser.buffer);
3909                 if (ret)
3910                         goto out_free;
3911         }
3912
3913         ret = read;
3914
3915 out_free:
3916         trace_parser_put(&parser);
3917 out_unlock:
3918         mutex_unlock(&graph_lock);
3919
3920         return ret;
3921 }
3922
3923 static const struct file_operations ftrace_graph_fops = {
3924         .open           = ftrace_graph_open,
3925         .read           = seq_read,
3926         .write          = ftrace_graph_write,
3927         .llseek         = ftrace_filter_lseek,
3928         .release        = ftrace_graph_release,
3929 };
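/*
 * Editorial example: set_graph_function limits the graph tracer to the
 * listed entry points (and everything they call), e.g.:
 *
 *      echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *      echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */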
3930 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3931
3932 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3933 {
3934
3935         trace_create_file("available_filter_functions", 0444,
3936                         d_tracer, NULL, &ftrace_avail_fops);
3937
3938         trace_create_file("enabled_functions", 0444,
3939                         d_tracer, NULL, &ftrace_enabled_fops);
3940
3941         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3942                         NULL, &ftrace_filter_fops);
3943
3944         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3945                                     NULL, &ftrace_notrace_fops);
3946
3947 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3948         trace_create_file("set_graph_function", 0444, d_tracer,
3949                                     NULL,
3950                                     &ftrace_graph_fops);
3951 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3952
3953         return 0;
3954 }
3955
3956 static int ftrace_cmp_ips(const void *a, const void *b)
3957 {
3958         const unsigned long *ipa = a;
3959         const unsigned long *ipb = b;
3960
3961         if (*ipa > *ipb)
3962                 return 1;
3963         if (*ipa < *ipb)
3964                 return -1;
3965         return 0;
3966 }
3967
3968 static void ftrace_swap_ips(void *a, void *b, int size)
3969 {
3970         unsigned long *ipa = a;
3971         unsigned long *ipb = b;
3972         unsigned long t;
3973
3974         t = *ipa;
3975         *ipa = *ipb;
3976         *ipb = t;
3977 }
3978
3979 static int ftrace_process_locs(struct module *mod,
3980                                unsigned long *start,
3981                                unsigned long *end)
3982 {
3983         struct ftrace_page *start_pg;
3984         struct ftrace_page *pg;
3985         struct dyn_ftrace *rec;
3986         unsigned long count;
3987         unsigned long *p;
3988         unsigned long addr;
3989         unsigned long flags = 0; /* Shut up gcc */
3990         int ret = -ENOMEM;
3991
3992         count = end - start;
3993
3994         if (!count)
3995                 return 0;
3996
3997         sort(start, count, sizeof(*start),
3998              ftrace_cmp_ips, ftrace_swap_ips);
3999
4000         start_pg = ftrace_allocate_pages(count);
4001         if (!start_pg)
4002                 return -ENOMEM;
4003
4004         mutex_lock(&ftrace_lock);
4005
4006         /*
4007          * The core kernel and each module need their own pages, as
4008          * modules will free them when they are removed.
4009          * Force a new page to be allocated for modules.
4010          */
4011         if (!mod) {
4012                 WARN_ON(ftrace_pages || ftrace_pages_start);
4013                 /* First initialization */
4014                 ftrace_pages = ftrace_pages_start = start_pg;
4015         } else {
4016                 if (!ftrace_pages)
4017                         goto out;
4018
4019                 if (WARN_ON(ftrace_pages->next)) {
4020                         /* Hmm, we have free pages? */
4021                         while (ftrace_pages->next)
4022                                 ftrace_pages = ftrace_pages->next;
4023                 }
4024
4025                 ftrace_pages->next = start_pg;
4026         }
4027
4028         p = start;
4029         pg = start_pg;
4030         while (p < end) {
4031                 addr = ftrace_call_adjust(*p++);
4032                 /*
4033                  * Some architecture linkers will pad between
4034                  * the different mcount_loc sections of different
4035                  * object files to satisfy alignments.
4036                  * Skip any NULL pointers.
4037                  */
4038                 if (!addr)
4039                         continue;
4040
4041                 if (pg->index == pg->size) {
4042                         /* We should have allocated enough */
4043                         if (WARN_ON(!pg->next))
4044                                 break;
4045                         pg = pg->next;
4046                 }
4047
4048                 rec = &pg->records[pg->index++];
4049                 rec->ip = addr;
4050         }
4051
4052         /* We should have used all pages */
4053         WARN_ON(pg->next);
4054
4055         /* Assign the last page to ftrace_pages */
4056         ftrace_pages = pg;
4057
4058         /* These new locations need to be initialized */
4059         ftrace_new_pgs = start_pg;
4060
4061         /*
4062          * We only need to disable interrupts on start up
4063          * because we are modifying code that an interrupt
4064          * may execute, and the modification is not atomic.
4065          * But for modules, nothing runs the code we modify
4066          * until we are finished with it, and there's no
4067          * reason to cause large interrupt latencies while we do it.
4068          */
4069         if (!mod)
4070                 local_irq_save(flags);
4071         ftrace_update_code(mod);
4072         if (!mod)
4073                 local_irq_restore(flags);
4074         ret = 0;
4075  out:
4076         mutex_unlock(&ftrace_lock);
4077
4078         return ret;
4079 }
4080
4081 #ifdef CONFIG_MODULES
4082
4083 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4084
4085 void ftrace_release_mod(struct module *mod)
4086 {
4087         struct dyn_ftrace *rec;
4088         struct ftrace_page **last_pg;
4089         struct ftrace_page *pg;
4090         int order;
4091
4092         mutex_lock(&ftrace_lock);
4093
4094         if (ftrace_disabled)
4095                 goto out_unlock;
4096
4097         /*
4098          * Each module has its own ftrace_pages, remove
4099          * them from the list.
4100          */
4101         last_pg = &ftrace_pages_start;
4102         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4103                 rec = &pg->records[0];
4104                 if (within_module_core(rec->ip, mod)) {
4105                         /*
4106                          * As core pages are first, the first
4107                          * page should never be a module page.
4108                          */
4109                         if (WARN_ON(pg == ftrace_pages_start))
4110                                 goto out_unlock;
4111
4112                         /* Check if we are deleting the last page */
4113                         if (pg == ftrace_pages)
4114                                 ftrace_pages = next_to_ftrace_page(last_pg);
4115
4116                         *last_pg = pg->next;
4117                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4118                         free_pages((unsigned long)pg->records, order);
4119                         kfree(pg);
4120                 } else
4121                         last_pg = &pg->next;
4122         }
4123  out_unlock:
4124         mutex_unlock(&ftrace_lock);
4125 }
4126
4127 static void ftrace_init_module(struct module *mod,
4128                                unsigned long *start, unsigned long *end)
4129 {
4130         if (ftrace_disabled || start == end)
4131                 return;
4132         ftrace_process_locs(mod, start, end);
4133 }
4134
4135 static int ftrace_module_notify_enter(struct notifier_block *self,
4136                                       unsigned long val, void *data)
4137 {
4138         struct module *mod = data;
4139
4140         if (val == MODULE_STATE_COMING)
4141                 ftrace_init_module(mod, mod->ftrace_callsites,
4142                                    mod->ftrace_callsites +
4143                                    mod->num_ftrace_callsites);
4144         return 0;
4145 }
4146
4147 static int ftrace_module_notify_exit(struct notifier_block *self,
4148                                      unsigned long val, void *data)
4149 {
4150         struct module *mod = data;
4151
4152         if (val == MODULE_STATE_GOING)
4153                 ftrace_release_mod(mod);
4154
4155         return 0;
4156 }
4157 #else
4158 static int ftrace_module_notify_enter(struct notifier_block *self,
4159                                       unsigned long val, void *data)
4160 {
4161         return 0;
4162 }
4163 static int ftrace_module_notify_exit(struct notifier_block *self,
4164                                      unsigned long val, void *data)
4165 {
4166         return 0;
4167 }
4168 #endif /* CONFIG_MODULES */
4169
4170 struct notifier_block ftrace_module_enter_nb = {
4171         .notifier_call = ftrace_module_notify_enter,
4172         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4173 };
4174
4175 struct notifier_block ftrace_module_exit_nb = {
4176         .notifier_call = ftrace_module_notify_exit,
4177         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4178 };
4179
4180 extern unsigned long __start_mcount_loc[];
4181 extern unsigned long __stop_mcount_loc[];
4182
4183 void __init ftrace_init(void)
4184 {
4185         unsigned long count, addr, flags;
4186         int ret;
4187
4188         /* Keep the ftrace pointer to the stub */
4189         addr = (unsigned long)ftrace_stub;
4190
4191         local_irq_save(flags);
4192         ftrace_dyn_arch_init(&addr);
4193         local_irq_restore(flags);
4194
4195         /* ftrace_dyn_arch_init places the return code in addr */
4196         if (addr)
4197                 goto failed;
4198
4199         count = __stop_mcount_loc - __start_mcount_loc;
4200
4201         ret = ftrace_dyn_table_alloc(count);
4202         if (ret)
4203                 goto failed;
4204
4205         last_ftrace_enabled = ftrace_enabled = 1;
4206
4207         ret = ftrace_process_locs(NULL,
4208                                   __start_mcount_loc,
4209                                   __stop_mcount_loc);
4210
4211         ret = register_module_notifier(&ftrace_module_enter_nb);
4212         if (ret)
4213                 pr_warning("Failed to register ftrace module enter notifier\n");
4214
4215         ret = register_module_notifier(&ftrace_module_exit_nb);
4216         if (ret)
4217                 pr_warning("Failed to register ftrace module exit notifier\n");
4218
4219         set_ftrace_early_filters();
4220
4221         return;
4222  failed:
4223         ftrace_disabled = 1;
4224 }
4225
4226 #else
4227
4228 static struct ftrace_ops global_ops = {
4229         .func                   = ftrace_stub,
4230         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4231         INIT_REGEX_LOCK(global_ops)
4232 };
4233
4234 static int __init ftrace_nodyn_init(void)
4235 {
4236         ftrace_enabled = 1;
4237         return 0;
4238 }
4239 core_initcall(ftrace_nodyn_init);
4240
4241 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4242 static inline void ftrace_startup_enable(int command) { }
4243 /* Keep as macros so we do not need to define the commands */
4244 # define ftrace_startup(ops, command)                                   \
4245         ({                                                              \
4246                 int ___ret = __register_ftrace_function(ops);           \
4247                 if (!___ret)                                            \
4248                         (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
4249                 ___ret;                                                 \
4250         })
4251 # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
4252
4253 # define ftrace_startup_sysctl()        do { } while (0)
4254 # define ftrace_shutdown_sysctl()       do { } while (0)
4255
4256 static inline int
4257 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4258 {
4259         return 1;
4260 }
4261
4262 #endif /* CONFIG_DYNAMIC_FTRACE */
4263
4264 static void
4265 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4266                         struct ftrace_ops *op, struct pt_regs *regs)
4267 {
4268         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4269                 return;
4270
4271         /*
4272          * Some of the ops may be dynamically allocated,
4273          * they must be freed after a synchronize_sched().
4274          */
4275         preempt_disable_notrace();
4276         trace_recursion_set(TRACE_CONTROL_BIT);
4277         do_for_each_ftrace_op(op, ftrace_control_list) {
4278                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4279                     !ftrace_function_local_disabled(op) &&
4280                     ftrace_ops_test(op, ip, regs))
4281                         op->func(ip, parent_ip, op, regs);
4282         } while_for_each_ftrace_op(op);
4283         trace_recursion_clear(TRACE_CONTROL_BIT);
4284         preempt_enable_notrace();
4285 }
4286
4287 static struct ftrace_ops control_ops = {
4288         .func   = ftrace_ops_control_func,
4289         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4290         INIT_REGEX_LOCK(control_ops)
4291 };
4292
4293 static inline void
4294 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4295                        struct ftrace_ops *ignored, struct pt_regs *regs)
4296 {
4297         struct ftrace_ops *op;
4298         int bit;
4299
4300         if (function_trace_stop)
4301                 return;
4302
4303         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4304         if (bit < 0)
4305                 return;
4306
4307         /*
4308          * Some of the ops may be dynamically allocated,
4309          * they must be freed after a synchronize_sched().
4310          */
4311         preempt_disable_notrace();
4312         do_for_each_ftrace_op(op, ftrace_ops_list) {
4313                 if (ftrace_ops_test(op, ip, regs))
4314                         op->func(ip, parent_ip, op, regs);
4315         } while_for_each_ftrace_op(op);
4316         preempt_enable_notrace();
4317         trace_clear_recursion(bit);
4318 }
4319
4320 /*
4321  * Some archs only support passing ip and parent_ip. Even though
4322  * the list function ignores the op parameter, we do not want any
4323  * C side effects, where a function is called without the caller
4324  * sending a third parameter.
4325  * Archs are to support both the regs and ftrace_ops at the same time.
4326  * If they support ftrace_ops, it is assumed they support regs.
4327  * If call backs want to use regs, they must either check for regs
4328  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4329  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4330  * An architecture can pass partial regs with ftrace_ops and still
4331  * set ARCH_SUPPORTS_FTRACE_OPS.
4332  */
4333 #if ARCH_SUPPORTS_FTRACE_OPS
4334 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4335                                  struct ftrace_ops *op, struct pt_regs *regs)
4336 {
4337         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4338 }
4339 #else
4340 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4341 {
4342         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4343 }
4344 #endif
4345
4346 static void clear_ftrace_swapper(void)
4347 {
4348         struct task_struct *p;
4349         int cpu;
4350
4351         get_online_cpus();
4352         for_each_online_cpu(cpu) {
4353                 p = idle_task(cpu);
4354                 clear_tsk_trace_trace(p);
4355         }
4356         put_online_cpus();
4357 }
4358
4359 static void set_ftrace_swapper(void)
4360 {
4361         struct task_struct *p;
4362         int cpu;
4363
4364         get_online_cpus();
4365         for_each_online_cpu(cpu) {
4366                 p = idle_task(cpu);
4367                 set_tsk_trace_trace(p);
4368         }
4369         put_online_cpus();
4370 }
4371
4372 static void clear_ftrace_pid(struct pid *pid)
4373 {
4374         struct task_struct *p;
4375
4376         rcu_read_lock();
4377         do_each_pid_task(pid, PIDTYPE_PID, p) {
4378                 clear_tsk_trace_trace(p);
4379         } while_each_pid_task(pid, PIDTYPE_PID, p);
4380         rcu_read_unlock();
4381
4382         put_pid(pid);
4383 }
4384
4385 static void set_ftrace_pid(struct pid *pid)
4386 {
4387         struct task_struct *p;
4388
4389         rcu_read_lock();
4390         do_each_pid_task(pid, PIDTYPE_PID, p) {
4391                 set_tsk_trace_trace(p);
4392         } while_each_pid_task(pid, PIDTYPE_PID, p);
4393         rcu_read_unlock();
4394 }
4395
4396 static void clear_ftrace_pid_task(struct pid *pid)
4397 {
4398         if (pid == ftrace_swapper_pid)
4399                 clear_ftrace_swapper();
4400         else
4401                 clear_ftrace_pid(pid);
4402 }
4403
4404 static void set_ftrace_pid_task(struct pid *pid)
4405 {
4406         if (pid == ftrace_swapper_pid)
4407                 set_ftrace_swapper();
4408         else
4409                 set_ftrace_pid(pid);
4410 }
4411
4412 static int ftrace_pid_add(int p)
4413 {
4414         struct pid *pid;
4415         struct ftrace_pid *fpid;
4416         int ret = -EINVAL;
4417
4418         mutex_lock(&ftrace_lock);
4419
4420         if (!p)
4421                 pid = ftrace_swapper_pid;
4422         else
4423                 pid = find_get_pid(p);
4424
4425         if (!pid)
4426                 goto out;
4427
4428         ret = 0;
4429
4430         list_for_each_entry(fpid, &ftrace_pids, list)
4431                 if (fpid->pid == pid)
4432                         goto out_put;
4433
4434         ret = -ENOMEM;
4435
4436         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4437         if (!fpid)
4438                 goto out_put;
4439
4440         list_add(&fpid->list, &ftrace_pids);
4441         fpid->pid = pid;
4442
4443         set_ftrace_pid_task(pid);
4444
4445         ftrace_update_pid_func();
4446         ftrace_startup_enable(0);
4447
4448         mutex_unlock(&ftrace_lock);
4449         return 0;
4450
4451 out_put:
4452         if (pid != ftrace_swapper_pid)
4453                 put_pid(pid);
4454
4455 out:
4456         mutex_unlock(&ftrace_lock);
4457         return ret;
4458 }
4459
4460 static void ftrace_pid_reset(void)
4461 {
4462         struct ftrace_pid *fpid, *safe;
4463
4464         mutex_lock(&ftrace_lock);
4465         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4466                 struct pid *pid = fpid->pid;
4467
4468                 clear_ftrace_pid_task(pid);
4469
4470                 list_del(&fpid->list);
4471                 kfree(fpid);
4472         }
4473
4474         ftrace_update_pid_func();
4475         ftrace_startup_enable(0);
4476
4477         mutex_unlock(&ftrace_lock);
4478 }
4479
4480 static void *fpid_start(struct seq_file *m, loff_t *pos)
4481 {
4482         mutex_lock(&ftrace_lock);
4483
4484         if (list_empty(&ftrace_pids) && (!*pos))
4485                 return (void *) 1;
4486
4487         return seq_list_start(&ftrace_pids, *pos);
4488 }
4489
4490 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4491 {
4492         if (v == (void *)1)
4493                 return NULL;
4494
4495         return seq_list_next(v, &ftrace_pids, pos);
4496 }
4497
4498 static void fpid_stop(struct seq_file *m, void *p)
4499 {
4500         mutex_unlock(&ftrace_lock);
4501 }
4502
4503 static int fpid_show(struct seq_file *m, void *v)
4504 {
4505         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4506
4507         if (v == (void *)1) {
4508                 seq_printf(m, "no pid\n");
4509                 return 0;
4510         }
4511
4512         if (fpid->pid == ftrace_swapper_pid)
4513                 seq_printf(m, "swapper tasks\n");
4514         else
4515                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4516
4517         return 0;
4518 }
4519
4520 static const struct seq_operations ftrace_pid_sops = {
4521         .start = fpid_start,
4522         .next = fpid_next,
4523         .stop = fpid_stop,
4524         .show = fpid_show,
4525 };
4526
4527 static int
4528 ftrace_pid_open(struct inode *inode, struct file *file)
4529 {
4530         int ret = 0;
4531
4532         if ((file->f_mode & FMODE_WRITE) &&
4533             (file->f_flags & O_TRUNC))
4534                 ftrace_pid_reset();
4535
4536         if (file->f_mode & FMODE_READ)
4537                 ret = seq_open(file, &ftrace_pid_sops);
4538
4539         return ret;
4540 }
4541
4542 static ssize_t
4543 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4544                    size_t cnt, loff_t *ppos)
4545 {
4546         char buf[64], *tmp;
4547         long val;
4548         int ret;
4549
4550         if (cnt >= sizeof(buf))
4551                 return -EINVAL;
4552
4553         if (copy_from_user(&buf, ubuf, cnt))
4554                 return -EFAULT;
4555
4556         buf[cnt] = 0;
4557
4558         /*
4559          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4560          * to clean the filter quietly.
4561          */
4562         tmp = strstrip(buf);
4563         if (strlen(tmp) == 0)
4564                 return 1;
4565
4566         ret = kstrtol(tmp, 10, &val);
4567         if (ret < 0)
4568                 return ret;
4569
4570         ret = ftrace_pid_add(val);
4571
4572         return ret ? ret : cnt;
4573 }
4574
4575 static int
4576 ftrace_pid_release(struct inode *inode, struct file *file)
4577 {
4578         if (file->f_mode & FMODE_READ)
4579                 seq_release(inode, file);
4580
4581         return 0;
4582 }
4583
4584 static const struct file_operations ftrace_pid_fops = {
4585         .open           = ftrace_pid_open,
4586         .write          = ftrace_pid_write,
4587         .read           = seq_read,
4588         .llseek         = ftrace_filter_lseek,
4589         .release        = ftrace_pid_release,
4590 };
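/*
 * Editorial example: per-pid filtering through the file above.
 *
 *      echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid  # trace pid 1234
 *      echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid     # trace idle tasks
 *      echo > /sys/kernel/debug/tracing/set_ftrace_pid       # clear the filter
 *
 * Writing 0 maps to ftrace_swapper_pid in ftrace_pid_add() above, and an
 * empty write clears the list via the O_TRUNC path in ftrace_pid_open().
 */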
4591
4592 static __init int ftrace_init_debugfs(void)
4593 {
4594         struct dentry *d_tracer;
4595
4596         d_tracer = tracing_init_dentry();
4597         if (!d_tracer)
4598                 return 0;
4599
4600         ftrace_init_dyn_debugfs(d_tracer);
4601
4602         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4603                             NULL, &ftrace_pid_fops);
4604
4605         ftrace_profile_debugfs(d_tracer);
4606
4607         return 0;
4608 }
4609 fs_initcall(ftrace_init_debugfs);
4610
4611 /**
4612  * ftrace_kill - kill ftrace
4613  *
4614  * This function should be used by panic code. It stops ftrace
4615  * but in a not so nice way: once called, ftrace is disabled
4616  * permanently and cannot be re-enabled.
4617  */
4618 void ftrace_kill(void)
4619 {
4620         ftrace_disabled = 1;
4621         ftrace_enabled = 0;
4622         clear_ftrace_function();
4623 }
4624
4625 /**
4626  * ftrace_is_dead - Test if ftrace is dead or not.
4627  */
4628 int ftrace_is_dead(void)
4629 {
4630         return ftrace_disabled;
4631 }
4632
4633 /**
4634  * register_ftrace_function - register a function for profiling
4635  * @ops - ops structure that holds the function for profiling.
4636  *
4637  * Register a function to be called by all functions in the
4638  * kernel.
4639  *
4640  * Note: @ops->func and all the functions it calls must be labeled
4641  *       with "notrace", otherwise it will go into a
4642  *       recursive loop.
4643  */
4644 int register_ftrace_function(struct ftrace_ops *ops)
4645 {
4646         int ret = -1;
4647
4648         ftrace_ops_init(ops);
4649
4650         mutex_lock(&ftrace_lock);
4651
4652         ret = ftrace_startup(ops, 0);
4653
4654         mutex_unlock(&ftrace_lock);
4655
4656         return ret;
4657 }
4658 EXPORT_SYMBOL_GPL(register_ftrace_function);
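/*
 * Editorial note: a typical client lifetime is
 *
 *      ftrace_set_filter(&ops, buf, len, 1);   (optional)
 *      register_ftrace_function(&ops);
 *      ...
 *      unregister_ftrace_function(&ops);
 *
 * Without a filter, the callback fires for every traceable function in
 * the kernel.
 */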
4659
4660 /**
4661  * unregister_ftrace_function - unregister a function for profiling.
4662  * @ops - ops structure that holds the function to unregister
4663  *
4664  * Unregister a function that was added to be called by ftrace profiling.
4665  */
4666 int unregister_ftrace_function(struct ftrace_ops *ops)
4667 {
4668         int ret;
4669
4670         mutex_lock(&ftrace_lock);
4671         ret = ftrace_shutdown(ops, 0);
4672         mutex_unlock(&ftrace_lock);
4673
4674         return ret;
4675 }
4676 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4677
4678 int
4679 ftrace_enable_sysctl(struct ctl_table *table, int write,
4680                      void __user *buffer, size_t *lenp,
4681                      loff_t *ppos)
4682 {
4683         int ret = -ENODEV;
4684
4685         mutex_lock(&ftrace_lock);
4686
4687         if (unlikely(ftrace_disabled))
4688                 goto out;
4689
4690         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4691
4692         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4693                 goto out;
4694
4695         last_ftrace_enabled = !!ftrace_enabled;
4696
4697         if (ftrace_enabled) {
4698
4699                 ftrace_startup_sysctl();
4700
4701                 /* we are starting ftrace again */
4702                 if (ftrace_ops_list != &ftrace_list_end)
4703                         update_ftrace_function();
4704
4705         } else {
4706                 /* stopping ftrace calls (just send to ftrace_stub) */
4707                 ftrace_trace_function = ftrace_stub;
4708
4709                 ftrace_shutdown_sysctl();
4710         }
4711
4712  out:
4713         mutex_unlock(&ftrace_lock);
4714         return ret;
4715 }
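/*
 * Editorial example: this handler backs the kernel.ftrace_enabled sysctl:
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled    # stub out all callbacks
 *      sysctl kernel.ftrace_enabled=1              # re-enable them
 */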
4716
4717 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4718
4719 static int ftrace_graph_active;
4720 static struct notifier_block ftrace_suspend_notifier;
4721
4722 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4723 {
4724         return 0;
4725 }
4726
4727 /* The callbacks that hook a function */
4728 trace_func_graph_ret_t ftrace_graph_return =
4729                         (trace_func_graph_ret_t)ftrace_stub;
4730 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4731
4732 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4733 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4734 {
4735         int i;
4736         int ret = 0;
4737         unsigned long flags;
4738         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4739         struct task_struct *g, *t;
4740
4741         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4742                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4743                                         * sizeof(struct ftrace_ret_stack),
4744                                         GFP_KERNEL);
4745                 if (!ret_stack_list[i]) {
4746                         start = 0;
4747                         end = i;
4748                         ret = -ENOMEM;
4749                         goto free;
4750                 }
4751         }
4752
4753         read_lock_irqsave(&tasklist_lock, flags);
4754         do_each_thread(g, t) {
4755                 if (start == end) {
4756                         ret = -EAGAIN;
4757                         goto unlock;
4758                 }
4759
4760                 if (t->ret_stack == NULL) {
4761                         atomic_set(&t->tracing_graph_pause, 0);
4762                         atomic_set(&t->trace_overrun, 0);
4763                         t->curr_ret_stack = -1;
4764                         /* Make sure the tasks see the -1 first: */
4765                         smp_wmb();
4766                         t->ret_stack = ret_stack_list[start++];
4767                 }
4768         } while_each_thread(g, t);
4769
4770 unlock:
4771         read_unlock_irqrestore(&tasklist_lock, flags);
4772 free:
4773         for (i = start; i < end; i++)
4774                 kfree(ret_stack_list[i]);
4775         return ret;
4776 }
4777
4778 static void
4779 ftrace_graph_probe_sched_switch(void *ignore,
4780                         struct task_struct *prev, struct task_struct *next)
4781 {
4782         unsigned long long timestamp;
4783         int index;
4784
4785         /*
4786          * Does the user want to count the time a function was asleep?
4787          * If so, do not update the time stamps.
4788          */
4789         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4790                 return;
4791
4792         timestamp = trace_clock_local();
4793
4794         prev->ftrace_timestamp = timestamp;
4795
4796         /* only process tasks that we timestamped */
4797         if (!next->ftrace_timestamp)
4798                 return;
4799
4800         /*
4801          * Update all the counters in next to make up for the
4802          * time next was sleeping.
4803          */
4804         timestamp -= next->ftrace_timestamp;
4805
4806         for (index = next->curr_ret_stack; index >= 0; index--)
4807                 next->ret_stack[index].calltime += timestamp;
4808 }
4809
4810 /* Allocate a return stack for each task */
4811 static int start_graph_tracing(void)
4812 {
4813         struct ftrace_ret_stack **ret_stack_list;
4814         int ret, cpu;
4815
4816         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4817                                 sizeof(struct ftrace_ret_stack *),
4818                                 GFP_KERNEL);
4819
4820         if (!ret_stack_list)
4821                 return -ENOMEM;
4822
4823         /* The cpu_boot init_task->ret_stack will never be freed */
4824         for_each_online_cpu(cpu) {
4825                 if (!idle_task(cpu)->ret_stack)
4826                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4827         }
4828
4829         do {
4830                 ret = alloc_retstack_tasklist(ret_stack_list);
4831         } while (ret == -EAGAIN);
4832
4833         if (!ret) {
4834                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4835                 if (ret)
4836                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4837                                 " probe to kernel_sched_switch\n");
4838         }
4839
4840         kfree(ret_stack_list);
4841         return ret;
4842 }
4843
4844 /*
4845  * Hibernation protection.
4846  * The state of the current task is too unstable during
4847  * suspend/restore to disk. We want to protect against that.
4848  */
4849 static int
4850 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4851                                                         void *unused)
4852 {
4853         switch (state) {
4854         case PM_HIBERNATION_PREPARE:
4855                 pause_graph_tracing();
4856                 break;
4857
4858         case PM_POST_HIBERNATION:
4859                 unpause_graph_tracing();
4860                 break;
4861         }
4862         return NOTIFY_DONE;
4863 }
4864
4865 /* Just a placeholder for function graph */
4866 static struct ftrace_ops fgraph_ops __read_mostly = {
4867         .func           = ftrace_stub,
4868         .flags          = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
4869                                 FTRACE_OPS_FL_RECURSION_SAFE,
4870 };
4871
4872 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4873                         trace_func_graph_ent_t entryfunc)
4874 {
4875         int ret = 0;
4876
4877         mutex_lock(&ftrace_lock);
4878
4879         /* we currently allow only one tracer registered at a time */
4880         if (ftrace_graph_active) {
4881                 ret = -EBUSY;
4882                 goto out;
4883         }
4884
4885         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4886         register_pm_notifier(&ftrace_suspend_notifier);
4887
4888         ftrace_graph_active++;
4889         ret = start_graph_tracing();
4890         if (ret) {
4891                 ftrace_graph_active--;
4892                 goto out;
4893         }
4894
4895         ftrace_graph_return = retfunc;
4896         ftrace_graph_entry = entryfunc;
4897
4898         ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
4899
4900 out:
4901         mutex_unlock(&ftrace_lock);
4902         return ret;
4903 }
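/*
 * Editorial sketch (hypothetical tracer): the two callbacks hooked by
 * register_ftrace_graph().  Returning nonzero from the entry handler
 * tells the core to push a return-stack frame so the return handler
 * fires too.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;       /* record this call */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->calltime/rettime bracket the call (see sched hook above) */
}

/* register_ftrace_graph(my_graph_return, my_graph_entry); */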
4904
4905 void unregister_ftrace_graph(void)
4906 {
4907         mutex_lock(&ftrace_lock);
4908
4909         if (unlikely(!ftrace_graph_active))
4910                 goto out;
4911
4912         ftrace_graph_active--;
4913         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4914         ftrace_graph_entry = ftrace_graph_entry_stub;
4915         ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
4916         unregister_pm_notifier(&ftrace_suspend_notifier);
4917         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4918
4919  out:
4920         mutex_unlock(&ftrace_lock);
4921 }
4922
4923 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4924
4925 static void
4926 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4927 {
4928         atomic_set(&t->tracing_graph_pause, 0);
4929         atomic_set(&t->trace_overrun, 0);
4930         t->ftrace_timestamp = 0;
4931         /* make curr_ret_stack visible before we add the ret_stack */
4932         smp_wmb();
4933         t->ret_stack = ret_stack;
4934 }
4935
4936 /*
4937  * Allocate a return stack for the idle task. May be the first
4938  * time through, or it may be done by CPU hotplug online.
4939  */
4940 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4941 {
4942         t->curr_ret_stack = -1;
4943         /*
4944          * The idle task has no parent, it either has its own
4945          * stack or no stack at all.
4946          */
4947         if (t->ret_stack)
4948                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4949
4950         if (ftrace_graph_active) {
4951                 struct ftrace_ret_stack *ret_stack;
4952
4953                 ret_stack = per_cpu(idle_ret_stack, cpu);
4954                 if (!ret_stack) {
4955                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4956                                             * sizeof(struct ftrace_ret_stack),
4957                                             GFP_KERNEL);
4958                         if (!ret_stack)
4959                                 return;
4960                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4961                 }
4962                 graph_init_task(t, ret_stack);
4963         }
4964 }
4965
4966 /* Allocate a return stack for newly created task */
4967 void ftrace_graph_init_task(struct task_struct *t)
4968 {
4969         /* Make sure we do not use the parent ret_stack */
4970         t->ret_stack = NULL;
4971         t->curr_ret_stack = -1;
4972
4973         if (ftrace_graph_active) {
4974                 struct ftrace_ret_stack *ret_stack;
4975
4976                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4977                                 * sizeof(struct ftrace_ret_stack),
4978                                 GFP_KERNEL);
4979                 if (!ret_stack)
4980                         return;
4981                 graph_init_task(t, ret_stack);
4982         }
4983 }
4984
4985 void ftrace_graph_exit_task(struct task_struct *t)
4986 {
4987         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4988
4989         t->ret_stack = NULL;
4990         /* NULL must become visible to IRQs before we free it: */
4991         barrier();
4992
4993         kfree(ret_stack);
4994 }
4995
4996 void ftrace_graph_stop(void)
4997 {
4998         ftrace_stop();
4999 }
5000 #endif