/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)        \
        .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_notrace(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
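
/*
 * Example use of the two macros above (a sketch; this mirrors
 * ftrace_global_list_func() further down):
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */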

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->regex_lock);
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
{
        int bit;

        bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
        if (bit < 0)
                return;

        do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);

        trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before tracing actually stops.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
                /*
                 * As we are calling the function directly, if it does
                 * not have recursion protection, the function_trace_op
                 * needs to be updated accordingly.
                 */
                if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
                        global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
                else
                        global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
        } else {
                func = ftrace_global_list_func;
                /* The list has its own recursion protection. */
                global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
        }


        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
             (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
                        function_trace_op = ftrace_global_list;
                else
                        function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
                function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer inserted into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (unlikely(ftrace_disabled))
                return -ENODEV;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it can only be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (ftrace_disabled)
                return -ENODEV;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}
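
/*
 * A minimal registration sketch (the callback and ops names here are
 * hypothetical; the public register_ftrace_function() and
 * unregister_ftrace_function() wrappers take ftrace_lock and
 * eventually reach the helpers above):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		... inspect ip/parent_ip; must tolerate being called
 *		    from almost any kernel context ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */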

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
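
/*
 * For example, with 4 KiB pages, a 16-byte page header and a 48-byte
 * struct ftrace_profile (64-bit with CONFIG_FUNCTION_GRAPH_TRACER),
 * PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85 records per
 * page. The exact sizes depend on the architecture and config.
 */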

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
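                /*
                 * The expression above is the unnormalized sample
                 * variance: sum(t^2) - n * mean^2 == sum((t - mean)^2),
                 * ignoring integer truncation of the mean.
                 */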
                /*
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. From past experience,
         * there are around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};
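
/*
 * From user space the profiler is driven through debugfs (see
 * ftrace_profile_debugfs() below). Assuming debugfs is mounted at
 * /sys/kernel/debug, a typical session looks like:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 */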

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; if something
                         * goes wrong, we still do not free the memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, whence);
        else
                file->f_pos = ret = 1;

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct list_head        free_list;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(global_ops)
};

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
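
/*
 * E.g. with 4 KiB pages and a 16-byte struct dyn_ftrace (ip + flags on
 * a typical 64-bit arch), that is 256 records per page; the actual
 * ENTRY_SIZE varies with the architecture.
 */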

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about half the number of entries found.
         */
        for (size /= 2; size; size >>= 1)
                bits++;
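
        /* e.g. src->count == 100: size starts at 50, so bits ends up 6 (64 buckets) */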

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * There's a small race when adding ops: the ftrace handler
         * that wants regs may be called without them. We cannot
         * allow that handler to be called if regs is NULL.
         */
        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
                return 0;
#endif

        filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
        notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of it;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
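
/*
 * Example use (a sketch; see __ftrace_hash_rec_update() below for a
 * real user; do_something() is a hypothetical helper):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		do_something(rec);
 *	} while_for_each_ftrace_rec();
 */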


static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the given @ip is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, match records that are in
                         * this hash but not in the other (notrace) hash.
                         * In the notrace case, match records that are in
                         * this hash and also in the other (filter) hash,
                         * or all of them if the other hash is empty.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
1659  */
1660 void ftrace_bug(int failed, unsigned long ip)
1661 {
1662         switch (failed) {
1663         case -EFAULT:
1664                 FTRACE_WARN_ON_ONCE(1);
1665                 pr_info("ftrace faulted on modifying ");
1666                 print_ip_sym(ip);
1667                 break;
1668         case -EINVAL:
1669                 FTRACE_WARN_ON_ONCE(1);
1670                 pr_info("ftrace failed to modify ");
1671                 print_ip_sym(ip);
1672                 print_ip_ins(" actual: ", (unsigned char *)ip);
1673                 printk(KERN_CONT "\n");
1674                 break;
1675         case -EPERM:
1676                 FTRACE_WARN_ON_ONCE(1);
1677                 pr_info("ftrace faulted on writing ");
1678                 print_ip_sym(ip);
1679                 break;
1680         default:
1681                 FTRACE_WARN_ON_ONCE(1);
1682                 pr_info("ftrace faulted on unknown error ");
1683                 print_ip_sym(ip);
1684         }
1685 }
1686
1687 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1688 {
1689         unsigned long flag = 0UL;
1690
1691         /*
1692          * If we are updating calls:
1693          *
1694          *   If the record has a ref count, then we need to enable it
1695          *   because someone is using it.
1696          *
1697          *   Otherwise we make sure its disabled.
1698          *
1699          * If we are disabling calls, then disable all records that
1700          * are enabled.
1701          */
1702         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1703                 flag = FTRACE_FL_ENABLED;
1704
1705         /*
1706          * If enabling and the REGS flag does not match the REGS_EN, then
1707          * do not ignore this record. Set flags to fail the compare against
1708          * ENABLED.
1709          */
1710         if (flag &&
1711             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1712                 flag |= FTRACE_FL_REGS;
1713
1714         /* If the state of this record hasn't changed, then do nothing */
1715         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1716                 return FTRACE_UPDATE_IGNORE;
1717
1718         if (flag) {
1719                 /* Save off if rec is being enabled (for return value) */
1720                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1721
1722                 if (update) {
1723                         rec->flags |= FTRACE_FL_ENABLED;
1724                         if (flag & FTRACE_FL_REGS) {
1725                                 if (rec->flags & FTRACE_FL_REGS)
1726                                         rec->flags |= FTRACE_FL_REGS_EN;
1727                                 else
1728                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1729                         }
1730                 }
1731
1732                 /*
1733                  * If this record is being updated from a nop, then
1734                  *   return UPDATE_MAKE_CALL.
1735                  * Otherwise, if the EN flag is set, then return
1736                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1737                  *   from the non-save regs, to a save regs function.
1738                  * Otherwise,
1739                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1740                  *   from the save regs, to a non-save regs function.
1741                  */
1742                 if (flag & FTRACE_FL_ENABLED)
1743                         return FTRACE_UPDATE_MAKE_CALL;
1744                 else if (rec->flags & FTRACE_FL_REGS_EN)
1745                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1746                 else
1747                         return FTRACE_UPDATE_MODIFY_CALL;
1748         }
1749
1750         if (update) {
                /* If there are no more users, clear all flags */
1752                 if (!(rec->flags & ~FTRACE_FL_MASK))
1753                         rec->flags = 0;
1754                 else
1755                         /* Just disable the record (keep REGS state) */
1756                         rec->flags &= ~FTRACE_FL_ENABLED;
1757         }
1758
1759         return FTRACE_UPDATE_MAKE_NOP;
1760 }
1761
1762 /**
1763  * ftrace_update_record, set a record that now is tracing or not
1764  * @rec: the record to update
1765  * @enable: set to 1 if the record is tracing, zero to force disable
1766  *
1767  * The records that represent all functions that can be traced need
1768  * to be updated when tracing has been enabled.
1769  */
1770 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1771 {
1772         return ftrace_check_record(rec, enable, 1);
1773 }
1774
1775 /**
1776  * ftrace_test_record, check if the record has been enabled or not
1777  * @rec: the record to test
1778  * @enable: set to 1 to check if enabled, 0 if it is disabled
1779  *
1780  * The arch code may need to test if a record is already set to
1781  * tracing to determine how to modify the function code that it
1782  * represents.
1783  */
1784 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1785 {
1786         return ftrace_check_record(rec, enable, 0);
1787 }
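
/*
 * Sketch of arch-side usage (an assumption about callers, not code from
 * this file): an arch that batches modifications can query the pending
 * transition without committing it, then commit later:
 *
 *      ret = ftrace_test_record(rec, enable);
 *      switch (ret) {
 *      case FTRACE_UPDATE_IGNORE:
 *              return 0;
 *      case FTRACE_UPDATE_MAKE_CALL:
 *              (prepare the call instruction at rec->ip)
 *      }
 *      (later: ftrace_update_record(rec, enable);)
 */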
1788
1789 static int
1790 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1791 {
1792         unsigned long ftrace_old_addr;
1793         unsigned long ftrace_addr;
1794         int ret;
1795
1796         ret = ftrace_update_record(rec, enable);
1797
1798         if (rec->flags & FTRACE_FL_REGS)
1799                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1800         else
1801                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1802
1803         switch (ret) {
1804         case FTRACE_UPDATE_IGNORE:
1805                 return 0;
1806
1807         case FTRACE_UPDATE_MAKE_CALL:
1808                 return ftrace_make_call(rec, ftrace_addr);
1809
1810         case FTRACE_UPDATE_MAKE_NOP:
1811                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1812
1813         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1814         case FTRACE_UPDATE_MODIFY_CALL:
1815                 if (rec->flags & FTRACE_FL_REGS)
1816                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1817                 else
1818                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1819
1820                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1821         }
1822
        return -1; /* unknown ftrace bug */
1824 }
1825
1826 void __weak ftrace_replace_code(int enable)
1827 {
1828         struct dyn_ftrace *rec;
1829         struct ftrace_page *pg;
1830         int failed;
1831
1832         if (unlikely(ftrace_disabled))
1833                 return;
1834
1835         do_for_each_ftrace_rec(pg, rec) {
1836                 failed = __ftrace_replace_code(rec, enable);
1837                 if (failed) {
1838                         ftrace_bug(failed, rec->ip);
1839                         /* Stop processing */
1840                         return;
1841                 }
1842         } while_for_each_ftrace_rec();
1843 }
1844
1845 struct ftrace_rec_iter {
1846         struct ftrace_page      *pg;
1847         int                     index;
1848 };
1849
1850 /**
1851  * ftrace_rec_iter_start, start up iterating over traced functions
1852  *
1853  * Returns an iterator handle that is used to iterate over all
1854  * the records that represent address locations where functions
1855  * are traced.
1856  *
1857  * May return NULL if no records are available.
1858  */
1859 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1860 {
1861         /*
1862          * We only use a single iterator.
1863          * Protected by the ftrace_lock mutex.
1864          */
1865         static struct ftrace_rec_iter ftrace_rec_iter;
1866         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1867
1868         iter->pg = ftrace_pages_start;
1869         iter->index = 0;
1870
1871         /* Could have empty pages */
1872         while (iter->pg && !iter->pg->index)
1873                 iter->pg = iter->pg->next;
1874
1875         if (!iter->pg)
1876                 return NULL;
1877
1878         return iter;
1879 }
1880
1881 /**
1882  * ftrace_rec_iter_next, get the next record to process.
1883  * @iter: The handle to the iterator.
1884  *
1885  * Returns the next iterator after the given iterator @iter.
1886  */
1887 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1888 {
1889         iter->index++;
1890
1891         if (iter->index >= iter->pg->index) {
1892                 iter->pg = iter->pg->next;
1893                 iter->index = 0;
1894
1895                 /* Could have empty pages */
1896                 while (iter->pg && !iter->pg->index)
1897                         iter->pg = iter->pg->next;
1898         }
1899
1900         if (!iter->pg)
1901                 return NULL;
1902
1903         return iter;
1904 }
1905
1906 /**
1907  * ftrace_rec_iter_record, get the record at the iterator location
1908  * @iter: The current iterator location
1909  *
1910  * Returns the record that the current @iter is at.
1911  */
1912 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1913 {
1914         return &iter->pg->records[iter->index];
1915 }
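
/*
 * A typical traversal with this iterator looks like the sketch below
 * (process_record() is a hypothetical per-record callback):
 *
 *      struct ftrace_rec_iter *iter;
 *
 *      for (iter = ftrace_rec_iter_start(); iter;
 *           iter = ftrace_rec_iter_next(iter)) {
 *              struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);
 *
 *              process_record(rec);
 *      }
 */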
1916
1917 static int
1918 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1919 {
1920         unsigned long ip;
1921         int ret;
1922
1923         ip = rec->ip;
1924
1925         if (unlikely(ftrace_disabled))
1926                 return 0;
1927
1928         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1929         if (ret) {
1930                 ftrace_bug(ret, ip);
1931                 return 0;
1932         }
1933         return 1;
1934 }
1935
1936 /*
1937  * archs can override this function if they must do something
1938  * before the modifying code is performed.
1939  */
1940 int __weak ftrace_arch_code_modify_prepare(void)
1941 {
1942         return 0;
1943 }
1944
1945 /*
1946  * archs can override this function if they must do something
1947  * after the modifying code is performed.
1948  */
1949 int __weak ftrace_arch_code_modify_post_process(void)
1950 {
1951         return 0;
1952 }
1953
1954 void ftrace_modify_all_code(int command)
1955 {
1956         if (command & FTRACE_UPDATE_CALLS)
1957                 ftrace_replace_code(1);
1958         else if (command & FTRACE_DISABLE_CALLS)
1959                 ftrace_replace_code(0);
1960
1961         if (command & FTRACE_UPDATE_TRACE_FUNC)
1962                 ftrace_update_ftrace_func(ftrace_trace_function);
1963
1964         if (command & FTRACE_START_FUNC_RET)
1965                 ftrace_enable_ftrace_graph_caller();
1966         else if (command & FTRACE_STOP_FUNC_RET)
1967                 ftrace_disable_ftrace_graph_caller();
1968 }
1969
1970 static int __ftrace_modify_code(void *data)
1971 {
1972         int *command = data;
1973
1974         ftrace_modify_all_code(*command);
1975
1976         return 0;
1977 }
1978
1979 /**
1980  * ftrace_run_stop_machine, go back to the stop machine method
1981  * @command: The command to tell ftrace what to do
1982  *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
1985  */
1986 void ftrace_run_stop_machine(int command)
1987 {
1988         stop_machine(__ftrace_modify_code, &command, NULL);
1989 }
1990
1991 /**
1992  * arch_ftrace_update_code, modify the code to trace or not trace
1993  * @command: The command that needs to be done
1994  *
 * Archs can override this function if they do not need to
1996  * run stop_machine() to modify code.
1997  */
1998 void __weak arch_ftrace_update_code(int command)
1999 {
2000         ftrace_run_stop_machine(command);
2001 }
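
/*
 * Sketch (assumption): an arch that can patch text safely at runtime,
 * e.g. using breakpoints, may override the weak function above and
 * skip stop_machine() entirely:
 *
 *      void arch_ftrace_update_code(int command)
 *      {
 *              ftrace_modify_all_code(command);
 *      }
 */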
2002
2003 static void ftrace_run_update_code(int command)
2004 {
2005         int ret;
2006
2007         ret = ftrace_arch_code_modify_prepare();
2008         FTRACE_WARN_ON(ret);
2009         if (ret)
2010                 return;
2011         /*
2012          * Do not call function tracer while we update the code.
2013          * We are in stop machine.
2014          */
2015         function_trace_stop++;
2016
2017         /*
2018          * By default we use stop_machine() to modify the code.
         * But archs can do whatever they want as long as it
         * is safe. stop_machine() is the safest, but also
2021          * produces the most overhead.
2022          */
2023         arch_ftrace_update_code(command);
2024
2025         function_trace_stop--;
2026
2027         ret = ftrace_arch_code_modify_post_process();
2028         FTRACE_WARN_ON(ret);
2029 }
2030
2031 static ftrace_func_t saved_ftrace_func;
2032 static int ftrace_start_up;
2033 static int global_start_up;
2034
2035 static void ftrace_startup_enable(int command)
2036 {
2037         if (saved_ftrace_func != ftrace_trace_function) {
2038                 saved_ftrace_func = ftrace_trace_function;
2039                 command |= FTRACE_UPDATE_TRACE_FUNC;
2040         }
2041
2042         if (!command || !ftrace_enabled)
2043                 return;
2044
2045         ftrace_run_update_code(command);
2046 }
2047
2048 static int ftrace_startup(struct ftrace_ops *ops, int command)
2049 {
2050         bool hash_enable = true;
2051
2052         if (unlikely(ftrace_disabled))
2053                 return -ENODEV;
2054
2055         ftrace_start_up++;
2056         command |= FTRACE_UPDATE_CALLS;
2057
2058         /* ops marked global share the filter hashes */
2059         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2060                 ops = &global_ops;
2061                 /* Don't update hash if global is already set */
2062                 if (global_start_up)
2063                         hash_enable = false;
2064                 global_start_up++;
2065         }
2066
2067         ops->flags |= FTRACE_OPS_FL_ENABLED;
2068         if (hash_enable)
2069                 ftrace_hash_rec_enable(ops, 1);
2070
2071         ftrace_startup_enable(command);
2072
2073         return 0;
2074 }
2075
2076 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2077 {
2078         bool hash_disable = true;
2079
2080         if (unlikely(ftrace_disabled))
2081                 return;
2082
2083         ftrace_start_up--;
2084         /*
         * Just warn in case of imbalance; there is no need to kill ftrace.
         * It's not critical, but the ftrace_call callers may never be
         * nopped again after further ftrace uses.
2088          */
2089         WARN_ON_ONCE(ftrace_start_up < 0);
2090
2091         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2092                 ops = &global_ops;
2093                 global_start_up--;
2094                 WARN_ON_ONCE(global_start_up < 0);
2095                 /* Don't update hash if global still has users */
2096                 if (global_start_up) {
2097                         WARN_ON_ONCE(!ftrace_start_up);
2098                         hash_disable = false;
2099                 }
2100         }
2101
2102         if (hash_disable)
2103                 ftrace_hash_rec_disable(ops, 1);
2104
2105         if (ops != &global_ops || !global_start_up)
2106                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2107
2108         command |= FTRACE_UPDATE_CALLS;
2109
2110         if (saved_ftrace_func != ftrace_trace_function) {
2111                 saved_ftrace_func = ftrace_trace_function;
2112                 command |= FTRACE_UPDATE_TRACE_FUNC;
2113         }
2114
2115         if (!command || !ftrace_enabled)
2116                 return;
2117
2118         ftrace_run_update_code(command);
2119 }
2120
2121 static void ftrace_startup_sysctl(void)
2122 {
2123         if (unlikely(ftrace_disabled))
2124                 return;
2125
2126         /* Force update next time */
2127         saved_ftrace_func = NULL;
2128         /* ftrace_start_up is true if we want ftrace running */
2129         if (ftrace_start_up)
2130                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2131 }
2132
2133 static void ftrace_shutdown_sysctl(void)
2134 {
2135         if (unlikely(ftrace_disabled))
2136                 return;
2137
2138         /* ftrace_start_up is true if ftrace is running */
2139         if (ftrace_start_up)
2140                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2141 }
2142
2143 static cycle_t          ftrace_update_time;
2144 static unsigned long    ftrace_update_cnt;
2145 unsigned long           ftrace_update_tot_cnt;
2146
2147 static inline int ops_traces_mod(struct ftrace_ops *ops)
2148 {
2149         /*
         * An empty filter_hash defaults to tracing the module.
         * But the notrace hash requires a test of individual module functions.
2152          */
2153         return ftrace_hash_empty(ops->filter_hash) &&
2154                 ftrace_hash_empty(ops->notrace_hash);
2155 }
2156
2157 /*
2158  * Check if the current ops references the record.
2159  *
2160  * If the ops traces all functions, then it was already accounted for.
2161  * If the ops does not trace the current record function, skip it.
2162  * If the ops ignores the function via notrace filter, skip it.
2163  */
2164 static inline bool
2165 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2166 {
2167         /* If ops isn't enabled, ignore it */
2168         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2169                 return 0;
2170
2171         /* If ops traces all mods, we already accounted for it */
2172         if (ops_traces_mod(ops))
2173                 return 0;
2174
2175         /* The function must be in the filter */
2176         if (!ftrace_hash_empty(ops->filter_hash) &&
2177             !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2178                 return 0;
2179
2180         /* If in notrace hash, we ignore it too */
2181         if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2182                 return 0;
2183
2184         return 1;
2185 }
2186
2187 static int referenced_filters(struct dyn_ftrace *rec)
2188 {
2189         struct ftrace_ops *ops;
2190         int cnt = 0;
2191
2192         for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2193                 if (ops_references_rec(ops, rec))
2194                     cnt++;
2195         }
2196
2197         return cnt;
2198 }
2199
2200 static int ftrace_update_code(struct module *mod)
2201 {
2202         struct ftrace_page *pg;
2203         struct dyn_ftrace *p;
2204         cycle_t start, stop;
2205         unsigned long ref = 0;
2206         bool test = false;
2207         int i;
2208
2209         /*
2210          * When adding a module, we need to check if tracers are
2211          * currently enabled and if they are set to trace all functions.
2212          * If they are, we need to enable the module functions as well
2213          * as update the reference counts for those function records.
2214          */
2215         if (mod) {
2216                 struct ftrace_ops *ops;
2217
2218                 for (ops = ftrace_ops_list;
2219                      ops != &ftrace_list_end; ops = ops->next) {
2220                         if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2221                                 if (ops_traces_mod(ops))
2222                                         ref++;
2223                                 else
2224                                         test = true;
2225                         }
2226                 }
2227         }
2228
2229         start = ftrace_now(raw_smp_processor_id());
2230         ftrace_update_cnt = 0;
2231
2232         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2233
2234                 for (i = 0; i < pg->index; i++) {
2235                         int cnt = ref;
2236
2237                         /* If something went wrong, bail without enabling anything */
2238                         if (unlikely(ftrace_disabled))
2239                                 return -1;
2240
2241                         p = &pg->records[i];
2242                         if (test)
2243                                 cnt += referenced_filters(p);
2244                         p->flags = cnt;
2245
2246                         /*
2247                          * Do the initial record conversion from mcount jump
2248                          * to the NOP instructions.
2249                          */
2250                         if (!ftrace_code_disable(mod, p))
2251                                 break;
2252
2253                         ftrace_update_cnt++;
2254
2255                         /*
2256                          * If the tracing is enabled, go ahead and enable the record.
2257                          *
                         * The reason not to enable the record immediately is the
                         * inherent check of ftrace_make_nop/ftrace_make_call for
                         * correct previous instructions.  Doing the NOP conversion
                         * first puts the module into the correct state, thus
                         * passing the ftrace_make_call check.
2263                          */
2264                         if (ftrace_start_up && cnt) {
2265                                 int failed = __ftrace_replace_code(p, 1);
2266                                 if (failed)
2267                                         ftrace_bug(failed, p->ip);
2268                         }
2269                 }
2270         }
2271
2272         ftrace_new_pgs = NULL;
2273
2274         stop = ftrace_now(raw_smp_processor_id());
2275         ftrace_update_time = stop - start;
2276         ftrace_update_tot_cnt += ftrace_update_cnt;
2277
2278         return 0;
2279 }
2280
2281 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2282 {
2283         int order;
2284         int cnt;
2285
2286         if (WARN_ON(!count))
2287                 return -EINVAL;
2288
2289         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2290
2291         /*
2292          * We want to fill as much as possible. No more than a page
2293          * may be empty.
2294          */
2295         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2296                 order--;
2297
2298  again:
2299         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2300
2301         if (!pg->records) {
2302                 /* if we can't allocate this size, try something smaller */
2303                 if (!order)
2304                         return -ENOMEM;
2305                 order >>= 1;
2306                 goto again;
2307         }
2308
2309         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2310         pg->size = cnt;
2311
2312         if (cnt > count)
2313                 cnt = count;
2314
2315         return cnt;
2316 }
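
/*
 * Worked example (hypothetical sizes): with 4K pages and a 16-byte
 * struct dyn_ftrace, ENTRIES_PER_PAGE is 256. For count = 1000,
 * DIV_ROUND_UP(1000, 256) = 4, so order = 2 (four pages). Since
 * (PAGE_SIZE << 2) / 16 = 1024 is not >= 1000 + 256, the order is
 * kept; 1024 records fit, and min(1024, 1000) = 1000 is returned.
 */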
2317
2318 static struct ftrace_page *
2319 ftrace_allocate_pages(unsigned long num_to_init)
2320 {
2321         struct ftrace_page *start_pg;
2322         struct ftrace_page *pg;
2323         int order;
2324         int cnt;
2325
2326         if (!num_to_init)
                return NULL;
2328
2329         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2330         if (!pg)
2331                 return NULL;
2332
2333         /*
         * Try to allocate as much as possible in one contiguous
2335          * location that fills in all of the space. We want to
2336          * waste as little space as possible.
2337          */
2338         for (;;) {
2339                 cnt = ftrace_allocate_records(pg, num_to_init);
2340                 if (cnt < 0)
2341                         goto free_pages;
2342
2343                 num_to_init -= cnt;
2344                 if (!num_to_init)
2345                         break;
2346
2347                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2348                 if (!pg->next)
2349                         goto free_pages;
2350
2351                 pg = pg->next;
2352         }
2353
2354         return start_pg;
2355
 free_pages:
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
        }
2364         pr_info("ftrace: FAILED to allocate memory for functions\n");
2365         return NULL;
2366 }
2367
2368 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2369 {
2370         int cnt;
2371
2372         if (!num_to_init) {
2373                 pr_info("ftrace: No functions to be traced?\n");
2374                 return -1;
2375         }
2376
2377         cnt = num_to_init / ENTRIES_PER_PAGE;
2378         pr_info("ftrace: allocating %ld entries in %d pages\n",
2379                 num_to_init, cnt + 1);
2380
2381         return 0;
2382 }
2383
2384 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2385
2386 struct ftrace_iterator {
2387         loff_t                          pos;
2388         loff_t                          func_pos;
2389         struct ftrace_page              *pg;
2390         struct dyn_ftrace               *func;
2391         struct ftrace_func_probe        *probe;
2392         struct trace_parser             parser;
2393         struct ftrace_hash              *hash;
2394         struct ftrace_ops               *ops;
2395         int                             hidx;
2396         int                             idx;
2397         unsigned                        flags;
2398 };
2399
2400 static void *
2401 t_hash_next(struct seq_file *m, loff_t *pos)
2402 {
2403         struct ftrace_iterator *iter = m->private;
2404         struct hlist_node *hnd = NULL;
2405         struct hlist_head *hhd;
2406
2407         (*pos)++;
2408         iter->pos = *pos;
2409
2410         if (iter->probe)
2411                 hnd = &iter->probe->node;
2412  retry:
2413         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2414                 return NULL;
2415
2416         hhd = &ftrace_func_hash[iter->hidx];
2417
2418         if (hlist_empty(hhd)) {
2419                 iter->hidx++;
2420                 hnd = NULL;
2421                 goto retry;
2422         }
2423
2424         if (!hnd)
2425                 hnd = hhd->first;
2426         else {
2427                 hnd = hnd->next;
2428                 if (!hnd) {
2429                         iter->hidx++;
2430                         goto retry;
2431                 }
2432         }
2433
2434         if (WARN_ON_ONCE(!hnd))
2435                 return NULL;
2436
2437         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2438
2439         return iter;
2440 }
2441
2442 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2443 {
2444         struct ftrace_iterator *iter = m->private;
2445         void *p = NULL;
2446         loff_t l;
2447
2448         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2449                 return NULL;
2450
2451         if (iter->func_pos > *pos)
2452                 return NULL;
2453
2454         iter->hidx = 0;
2455         for (l = 0; l <= (*pos - iter->func_pos); ) {
2456                 p = t_hash_next(m, &l);
2457                 if (!p)
2458                         break;
2459         }
2460         if (!p)
2461                 return NULL;
2462
2463         /* Only set this if we have an item */
2464         iter->flags |= FTRACE_ITER_HASH;
2465
2466         return iter;
2467 }
2468
2469 static int
2470 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2471 {
2472         struct ftrace_func_probe *rec;
2473
2474         rec = iter->probe;
2475         if (WARN_ON_ONCE(!rec))
2476                 return -EIO;
2477
2478         if (rec->ops->print)
2479                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2480
2481         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2482
2483         if (rec->data)
2484                 seq_printf(m, ":%p", rec->data);
2485         seq_putc(m, '\n');
2486
2487         return 0;
2488 }
2489
2490 static void *
2491 t_next(struct seq_file *m, void *v, loff_t *pos)
2492 {
2493         struct ftrace_iterator *iter = m->private;
2494         struct ftrace_ops *ops = iter->ops;
2495         struct dyn_ftrace *rec = NULL;
2496
2497         if (unlikely(ftrace_disabled))
2498                 return NULL;
2499
2500         if (iter->flags & FTRACE_ITER_HASH)
2501                 return t_hash_next(m, pos);
2502
2503         (*pos)++;
2504         iter->pos = iter->func_pos = *pos;
2505
2506         if (iter->flags & FTRACE_ITER_PRINTALL)
2507                 return t_hash_start(m, pos);
2508
2509  retry:
2510         if (iter->idx >= iter->pg->index) {
2511                 if (iter->pg->next) {
2512                         iter->pg = iter->pg->next;
2513                         iter->idx = 0;
2514                         goto retry;
2515                 }
2516         } else {
2517                 rec = &iter->pg->records[iter->idx++];
2518                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2519                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2520
2521                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2522                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2523
2524                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2525                      !(rec->flags & FTRACE_FL_ENABLED))) {
2526
2527                         rec = NULL;
2528                         goto retry;
2529                 }
2530         }
2531
2532         if (!rec)
2533                 return t_hash_start(m, pos);
2534
2535         iter->func = rec;
2536
2537         return iter;
2538 }
2539
2540 static void reset_iter_read(struct ftrace_iterator *iter)
2541 {
2542         iter->pos = 0;
2543         iter->func_pos = 0;
2544         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2545 }
2546
2547 static void *t_start(struct seq_file *m, loff_t *pos)
2548 {
2549         struct ftrace_iterator *iter = m->private;
2550         struct ftrace_ops *ops = iter->ops;
2551         void *p = NULL;
2552         loff_t l;
2553
2554         mutex_lock(&ftrace_lock);
2555
2556         if (unlikely(ftrace_disabled))
2557                 return NULL;
2558
2559         /*
2560          * If an lseek was done, then reset and start from beginning.
2561          */
2562         if (*pos < iter->pos)
2563                 reset_iter_read(iter);
2564
2565         /*
2566          * For set_ftrace_filter reading, if we have the filter
         * off, we can shortcut and just print out that all
2568          * functions are enabled.
2569          */
2570         if (iter->flags & FTRACE_ITER_FILTER &&
2571             ftrace_hash_empty(ops->filter_hash)) {
2572                 if (*pos > 0)
2573                         return t_hash_start(m, pos);
2574                 iter->flags |= FTRACE_ITER_PRINTALL;
2575                 /* reset in case of seek/pread */
2576                 iter->flags &= ~FTRACE_ITER_HASH;
2577                 return iter;
2578         }
2579
2580         if (iter->flags & FTRACE_ITER_HASH)
2581                 return t_hash_start(m, pos);
2582
2583         /*
2584          * Unfortunately, we need to restart at ftrace_pages_start
         * every time we let go of the ftrace_lock mutex. This is because
2586          * those pointers can change without the lock.
2587          */
2588         iter->pg = ftrace_pages_start;
2589         iter->idx = 0;
2590         for (l = 0; l <= *pos; ) {
2591                 p = t_next(m, p, &l);
2592                 if (!p)
2593                         break;
2594         }
2595
2596         if (!p)
2597                 return t_hash_start(m, pos);
2598
2599         return iter;
2600 }
2601
2602 static void t_stop(struct seq_file *m, void *p)
2603 {
2604         mutex_unlock(&ftrace_lock);
2605 }
2606
2607 static int t_show(struct seq_file *m, void *v)
2608 {
2609         struct ftrace_iterator *iter = m->private;
2610         struct dyn_ftrace *rec;
2611
2612         if (iter->flags & FTRACE_ITER_HASH)
2613                 return t_hash_show(m, iter);
2614
2615         if (iter->flags & FTRACE_ITER_PRINTALL) {
2616                 seq_printf(m, "#### all functions enabled ####\n");
2617                 return 0;
2618         }
2619
2620         rec = iter->func;
2621
2622         if (!rec)
2623                 return 0;
2624
2625         seq_printf(m, "%ps", (void *)rec->ip);
2626         if (iter->flags & FTRACE_ITER_ENABLED)
2627                 seq_printf(m, " (%ld)%s",
2628                            rec->flags & ~FTRACE_FL_MASK,
2629                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2630         seq_printf(m, "\n");
2631
2632         return 0;
2633 }
2634
2635 static const struct seq_operations show_ftrace_seq_ops = {
2636         .start = t_start,
2637         .next = t_next,
2638         .stop = t_stop,
2639         .show = t_show,
2640 };
2641
2642 static int
2643 ftrace_avail_open(struct inode *inode, struct file *file)
2644 {
2645         struct ftrace_iterator *iter;
2646
2647         if (unlikely(ftrace_disabled))
2648                 return -ENODEV;
2649
2650         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2651         if (iter) {
2652                 iter->pg = ftrace_pages_start;
2653                 iter->ops = &global_ops;
2654         }
2655
2656         return iter ? 0 : -ENOMEM;
2657 }
2658
2659 static int
2660 ftrace_enabled_open(struct inode *inode, struct file *file)
2661 {
2662         struct ftrace_iterator *iter;
2663
2664         if (unlikely(ftrace_disabled))
2665                 return -ENODEV;
2666
2667         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2668         if (iter) {
2669                 iter->pg = ftrace_pages_start;
2670                 iter->flags = FTRACE_ITER_ENABLED;
2671                 iter->ops = &global_ops;
2672         }
2673
2674         return iter ? 0 : -ENOMEM;
2675 }
2676
2677 static void ftrace_filter_reset(struct ftrace_hash *hash)
2678 {
2679         mutex_lock(&ftrace_lock);
2680         ftrace_hash_clear(hash);
2681         mutex_unlock(&ftrace_lock);
2682 }
2683
2684 /**
2685  * ftrace_regex_open - initialize function tracer filter files
2686  * @ops: The ftrace_ops that hold the hash filters
2687  * @flag: The type of filter to process
2688  * @inode: The inode, usually passed in to your open routine
2689  * @file: The file, usually passed in to your open routine
2690  *
2691  * ftrace_regex_open() initializes the filter files for the
2692  * @ops. Depending on @flag it may process the filter hash or
2693  * the notrace hash of @ops. With this called from the open
2694  * routine, you can use ftrace_filter_write() for the write
2695  * routine if @flag has FTRACE_ITER_FILTER set, or
2696  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2697  * ftrace_filter_lseek() should be used as the lseek routine, and
2698  * release must call ftrace_regex_release().
2699  */
2700 int
2701 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2702                   struct inode *inode, struct file *file)
2703 {
2704         struct ftrace_iterator *iter;
2705         struct ftrace_hash *hash;
2706         int ret = 0;
2707
2708         ftrace_ops_init(ops);
2709
2710         if (unlikely(ftrace_disabled))
2711                 return -ENODEV;
2712
2713         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2714         if (!iter)
2715                 return -ENOMEM;
2716
2717         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2718                 kfree(iter);
2719                 return -ENOMEM;
2720         }
2721
2722         iter->ops = ops;
2723         iter->flags = flag;
2724
2725         mutex_lock(&ops->regex_lock);
2726
2727         if (flag & FTRACE_ITER_NOTRACE)
2728                 hash = ops->notrace_hash;
2729         else
2730                 hash = ops->filter_hash;
2731
2732         if (file->f_mode & FMODE_WRITE) {
2733                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2734                 if (!iter->hash) {
2735                         trace_parser_put(&iter->parser);
2736                         kfree(iter);
2737                         ret = -ENOMEM;
2738                         goto out_unlock;
2739                 }
2740         }
2741
2742         if ((file->f_mode & FMODE_WRITE) &&
2743             (file->f_flags & O_TRUNC))
2744                 ftrace_filter_reset(iter->hash);
2745
2746         if (file->f_mode & FMODE_READ) {
2747                 iter->pg = ftrace_pages_start;
2748
2749                 ret = seq_open(file, &show_ftrace_seq_ops);
2750                 if (!ret) {
2751                         struct seq_file *m = file->private_data;
2752                         m->private = iter;
2753                 } else {
2754                         /* Failed */
2755                         free_ftrace_hash(iter->hash);
2756                         trace_parser_put(&iter->parser);
2757                         kfree(iter);
2758                 }
2759         } else
2760                 file->private_data = iter;
2761
2762  out_unlock:
2763         mutex_unlock(&ops->regex_lock);
2764
2765         return ret;
2766 }
2767
2768 static int
2769 ftrace_filter_open(struct inode *inode, struct file *file)
2770 {
2771         return ftrace_regex_open(&global_ops,
2772                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2773                         inode, file);
2774 }
2775
2776 static int
2777 ftrace_notrace_open(struct inode *inode, struct file *file)
2778 {
2779         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2780                                  inode, file);
2781 }
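
/*
 * Wiring sketch (an assumption that follows the ftrace_regex_open()
 * kernel-doc above; my_filter_fops is hypothetical):
 *
 *      static const struct file_operations my_filter_fops = {
 *              .open    = ftrace_filter_open,
 *              .read    = seq_read,
 *              .write   = ftrace_filter_write,
 *              .llseek  = ftrace_filter_lseek,
 *              .release = ftrace_regex_release,
 *      };
 */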
2782
2783 static int ftrace_match(char *str, char *regex, int len, int type)
2784 {
2785         int matched = 0;
2786         int slen;
2787
2788         switch (type) {
2789         case MATCH_FULL:
2790                 if (strcmp(str, regex) == 0)
2791                         matched = 1;
2792                 break;
2793         case MATCH_FRONT_ONLY:
2794                 if (strncmp(str, regex, len) == 0)
2795                         matched = 1;
2796                 break;
2797         case MATCH_MIDDLE_ONLY:
2798                 if (strstr(str, regex))
2799                         matched = 1;
2800                 break;
2801         case MATCH_END_ONLY:
2802                 slen = strlen(str);
2803                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2804                         matched = 1;
2805                 break;
2806         }
2807
2808         return matched;
2809 }
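
/*
 * Informal examples of how user globs map to the types handled above
 * (as produced by filter_parse_regex()):
 *
 *      "schedule"   ->  MATCH_FULL         (exact string compare)
 *      "sched_*"    ->  MATCH_FRONT_ONLY   (prefix compare)
 *      "*switch*"   ->  MATCH_MIDDLE_ONLY  (substring search)
 *      "*_fork"     ->  MATCH_END_ONLY     (suffix compare)
 */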
2810
2811 static int
2812 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2813 {
2814         struct ftrace_func_entry *entry;
2815         int ret = 0;
2816
2817         entry = ftrace_lookup_ip(hash, rec->ip);
2818         if (not) {
2819                 /* Do nothing if it doesn't exist */
2820                 if (!entry)
2821                         return 0;
2822
2823                 free_hash_entry(hash, entry);
2824         } else {
2825                 /* Do nothing if it exists */
2826                 if (entry)
2827                         return 0;
2828
2829                 ret = add_hash_entry(hash, rec->ip);
2830         }
2831         return ret;
2832 }
2833
2834 static int
2835 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2836                     char *regex, int len, int type)
2837 {
2838         char str[KSYM_SYMBOL_LEN];
2839         char *modname;
2840
2841         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2842
2843         if (mod) {
2844                 /* module lookup requires matching the module */
2845                 if (!modname || strcmp(modname, mod))
2846                         return 0;
2847
2848                 /* blank search means to match all funcs in the mod */
2849                 if (!len)
2850                         return 1;
2851         }
2852
2853         return ftrace_match(str, regex, len, type);
2854 }
2855
2856 static int
2857 match_records(struct ftrace_hash *hash, char *buff,
2858               int len, char *mod, int not)
2859 {
2860         unsigned search_len = 0;
2861         struct ftrace_page *pg;
2862         struct dyn_ftrace *rec;
2863         int type = MATCH_FULL;
2864         char *search = buff;
2865         int found = 0;
2866         int ret;
2867
2868         if (len) {
2869                 type = filter_parse_regex(buff, len, &search, &not);
2870                 search_len = strlen(search);
2871         }
2872
2873         mutex_lock(&ftrace_lock);
2874
2875         if (unlikely(ftrace_disabled))
2876                 goto out_unlock;
2877
2878         do_for_each_ftrace_rec(pg, rec) {
2879                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2880                         ret = enter_record(hash, rec, not);
2881                         if (ret < 0) {
2882                                 found = ret;
2883                                 goto out_unlock;
2884                         }
2885                         found = 1;
2886                 }
2887         } while_for_each_ftrace_rec();
2888  out_unlock:
2889         mutex_unlock(&ftrace_lock);
2890
2891         return found;
2892 }
2893
2894 static int
2895 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2896 {
2897         return match_records(hash, buff, len, NULL, 0);
2898 }
2899
2900 static int
2901 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2902 {
2903         int not = 0;
2904
2905         /* blank or '*' mean the same */
2906         if (strcmp(buff, "*") == 0)
2907                 buff[0] = 0;
2908
        /* handle the case of 'don't filter this module' */
2910         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2911                 buff[0] = 0;
2912                 not = 1;
2913         }
2914
2915         return match_records(hash, buff, strlen(buff), mod, not);
2916 }
2917
2918 /*
2919  * We register the module command as a template to show others how
 * to register a command as well.
2921  */
2922
2923 static int
2924 ftrace_mod_callback(struct ftrace_hash *hash,
2925                     char *func, char *cmd, char *param, int enable)
2926 {
2927         char *mod;
2928         int ret = -EINVAL;
2929
2930         /*
2931          * cmd == 'mod' because we only registered this func
2932          * for the 'mod' ftrace_func_command.
2933          * But if you register one func with multiple commands,
2934          * you can tell which command was used by the cmd
2935          * parameter.
2936          */
2937
2938         /* we must have a module name */
2939         if (!param)
2940                 return ret;
2941
2942         mod = strsep(&param, ":");
2943         if (!strlen(mod))
2944                 return ret;
2945
2946         ret = ftrace_match_module_records(hash, func, mod);
2947         if (!ret)
2948                 ret = -EINVAL;
2949         if (ret < 0)
2950                 return ret;
2951
2952         return 0;
2953 }
2954
2955 static struct ftrace_func_command ftrace_mod_cmd = {
2956         .name                   = "mod",
2957         .func                   = ftrace_mod_callback,
2958 };
2959
2960 static int __init ftrace_mod_cmd_init(void)
2961 {
2962         return register_ftrace_command(&ftrace_mod_cmd);
2963 }
2964 core_initcall(ftrace_mod_cmd_init);
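
/*
 * User-visible effect (via the debugfs tracing directory): the "mod"
 * command registered above makes a write such as
 *
 *      echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * filter on the functions matching "write*" within the ext4 module.
 */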
2965
2966 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2967                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2968 {
2969         struct ftrace_func_probe *entry;
2970         struct hlist_head *hhd;
2971         unsigned long key;
2972
2973         key = hash_long(ip, FTRACE_HASH_BITS);
2974
2975         hhd = &ftrace_func_hash[key];
2976
2977         if (hlist_empty(hhd))
2978                 return;
2979
2980         /*
         * Disable preemption for these calls to prevent an RCU grace
         * period. This syncs the hash iteration and freeing of items
         * on the hash. rcu_read_lock() is too dangerous here.
2984          */
2985         preempt_disable_notrace();
2986         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2987                 if (entry->ip == ip)
2988                         entry->ops->func(ip, parent_ip, &entry->data);
2989         }
2990         preempt_enable_notrace();
2991 }
2992
2993 static struct ftrace_ops trace_probe_ops __read_mostly =
2994 {
2995         .func           = function_trace_probe_call,
2996         .flags          = FTRACE_OPS_FL_INITIALIZED,
2997         INIT_REGEX_LOCK(trace_probe_ops)
2998 };
2999
3000 static int ftrace_probe_registered;
3001
3002 static void __enable_ftrace_function_probe(void)
3003 {
3004         int ret;
3005         int i;
3006
3007         if (ftrace_probe_registered) {
3008                 /* still need to update the function call sites */
3009                 if (ftrace_enabled)
3010                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3011                 return;
3012         }
3013
3014         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3015                 struct hlist_head *hhd = &ftrace_func_hash[i];
3016                 if (hhd->first)
3017                         break;
3018         }
3019         /* Nothing registered? */
3020         if (i == FTRACE_FUNC_HASHSIZE)
3021                 return;
3022
3023         ret = __register_ftrace_function(&trace_probe_ops);
3024         if (!ret)
3025                 ret = ftrace_startup(&trace_probe_ops, 0);
3026
3027         ftrace_probe_registered = 1;
3028 }
3029
3030 static void __disable_ftrace_function_probe(void)
3031 {
3032         int ret;
3033         int i;
3034
3035         if (!ftrace_probe_registered)
3036                 return;
3037
3038         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3039                 struct hlist_head *hhd = &ftrace_func_hash[i];
3040                 if (hhd->first)
3041                         return;
3042         }
3043
3044         /* no more funcs left */
3045         ret = __unregister_ftrace_function(&trace_probe_ops);
3046         if (!ret)
3047                 ftrace_shutdown(&trace_probe_ops, 0);
3048
3049         ftrace_probe_registered = 0;
3050 }
3051
3052
3053 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3054 {
3055         if (entry->ops->free)
3056                 entry->ops->free(entry->ops, entry->ip, &entry->data);
3057         kfree(entry);
3058 }
3059
3060 int
3061 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3062                               void *data)
3063 {
3064         struct ftrace_func_probe *entry;
3065         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3066         struct ftrace_hash *hash;
3067         struct ftrace_page *pg;
3068         struct dyn_ftrace *rec;
3069         int type, len, not;
3070         unsigned long key;
3071         int count = 0;
3072         char *search;
3073         int ret;
3074
3075         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3076         len = strlen(search);
3077
3078         /* we do not support '!' for function probes */
3079         if (WARN_ON(not))
3080                 return -EINVAL;
3081
3082         mutex_lock(&trace_probe_ops.regex_lock);
3083
3084         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3085         if (!hash) {
3086                 count = -ENOMEM;
3087                 goto out;
3088         }
3089
3090         if (unlikely(ftrace_disabled)) {
3091                 count = -ENODEV;
3092                 goto out;
3093         }
3094
3095         mutex_lock(&ftrace_lock);
3096
3097         do_for_each_ftrace_rec(pg, rec) {
3098
3099                 if (!ftrace_match_record(rec, NULL, search, len, type))
3100                         continue;
3101
3102                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3103                 if (!entry) {
3104                         /* If we did not process any, then return error */
3105                         if (!count)
3106                                 count = -ENOMEM;
3107                         goto out_unlock;
3108                 }
3109
3110                 count++;
3111
3112                 entry->data = data;
3113
3114                 /*
3115                  * The caller might want to do something special
3116                  * for each function we find. We call the callback
3117                  * to give the caller an opportunity to do so.
3118                  */
3119                 if (ops->init) {
3120                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3121                                 /* caller does not like this func */
3122                                 kfree(entry);
3123                                 continue;
3124                         }
3125                 }
3126
3127                 ret = enter_record(hash, rec, 0);
3128                 if (ret < 0) {
3129                         kfree(entry);
3130                         count = ret;
3131                         goto out_unlock;
3132                 }
3133
3134                 entry->ops = ops;
3135                 entry->ip = rec->ip;
3136
3137                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3138                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3139
3140         } while_for_each_ftrace_rec();
3141
3142         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3143         if (ret < 0)
3144                 count = ret;
3145
3146         __enable_ftrace_function_probe();
3147
3148  out_unlock:
3149         mutex_unlock(&ftrace_lock);
3150  out:
3151         mutex_unlock(&trace_probe_ops.regex_lock);
3152         free_ftrace_hash(hash);
3153
3154         return count;
3155 }
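
/*
 * Sketch of a caller (an assumption modeled on the function trigger
 * commands; my_probe_ops and my_data are hypothetical). A positive
 * return is the number of functions hooked, negative is an error:
 *
 *      ret = register_ftrace_function_probe("sched*", &my_probe_ops,
 *                                           my_data);
 */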
3156
3157 enum {
3158         PROBE_TEST_FUNC         = 1,
3159         PROBE_TEST_DATA         = 2
3160 };
3161
3162 static void
3163 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3164                                   void *data, int flags)
3165 {
3166         struct ftrace_func_entry *rec_entry;
3167         struct ftrace_func_probe *entry;
3168         struct ftrace_func_probe *p;
3169         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3170         struct list_head free_list;
3171         struct ftrace_hash *hash;
3172         struct hlist_node *tmp;
3173         char str[KSYM_SYMBOL_LEN];
3174         int type = MATCH_FULL;
3175         int i, len = 0;
3176         char *search;
3177
3178         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3179                 glob = NULL;
3180         else if (glob) {
3181                 int not;
3182
3183                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3184                 len = strlen(search);
3185
3186                 /* we do not support '!' for function probes */
3187                 if (WARN_ON(not))
3188                         return;
3189         }
3190
3191         mutex_lock(&trace_probe_ops.regex_lock);
3192
3193         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3194         if (!hash)
3195                 /* Hmm, should report this somehow */
3196                 goto out_unlock;
3197
3198         INIT_LIST_HEAD(&free_list);
3199
3200         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3201                 struct hlist_head *hhd = &ftrace_func_hash[i];
3202
3203                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3204
3205                         /* break up if statements for readability */
3206                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3207                                 continue;
3208
3209                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3210                                 continue;
3211
3212                         /* do this last, since it is the most expensive */
3213                         if (glob) {
3214                                 kallsyms_lookup(entry->ip, NULL, NULL,
3215                                                 NULL, str);
3216                                 if (!ftrace_match(str, glob, len, type))
3217                                         continue;
3218                         }
3219
3220                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3221                         /* It is possible more than one entry had this ip */
3222                         if (rec_entry)
3223                                 free_hash_entry(hash, rec_entry);
3224
3225                         hlist_del_rcu(&entry->node);
3226                         list_add(&entry->free_list, &free_list);
3227                 }
3228         }
3229         mutex_lock(&ftrace_lock);
3230         __disable_ftrace_function_probe();
3231         /*
3232          * Remove after the disable is called. Otherwise, if the last
3233          * probe is removed, a null hash means *all enabled*.
3234          */
3235         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3236         synchronize_sched();
3237         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3238                 list_del(&entry->free_list);
3239                 ftrace_free_entry(entry);
3240         }
        mutex_unlock(&ftrace_lock);

3243  out_unlock:
3244         mutex_unlock(&trace_probe_ops.regex_lock);
3245         free_ftrace_hash(hash);
3246 }
3247
3248 void
3249 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3250                                 void *data)
3251 {
3252         __unregister_ftrace_function_probe(glob, ops, data,
3253                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3254 }
3255
3256 void
3257 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3258 {
3259         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3260 }
3261
3262 void unregister_ftrace_function_probe_all(char *glob)
3263 {
3264         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3265 }
3266
3267 static LIST_HEAD(ftrace_commands);
3268 static DEFINE_MUTEX(ftrace_cmd_mutex);
3269
3270 int register_ftrace_command(struct ftrace_func_command *cmd)
3271 {
3272         struct ftrace_func_command *p;
3273         int ret = 0;
3274
3275         mutex_lock(&ftrace_cmd_mutex);
3276         list_for_each_entry(p, &ftrace_commands, list) {
3277                 if (strcmp(cmd->name, p->name) == 0) {
3278                         ret = -EBUSY;
3279                         goto out_unlock;
3280                 }
3281         }
3282         list_add(&cmd->list, &ftrace_commands);
3283  out_unlock:
3284         mutex_unlock(&ftrace_cmd_mutex);
3285
3286         return ret;
3287 }
3288
3289 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3290 {
3291         struct ftrace_func_command *p, *n;
3292         int ret = -ENODEV;
3293
3294         mutex_lock(&ftrace_cmd_mutex);
3295         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3296                 if (strcmp(cmd->name, p->name) == 0) {
3297                         ret = 0;
3298                         list_del_init(&p->list);
3299                         goto out_unlock;
3300                 }
3301         }
3302  out_unlock:
3303         mutex_unlock(&ftrace_cmd_mutex);
3304
3305         return ret;
3306 }
3307
3308 static int ftrace_process_regex(struct ftrace_hash *hash,
3309                                 char *buff, int len, int enable)
3310 {
3311         char *func, *command, *next = buff;
3312         struct ftrace_func_command *p;
3313         int ret = -EINVAL;
3314
3315         func = strsep(&next, ":");
3316
3317         if (!next) {
3318                 ret = ftrace_match_records(hash, func, len);
3319                 if (!ret)
3320                         ret = -EINVAL;
3321                 if (ret < 0)
3322                         return ret;
3323                 return 0;
3324         }
3325
3326         /* command found */
3327
3328         command = strsep(&next, ":");
3329
3330         mutex_lock(&ftrace_cmd_mutex);
3331         list_for_each_entry(p, &ftrace_commands, list) {
3332                 if (strcmp(p->name, command) == 0) {
3333                         ret = p->func(hash, func, command, next, enable);
3334                         goto out_unlock;
3335                 }
3336         }
3337  out_unlock:
3338         mutex_unlock(&ftrace_cmd_mutex);
3339
3340         return ret;
3341 }
3342
3343 static ssize_t
3344 ftrace_regex_write(struct file *file, const char __user *ubuf,
3345                    size_t cnt, loff_t *ppos, int enable)
3346 {
3347         struct ftrace_iterator *iter;
3348         struct trace_parser *parser;
3349         ssize_t ret, read;
3350
3351         if (!cnt)
3352                 return 0;
3353
3354         if (file->f_mode & FMODE_READ) {
3355                 struct seq_file *m = file->private_data;
3356                 iter = m->private;
3357         } else
3358                 iter = file->private_data;
3359
3360         if (unlikely(ftrace_disabled))
3361                 return -ENODEV;
3362
3363         /* iter->hash is a local copy, so we don't need regex_lock */
3364
3365         parser = &iter->parser;
3366         read = trace_get_user(parser, ubuf, cnt, ppos);
3367
3368         if (read >= 0 && trace_parser_loaded(parser) &&
3369             !trace_parser_cont(parser)) {
3370                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3371                                            parser->idx, enable);
3372                 trace_parser_clear(parser);
3373                 if (ret < 0)
3374                         goto out;
3375         }
3376
3377         ret = read;
3378  out:
3379         return ret;
3380 }
3381
3382 ssize_t
3383 ftrace_filter_write(struct file *file, const char __user *ubuf,
3384                     size_t cnt, loff_t *ppos)
3385 {
3386         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3387 }
3388
3389 ssize_t
3390 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3391                      size_t cnt, loff_t *ppos)
3392 {
3393         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3394 }
3395
3396 static int
3397 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3398 {
3399         struct ftrace_func_entry *entry;
3400
3401         if (!ftrace_location(ip))
3402                 return -EINVAL;
3403
3404         if (remove) {
3405                 entry = ftrace_lookup_ip(hash, ip);
3406                 if (!entry)
3407                         return -ENOENT;
3408                 free_hash_entry(hash, entry);
3409                 return 0;
3410         }
3411
3412         return add_hash_entry(hash, ip);
3413 }
3414
3415 static int
3416 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3417                 unsigned long ip, int remove, int reset, int enable)
3418 {
3419         struct ftrace_hash **orig_hash;
3420         struct ftrace_hash *hash;
3421         int ret;
3422
3423         /* All global ops use the global ops filters */
3424         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3425                 ops = &global_ops;
3426
3427         if (unlikely(ftrace_disabled))
3428                 return -ENODEV;
3429
3430         mutex_lock(&ops->regex_lock);
3431
3432         if (enable)
3433                 orig_hash = &ops->filter_hash;
3434         else
3435                 orig_hash = &ops->notrace_hash;
3436
3437         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3438         if (!hash) {
3439                 ret = -ENOMEM;
3440                 goto out_regex_unlock;
3441         }
3442
3443         if (reset)
3444                 ftrace_filter_reset(hash);
3445         if (buf && !ftrace_match_records(hash, buf, len)) {
3446                 ret = -EINVAL;
3447                 goto out_regex_unlock;
3448         }
3449         if (ip) {
3450                 ret = ftrace_match_addr(hash, ip, remove);
3451                 if (ret < 0)
3452                         goto out_regex_unlock;
3453         }
3454
3455         mutex_lock(&ftrace_lock);
3456         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3457         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3458             && ftrace_enabled)
3459                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3460
3461         mutex_unlock(&ftrace_lock);
3462
3463  out_regex_unlock:
3464         mutex_unlock(&ops->regex_lock);
3465
3466         free_ftrace_hash(hash);
3467         return ret;
3468 }
3469
3470 static int
3471 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3472                 int reset, int enable)
3473 {
3474         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3475 }
3476
3477 /**
3478  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3479  * @ops - the ops to set the filter with
3480  * @ip - the address to add to or remove from the filter.
3481  * @remove - non zero to remove the ip from the filter
3482  * @reset - non zero to reset all filters before applying this filter.
3483  *
3484  * Filters denote which functions should be enabled when tracing is enabled.
3485  * If @ip is zero, it fails to update the filter.
3486  */
3487 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3488                          int remove, int reset)
3489 {
3490         ftrace_ops_init(ops);
3491         return ftrace_set_addr(ops, ip, remove, reset, 1);
3492 }
3493 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
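/*
 * Usage sketch (hypothetical caller, e.g. inside a module's init function):
 * filter a single function by address before registering the ops. "my_ops"
 * and the symbol name are assumptions for illustration only.
 */
#if 0
	unsigned long ip = kallsyms_lookup_name("do_fork");
	int ret;

	/* remove = 0: add @ip; reset = 1: start from an empty filter */
	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
#endif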
3494
3495 static int
3496 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3497                  int reset, int enable)
3498 {
3499         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3500 }
3501
3502 /**
3503  * ftrace_set_filter - set a function to filter on in ftrace
3504  * @ops - the ops to set the filter with
3505  * @buf - the string that holds the function filter text.
3506  * @len - the length of the string.
3507  * @reset - non zero to reset all filters before applying this filter.
3508  *
3509  * Filters denote which functions should be enabled when tracing is enabled.
3510  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3511  */
3512 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3513                        int len, int reset)
3514 {
3515         ftrace_ops_init(ops);
3516         return ftrace_set_regex(ops, buf, len, reset, 1);
3517 }
3518 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3519
3520 /**
3521  * ftrace_set_notrace - set a function to not trace in ftrace
3522  * @ops - the ops to set the notrace filter with
3523  * @buf - the string that holds the function notrace text.
3524  * @len - the length of the string.
3525  * @reset - non zero to reset all filters before applying this filter.
3526  *
3527  * Notrace Filters denote which functions should not be enabled when tracing
3528  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3529  * for tracing.
3530  */
3531 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3532                         int len, int reset)
3533 {
3534         ftrace_ops_init(ops);
3535         return ftrace_set_regex(ops, buf, len, reset, 0);
3536 }
3537 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
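/*
 * Usage sketch (hypothetical ops): the two hashes compose, so a caller can
 * trace all "sched_*" functions except schedule_tail.
 */
#if 0
	ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
	ftrace_set_notrace(&my_ops, "schedule_tail",
			   strlen("schedule_tail"), 1);
#endif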
3538 /**
3539  * ftrace_set_global_filter - set a function to filter on with global tracers
3540  * @buf - the string that holds the function filter text.
3541  * @len - the length of the string.
3542  * @reset - non zero to reset all filters before applying this filter.
3543  *
3544  * Filters denote which functions should be enabled when tracing is
3545  * enabled. If @buf is NULL and reset is set, all functions will be
3546  * enabled for tracing.
3547  */
3548 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3549 {
3550         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3551 }
3552 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3553
3554 /**
3555  * ftrace_set_global_notrace - set a function to not trace with global tracers
3556  * @buf - the string that holds the function notrace text.
3557  * @len - the length of the string.
3558  * @reset - non zero to reset all filters before applying this filter.
3559  *
3560  * Notrace Filters denote which functions should not be enabled when
3561  * tracing is enabled.
3562  * If @buf is NULL and reset is set, all functions will be enabled
3563  * for tracing.
3564  */
3565 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3566 {
3567         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3568 }
3569 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3570
3571 /*
3572  * command line interface to allow users to set filters on boot up.
3573  */
3574 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3575 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3576 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3577
3578 static int __init set_ftrace_notrace(char *str)
3579 {
3580         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3581         return 1;
3582 }
3583 __setup("ftrace_notrace=", set_ftrace_notrace);
3584
3585 static int __init set_ftrace_filter(char *str)
3586 {
3587         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3588         return 1;
3589 }
3590 __setup("ftrace_filter=", set_ftrace_filter);
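/*
 * Example boot parameters (see Documentation/kernel-parameters.txt):
 *   ftrace_filter=sys_open,sys_close
 *   ftrace_notrace=*spin_lock*
 * Entries are comma separated and use the same glob matching as
 * set_ftrace_filter.
 */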
3591
3592 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3593 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3594 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3595
3596 static int __init set_graph_function(char *str)
3597 {
3598         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3599         return 1;
3600 }
3601 __setup("ftrace_graph_filter=", set_graph_function);
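/*
 * Example (documentation only): ftrace_graph_filter=__do_page_fault
 * Only one expression is accepted per comma-separated entry, as noted
 * in set_ftrace_early_graph() below.
 */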
3602
3603 static void __init set_ftrace_early_graph(char *buf)
3604 {
3605         int ret;
3606         char *func;
3607
3608         while (buf) {
3609                 func = strsep(&buf, ",");
3610                 /* we allow only one expression at a time */
3611                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3612                                       func);
3613                 if (ret)
3614                         printk(KERN_DEBUG "ftrace: function %s not "
3615                                           "traceable\n", func);
3616         }
3617 }
3618 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3619
3620 void __init
3621 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3622 {
3623         char *func;
3624
3625         ftrace_ops_init(ops);
3626
3627         while (buf) {
3628                 func = strsep(&buf, ",");
3629                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3630         }
3631 }
3632
3633 static void __init set_ftrace_early_filters(void)
3634 {
3635         if (ftrace_filter_buf[0])
3636                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3637         if (ftrace_notrace_buf[0])
3638                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3639 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3640         if (ftrace_graph_buf[0])
3641                 set_ftrace_early_graph(ftrace_graph_buf);
3642 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3643 }
3644
3645 int ftrace_regex_release(struct inode *inode, struct file *file)
3646 {
3647         struct seq_file *m = (struct seq_file *)file->private_data;
3648         struct ftrace_iterator *iter;
3649         struct ftrace_hash **orig_hash;
3650         struct trace_parser *parser;
3651         int filter_hash;
3652         int ret;
3653
3654         if (file->f_mode & FMODE_READ) {
3655                 iter = m->private;
3656                 seq_release(inode, file);
3657         } else
3658                 iter = file->private_data;
3659
3660         parser = &iter->parser;
3661         if (trace_parser_loaded(parser)) {
3662                 parser->buffer[parser->idx] = 0;
3663                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3664         }
3665
3666         trace_parser_put(parser);
3667
3668         mutex_lock(&iter->ops->regex_lock);
3669
3670         if (file->f_mode & FMODE_WRITE) {
3671                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3672
3673                 if (filter_hash)
3674                         orig_hash = &iter->ops->filter_hash;
3675                 else
3676                         orig_hash = &iter->ops->notrace_hash;
3677
3678                 mutex_lock(&ftrace_lock);
3679                 ret = ftrace_hash_move(iter->ops, filter_hash,
3680                                        orig_hash, iter->hash);
3681                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3682                     && ftrace_enabled)
3683                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3684
3685                 mutex_unlock(&ftrace_lock);
3686         }
3687
3688         mutex_unlock(&iter->ops->regex_lock);
3689         free_ftrace_hash(iter->hash);
3690         kfree(iter);
3691
3692         return 0;
3693 }
3694
3695 static const struct file_operations ftrace_avail_fops = {
3696         .open = ftrace_avail_open,
3697         .read = seq_read,
3698         .llseek = seq_lseek,
3699         .release = seq_release_private,
3700 };
3701
3702 static const struct file_operations ftrace_enabled_fops = {
3703         .open = ftrace_enabled_open,
3704         .read = seq_read,
3705         .llseek = seq_lseek,
3706         .release = seq_release_private,
3707 };
3708
3709 static const struct file_operations ftrace_filter_fops = {
3710         .open = ftrace_filter_open,
3711         .read = seq_read,
3712         .write = ftrace_filter_write,
3713         .llseek = ftrace_filter_lseek,
3714         .release = ftrace_regex_release,
3715 };
3716
3717 static const struct file_operations ftrace_notrace_fops = {
3718         .open = ftrace_notrace_open,
3719         .read = seq_read,
3720         .write = ftrace_notrace_write,
3721         .llseek = ftrace_filter_lseek,
3722         .release = ftrace_regex_release,
3723 };
3724
3725 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3726
3727 static DEFINE_MUTEX(graph_lock);
3728
3729 int ftrace_graph_count;
3730 int ftrace_graph_filter_enabled;
3731 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3732
3733 static void *
3734 __g_next(struct seq_file *m, loff_t *pos)
3735 {
3736         if (*pos >= ftrace_graph_count)
3737                 return NULL;
3738         return &ftrace_graph_funcs[*pos];
3739 }
3740
3741 static void *
3742 g_next(struct seq_file *m, void *v, loff_t *pos)
3743 {
3744         (*pos)++;
3745         return __g_next(m, pos);
3746 }
3747
3748 static void *g_start(struct seq_file *m, loff_t *pos)
3749 {
3750         mutex_lock(&graph_lock);
3751
3752         /* Nothing set; tell g_show to print that all functions are enabled */
3753         if (!ftrace_graph_filter_enabled && !*pos)
3754                 return (void *)1;
3755
3756         return __g_next(m, pos);
3757 }
3758
3759 static void g_stop(struct seq_file *m, void *p)
3760 {
3761         mutex_unlock(&graph_lock);
3762 }
3763
3764 static int g_show(struct seq_file *m, void *v)
3765 {
3766         unsigned long *ptr = v;
3767
3768         if (!ptr)
3769                 return 0;
3770
3771         if (ptr == (unsigned long *)1) {
3772                 seq_printf(m, "#### all functions enabled ####\n");
3773                 return 0;
3774         }
3775
3776         seq_printf(m, "%ps\n", (void *)*ptr);
3777
3778         return 0;
3779 }
3780
3781 static const struct seq_operations ftrace_graph_seq_ops = {
3782         .start = g_start,
3783         .next = g_next,
3784         .stop = g_stop,
3785         .show = g_show,
3786 };
3787
3788 static int
3789 ftrace_graph_open(struct inode *inode, struct file *file)
3790 {
3791         int ret = 0;
3792
3793         if (unlikely(ftrace_disabled))
3794                 return -ENODEV;
3795
3796         mutex_lock(&graph_lock);
3797         if ((file->f_mode & FMODE_WRITE) &&
3798             (file->f_flags & O_TRUNC)) {
3799                 ftrace_graph_filter_enabled = 0;
3800                 ftrace_graph_count = 0;
3801                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3802         }
3803         mutex_unlock(&graph_lock);
3804
3805         if (file->f_mode & FMODE_READ)
3806                 ret = seq_open(file, &ftrace_graph_seq_ops);
3807
3808         return ret;
3809 }
3810
3811 static int
3812 ftrace_graph_release(struct inode *inode, struct file *file)
3813 {
3814         if (file->f_mode & FMODE_READ)
3815                 seq_release(inode, file);
3816         return 0;
3817 }
3818
3819 static int
3820 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3821 {
3822         struct dyn_ftrace *rec;
3823         struct ftrace_page *pg;
3824         int search_len;
3825         int fail = 1;
3826         int type, not;
3827         char *search;
3828         bool exists;
3829         int i;
3830
3831         /* decode regex */
3832         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3833         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3834                 return -EBUSY;
3835
3836         search_len = strlen(search);
3837
3838         mutex_lock(&ftrace_lock);
3839
3840         if (unlikely(ftrace_disabled)) {
3841                 mutex_unlock(&ftrace_lock);
3842                 return -ENODEV;
3843         }
3844
3845         do_for_each_ftrace_rec(pg, rec) {
3846
3847                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3848                         /* if it is in the array */
3849                         exists = false;
3850                         for (i = 0; i < *idx; i++) {
3851                                 if (array[i] == rec->ip) {
3852                                         exists = true;
3853                                         break;
3854                                 }
3855                         }
3856
3857                         if (!not) {
3858                                 fail = 0;
3859                                 if (!exists) {
3860                                         array[(*idx)++] = rec->ip;
3861                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3862                                                 goto out;
3863                                 }
3864                         } else {
3865                                 if (exists) {
3866                                         array[i] = array[--(*idx)];
3867                                         array[*idx] = 0;
3868                                         fail = 0;
3869                                 }
3870                         }
3871                 }
3872         } while_for_each_ftrace_rec();
3873 out:
3874         mutex_unlock(&ftrace_lock);
3875
3876         if (fail)
3877                 return -EINVAL;
3878
3879         ftrace_graph_filter_enabled = !!(*idx);
3880
3881         return 0;
3882 }
3883
3884 static ssize_t
3885 ftrace_graph_write(struct file *file, const char __user *ubuf,
3886                    size_t cnt, loff_t *ppos)
3887 {
3888         struct trace_parser parser;
3889         ssize_t read, ret;
3890
3891         if (!cnt)
3892                 return 0;
3893
3894         mutex_lock(&graph_lock);
3895
3896         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3897                 ret = -ENOMEM;
3898                 goto out_unlock;
3899         }
3900
3901         read = trace_get_user(&parser, ubuf, cnt, ppos);
3902
3903         if (read >= 0 && trace_parser_loaded(&parser)) {
3904                 parser.buffer[parser.idx] = 0;
3905
3906                 /* we allow only one expression at a time */
3907                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3908                                         parser.buffer);
3909                 if (ret)
3910                         goto out_free;
3911         }
3912
3913         ret = read;
3914
3915 out_free:
3916         trace_parser_put(&parser);
3917 out_unlock:
3918         mutex_unlock(&graph_lock);
3919
3920         return ret;
3921 }
3922
3923 static const struct file_operations ftrace_graph_fops = {
3924         .open           = ftrace_graph_open,
3925         .read           = seq_read,
3926         .write          = ftrace_graph_write,
3927         .llseek         = ftrace_filter_lseek,
3928         .release        = ftrace_graph_release,
3929 };
3930 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3931
3932 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3933 {
3934
3935         trace_create_file("available_filter_functions", 0444,
3936                         d_tracer, NULL, &ftrace_avail_fops);
3937
3938         trace_create_file("enabled_functions", 0444,
3939                         d_tracer, NULL, &ftrace_enabled_fops);
3940
3941         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3942                         NULL, &ftrace_filter_fops);
3943
3944         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3945                                     NULL, &ftrace_notrace_fops);
3946
3947 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3948         trace_create_file("set_graph_function", 0444, d_tracer,
3949                                     NULL,
3950                                     &ftrace_graph_fops);
3951 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3952
3953         return 0;
3954 }
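/*
 * With debugfs mounted at its usual location, the files created above
 * appear under /sys/kernel/debug/tracing/ (path assumes the conventional
 * debugfs mount point).
 */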
3955
3956 static int ftrace_cmp_ips(const void *a, const void *b)
3957 {
3958         const unsigned long *ipa = a;
3959         const unsigned long *ipb = b;
3960
3961         if (*ipa > *ipb)
3962                 return 1;
3963         if (*ipa < *ipb)
3964                 return -1;
3965         return 0;
3966 }
3967
3968 static void ftrace_swap_ips(void *a, void *b, int size)
3969 {
3970         unsigned long *ipa = a;
3971         unsigned long *ipb = b;
3972         unsigned long t;
3973
3974         t = *ipa;
3975         *ipa = *ipb;
3976         *ipb = t;
3977 }
3978
3979 static int ftrace_process_locs(struct module *mod,
3980                                unsigned long *start,
3981                                unsigned long *end)
3982 {
3983         struct ftrace_page *start_pg;
3984         struct ftrace_page *pg;
3985         struct dyn_ftrace *rec;
3986         unsigned long count;
3987         unsigned long *p;
3988         unsigned long addr;
3989         unsigned long flags = 0; /* Shut up gcc */
3990         int ret = -ENOMEM;
3991
3992         count = end - start;
3993
3994         if (!count)
3995                 return 0;
3996
3997         sort(start, count, sizeof(*start),
3998              ftrace_cmp_ips, ftrace_swap_ips);
3999
4000         start_pg = ftrace_allocate_pages(count);
4001         if (!start_pg)
4002                 return -ENOMEM;
4003
4004         mutex_lock(&ftrace_lock);
4005
4006         /*
4007           * The core kernel and each module need their own pages, as
4008           * modules free theirs when they are removed.
4009          * Force a new page to be allocated for modules.
4010          */
4011         if (!mod) {
4012                 WARN_ON(ftrace_pages || ftrace_pages_start);
4013                 /* First initialization */
4014                 ftrace_pages = ftrace_pages_start = start_pg;
4015         } else {
4016                 if (!ftrace_pages)
4017                         goto out;
4018
4019                 if (WARN_ON(ftrace_pages->next)) {
4020                         /* Hmm, we have free pages? */
4021                         while (ftrace_pages->next)
4022                                 ftrace_pages = ftrace_pages->next;
4023                 }
4024
4025                 ftrace_pages->next = start_pg;
4026         }
4027
4028         p = start;
4029         pg = start_pg;
4030         while (p < end) {
4031                 addr = ftrace_call_adjust(*p++);
4032                 /*
4033                  * Some architecture linkers will pad between
4034                  * the different mcount_loc sections of different
4035                  * object files to satisfy alignments.
4036                  * Skip any NULL pointers.
4037                  */
4038                 if (!addr)
4039                         continue;
4040
4041                 if (pg->index == pg->size) {
4042                         /* We should have allocated enough */
4043                         if (WARN_ON(!pg->next))
4044                                 break;
4045                         pg = pg->next;
4046                 }
4047
4048                 rec = &pg->records[pg->index++];
4049                 rec->ip = addr;
4050         }
4051
4052         /* We should have used all pages */
4053         WARN_ON(pg->next);
4054
4055         /* Assign the last page to ftrace_pages */
4056         ftrace_pages = pg;
4057
4058         /* These new locations need to be initialized */
4059         ftrace_new_pgs = start_pg;
4060
4061         /*
4062          * We only need to disable interrupts on start up
4063          * because we are modifying code that an interrupt
4064          * may execute, and the modification is not atomic.
4065          * But for modules, nothing runs the code we modify
4066          * until we are finished with it, and there's no
4067          * reason to cause large interrupt latencies while we do it.
4068          */
4069         if (!mod)
4070                 local_irq_save(flags);
4071         ftrace_update_code(mod);
4072         if (!mod)
4073                 local_irq_restore(flags);
4074         ret = 0;
4075  out:
4076         mutex_unlock(&ftrace_lock);
4077
4078         return ret;
4079 }
4080
4081 #ifdef CONFIG_MODULES
4082
4083 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4084
4085 void ftrace_release_mod(struct module *mod)
4086 {
4087         struct dyn_ftrace *rec;
4088         struct ftrace_page **last_pg;
4089         struct ftrace_page *pg;
4090         int order;
4091
4092         mutex_lock(&ftrace_lock);
4093
4094         if (ftrace_disabled)
4095                 goto out_unlock;
4096
4097         /*
4098          * Each module has its own ftrace_pages, remove
4099          * them from the list.
4100          */
4101         last_pg = &ftrace_pages_start;
4102         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4103                 rec = &pg->records[0];
4104                 if (within_module_core(rec->ip, mod)) {
4105                         /*
4106                          * As core pages are first, the first
4107                          * page should never be a module page.
4108                          */
4109                         if (WARN_ON(pg == ftrace_pages_start))
4110                                 goto out_unlock;
4111
4112                         /* Check if we are deleting the last page */
4113                         if (pg == ftrace_pages)
4114                                 ftrace_pages = next_to_ftrace_page(last_pg);
4115
4116                         *last_pg = pg->next;
4117                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4118                         free_pages((unsigned long)pg->records, order);
4119                         kfree(pg);
4120                 } else
4121                         last_pg = &pg->next;
4122         }
4123  out_unlock:
4124         mutex_unlock(&ftrace_lock);
4125 }
4126
4127 static void ftrace_init_module(struct module *mod,
4128                                unsigned long *start, unsigned long *end)
4129 {
4130         if (ftrace_disabled || start == end)
4131                 return;
4132         ftrace_process_locs(mod, start, end);
4133 }
4134
4135 static int ftrace_module_notify_enter(struct notifier_block *self,
4136                                       unsigned long val, void *data)
4137 {
4138         struct module *mod = data;
4139
4140         if (val == MODULE_STATE_COMING)
4141                 ftrace_init_module(mod, mod->ftrace_callsites,
4142                                    mod->ftrace_callsites +
4143                                    mod->num_ftrace_callsites);
4144         return 0;
4145 }
4146
4147 static int ftrace_module_notify_exit(struct notifier_block *self,
4148                                      unsigned long val, void *data)
4149 {
4150         struct module *mod = data;
4151
4152         if (val == MODULE_STATE_GOING)
4153                 ftrace_release_mod(mod);
4154
4155         return 0;
4156 }
4157 #else
4158 static int ftrace_module_notify_enter(struct notifier_block *self,
4159                                       unsigned long val, void *data)
4160 {
4161         return 0;
4162 }
4163 static int ftrace_module_notify_exit(struct notifier_block *self,
4164                                      unsigned long val, void *data)
4165 {
4166         return 0;
4167 }
4168 #endif /* CONFIG_MODULES */
4169
4170 struct notifier_block ftrace_module_enter_nb = {
4171         .notifier_call = ftrace_module_notify_enter,
4172         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4173 };
4174
4175 struct notifier_block ftrace_module_exit_nb = {
4176         .notifier_call = ftrace_module_notify_exit,
4177         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4178 };
4179
4180 extern unsigned long __start_mcount_loc[];
4181 extern unsigned long __stop_mcount_loc[];
4182
4183 void __init ftrace_init(void)
4184 {
4185         unsigned long count, addr, flags;
4186         int ret;
4187
4188         /* Keep the ftrace pointer to the stub */
4189         addr = (unsigned long)ftrace_stub;
4190
4191         local_irq_save(flags);
4192         ftrace_dyn_arch_init(&addr);
4193         local_irq_restore(flags);
4194
4195         /* ftrace_dyn_arch_init places the return code in addr */
4196         if (addr)
4197                 goto failed;
4198
4199         count = __stop_mcount_loc - __start_mcount_loc;
4200
4201         ret = ftrace_dyn_table_alloc(count);
4202         if (ret)
4203                 goto failed;
4204
4205         last_ftrace_enabled = ftrace_enabled = 1;
4206
4207         ret = ftrace_process_locs(NULL,
4208                                   __start_mcount_loc,
4209                                   __stop_mcount_loc);
4210
4211         ret = register_module_notifier(&ftrace_module_enter_nb);
4212         if (ret)
4213                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4214
4215         ret = register_module_notifier(&ftrace_module_exit_nb);
4216         if (ret)
4217                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4218
4219         set_ftrace_early_filters();
4220
4221         return;
4222  failed:
4223         ftrace_disabled = 1;
4224 }
4225
4226 #else
4227
4228 static struct ftrace_ops global_ops = {
4229         .func                   = ftrace_stub,
4230         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4231         INIT_REGEX_LOCK(global_ops)
4232 };
4233
4234 static int __init ftrace_nodyn_init(void)
4235 {
4236         ftrace_enabled = 1;
4237         return 0;
4238 }
4239 core_initcall(ftrace_nodyn_init);
4240
4241 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4242 static inline void ftrace_startup_enable(int command) { }
4243 /* Keep as macros so we do not need to define the commands */
4244 # define ftrace_startup(ops, command)                   \
4245         ({                                              \
4246                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4247                 0;                                      \
4248         })
4249 # define ftrace_shutdown(ops, command)  do { } while (0)
4250 # define ftrace_startup_sysctl()        do { } while (0)
4251 # define ftrace_shutdown_sysctl()       do { } while (0)
4252
4253 static inline int
4254 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4255 {
4256         return 1;
4257 }
4258
4259 #endif /* CONFIG_DYNAMIC_FTRACE */
4260
4261 static void
4262 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4263                         struct ftrace_ops *op, struct pt_regs *regs)
4264 {
4265         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4266                 return;
4267
4268         /*
4269           * Some of the ops may be dynamically allocated;
4270           * they must be freed after a synchronize_sched().
4271          */
4272         preempt_disable_notrace();
4273         trace_recursion_set(TRACE_CONTROL_BIT);
4274         do_for_each_ftrace_op(op, ftrace_control_list) {
4275                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4276                     !ftrace_function_local_disabled(op) &&
4277                     ftrace_ops_test(op, ip, regs))
4278                         op->func(ip, parent_ip, op, regs);
4279         } while_for_each_ftrace_op(op);
4280         trace_recursion_clear(TRACE_CONTROL_BIT);
4281         preempt_enable_notrace();
4282 }
4283
4284 static struct ftrace_ops control_ops = {
4285         .func   = ftrace_ops_control_func,
4286         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4287         INIT_REGEX_LOCK(control_ops)
4288 };
4289
4290 static inline void
4291 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4292                        struct ftrace_ops *ignored, struct pt_regs *regs)
4293 {
4294         struct ftrace_ops *op;
4295         int bit;
4296
4297         if (function_trace_stop)
4298                 return;
4299
4300         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4301         if (bit < 0)
4302                 return;
4303
4304         /*
4305           * Some of the ops may be dynamically allocated;
4306           * they must be freed after a synchronize_sched().
4307          */
4308         preempt_disable_notrace();
4309         do_for_each_ftrace_op(op, ftrace_ops_list) {
4310                 if (ftrace_ops_test(op, ip, regs))
4311                         op->func(ip, parent_ip, op, regs);
4312         } while_for_each_ftrace_op(op);
4313         preempt_enable_notrace();
4314         trace_clear_recursion(bit);
4315 }
4316
4317 /*
4318  * Some archs only support passing ip and parent_ip. Even though
4319  * the list function ignores the op parameter, we do not want any
4320  * C side effects, where a function is called without the caller
4321  * sending a third parameter.
4322  * Archs should support both regs and ftrace_ops at the same time.
4323  * If they support ftrace_ops, it is assumed they support regs.
4324  * If callbacks want to use regs, they must either check for regs
4325  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4326  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4327  * An architecture can pass partial regs with ftrace_ops and still
4328  * set ARCH_SUPPORTS_FTRACE_OPS.
4329  */
4330 #if ARCH_SUPPORTS_FTRACE_OPS
4331 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4332                                  struct ftrace_ops *op, struct pt_regs *regs)
4333 {
4334         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4335 }
4336 #else
4337 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4338 {
4339         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4340 }
4341 #endif
4342
4343 static void clear_ftrace_swapper(void)
4344 {
4345         struct task_struct *p;
4346         int cpu;
4347
4348         get_online_cpus();
4349         for_each_online_cpu(cpu) {
4350                 p = idle_task(cpu);
4351                 clear_tsk_trace_trace(p);
4352         }
4353         put_online_cpus();
4354 }
4355
4356 static void set_ftrace_swapper(void)
4357 {
4358         struct task_struct *p;
4359         int cpu;
4360
4361         get_online_cpus();
4362         for_each_online_cpu(cpu) {
4363                 p = idle_task(cpu);
4364                 set_tsk_trace_trace(p);
4365         }
4366         put_online_cpus();
4367 }
4368
4369 static void clear_ftrace_pid(struct pid *pid)
4370 {
4371         struct task_struct *p;
4372
4373         rcu_read_lock();
4374         do_each_pid_task(pid, PIDTYPE_PID, p) {
4375                 clear_tsk_trace_trace(p);
4376         } while_each_pid_task(pid, PIDTYPE_PID, p);
4377         rcu_read_unlock();
4378
4379         put_pid(pid);
4380 }
4381
4382 static void set_ftrace_pid(struct pid *pid)
4383 {
4384         struct task_struct *p;
4385
4386         rcu_read_lock();
4387         do_each_pid_task(pid, PIDTYPE_PID, p) {
4388                 set_tsk_trace_trace(p);
4389         } while_each_pid_task(pid, PIDTYPE_PID, p);
4390         rcu_read_unlock();
4391 }
4392
4393 static void clear_ftrace_pid_task(struct pid *pid)
4394 {
4395         if (pid == ftrace_swapper_pid)
4396                 clear_ftrace_swapper();
4397         else
4398                 clear_ftrace_pid(pid);
4399 }
4400
4401 static void set_ftrace_pid_task(struct pid *pid)
4402 {
4403         if (pid == ftrace_swapper_pid)
4404                 set_ftrace_swapper();
4405         else
4406                 set_ftrace_pid(pid);
4407 }
4408
4409 static int ftrace_pid_add(int p)
4410 {
4411         struct pid *pid;
4412         struct ftrace_pid *fpid;
4413         int ret = -EINVAL;
4414
4415         mutex_lock(&ftrace_lock);
4416
4417         if (!p)
4418                 pid = ftrace_swapper_pid;
4419         else
4420                 pid = find_get_pid(p);
4421
4422         if (!pid)
4423                 goto out;
4424
4425         ret = 0;
4426
4427         list_for_each_entry(fpid, &ftrace_pids, list)
4428                 if (fpid->pid == pid)
4429                         goto out_put;
4430
4431         ret = -ENOMEM;
4432
4433         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4434         if (!fpid)
4435                 goto out_put;
4436
4437         list_add(&fpid->list, &ftrace_pids);
4438         fpid->pid = pid;
4439
4440         set_ftrace_pid_task(pid);
4441
4442         ftrace_update_pid_func();
4443         ftrace_startup_enable(0);
4444
4445         mutex_unlock(&ftrace_lock);
4446         return 0;
4447
4448 out_put:
4449         if (pid != ftrace_swapper_pid)
4450                 put_pid(pid);
4451
4452 out:
4453         mutex_unlock(&ftrace_lock);
4454         return ret;
4455 }
4456
4457 static void ftrace_pid_reset(void)
4458 {
4459         struct ftrace_pid *fpid, *safe;
4460
4461         mutex_lock(&ftrace_lock);
4462         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4463                 struct pid *pid = fpid->pid;
4464
4465                 clear_ftrace_pid_task(pid);
4466
4467                 list_del(&fpid->list);
4468                 kfree(fpid);
4469         }
4470
4471         ftrace_update_pid_func();
4472         ftrace_startup_enable(0);
4473
4474         mutex_unlock(&ftrace_lock);
4475 }
4476
4477 static void *fpid_start(struct seq_file *m, loff_t *pos)
4478 {
4479         mutex_lock(&ftrace_lock);
4480
4481         if (list_empty(&ftrace_pids) && (!*pos))
4482                 return (void *) 1;
4483
4484         return seq_list_start(&ftrace_pids, *pos);
4485 }
4486
4487 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4488 {
4489         if (v == (void *)1)
4490                 return NULL;
4491
4492         return seq_list_next(v, &ftrace_pids, pos);
4493 }
4494
4495 static void fpid_stop(struct seq_file *m, void *p)
4496 {
4497         mutex_unlock(&ftrace_lock);
4498 }
4499
4500 static int fpid_show(struct seq_file *m, void *v)
4501 {
4502         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4503
4504         if (v == (void *)1) {
4505                 seq_printf(m, "no pid\n");
4506                 return 0;
4507         }
4508
4509         if (fpid->pid == ftrace_swapper_pid)
4510                 seq_printf(m, "swapper tasks\n");
4511         else
4512                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4513
4514         return 0;
4515 }
4516
4517 static const struct seq_operations ftrace_pid_sops = {
4518         .start = fpid_start,
4519         .next = fpid_next,
4520         .stop = fpid_stop,
4521         .show = fpid_show,
4522 };
4523
4524 static int
4525 ftrace_pid_open(struct inode *inode, struct file *file)
4526 {
4527         int ret = 0;
4528
4529         if ((file->f_mode & FMODE_WRITE) &&
4530             (file->f_flags & O_TRUNC))
4531                 ftrace_pid_reset();
4532
4533         if (file->f_mode & FMODE_READ)
4534                 ret = seq_open(file, &ftrace_pid_sops);
4535
4536         return ret;
4537 }
4538
4539 static ssize_t
4540 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4541                    size_t cnt, loff_t *ppos)
4542 {
4543         char buf[64], *tmp;
4544         long val;
4545         int ret;
4546
4547         if (cnt >= sizeof(buf))
4548                 return -EINVAL;
4549
4550         if (copy_from_user(&buf, ubuf, cnt))
4551                 return -EFAULT;
4552
4553         buf[cnt] = 0;
4554
4555         /*
4556          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4557          * to clear the filter quietly.
4558          */
4559         tmp = strstrip(buf);
4560         if (strlen(tmp) == 0)
4561                 return 1;
4562
4563         ret = kstrtol(tmp, 10, &val);
4564         if (ret < 0)
4565                 return ret;
4566
4567         ret = ftrace_pid_add(val);
4568
4569         return ret ? ret : cnt;
4570 }
4571
4572 static int
4573 ftrace_pid_release(struct inode *inode, struct file *file)
4574 {
4575         if (file->f_mode & FMODE_READ)
4576                 seq_release(inode, file);
4577
4578         return 0;
4579 }
4580
4581 static const struct file_operations ftrace_pid_fops = {
4582         .open           = ftrace_pid_open,
4583         .write          = ftrace_pid_write,
4584         .read           = seq_read,
4585         .llseek         = ftrace_filter_lseek,
4586         .release        = ftrace_pid_release,
4587 };
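/*
 * Usage from user space (documentation only):
 *   echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234
 *   echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # swapper tasks
 *   echo      > /sys/kernel/debug/tracing/set_ftrace_pid   # clear the filter
 */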
4588
4589 static __init int ftrace_init_debugfs(void)
4590 {
4591         struct dentry *d_tracer;
4592
4593         d_tracer = tracing_init_dentry();
4594         if (!d_tracer)
4595                 return 0;
4596
4597         ftrace_init_dyn_debugfs(d_tracer);
4598
4599         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4600                             NULL, &ftrace_pid_fops);
4601
4602         ftrace_profile_debugfs(d_tracer);
4603
4604         return 0;
4605 }
4606 fs_initcall(ftrace_init_debugfs);
4607
4608 /**
4609  * ftrace_kill - kill ftrace
4610  *
4611  * This function should be used by panic code. It stops ftrace
4612  * but in a not so nice way: ftrace is disabled outright and
4613  * cannot be re-enabled afterwards.
4614  */
4615 void ftrace_kill(void)
4616 {
4617         ftrace_disabled = 1;
4618         ftrace_enabled = 0;
4619         clear_ftrace_function();
4620 }
4621
4622 /**
4623  * ftrace_is_dead - Test if ftrace is dead or not.
4624  */
4625 int ftrace_is_dead(void)
4626 {
4627         return ftrace_disabled;
4628 }
4629
4630 /**
4631  * register_ftrace_function - register a function for profiling
4632  * @ops - ops structure that holds the function for profiling.
4633  *
4634  * Register a function to be called for every traced function
4635  * in the kernel.
4636  *
4637  * Note: @ops->func and all the functions it calls must be labeled
4638  *       with "notrace", otherwise it will go into a
4639  *       recursive loop.
4640  */
4641 int register_ftrace_function(struct ftrace_ops *ops)
4642 {
4643         int ret = -1;
4644
4645         ftrace_ops_init(ops);
4646
4647         mutex_lock(&ftrace_lock);
4648
4649         ret = __register_ftrace_function(ops);
4650         if (!ret)
4651                 ret = ftrace_startup(ops, 0);
4652
4653         mutex_unlock(&ftrace_lock);
4654
4655         return ret;
4656 }
4657 EXPORT_SYMBOL_GPL(register_ftrace_function);
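/*
 * Minimal registration sketch (hypothetical module code; all names are
 * made up). The callback must be notrace so it does not recurse into
 * itself when it is itself traced.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs on every traced function entry; keep it cheap */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_trace_ops); ... unregister_ftrace_function(&my_trace_ops); */
#endif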
4658
4659 /**
4660  * unregister_ftrace_function - unregister a function for profiling.
4661  * @ops - ops structure that holds the function to unregister
4662  *
4663  * Unregister a function that was added to be called by ftrace profiling.
4664  */
4665 int unregister_ftrace_function(struct ftrace_ops *ops)
4666 {
4667         int ret;
4668
4669         mutex_lock(&ftrace_lock);
4670         ret = __unregister_ftrace_function(ops);
4671         if (!ret)
4672                 ftrace_shutdown(ops, 0);
4673         mutex_unlock(&ftrace_lock);
4674
4675         return ret;
4676 }
4677 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4678
4679 int
4680 ftrace_enable_sysctl(struct ctl_table *table, int write,
4681                      void __user *buffer, size_t *lenp,
4682                      loff_t *ppos)
4683 {
4684         int ret = -ENODEV;
4685
4686         mutex_lock(&ftrace_lock);
4687
4688         if (unlikely(ftrace_disabled))
4689                 goto out;
4690
4691         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4692
4693         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4694                 goto out;
4695
4696         last_ftrace_enabled = !!ftrace_enabled;
4697
4698         if (ftrace_enabled) {
4699
4700                 ftrace_startup_sysctl();
4701
4702                 /* we are starting ftrace again */
4703                 if (ftrace_ops_list != &ftrace_list_end)
4704                         update_ftrace_function();
4705
4706         } else {
4707                 /* stopping ftrace calls (just send to ftrace_stub) */
4708                 ftrace_trace_function = ftrace_stub;
4709
4710                 ftrace_shutdown_sysctl();
4711         }
4712
4713  out:
4714         mutex_unlock(&ftrace_lock);
4715         return ret;
4716 }
4717
4718 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4719
4720 static int ftrace_graph_active;
4721 static struct notifier_block ftrace_suspend_notifier;
4722
4723 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4724 {
4725         return 0;
4726 }
4727
4728 /* The callbacks that hook a function */
4729 trace_func_graph_ret_t ftrace_graph_return =
4730                         (trace_func_graph_ret_t)ftrace_stub;
4731 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4732
4733 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4734 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4735 {
4736         int i;
4737         int ret = 0;
4738         unsigned long flags;
4739         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4740         struct task_struct *g, *t;
4741
4742         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4743                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4744                                         * sizeof(struct ftrace_ret_stack),
4745                                         GFP_KERNEL);
4746                 if (!ret_stack_list[i]) {
4747                         start = 0;
4748                         end = i;
4749                         ret = -ENOMEM;
4750                         goto free;
4751                 }
4752         }
4753
4754         read_lock_irqsave(&tasklist_lock, flags);
4755         do_each_thread(g, t) {
4756                 if (start == end) {
4757                         ret = -EAGAIN;
4758                         goto unlock;
4759                 }
4760
4761                 if (t->ret_stack == NULL) {
4762                         atomic_set(&t->tracing_graph_pause, 0);
4763                         atomic_set(&t->trace_overrun, 0);
4764                         t->curr_ret_stack = -1;
4765                         /* Make sure the tasks see the -1 first: */
4766                         smp_wmb();
4767                         t->ret_stack = ret_stack_list[start++];
4768                 }
4769         } while_each_thread(g, t);
4770
4771 unlock:
4772         read_unlock_irqrestore(&tasklist_lock, flags);
4773 free:
4774         for (i = start; i < end; i++)
4775                 kfree(ret_stack_list[i]);
4776         return ret;
4777 }
4778
4779 static void
4780 ftrace_graph_probe_sched_switch(void *ignore,
4781                         struct task_struct *prev, struct task_struct *next)
4782 {
4783         unsigned long long timestamp;
4784         int index;
4785
4786         /*
4787          * Does the user want to count the time a function was asleep?
4788          * If so, do not update the time stamps.
4789          */
4790         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4791                 return;
4792
4793         timestamp = trace_clock_local();
4794
4795         prev->ftrace_timestamp = timestamp;
4796
4797         /* only process tasks that we timestamped */
4798         if (!next->ftrace_timestamp)
4799                 return;
4800
4801         /*
4802          * Update all the counters in next to make up for the
4803          * time next was sleeping.
4804          */
4805         timestamp -= next->ftrace_timestamp;
4806
4807         for (index = next->curr_ret_stack; index >= 0; index--)
4808                 next->ret_stack[index].calltime += timestamp;
4809 }
4810
4811 /* Allocate a return stack for each task */
4812 static int start_graph_tracing(void)
4813 {
4814         struct ftrace_ret_stack **ret_stack_list;
4815         int ret, cpu;
4816
4817         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4818                                 sizeof(struct ftrace_ret_stack *),
4819                                 GFP_KERNEL);
4820
4821         if (!ret_stack_list)
4822                 return -ENOMEM;
4823
4824         /* The cpu_boot init_task->ret_stack will never be freed */
4825         for_each_online_cpu(cpu) {
4826                 if (!idle_task(cpu)->ret_stack)
4827                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4828         }
4829
4830         do {
4831                 ret = alloc_retstack_tasklist(ret_stack_list);
4832         } while (ret == -EAGAIN);
4833
4834         if (!ret) {
4835                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4836                 if (ret)
4837                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4838                                 " probe to kernel_sched_switch\n");
4839         }
4840
4841         kfree(ret_stack_list);
4842         return ret;
4843 }
4844
4845 /*
4846  * Hibernation protection.
4847  * The state of the current task is too unstable during
4848  * suspend/restore to disk. We want to protect against that.
4849  */
4850 static int
4851 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4852                                                         void *unused)
4853 {
4854         switch (state) {
4855         case PM_HIBERNATION_PREPARE:
4856                 pause_graph_tracing();
4857                 break;
4858
4859         case PM_POST_HIBERNATION:
4860                 unpause_graph_tracing();
4861                 break;
4862         }
4863         return NOTIFY_DONE;
4864 }
4865
4866 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4867                         trace_func_graph_ent_t entryfunc)
4868 {
4869         int ret = 0;
4870
4871         mutex_lock(&ftrace_lock);
4872
4873         /* we currently allow only one tracer registered at a time */
4874         if (ftrace_graph_active) {
4875                 ret = -EBUSY;
4876                 goto out;
4877         }
4878
4879         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4880         register_pm_notifier(&ftrace_suspend_notifier);
4881
4882         ftrace_graph_active++;
4883         ret = start_graph_tracing();
4884         if (ret) {
4885                 ftrace_graph_active--;
4886                 goto out;
4887         }
4888
4889         ftrace_graph_return = retfunc;
4890         ftrace_graph_entry = entryfunc;
4891
4892         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4893
4894 out:
4895         mutex_unlock(&ftrace_lock);
4896         return ret;
4897 }
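/*
 * Callback sketch (hypothetical names): the entry callback returns
 * non-zero to record the function, zero to skip it; the return callback
 * fires when the function exits.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace this function */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

/* register_ftrace_graph(my_graph_return, my_graph_entry); */
#endif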
4898
4899 void unregister_ftrace_graph(void)
4900 {
4901         mutex_lock(&ftrace_lock);
4902
4903         if (unlikely(!ftrace_graph_active))
4904                 goto out;
4905
4906         ftrace_graph_active--;
4907         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4908         ftrace_graph_entry = ftrace_graph_entry_stub;
4909         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4910         unregister_pm_notifier(&ftrace_suspend_notifier);
4911         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4912
4913  out:
4914         mutex_unlock(&ftrace_lock);
4915 }
4916
4917 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4918
4919 static void
4920 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4921 {
4922         atomic_set(&t->tracing_graph_pause, 0);
4923         atomic_set(&t->trace_overrun, 0);
4924         t->ftrace_timestamp = 0;
4925         /* make curr_ret_stack visible before we add the ret_stack */
4926         smp_wmb();
4927         t->ret_stack = ret_stack;
4928 }
4929
4930 /*
4931  * Allocate a return stack for the idle task. This may be the first
4932  * time through, or it may run again when CPU hotplug brings a CPU online.
4933  */
4934 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4935 {
4936         t->curr_ret_stack = -1;
4937         /*
4938          * The idle task has no parent, it either has its own
4939          * stack or no stack at all.
4940          */
4941         if (t->ret_stack)
4942                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4943
4944         if (ftrace_graph_active) {
4945                 struct ftrace_ret_stack *ret_stack;
4946
4947                 ret_stack = per_cpu(idle_ret_stack, cpu);
4948                 if (!ret_stack) {
4949                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4950                                             * sizeof(struct ftrace_ret_stack),
4951                                             GFP_KERNEL);
4952                         if (!ret_stack)
4953                                 return;
4954                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4955                 }
4956                 graph_init_task(t, ret_stack);
4957         }
4958 }
4959
4960 /* Allocate a return stack for newly created task */
4961 void ftrace_graph_init_task(struct task_struct *t)
4962 {
4963         /* Make sure we do not use the parent ret_stack */
4964         t->ret_stack = NULL;
4965         t->curr_ret_stack = -1;
4966
4967         if (ftrace_graph_active) {
4968                 struct ftrace_ret_stack *ret_stack;
4969
4970                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4971                                 * sizeof(struct ftrace_ret_stack),
4972                                 GFP_KERNEL);
4973                 if (!ret_stack)
4974                         return;
4975                 graph_init_task(t, ret_stack);
4976         }
4977 }
4978
4979 void ftrace_graph_exit_task(struct task_struct *t)
4980 {
4981         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4982
4983         t->ret_stack = NULL;
4984         /* NULL must become visible to IRQs before we free it: */
4985         barrier();
4986
4987         kfree(ret_stack);
4988 }
4989
4990 void ftrace_graph_stop(void)
4991 {
4992         ftrace_stop();
4993 }
4994 #endif