2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
33 #include <trace/events/sched.h>
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
38 #include "trace_output.h"
39 #include "trace_stat.h"
41 #define FTRACE_WARN_ON(cond) \
49 #define FTRACE_WARN_ON_ONCE(cond) \
52 if (WARN_ON_ONCE(___r)) \
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
73 struct list_head list;
78 * ftrace_disabled is set when an anomaly is discovered.
79 * ftrace_disabled is much stronger than ftrace_enabled.
81 static int ftrace_disabled __read_mostly;
83 static DEFINE_MUTEX(ftrace_lock);
85 static struct ftrace_ops ftrace_list_end __read_mostly =
90 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
91 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
92 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
94 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
95 static struct ftrace_ops global_ops;
98 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
101 * Traverse the ftrace_global_list, invoking all entries. The reason that we
102 * can use rcu_dereference_raw() is that elements removed from this list
103 * are simply leaked, so there is no need to interact with a grace-period
104 * mechanism. The rcu_dereference_raw() calls are needed to handle
105 * concurrent insertions into the ftrace_global_list.
107 * Silly Alpha and silly pointer-speculation compiler optimizations!
109 static void ftrace_global_list_func(unsigned long ip,
110 unsigned long parent_ip)
112 struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
114 while (op != &ftrace_list_end) {
115 op->func(ip, parent_ip);
116 op = rcu_dereference_raw(op->next); /*see above*/
120 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
122 if (!test_tsk_trace_trace(current))
125 ftrace_pid_function(ip, parent_ip);
128 static void set_ftrace_pid_function(ftrace_func_t func)
130 /* do not set ftrace_pid_function to itself! */
131 if (func != ftrace_pid_func)
132 ftrace_pid_function = func;
136 * clear_ftrace_function - reset the ftrace function
138 * This NULLs the ftrace function and in essence stops
139 * tracing. There may be lag before the change is visible to all CPUs.
141 void clear_ftrace_function(void)
143 ftrace_trace_function = ftrace_stub;
144 __ftrace_trace_function = ftrace_stub;
145 ftrace_pid_function = ftrace_stub;
148 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
150 * For those archs that do not test function_trace_stop in their
151 * mcount call site, we need to do it from C.
153 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
155 if (function_trace_stop)
158 __ftrace_trace_function(ip, parent_ip);
162 static void update_global_ops(void)
167 * If there's only one function registered, then call that
168 * function directly. Otherwise, we need to iterate over the
169 * registered callers.
171 if (ftrace_global_list == &ftrace_list_end ||
172 ftrace_global_list->next == &ftrace_list_end)
173 func = ftrace_global_list->func;
175 func = ftrace_global_list_func;
177 /* If we filter on pids, update to use the pid function */
178 if (!list_empty(&ftrace_pids)) {
179 set_ftrace_pid_function(func);
180 func = ftrace_pid_func;
183 global_ops.func = func;
186 static void update_ftrace_function(void)
193 * If we are at the end of the list and this ops is
194 * not dynamic, then have the mcount trampoline call
195 * the function directly
197 if (ftrace_ops_list == &ftrace_list_end ||
198 (ftrace_ops_list->next == &ftrace_list_end &&
199 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
200 func = ftrace_ops_list->func;
202 func = ftrace_ops_list_func;
204 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
205 ftrace_trace_function = func;
207 __ftrace_trace_function = func;
208 ftrace_trace_function = ftrace_test_stop_func;
212 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
216 * We are entering ops into the list but another
217 * CPU might be walking that list. We need to make sure
218 * the ops->next pointer is valid before another CPU sees
219 * the ops pointer included in the list.
221 rcu_assign_pointer(*list, ops);
224 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
226 struct ftrace_ops **p;
229 * If we are removing the last function, then simply point
230 * to the ftrace_stub.
232 if (*list == ops && ops->next == &ftrace_list_end) {
233 *list = &ftrace_list_end;
237 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
248 static int __register_ftrace_function(struct ftrace_ops *ops)
253 if (FTRACE_WARN_ON(ops == &global_ops))
256 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
259 if (!core_kernel_data((unsigned long)ops))
260 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
262 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
263 int first = ftrace_global_list == &ftrace_list_end;
264 add_ftrace_ops(&ftrace_global_list, ops);
265 ops->flags |= FTRACE_OPS_FL_ENABLED;
267 add_ftrace_ops(&ftrace_ops_list, &global_ops);
269 add_ftrace_ops(&ftrace_ops_list, ops);
272 update_ftrace_function();
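/*
 * Usage sketch (illustrative only, not part of this file): an in-kernel
 * user normally goes through register_ftrace_function() rather than
 * calling __register_ftrace_function() directly.  The names my_callback
 * and my_ops below are hypothetical.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... called for every traced function; must be fast and
 *		    safe to run in any context ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */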
277 static int __unregister_ftrace_function(struct ftrace_ops *ops)
284 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
287 if (FTRACE_WARN_ON(ops == &global_ops))
290 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
291 ret = remove_ftrace_ops(&ftrace_global_list, ops);
292 if (!ret && ftrace_global_list == &ftrace_list_end)
293 ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
295 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
297 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
303 update_ftrace_function();
306 * Dynamic ops may be freed; we must make sure that all
307 * callers are done before leaving this function.
309 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
315 static void ftrace_update_pid_func(void)
317 /* Only do something if we are tracing something */
318 if (ftrace_trace_function == ftrace_stub)
321 update_ftrace_function();
324 #ifdef CONFIG_FUNCTION_PROFILER
325 struct ftrace_profile {
326 struct hlist_node node;
328 unsigned long counter;
329 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
330 unsigned long long time;
331 unsigned long long time_squared;
335 struct ftrace_profile_page {
336 struct ftrace_profile_page *next;
338 struct ftrace_profile records[];
341 struct ftrace_profile_stat {
343 struct hlist_head *hash;
344 struct ftrace_profile_page *pages;
345 struct ftrace_profile_page *start;
346 struct tracer_stat stat;
349 #define PROFILE_RECORDS_SIZE \
350 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
352 #define PROFILES_PER_PAGE \
353 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
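/*
 * Rough sizing example (assuming a 64-bit kernel, 4K pages and
 * CONFIG_FUNCTION_GRAPH_TRACER enabled): struct ftrace_profile is on
 * the order of 48 bytes, so PROFILES_PER_PAGE works out to roughly
 * (4096 - header) / 48, i.e. around 80-85 records per page.  Exact
 * numbers depend on the architecture and configuration.
 */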
355 static int ftrace_profile_bits __read_mostly;
356 static int ftrace_profile_enabled __read_mostly;
358 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
359 static DEFINE_MUTEX(ftrace_profile_lock);
361 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
363 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
366 function_stat_next(void *v, int idx)
368 struct ftrace_profile *rec = v;
369 struct ftrace_profile_page *pg;
371 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
377 if ((void *)rec >= (void *)&pg->records[pg->index]) {
381 rec = &pg->records[0];
389 static void *function_stat_start(struct tracer_stat *trace)
391 struct ftrace_profile_stat *stat =
392 container_of(trace, struct ftrace_profile_stat, stat);
394 if (!stat || !stat->start)
397 return function_stat_next(&stat->start->records[0], 0);
400 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
401 /* function graph compares on total time */
402 static int function_stat_cmp(void *p1, void *p2)
404 struct ftrace_profile *a = p1;
405 struct ftrace_profile *b = p2;
407 if (a->time < b->time)
409 if (a->time > b->time)
415 /* not function graph compares against hits */
416 static int function_stat_cmp(void *p1, void *p2)
418 struct ftrace_profile *a = p1;
419 struct ftrace_profile *b = p2;
421 if (a->counter < b->counter)
423 if (a->counter > b->counter)
430 static int function_stat_headers(struct seq_file *m)
432 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
433 seq_printf(m, " Function "
436 "--- ---- --- ---\n");
438 seq_printf(m, " Function Hit\n"
444 static int function_stat_show(struct seq_file *m, void *v)
446 struct ftrace_profile *rec = v;
447 char str[KSYM_SYMBOL_LEN];
449 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
450 static struct trace_seq s;
451 unsigned long long avg;
452 unsigned long long stddev;
454 mutex_lock(&ftrace_profile_lock);
456 /* we raced with function_profile_reset() */
457 if (unlikely(rec->counter == 0)) {
462 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
463 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
468 do_div(avg, rec->counter);
470 /* Sample standard deviation (s^2) */
471 if (rec->counter <= 1)
474 stddev = rec->time_squared - rec->counter * avg * avg;
476 * Divide only by 1000 for the ns^2 -> us^2 conversion.
477 * trace_print_graph_duration() will divide by 1000 again.
479 do_div(stddev, (rec->counter - 1) * 1000);
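/*
 * For reference, this is the usual one-pass sample variance formula:
 *
 *	s^2 = (sum(t^2) - n * mean(t)^2) / (n - 1)
 *
 * with sum(t^2) in rec->time_squared, n in rec->counter and mean(t)
 * in avg, before the unit scaling described above.
 */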
483 trace_print_graph_duration(rec->time, &s);
484 trace_seq_puts(&s, " ");
485 trace_print_graph_duration(avg, &s);
486 trace_seq_puts(&s, " ");
487 trace_print_graph_duration(stddev, &s);
488 trace_print_seq(m, &s);
492 mutex_unlock(&ftrace_profile_lock);
497 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
499 struct ftrace_profile_page *pg;
501 pg = stat->pages = stat->start;
504 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
509 memset(stat->hash, 0,
510 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
513 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
515 struct ftrace_profile_page *pg;
520 /* If we already allocated, do nothing */
524 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
528 #ifdef CONFIG_DYNAMIC_FTRACE
529 functions = ftrace_update_tot_cnt;
532 * We do not know the number of functions that exist because
533 * dynamic tracing is what counts them. From past experience
534 * we know there are around 20K functions. That should be more than enough.
535 * It is highly unlikely we will execute every function in the kernel.
541 pg = stat->start = stat->pages;
543 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
545 for (i = 0; i < pages; i++) {
546 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
557 unsigned long tmp = (unsigned long)pg;
563 free_page((unsigned long)stat->pages);
570 static int ftrace_profile_init_cpu(int cpu)
572 struct ftrace_profile_stat *stat;
575 stat = &per_cpu(ftrace_profile_stats, cpu);
578 /* If the profile is already created, simply reset it */
579 ftrace_profile_reset(stat);
584 * We are profiling all functions, but usually only a few thousand
585 * functions are hit. We'll make a hash of 1024 items.
587 size = FTRACE_PROFILE_HASH_SIZE;
589 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
594 if (!ftrace_profile_bits) {
597 for (; size; size >>= 1)
598 ftrace_profile_bits++;
601 /* Preallocate the function profiling pages */
602 if (ftrace_profile_pages_init(stat) < 0) {
611 static int ftrace_profile_init(void)
616 for_each_online_cpu(cpu) {
617 ret = ftrace_profile_init_cpu(cpu);
625 /* interrupts must be disabled */
626 static struct ftrace_profile *
627 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
629 struct ftrace_profile *rec;
630 struct hlist_head *hhd;
631 struct hlist_node *n;
634 key = hash_long(ip, ftrace_profile_bits);
635 hhd = &stat->hash[key];
637 if (hlist_empty(hhd))
640 hlist_for_each_entry_rcu(rec, n, hhd, node) {
648 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
649 struct ftrace_profile *rec)
653 key = hash_long(rec->ip, ftrace_profile_bits);
654 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
658 * The memory is already allocated; this simply finds a new record to use.
660 static struct ftrace_profile *
661 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
663 struct ftrace_profile *rec = NULL;
665 /* prevent recursion (from NMIs) */
666 if (atomic_inc_return(&stat->disabled) != 1)
670 * Try to find the function again since an NMI
671 * could have added it
673 rec = ftrace_find_profiled_func(stat, ip);
677 if (stat->pages->index == PROFILES_PER_PAGE) {
678 if (!stat->pages->next)
680 stat->pages = stat->pages->next;
683 rec = &stat->pages->records[stat->pages->index++];
685 ftrace_add_profile(stat, rec);
688 atomic_dec(&stat->disabled);
694 function_profile_call(unsigned long ip, unsigned long parent_ip)
696 struct ftrace_profile_stat *stat;
697 struct ftrace_profile *rec;
700 if (!ftrace_profile_enabled)
703 local_irq_save(flags);
705 stat = &__get_cpu_var(ftrace_profile_stats);
706 if (!stat->hash || !ftrace_profile_enabled)
709 rec = ftrace_find_profiled_func(stat, ip);
711 rec = ftrace_profile_alloc(stat, ip);
718 local_irq_restore(flags);
721 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
722 static int profile_graph_entry(struct ftrace_graph_ent *trace)
724 function_profile_call(trace->func, 0);
728 static void profile_graph_return(struct ftrace_graph_ret *trace)
730 struct ftrace_profile_stat *stat;
731 unsigned long long calltime;
732 struct ftrace_profile *rec;
735 local_irq_save(flags);
736 stat = &__get_cpu_var(ftrace_profile_stats);
737 if (!stat->hash || !ftrace_profile_enabled)
740 /* If the calltime was zero'd ignore it */
741 if (!trace->calltime)
744 calltime = trace->rettime - trace->calltime;
746 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
749 index = trace->depth;
751 /* Append this call time to the parent time to subtract */
753 current->ret_stack[index - 1].subtime += calltime;
755 if (current->ret_stack[index].subtime < calltime)
756 calltime -= current->ret_stack[index].subtime;
761 rec = ftrace_find_profiled_func(stat, trace->func);
763 rec->time += calltime;
764 rec->time_squared += calltime * calltime;
768 local_irq_restore(flags);
771 static int register_ftrace_profiler(void)
773 return register_ftrace_graph(&profile_graph_return,
774 &profile_graph_entry);
777 static void unregister_ftrace_profiler(void)
779 unregister_ftrace_graph();
782 static struct ftrace_ops ftrace_profile_ops __read_mostly =
784 .func = function_profile_call,
787 static int register_ftrace_profiler(void)
789 return register_ftrace_function(&ftrace_profile_ops);
792 static void unregister_ftrace_profiler(void)
794 unregister_ftrace_function(&ftrace_profile_ops);
796 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
799 ftrace_profile_write(struct file *filp, const char __user *ubuf,
800 size_t cnt, loff_t *ppos)
803 char buf[64]; /* big enough to hold a number */
806 if (cnt >= sizeof(buf))
809 if (copy_from_user(&buf, ubuf, cnt))
814 ret = strict_strtoul(buf, 10, &val);
820 mutex_lock(&ftrace_profile_lock);
821 if (ftrace_profile_enabled ^ val) {
823 ret = ftrace_profile_init();
829 ret = register_ftrace_profiler();
834 ftrace_profile_enabled = 1;
836 ftrace_profile_enabled = 0;
838 * unregister_ftrace_profiler calls stop_machine
839 * so this acts like a synchronize_sched().
841 unregister_ftrace_profiler();
845 mutex_unlock(&ftrace_profile_lock);
853 ftrace_profile_read(struct file *filp, char __user *ubuf,
854 size_t cnt, loff_t *ppos)
856 char buf[64]; /* big enough to hold a number */
859 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
863 static const struct file_operations ftrace_profile_fops = {
864 .open = tracing_open_generic,
865 .read = ftrace_profile_read,
866 .write = ftrace_profile_write,
867 .llseek = default_llseek,
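/*
 * From user space the profiler is typically driven through debugfs,
 * e.g. (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * where function0, function1, ... are the per-cpu stat files created
 * below in ftrace_profile_debugfs().
 */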
870 /* used to initialize the real stat files */
871 static struct tracer_stat function_stats __initdata = {
873 .stat_start = function_stat_start,
874 .stat_next = function_stat_next,
875 .stat_cmp = function_stat_cmp,
876 .stat_headers = function_stat_headers,
877 .stat_show = function_stat_show
880 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
882 struct ftrace_profile_stat *stat;
883 struct dentry *entry;
888 for_each_possible_cpu(cpu) {
889 stat = &per_cpu(ftrace_profile_stats, cpu);
891 /* allocate enough for function name + cpu number */
892 name = kmalloc(32, GFP_KERNEL);
895 * The files created are permanent; if something happens
896 * we still do not free the memory.
899 "Could not allocate stat file for cpu %d\n",
903 stat->stat = function_stats;
904 snprintf(name, 32, "function%d", cpu);
905 stat->stat.name = name;
906 ret = register_stat_tracer(&stat->stat);
909 "Could not register function stat for cpu %d\n",
916 entry = debugfs_create_file("function_profile_enabled", 0644,
917 d_tracer, NULL, &ftrace_profile_fops);
919 pr_warning("Could not create debugfs "
920 "'function_profile_enabled' entry\n");
923 #else /* CONFIG_FUNCTION_PROFILER */
924 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
927 #endif /* CONFIG_FUNCTION_PROFILER */
929 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
931 #ifdef CONFIG_DYNAMIC_FTRACE
933 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
934 # error Dynamic ftrace depends on MCOUNT_RECORD
937 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
939 struct ftrace_func_probe {
940 struct hlist_node node;
941 struct ftrace_probe_ops *ops;
949 FTRACE_ENABLE_CALLS = (1 << 0),
950 FTRACE_DISABLE_CALLS = (1 << 1),
951 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
952 FTRACE_START_FUNC_RET = (1 << 3),
953 FTRACE_STOP_FUNC_RET = (1 << 4),
955 struct ftrace_func_entry {
956 struct hlist_node hlist;
961 unsigned long size_bits;
962 struct hlist_head *buckets;
968 * We make these constant because no one should touch them,
969 * but they are used as the default "empty hash", to avoid allocating
970 * it all the time. These are in a read only section such that if
971 * anyone does try to modify it, it will cause an exception.
973 static const struct hlist_head empty_buckets[1];
974 static const struct ftrace_hash empty_hash = {
975 .buckets = (struct hlist_head *)empty_buckets,
977 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
979 static struct ftrace_ops global_ops = {
981 .notrace_hash = EMPTY_HASH,
982 .filter_hash = EMPTY_HASH,
985 static struct dyn_ftrace *ftrace_new_addrs;
987 static DEFINE_MUTEX(ftrace_regex_lock);
990 struct ftrace_page *next;
992 struct dyn_ftrace records[];
995 #define ENTRIES_PER_PAGE \
996 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
998 /* estimate from running different kernels */
999 #define NR_TO_INIT 10000
1001 static struct ftrace_page *ftrace_pages_start;
1002 static struct ftrace_page *ftrace_pages;
1004 static struct dyn_ftrace *ftrace_free_records;
1006 static struct ftrace_func_entry *
1007 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1010 struct ftrace_func_entry *entry;
1011 struct hlist_head *hhd;
1012 struct hlist_node *n;
1017 if (hash->size_bits > 0)
1018 key = hash_long(ip, hash->size_bits);
1022 hhd = &hash->buckets[key];
1024 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1025 if (entry->ip == ip)
1031 static void __add_hash_entry(struct ftrace_hash *hash,
1032 struct ftrace_func_entry *entry)
1034 struct hlist_head *hhd;
1037 if (hash->size_bits)
1038 key = hash_long(entry->ip, hash->size_bits);
1042 hhd = &hash->buckets[key];
1043 hlist_add_head(&entry->hlist, hhd);
1047 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1049 struct ftrace_func_entry *entry;
1051 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1056 __add_hash_entry(hash, entry);
1062 free_hash_entry(struct ftrace_hash *hash,
1063 struct ftrace_func_entry *entry)
1065 hlist_del(&entry->hlist);
1071 remove_hash_entry(struct ftrace_hash *hash,
1072 struct ftrace_func_entry *entry)
1074 hlist_del(&entry->hlist);
1078 static void ftrace_hash_clear(struct ftrace_hash *hash)
1080 struct hlist_head *hhd;
1081 struct hlist_node *tp, *tn;
1082 struct ftrace_func_entry *entry;
1083 int size = 1 << hash->size_bits;
1089 for (i = 0; i < size; i++) {
1090 hhd = &hash->buckets[i];
1091 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1092 free_hash_entry(hash, entry);
1094 FTRACE_WARN_ON(hash->count);
1097 static void free_ftrace_hash(struct ftrace_hash *hash)
1099 if (!hash || hash == EMPTY_HASH)
1101 ftrace_hash_clear(hash);
1102 kfree(hash->buckets);
1106 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1108 struct ftrace_hash *hash;
1110 hash = container_of(rcu, struct ftrace_hash, rcu);
1111 free_ftrace_hash(hash);
1114 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1116 if (!hash || hash == EMPTY_HASH)
1118 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1121 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1123 struct ftrace_hash *hash;
1126 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1130 size = 1 << size_bits;
1131 hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1133 if (!hash->buckets) {
1138 hash->size_bits = size_bits;
1143 static struct ftrace_hash *
1144 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1146 struct ftrace_func_entry *entry;
1147 struct ftrace_hash *new_hash;
1148 struct hlist_node *tp;
1153 new_hash = alloc_ftrace_hash(size_bits);
1158 if (!hash || !hash->count)
1161 size = 1 << hash->size_bits;
1162 for (i = 0; i < size; i++) {
1163 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1164 ret = add_hash_entry(new_hash, entry->ip);
1170 FTRACE_WARN_ON(new_hash->count != hash->count);
1175 free_ftrace_hash(new_hash);
1180 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1182 struct ftrace_func_entry *entry;
1183 struct hlist_node *tp, *tn;
1184 struct hlist_head *hhd;
1185 struct ftrace_hash *old_hash;
1186 struct ftrace_hash *new_hash;
1188 int size = src->count;
1193 * If the new source is empty, just free dst and assign it the empty hash.
1197 free_ftrace_hash_rcu(*dst);
1198 rcu_assign_pointer(*dst, EMPTY_HASH);
1203 * Make the hash size about 1/2 the # found
1205 for (size /= 2; size; size >>= 1)
1208 /* Don't allocate too much */
1209 if (bits > FTRACE_HASH_MAX_BITS)
1210 bits = FTRACE_HASH_MAX_BITS;
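/*
 * Worked example: if src->count is 50, the loop above yields bits = 5
 * (50/2 = 25 needs 5 bits), so the new hash gets 1 << 5 = 32 buckets,
 * i.e. roughly one or two entries per bucket.
 */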
1212 new_hash = alloc_ftrace_hash(bits);
1216 size = 1 << src->size_bits;
1217 for (i = 0; i < size; i++) {
1218 hhd = &src->buckets[i];
1219 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1221 key = hash_long(entry->ip, bits);
1224 remove_hash_entry(src, entry);
1225 __add_hash_entry(new_hash, entry);
1230 rcu_assign_pointer(*dst, new_hash);
1231 free_ftrace_hash_rcu(old_hash);
1237 * Test the hashes for this ops to see if we want to call
1238 * the ops->func or not.
1240 * It's a match if the ip is in the ops->filter_hash or
1241 * the filter_hash does not exist or is empty, AND
1243 * the ip is not in the ops->notrace_hash.
1245 * This needs to be called with preemption disabled as
1246 * the hashes are freed with call_rcu_sched().
1249 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1251 struct ftrace_hash *filter_hash;
1252 struct ftrace_hash *notrace_hash;
1255 filter_hash = rcu_dereference_raw(ops->filter_hash);
1256 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1258 if ((!filter_hash || !filter_hash->count ||
1259 ftrace_lookup_ip(filter_hash, ip)) &&
1260 (!notrace_hash || !notrace_hash->count ||
1261 !ftrace_lookup_ip(notrace_hash, ip)))
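/*
 * For example, an ops with an empty filter_hash and a single
 * notrace_hash entry for "schedule" matches every function except
 * schedule, while an ops with only "kmalloc" in its filter_hash
 * matches kmalloc and nothing else.
 */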
1270 * This is a double for-loop. Do not use 'break' to break out of the outer loop;
1271 * you must use a goto.
1273 #define do_for_each_ftrace_rec(pg, rec) \
1274 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1276 for (_____i = 0; _____i < pg->index; _____i++) { \
1277 rec = &pg->records[_____i];
1279 #define while_for_each_ftrace_rec() \
1283 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1287 struct ftrace_hash *hash;
1288 struct ftrace_hash *other_hash;
1289 struct ftrace_page *pg;
1290 struct dyn_ftrace *rec;
1294 /* Only update if the ops has been registered */
1295 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1299 * In the filter_hash case:
1300 * If the count is zero, we update all records.
1301 * Otherwise we just update the items in the hash.
1303 * In the notrace_hash case:
1304 * We enable the update in the hash.
1305 * As disabling notrace means enabling the tracing,
1306 * and enabling notrace means disabling, the inc variable gets inverted.
1310 hash = ops->filter_hash;
1311 other_hash = ops->notrace_hash;
1312 if (!hash || !hash->count)
1316 hash = ops->notrace_hash;
1317 other_hash = ops->filter_hash;
1319 * If the notrace hash has no items,
1320 * then there's nothing to do.
1322 if (hash && !hash->count)
1326 do_for_each_ftrace_rec(pg, rec) {
1327 int in_other_hash = 0;
1333 * Only the filter_hash affects all records.
1334 * Update if the record is not in the notrace hash.
1336 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1339 in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1340 in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1345 if (filter_hash && in_hash && !in_other_hash)
1347 else if (!filter_hash && in_hash &&
1348 (in_other_hash || !other_hash->count))
1356 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1359 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1364 /* Shortcut: if we handled all records, we are done. */
1365 if (!all && count == hash->count)
1367 } while_for_each_ftrace_rec();
1370 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1373 __ftrace_hash_rec_update(ops, filter_hash, 0);
1376 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1379 __ftrace_hash_rec_update(ops, filter_hash, 1);
1382 static void ftrace_free_rec(struct dyn_ftrace *rec)
1384 rec->freelist = ftrace_free_records;
1385 ftrace_free_records = rec;
1386 rec->flags |= FTRACE_FL_FREE;
1389 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1391 struct dyn_ftrace *rec;
1393 /* First check for freed records */
1394 if (ftrace_free_records) {
1395 rec = ftrace_free_records;
1397 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1398 FTRACE_WARN_ON_ONCE(1);
1399 ftrace_free_records = NULL;
1403 ftrace_free_records = rec->freelist;
1404 memset(rec, 0, sizeof(*rec));
1408 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1409 if (!ftrace_pages->next) {
1410 /* allocate another page */
1411 ftrace_pages->next =
1412 (void *)get_zeroed_page(GFP_KERNEL);
1413 if (!ftrace_pages->next)
1416 ftrace_pages = ftrace_pages->next;
1419 return &ftrace_pages->records[ftrace_pages->index++];
1422 static struct dyn_ftrace *
1423 ftrace_record_ip(unsigned long ip)
1425 struct dyn_ftrace *rec;
1427 if (ftrace_disabled)
1430 rec = ftrace_alloc_dyn_node(ip);
1435 rec->newlist = ftrace_new_addrs;
1436 ftrace_new_addrs = rec;
1441 static void print_ip_ins(const char *fmt, unsigned char *p)
1445 printk(KERN_CONT "%s", fmt);
1447 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1448 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1451 static void ftrace_bug(int failed, unsigned long ip)
1455 FTRACE_WARN_ON_ONCE(1);
1456 pr_info("ftrace faulted on modifying ");
1460 FTRACE_WARN_ON_ONCE(1);
1461 pr_info("ftrace failed to modify ");
1463 print_ip_ins(" actual: ", (unsigned char *)ip);
1464 printk(KERN_CONT "\n");
1467 FTRACE_WARN_ON_ONCE(1);
1468 pr_info("ftrace faulted on writing ");
1472 FTRACE_WARN_ON_ONCE(1);
1473 pr_info("ftrace faulted on unknown error ");
1479 /* Return 1 if the address range is reserved for ftrace */
1480 int ftrace_text_reserved(void *start, void *end)
1482 struct dyn_ftrace *rec;
1483 struct ftrace_page *pg;
1485 do_for_each_ftrace_rec(pg, rec) {
1486 if (rec->ip <= (unsigned long)end &&
1487 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1489 } while_for_each_ftrace_rec();
1495 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1497 unsigned long ftrace_addr;
1498 unsigned long flag = 0UL;
1500 ftrace_addr = (unsigned long)FTRACE_ADDR;
1503 * If we are enabling tracing:
1505 * If the record has a ref count, then we need to enable it
1506 * because someone is using it.
1508 * Otherwise we make sure it is disabled.
1510 * If we are disabling tracing, then disable all records that are enabled.
1513 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1514 flag = FTRACE_FL_ENABLED;
1516 /* If the state of this record hasn't changed, then do nothing */
1517 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1521 rec->flags |= FTRACE_FL_ENABLED;
1522 return ftrace_make_call(rec, ftrace_addr);
1525 rec->flags &= ~FTRACE_FL_ENABLED;
1526 return ftrace_make_nop(NULL, rec, ftrace_addr);
1529 static void ftrace_replace_code(int enable)
1531 struct dyn_ftrace *rec;
1532 struct ftrace_page *pg;
1535 if (unlikely(ftrace_disabled))
1538 do_for_each_ftrace_rec(pg, rec) {
1539 /* Skip over free records */
1540 if (rec->flags & FTRACE_FL_FREE)
1543 failed = __ftrace_replace_code(rec, enable);
1545 ftrace_bug(failed, rec->ip);
1546 /* Stop processing */
1549 } while_for_each_ftrace_rec();
1553 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1560 if (unlikely(ftrace_disabled))
1563 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1565 ftrace_bug(ret, ip);
1572 * archs can override this function if they must do something
1573 * before the code is modified.
1575 int __weak ftrace_arch_code_modify_prepare(void)
1581 * archs can override this function if they must do something
1582 * after the code is modified.
1584 int __weak ftrace_arch_code_modify_post_process(void)
1589 static int __ftrace_modify_code(void *data)
1591 int *command = data;
1593 if (*command & FTRACE_ENABLE_CALLS)
1594 ftrace_replace_code(1);
1595 else if (*command & FTRACE_DISABLE_CALLS)
1596 ftrace_replace_code(0);
1598 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1599 ftrace_update_ftrace_func(ftrace_trace_function);
1601 if (*command & FTRACE_START_FUNC_RET)
1602 ftrace_enable_ftrace_graph_caller();
1603 else if (*command & FTRACE_STOP_FUNC_RET)
1604 ftrace_disable_ftrace_graph_caller();
1609 static void ftrace_run_update_code(int command)
1613 ret = ftrace_arch_code_modify_prepare();
1614 FTRACE_WARN_ON(ret);
1618 stop_machine(__ftrace_modify_code, &command, NULL);
1620 ret = ftrace_arch_code_modify_post_process();
1621 FTRACE_WARN_ON(ret);
1624 static ftrace_func_t saved_ftrace_func;
1625 static int ftrace_start_up;
1626 static int global_start_up;
1628 static void ftrace_startup_enable(int command)
1630 if (saved_ftrace_func != ftrace_trace_function) {
1631 saved_ftrace_func = ftrace_trace_function;
1632 command |= FTRACE_UPDATE_TRACE_FUNC;
1635 if (!command || !ftrace_enabled)
1638 ftrace_run_update_code(command);
1641 static int ftrace_startup(struct ftrace_ops *ops, int command)
1643 bool hash_enable = true;
1645 if (unlikely(ftrace_disabled))
1649 command |= FTRACE_ENABLE_CALLS;
1651 /* ops marked global share the filter hashes */
1652 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1654 /* Don't update hash if global is already set */
1655 if (global_start_up)
1656 hash_enable = false;
1660 ops->flags |= FTRACE_OPS_FL_ENABLED;
1662 ftrace_hash_rec_enable(ops, 1);
1664 ftrace_startup_enable(command);
1669 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1671 bool hash_disable = true;
1673 if (unlikely(ftrace_disabled))
1678 * Just warn in case of imbalance; no need to kill ftrace, it's not
1679 * critical, but the ftrace_call callers may never be NOPped again after
1680 * further ftrace uses.
1682 WARN_ON_ONCE(ftrace_start_up < 0);
1684 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1687 WARN_ON_ONCE(global_start_up < 0);
1688 /* Don't update hash if global still has users */
1689 if (global_start_up) {
1690 WARN_ON_ONCE(!ftrace_start_up);
1691 hash_disable = false;
1696 ftrace_hash_rec_disable(ops, 1);
1698 if (ops != &global_ops || !global_start_up)
1699 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1701 if (!ftrace_start_up)
1702 command |= FTRACE_DISABLE_CALLS;
1704 if (saved_ftrace_func != ftrace_trace_function) {
1705 saved_ftrace_func = ftrace_trace_function;
1706 command |= FTRACE_UPDATE_TRACE_FUNC;
1709 if (!command || !ftrace_enabled)
1712 ftrace_run_update_code(command);
1715 static void ftrace_startup_sysctl(void)
1717 if (unlikely(ftrace_disabled))
1720 /* Force update next time */
1721 saved_ftrace_func = NULL;
1722 /* ftrace_start_up is true if we want ftrace running */
1723 if (ftrace_start_up)
1724 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1727 static void ftrace_shutdown_sysctl(void)
1729 if (unlikely(ftrace_disabled))
1732 /* ftrace_start_up is true if ftrace is running */
1733 if (ftrace_start_up)
1734 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1737 static cycle_t ftrace_update_time;
1738 static unsigned long ftrace_update_cnt;
1739 unsigned long ftrace_update_tot_cnt;
1741 static int ftrace_update_code(struct module *mod)
1743 struct dyn_ftrace *p;
1744 cycle_t start, stop;
1746 start = ftrace_now(raw_smp_processor_id());
1747 ftrace_update_cnt = 0;
1749 while (ftrace_new_addrs) {
1751 /* If something went wrong, bail without enabling anything */
1752 if (unlikely(ftrace_disabled))
1755 p = ftrace_new_addrs;
1756 ftrace_new_addrs = p->newlist;
1760 * Do the initial record conversion from mcount jump
1761 * to the NOP instructions.
1763 if (!ftrace_code_disable(mod, p)) {
1769 ftrace_update_cnt++;
1772 * If the tracing is enabled, go ahead and enable the record.
1774 * The reason not to enable the record immediately is the
1775 * inherent check of ftrace_make_nop/ftrace_make_call for
1776 * correct previous instructions. Doing the NOP
1777 * conversion first puts the module into the correct state, thus
1778 * passing the ftrace_make_call check.
1780 if (ftrace_start_up) {
1781 int failed = __ftrace_replace_code(p, 1);
1783 ftrace_bug(failed, p->ip);
1789 stop = ftrace_now(raw_smp_processor_id());
1790 ftrace_update_time = stop - start;
1791 ftrace_update_tot_cnt += ftrace_update_cnt;
1796 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1798 struct ftrace_page *pg;
1802 /* allocate a few pages */
1803 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1804 if (!ftrace_pages_start)
1808 * Allocate a few more pages.
1810 * TODO: have some parser search vmlinux before
1811 * final linking to find all calls to ftrace. Then we could:
1813 * a) know how many pages to allocate, and
1815 * b) set up the table then.
1817 * The dynamic code is still necessary for modules.
1821 pg = ftrace_pages = ftrace_pages_start;
1823 cnt = num_to_init / ENTRIES_PER_PAGE;
1824 pr_info("ftrace: allocating %ld entries in %d pages\n",
1825 num_to_init, cnt + 1);
1827 for (i = 0; i < cnt; i++) {
1828 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1830 /* If we fail, we'll try later anyway */
1841 FTRACE_ITER_FILTER = (1 << 0),
1842 FTRACE_ITER_NOTRACE = (1 << 1),
1843 FTRACE_ITER_PRINTALL = (1 << 2),
1844 FTRACE_ITER_HASH = (1 << 3),
1845 FTRACE_ITER_ENABLED = (1 << 4),
1848 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1850 struct ftrace_iterator {
1853 struct ftrace_page *pg;
1854 struct dyn_ftrace *func;
1855 struct ftrace_func_probe *probe;
1856 struct trace_parser parser;
1857 struct ftrace_hash *hash;
1858 struct ftrace_ops *ops;
1865 t_hash_next(struct seq_file *m, loff_t *pos)
1867 struct ftrace_iterator *iter = m->private;
1868 struct hlist_node *hnd = NULL;
1869 struct hlist_head *hhd;
1875 hnd = &iter->probe->node;
1877 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1880 hhd = &ftrace_func_hash[iter->hidx];
1882 if (hlist_empty(hhd)) {
1898 if (WARN_ON_ONCE(!hnd))
1901 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1906 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1908 struct ftrace_iterator *iter = m->private;
1912 if (iter->func_pos > *pos)
1916 for (l = 0; l <= (*pos - iter->func_pos); ) {
1917 p = t_hash_next(m, &l);
1924 /* Only set this if we have an item */
1925 iter->flags |= FTRACE_ITER_HASH;
1931 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1933 struct ftrace_func_probe *rec;
1936 if (WARN_ON_ONCE(!rec))
1939 if (rec->ops->print)
1940 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1942 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1945 seq_printf(m, ":%p", rec->data);
1952 t_next(struct seq_file *m, void *v, loff_t *pos)
1954 struct ftrace_iterator *iter = m->private;
1955 struct ftrace_ops *ops = &global_ops;
1956 struct dyn_ftrace *rec = NULL;
1958 if (unlikely(ftrace_disabled))
1961 if (iter->flags & FTRACE_ITER_HASH)
1962 return t_hash_next(m, pos);
1965 iter->pos = iter->func_pos = *pos;
1967 if (iter->flags & FTRACE_ITER_PRINTALL)
1968 return t_hash_start(m, pos);
1971 if (iter->idx >= iter->pg->index) {
1972 if (iter->pg->next) {
1973 iter->pg = iter->pg->next;
1978 rec = &iter->pg->records[iter->idx++];
1979 if ((rec->flags & FTRACE_FL_FREE) ||
1981 ((iter->flags & FTRACE_ITER_FILTER) &&
1982 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1984 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1985 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1987 ((iter->flags & FTRACE_ITER_ENABLED) &&
1988 !(rec->flags & ~FTRACE_FL_MASK))) {
1996 return t_hash_start(m, pos);
2003 static void reset_iter_read(struct ftrace_iterator *iter)
2007 iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
2010 static void *t_start(struct seq_file *m, loff_t *pos)
2012 struct ftrace_iterator *iter = m->private;
2013 struct ftrace_ops *ops = &global_ops;
2017 mutex_lock(&ftrace_lock);
2019 if (unlikely(ftrace_disabled))
2023 * If an lseek was done, then reset and start from the beginning.
2025 if (*pos < iter->pos)
2026 reset_iter_read(iter);
2029 * For set_ftrace_filter reading, if we have the filter
2030 * off, we can short-cut and just print out that all
2031 * functions are enabled.
2033 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2035 return t_hash_start(m, pos);
2036 iter->flags |= FTRACE_ITER_PRINTALL;
2037 /* reset in case of seek/pread */
2038 iter->flags &= ~FTRACE_ITER_HASH;
2042 if (iter->flags & FTRACE_ITER_HASH)
2043 return t_hash_start(m, pos);
2046 * Unfortunately, we need to restart at ftrace_pages_start
2047 * every time we let go of the ftrace_lock mutex. This is because
2048 * those pointers can change without the lock.
2050 iter->pg = ftrace_pages_start;
2052 for (l = 0; l <= *pos; ) {
2053 p = t_next(m, p, &l);
2059 if (iter->flags & FTRACE_ITER_FILTER)
2060 return t_hash_start(m, pos);
2068 static void t_stop(struct seq_file *m, void *p)
2070 mutex_unlock(&ftrace_lock);
2073 static int t_show(struct seq_file *m, void *v)
2075 struct ftrace_iterator *iter = m->private;
2076 struct dyn_ftrace *rec;
2078 if (iter->flags & FTRACE_ITER_HASH)
2079 return t_hash_show(m, iter);
2081 if (iter->flags & FTRACE_ITER_PRINTALL) {
2082 seq_printf(m, "#### all functions enabled ####\n");
2091 seq_printf(m, "%ps", (void *)rec->ip);
2092 if (iter->flags & FTRACE_ITER_ENABLED)
2093 seq_printf(m, " (%ld)",
2094 rec->flags & ~FTRACE_FL_MASK);
2095 seq_printf(m, "\n");
2100 static const struct seq_operations show_ftrace_seq_ops = {
2108 ftrace_avail_open(struct inode *inode, struct file *file)
2110 struct ftrace_iterator *iter;
2113 if (unlikely(ftrace_disabled))
2116 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2120 iter->pg = ftrace_pages_start;
2122 ret = seq_open(file, &show_ftrace_seq_ops);
2124 struct seq_file *m = file->private_data;
2135 ftrace_enabled_open(struct inode *inode, struct file *file)
2137 struct ftrace_iterator *iter;
2140 if (unlikely(ftrace_disabled))
2143 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2147 iter->pg = ftrace_pages_start;
2148 iter->flags = FTRACE_ITER_ENABLED;
2150 ret = seq_open(file, &show_ftrace_seq_ops);
2152 struct seq_file *m = file->private_data;
2162 static void ftrace_filter_reset(struct ftrace_hash *hash)
2164 mutex_lock(&ftrace_lock);
2165 ftrace_hash_clear(hash);
2166 mutex_unlock(&ftrace_lock);
2170 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2171 struct inode *inode, struct file *file)
2173 struct ftrace_iterator *iter;
2174 struct ftrace_hash *hash;
2177 if (unlikely(ftrace_disabled))
2180 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2184 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2189 if (flag & FTRACE_ITER_NOTRACE)
2190 hash = ops->notrace_hash;
2192 hash = ops->filter_hash;
2197 if (file->f_mode & FMODE_WRITE) {
2198 mutex_lock(&ftrace_lock);
2199 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2200 mutex_unlock(&ftrace_lock);
2203 trace_parser_put(&iter->parser);
2209 mutex_lock(&ftrace_regex_lock);
2211 if ((file->f_mode & FMODE_WRITE) &&
2212 (file->f_flags & O_TRUNC))
2213 ftrace_filter_reset(iter->hash);
2215 if (file->f_mode & FMODE_READ) {
2216 iter->pg = ftrace_pages_start;
2218 ret = seq_open(file, &show_ftrace_seq_ops);
2220 struct seq_file *m = file->private_data;
2224 free_ftrace_hash(iter->hash);
2225 trace_parser_put(&iter->parser);
2229 file->private_data = iter;
2230 mutex_unlock(&ftrace_regex_lock);
2236 ftrace_filter_open(struct inode *inode, struct file *file)
2238 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2243 ftrace_notrace_open(struct inode *inode, struct file *file)
2245 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2250 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2254 if (file->f_mode & FMODE_READ)
2255 ret = seq_lseek(file, offset, origin);
2257 file->f_pos = ret = 1;
2262 static int ftrace_match(char *str, char *regex, int len, int type)
2269 if (strcmp(str, regex) == 0)
2272 case MATCH_FRONT_ONLY:
2273 if (strncmp(str, regex, len) == 0)
2276 case MATCH_MIDDLE_ONLY:
2277 if (strstr(str, regex))
2280 case MATCH_END_ONLY:
2282 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
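/*
 * For reference, filter_parse_regex() classifies patterns roughly as:
 *
 *	"schedule"	-> MATCH_FULL        (exact match)
 *	"sched*"	-> MATCH_FRONT_ONLY  (prefix match)
 *	"*lock"		-> MATCH_END_ONLY    (suffix match)
 *	"*idle*"	-> MATCH_MIDDLE_ONLY (substring match)
 *
 * and a leading '!' sets the 'not' flag to invert the selection.
 */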
2291 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2293 struct ftrace_func_entry *entry;
2296 entry = ftrace_lookup_ip(hash, rec->ip);
2298 /* Do nothing if it doesn't exist */
2302 free_hash_entry(hash, entry);
2304 /* Do nothing if it exists */
2308 ret = add_hash_entry(hash, rec->ip);
2314 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2315 char *regex, int len, int type)
2317 char str[KSYM_SYMBOL_LEN];
2320 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2323 /* module lookup requires matching the module */
2324 if (!modname || strcmp(modname, mod))
2327 /* blank search means to match all funcs in the mod */
2332 return ftrace_match(str, regex, len, type);
2336 match_records(struct ftrace_hash *hash, char *buff,
2337 int len, char *mod, int not)
2339 unsigned search_len = 0;
2340 struct ftrace_page *pg;
2341 struct dyn_ftrace *rec;
2342 int type = MATCH_FULL;
2343 char *search = buff;
2348 type = filter_parse_regex(buff, len, &search, ¬);
2349 search_len = strlen(search);
2352 mutex_lock(&ftrace_lock);
2354 if (unlikely(ftrace_disabled))
2357 do_for_each_ftrace_rec(pg, rec) {
2359 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2360 ret = enter_record(hash, rec, not);
2367 } while_for_each_ftrace_rec();
2369 mutex_unlock(&ftrace_lock);
2375 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2377 return match_records(hash, buff, len, NULL, 0);
2381 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2385 /* blank or '*' mean the same */
2386 if (strcmp(buff, "*") == 0)
2389 /* handle the case of 'dont filter this module' */
2390 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2395 return match_records(hash, buff, strlen(buff), mod, not);
2399 * We register the module command as a template to show others how
2400 * to register a command as well.
2404 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2406 struct ftrace_ops *ops = &global_ops;
2407 struct ftrace_hash *hash;
2412 * cmd == 'mod' because we only registered this func
2413 * for the 'mod' ftrace_func_command.
2414 * But if you register one func with multiple commands,
2415 * you can tell which command was used by the cmd parameter.
2419 /* we must have a module name */
2423 mod = strsep(¶m, ":");
2428 hash = ops->filter_hash;
2430 hash = ops->notrace_hash;
2432 ret = ftrace_match_module_records(hash, func, mod);
2441 static struct ftrace_func_command ftrace_mod_cmd = {
2443 .func = ftrace_mod_callback,
2446 static int __init ftrace_mod_cmd_init(void)
2448 return register_ftrace_command(&ftrace_mod_cmd);
2450 device_initcall(ftrace_mod_cmd_init);
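/*
 * Example use of the command above through the set_ftrace_filter
 * debugfs file (see Documentation/trace/ftrace.txt):
 *
 *	echo ':mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'write*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first limits tracing to all functions in the ext3 module, the
 * second to ext3 functions whose names start with "write".
 */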
2453 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2455 struct ftrace_func_probe *entry;
2456 struct hlist_head *hhd;
2457 struct hlist_node *n;
2460 key = hash_long(ip, FTRACE_HASH_BITS);
2462 hhd = &ftrace_func_hash[key];
2464 if (hlist_empty(hhd))
2468 * Disable preemption for these calls to prevent an RCU grace
2469 * period from elapsing. This syncs the hash iteration with the freeing
2470 * of items on the hash. rcu_read_lock() is too dangerous here.
2472 preempt_disable_notrace();
2473 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2474 if (entry->ip == ip)
2475 entry->ops->func(ip, parent_ip, &entry->data);
2477 preempt_enable_notrace();
2480 static struct ftrace_ops trace_probe_ops __read_mostly =
2482 .func = function_trace_probe_call,
2485 static int ftrace_probe_registered;
2487 static void __enable_ftrace_function_probe(void)
2492 if (ftrace_probe_registered)
2495 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2496 struct hlist_head *hhd = &ftrace_func_hash[i];
2500 /* Nothing registered? */
2501 if (i == FTRACE_FUNC_HASHSIZE)
2504 ret = __register_ftrace_function(&trace_probe_ops);
2506 ret = ftrace_startup(&trace_probe_ops, 0);
2508 ftrace_probe_registered = 1;
2511 static void __disable_ftrace_function_probe(void)
2516 if (!ftrace_probe_registered)
2519 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2520 struct hlist_head *hhd = &ftrace_func_hash[i];
2525 /* no more funcs left */
2526 ret = __unregister_ftrace_function(&trace_probe_ops);
2528 ftrace_shutdown(&trace_probe_ops, 0);
2530 ftrace_probe_registered = 0;
2534 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2536 struct ftrace_func_probe *entry =
2537 container_of(rhp, struct ftrace_func_probe, rcu);
2539 if (entry->ops->free)
2540 entry->ops->free(&entry->data);
2546 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2549 struct ftrace_func_probe *entry;
2550 struct ftrace_page *pg;
2551 struct dyn_ftrace *rec;
2557 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
2558 len = strlen(search);
2560 /* we do not support '!' for function probes */
2564 mutex_lock(&ftrace_lock);
2566 if (unlikely(ftrace_disabled))
2569 do_for_each_ftrace_rec(pg, rec) {
2571 if (!ftrace_match_record(rec, NULL, search, len, type))
2574 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2576 /* If we did not process any, then return error */
2587 * The caller might want to do something special
2588 * for each function we find. We call the callback
2589 * to give the caller an opportunity to do so.
2591 if (ops->callback) {
2592 if (ops->callback(rec->ip, &entry->data) < 0) {
2593 /* caller does not like this func */
2600 entry->ip = rec->ip;
2602 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2603 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2605 } while_for_each_ftrace_rec();
2606 __enable_ftrace_function_probe();
2609 mutex_unlock(&ftrace_lock);
2615 PROBE_TEST_FUNC = 1,
2620 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2621 void *data, int flags)
2623 struct ftrace_func_probe *entry;
2624 struct hlist_node *n, *tmp;
2625 char str[KSYM_SYMBOL_LEN];
2626 int type = MATCH_FULL;
2630 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2635 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
2636 len = strlen(search);
2638 /* we do not support '!' for function probes */
2643 mutex_lock(&ftrace_lock);
2644 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2645 struct hlist_head *hhd = &ftrace_func_hash[i];
2647 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2649 /* break up if statements for readability */
2650 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2653 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2656 /* do this last, since it is the most expensive */
2658 kallsyms_lookup(entry->ip, NULL, NULL,
2660 if (!ftrace_match(str, glob, len, type))
2664 hlist_del(&entry->node);
2665 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2668 __disable_ftrace_function_probe();
2669 mutex_unlock(&ftrace_lock);
2673 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2676 __unregister_ftrace_function_probe(glob, ops, data,
2677 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2681 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2683 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2686 void unregister_ftrace_function_probe_all(char *glob)
2688 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2691 static LIST_HEAD(ftrace_commands);
2692 static DEFINE_MUTEX(ftrace_cmd_mutex);
2694 int register_ftrace_command(struct ftrace_func_command *cmd)
2696 struct ftrace_func_command *p;
2699 mutex_lock(&ftrace_cmd_mutex);
2700 list_for_each_entry(p, &ftrace_commands, list) {
2701 if (strcmp(cmd->name, p->name) == 0) {
2706 list_add(&cmd->list, &ftrace_commands);
2708 mutex_unlock(&ftrace_cmd_mutex);
2713 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2715 struct ftrace_func_command *p, *n;
2718 mutex_lock(&ftrace_cmd_mutex);
2719 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2720 if (strcmp(cmd->name, p->name) == 0) {
2722 list_del_init(&p->list);
2727 mutex_unlock(&ftrace_cmd_mutex);
2732 static int ftrace_process_regex(struct ftrace_hash *hash,
2733 char *buff, int len, int enable)
2735 char *func, *command, *next = buff;
2736 struct ftrace_func_command *p;
2739 func = strsep(&next, ":");
2742 ret = ftrace_match_records(hash, func, len);
2752 command = strsep(&next, ":");
2754 mutex_lock(&ftrace_cmd_mutex);
2755 list_for_each_entry(p, &ftrace_commands, list) {
2756 if (strcmp(p->name, command) == 0) {
2757 ret = p->func(func, command, next, enable);
2762 mutex_unlock(&ftrace_cmd_mutex);
2768 ftrace_regex_write(struct file *file, const char __user *ubuf,
2769 size_t cnt, loff_t *ppos, int enable)
2771 struct ftrace_iterator *iter;
2772 struct trace_parser *parser;
2778 mutex_lock(&ftrace_regex_lock);
2781 if (unlikely(ftrace_disabled))
2784 if (file->f_mode & FMODE_READ) {
2785 struct seq_file *m = file->private_data;
2788 iter = file->private_data;
2790 parser = &iter->parser;
2791 read = trace_get_user(parser, ubuf, cnt, ppos);
2793 if (read >= 0 && trace_parser_loaded(parser) &&
2794 !trace_parser_cont(parser)) {
2795 ret = ftrace_process_regex(iter->hash, parser->buffer,
2796 parser->idx, enable);
2797 trace_parser_clear(parser);
2804 mutex_unlock(&ftrace_regex_lock);
2810 ftrace_filter_write(struct file *file, const char __user *ubuf,
2811 size_t cnt, loff_t *ppos)
2813 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2817 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2818 size_t cnt, loff_t *ppos)
2820 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2824 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2825 int reset, int enable)
2827 struct ftrace_hash **orig_hash;
2828 struct ftrace_hash *hash;
2831 /* All global ops uses the global ops filters */
2832 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2835 if (unlikely(ftrace_disabled))
2839 orig_hash = &ops->filter_hash;
2841 orig_hash = &ops->notrace_hash;
2843 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2847 mutex_lock(&ftrace_regex_lock);
2849 ftrace_filter_reset(hash);
2851 ftrace_match_records(hash, buf, len);
2853 mutex_lock(&ftrace_lock);
2854 ret = ftrace_hash_move(orig_hash, hash);
2855 mutex_unlock(&ftrace_lock);
2857 mutex_unlock(&ftrace_regex_lock);
2859 free_ftrace_hash(hash);
2864 * ftrace_set_filter - set a function to filter on in ftrace
2865 * @ops - the ops to set the filter with
2866 * @buf - the string that holds the function filter text.
2867 * @len - the length of the string.
2868 * @reset - non zero to reset all filters before applying this filter.
2870 * Filters denote which functions should be enabled when tracing is enabled.
2871 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2873 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2876 ftrace_set_regex(ops, buf, len, reset, 1);
2878 EXPORT_SYMBOL_GPL(ftrace_set_filter);
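/*
 * Illustrative in-kernel usage (hypothetical ops and buffers, not from
 * this file): restrict a registered ftrace_ops to a few functions
 * before enabling it.  Note the buffer must be writable, since the
 * pattern is parsed in place.
 *
 *	char f1[] = "kfree";
 *	char f2[] = "kmalloc*";
 *
 *	ftrace_set_filter(&my_ops, f1, strlen(f1), 1);
 *	ftrace_set_filter(&my_ops, f2, strlen(f2), 0);
 *	register_ftrace_function(&my_ops);
 *
 * The first call resets any previous filter, the second appends a
 * wildcard pattern.
 */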
2881 * ftrace_set_notrace - set a function to not trace in ftrace
2882 * @ops - the ops to set the notrace filter with
2883 * @buf - the string that holds the function notrace text.
2884 * @len - the length of the string.
2885 * @reset - non zero to reset all filters before applying this filter.
2887 * Notrace Filters denote which functions should not be enabled when tracing
2888 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
2891 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2894 ftrace_set_regex(ops, buf, len, reset, 0);
2896 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2898 * ftrace_set_global_filter - set a function to filter on with the global tracer
2900 * @buf - the string that holds the function filter text.
2901 * @len - the length of the string.
2902 * @reset - non zero to reset all filters before applying this filter.
2904 * Filters denote which functions should be enabled when tracing is enabled.
2905 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2907 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2909 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2911 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2914 * ftrace_set_global_notrace - set a function to not trace with the global tracer
2916 * @buf - the string that holds the function notrace text.
2917 * @len - the length of the string.
2918 * @reset - non zero to reset all filters before applying this filter.
2920 * Notrace Filters denote which functions should not be enabled when tracing
2921 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
2924 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2926 ftrace_set_regex(&global_ops, buf, len, reset, 0);
2928 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2931 * command-line interface to allow users to set filters at boot time.
2933 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2934 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2935 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2937 static int __init set_ftrace_notrace(char *str)
2939 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2942 __setup("ftrace_notrace=", set_ftrace_notrace);
2944 static int __init set_ftrace_filter(char *str)
2946 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2949 __setup("ftrace_filter=", set_ftrace_filter);
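/*
 * Example boot parameters consuming the buffers above (comma-separated
 * patterns, applied by set_ftrace_early_filters() once ftrace is up):
 *
 *	ftrace_filter=kmem_cache_*,kfree
 *	ftrace_notrace=rcu_read_lock,rcu_read_unlock
 */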
2951 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2952 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2953 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2955 static int __init set_graph_function(char *str)
2957 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2960 __setup("ftrace_graph_filter=", set_graph_function);
2962 static void __init set_ftrace_early_graph(char *buf)
2968 func = strsep(&buf, ",");
2969 /* we allow only one expression at a time */
2970 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2973 printk(KERN_DEBUG "ftrace: function %s not "
2974 "traceable\n", func);
2977 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2980 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2985 func = strsep(&buf, ",");
2986 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2990 static void __init set_ftrace_early_filters(void)
2992 if (ftrace_filter_buf[0])
2993 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2994 if (ftrace_notrace_buf[0])
2995 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2996 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2997 if (ftrace_graph_buf[0])
2998 set_ftrace_early_graph(ftrace_graph_buf);
2999 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
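/*
 * Illustrative boot command line using the parameters above (the function
 * names are arbitrary examples).  ftrace_filter= and ftrace_notrace= take a
 * comma-separated list of glob expressions; ftrace_graph_filter= is parsed
 * the same way, one expression at a time:
 *
 *	ftrace_filter=kmem_cache_*,kmalloc ftrace_notrace=*spin_lock*
 *	ftrace_graph_filter=do_IRQ
 */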
3003 ftrace_regex_release(struct inode *inode, struct file *file)
3005 struct seq_file *m = (struct seq_file *)file->private_data;
3006 struct ftrace_iterator *iter;
3007 struct ftrace_hash **orig_hash;
3008 struct trace_parser *parser;
3012 mutex_lock(&ftrace_regex_lock);
3013 if (file->f_mode & FMODE_READ) {
3016 seq_release(inode, file);
3018 iter = file->private_data;
3020 parser = &iter->parser;
3021 if (trace_parser_loaded(parser)) {
3022 parser->buffer[parser->idx] = 0;
3023 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3026 trace_parser_put(parser);
3028 if (file->f_mode & FMODE_WRITE) {
3029 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3032 orig_hash = &iter->ops->filter_hash;
3034 orig_hash = &iter->ops->notrace_hash;
3036 mutex_lock(&ftrace_lock);
3038 * Remove the current set, update the hash and add the new one.
3041 ftrace_hash_rec_disable(iter->ops, filter_hash);
3042 ret = ftrace_hash_move(orig_hash, iter->hash);
3044 ftrace_hash_rec_enable(iter->ops, filter_hash);
3045 if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3047 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3049 mutex_unlock(&ftrace_lock);
3051 free_ftrace_hash(iter->hash);
3054 mutex_unlock(&ftrace_regex_lock);
3058 static const struct file_operations ftrace_avail_fops = {
3059 .open = ftrace_avail_open,
3061 .llseek = seq_lseek,
3062 .release = seq_release_private,
3065 static const struct file_operations ftrace_enabled_fops = {
3066 .open = ftrace_enabled_open,
3068 .llseek = seq_lseek,
3069 .release = seq_release_private,
3072 static const struct file_operations ftrace_filter_fops = {
3073 .open = ftrace_filter_open,
3075 .write = ftrace_filter_write,
3076 .llseek = ftrace_regex_lseek,
3077 .release = ftrace_regex_release,
3080 static const struct file_operations ftrace_notrace_fops = {
3081 .open = ftrace_notrace_open,
3083 .write = ftrace_notrace_write,
3084 .llseek = ftrace_regex_lseek,
3085 .release = ftrace_regex_release,
3088 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3090 static DEFINE_MUTEX(graph_lock);
3092 int ftrace_graph_count;
3093 int ftrace_graph_filter_enabled;
3094 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3097 __g_next(struct seq_file *m, loff_t *pos)
3099 if (*pos >= ftrace_graph_count)
3101 return &ftrace_graph_funcs[*pos];
3105 g_next(struct seq_file *m, void *v, loff_t *pos)
3108 return __g_next(m, pos);
3111 static void *g_start(struct seq_file *m, loff_t *pos)
3113 mutex_lock(&graph_lock);
3115 /* Nothing to iterate over; tell g_show to print that all functions are enabled */
3116 if (!ftrace_graph_filter_enabled && !*pos)
3119 return __g_next(m, pos);
3122 static void g_stop(struct seq_file *m, void *p)
3124 mutex_unlock(&graph_lock);
3127 static int g_show(struct seq_file *m, void *v)
3129 unsigned long *ptr = v;
3134 if (ptr == (unsigned long *)1) {
3135 seq_printf(m, "#### all functions enabled ####\n");
3139 seq_printf(m, "%ps\n", (void *)*ptr);
3144 static const struct seq_operations ftrace_graph_seq_ops = {
3152 ftrace_graph_open(struct inode *inode, struct file *file)
3156 if (unlikely(ftrace_disabled))
3159 mutex_lock(&graph_lock);
3160 if ((file->f_mode & FMODE_WRITE) &&
3161 (file->f_flags & O_TRUNC)) {
3162 ftrace_graph_filter_enabled = 0;
3163 ftrace_graph_count = 0;
3164 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3166 mutex_unlock(&graph_lock);
3168 if (file->f_mode & FMODE_READ)
3169 ret = seq_open(file, &ftrace_graph_seq_ops);
3175 ftrace_graph_release(struct inode *inode, struct file *file)
3177 if (file->f_mode & FMODE_READ)
3178 seq_release(inode, file);
3183 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3185 struct dyn_ftrace *rec;
3186 struct ftrace_page *pg;
3195 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3196 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3199 search_len = strlen(search);
3201 mutex_lock(&ftrace_lock);
3203 if (unlikely(ftrace_disabled)) {
3204 mutex_unlock(&ftrace_lock);
3208 do_for_each_ftrace_rec(pg, rec) {
3210 if (rec->flags & FTRACE_FL_FREE)
3213 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3214 /* if it is in the array */
3216 for (i = 0; i < *idx; i++) {
3217 if (array[i] == rec->ip) {
3226 array[(*idx)++] = rec->ip;
3227 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3232 array[i] = array[--(*idx)];
3238 } while_for_each_ftrace_rec();
3240 mutex_unlock(&ftrace_lock);
3245 ftrace_graph_filter_enabled = 1;
3250 ftrace_graph_write(struct file *file, const char __user *ubuf,
3251 size_t cnt, loff_t *ppos)
3253 struct trace_parser parser;
3259 mutex_lock(&graph_lock);
3261 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3266 read = trace_get_user(&parser, ubuf, cnt, ppos);
3268 if (read >= 0 && trace_parser_loaded((&parser))) {
3269 parser.buffer[parser.idx] = 0;
3271 /* we allow only one expression at a time */
3272 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3281 trace_parser_put(&parser);
3283 mutex_unlock(&graph_lock);
3288 static const struct file_operations ftrace_graph_fops = {
3289 .open = ftrace_graph_open,
3291 .write = ftrace_graph_write,
3292 .release = ftrace_graph_release,
3293 .llseek = seq_lseek,
3295 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3297 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3300 trace_create_file("available_filter_functions", 0444,
3301 d_tracer, NULL, &ftrace_avail_fops);
3303 trace_create_file("enabled_functions", 0444,
3304 d_tracer, NULL, &ftrace_enabled_fops);
3306 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3307 NULL, &ftrace_filter_fops);
3309 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3310 NULL, &ftrace_notrace_fops);
3312 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3313 trace_create_file("set_graph_function", 0444, d_tracer,
3315 &ftrace_graph_fops);
3316 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
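/*
 * Illustrative use of the files created above from user space (assumes
 * debugfs is mounted at /sys/kernel/debug; the function names are arbitrary
 * examples):
 *
 *	# list every function that can be filtered on
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	# trace only the scheduler wakeup path
 *	echo 'sched_wakeup*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# never trace the spinlock helpers
 *	echo '*spin_lock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	# limit the function graph tracer to a single function
 *	echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 */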
3321 static int ftrace_process_locs(struct module *mod,
3322 unsigned long *start,
3328 mutex_lock(&ftrace_lock);
3331 addr = ftrace_call_adjust(*p++);
3333 * Some architecture linkers will pad between
3334 * the different mcount_loc sections of different
3335 * object files to satisfy alignments.
3336 * Skip any NULL pointers.
3340 ftrace_record_ip(addr);
3343 ftrace_update_code(mod);
3344 mutex_unlock(&ftrace_lock);
3349 #ifdef CONFIG_MODULES
3350 void ftrace_release_mod(struct module *mod)
3352 struct dyn_ftrace *rec;
3353 struct ftrace_page *pg;
3355 mutex_lock(&ftrace_lock);
3357 if (ftrace_disabled)
3360 do_for_each_ftrace_rec(pg, rec) {
3361 if (within_module_core(rec->ip, mod)) {
3363 * rec->ip is changed in ftrace_free_rec(), so it should
3364 * no longer fall within the module's range if the record was freed.
3366 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3367 ftrace_free_rec(rec);
3369 } while_for_each_ftrace_rec();
3371 mutex_unlock(&ftrace_lock);
3374 static void ftrace_init_module(struct module *mod,
3375 unsigned long *start, unsigned long *end)
3377 if (ftrace_disabled || start == end)
3379 ftrace_process_locs(mod, start, end);
3382 static int ftrace_module_notify(struct notifier_block *self,
3383 unsigned long val, void *data)
3385 struct module *mod = data;
3388 case MODULE_STATE_COMING:
3389 ftrace_init_module(mod, mod->ftrace_callsites,
3390 mod->ftrace_callsites +
3391 mod->num_ftrace_callsites);
3393 case MODULE_STATE_GOING:
3394 ftrace_release_mod(mod);
3401 static int ftrace_module_notify(struct notifier_block *self,
3402 unsigned long val, void *data)
3406 #endif /* CONFIG_MODULES */
3408 struct notifier_block ftrace_module_nb = {
3409 .notifier_call = ftrace_module_notify,
3413 extern unsigned long __start_mcount_loc[];
3414 extern unsigned long __stop_mcount_loc[];
3416 void __init ftrace_init(void)
3418 unsigned long count, addr, flags;
3421 /* Keep the ftrace pointer to the stub */
3422 addr = (unsigned long)ftrace_stub;
3424 local_irq_save(flags);
3425 ftrace_dyn_arch_init(&addr);
3426 local_irq_restore(flags);
3428 /* ftrace_dyn_arch_init places the return code in addr */
3432 count = __stop_mcount_loc - __start_mcount_loc;
3434 ret = ftrace_dyn_table_alloc(count);
3438 last_ftrace_enabled = ftrace_enabled = 1;
3440 ret = ftrace_process_locs(NULL,
3444 ret = register_module_notifier(&ftrace_module_nb);
3446 pr_warning("Failed to register trace ftrace module notifier\n");
3448 set_ftrace_early_filters();
3452 ftrace_disabled = 1;
3457 static struct ftrace_ops global_ops = {
3458 .func = ftrace_stub,
3461 static int __init ftrace_nodyn_init(void)
3466 device_initcall(ftrace_nodyn_init);
3468 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3469 static inline void ftrace_startup_enable(int command) { }
3470 /* Keep as macros so we do not need to define the commands */
3471 # define ftrace_startup(ops, command) \
3473 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3476 # define ftrace_shutdown(ops, command) do { } while (0)
3477 # define ftrace_startup_sysctl() do { } while (0)
3478 # define ftrace_shutdown_sysctl() do { } while (0)
3481 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3486 #endif /* CONFIG_DYNAMIC_FTRACE */
3489 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3491 struct ftrace_ops *op;
3494 * Some of the ops may be dynamically allocated;
3495 * they must be freed only after a synchronize_sched().
3497 preempt_disable_notrace();
3498 op = rcu_dereference_raw(ftrace_ops_list);
3499 while (op != &ftrace_list_end) {
3500 if (ftrace_ops_test(op, ip))
3501 op->func(ip, parent_ip);
3502 op = rcu_dereference_raw(op->next);
3504 preempt_enable_notrace();
3507 static void clear_ftrace_swapper(void)
3509 struct task_struct *p;
3513 for_each_online_cpu(cpu) {
3515 clear_tsk_trace_trace(p);
3520 static void set_ftrace_swapper(void)
3522 struct task_struct *p;
3526 for_each_online_cpu(cpu) {
3528 set_tsk_trace_trace(p);
3533 static void clear_ftrace_pid(struct pid *pid)
3535 struct task_struct *p;
3538 do_each_pid_task(pid, PIDTYPE_PID, p) {
3539 clear_tsk_trace_trace(p);
3540 } while_each_pid_task(pid, PIDTYPE_PID, p);
3546 static void set_ftrace_pid(struct pid *pid)
3548 struct task_struct *p;
3551 do_each_pid_task(pid, PIDTYPE_PID, p) {
3552 set_tsk_trace_trace(p);
3553 } while_each_pid_task(pid, PIDTYPE_PID, p);
3557 static void clear_ftrace_pid_task(struct pid *pid)
3559 if (pid == ftrace_swapper_pid)
3560 clear_ftrace_swapper();
3562 clear_ftrace_pid(pid);
3565 static void set_ftrace_pid_task(struct pid *pid)
3567 if (pid == ftrace_swapper_pid)
3568 set_ftrace_swapper();
3570 set_ftrace_pid(pid);
3573 static int ftrace_pid_add(int p)
3576 struct ftrace_pid *fpid;
3579 mutex_lock(&ftrace_lock);
3582 pid = ftrace_swapper_pid;
3584 pid = find_get_pid(p);
3591 list_for_each_entry(fpid, &ftrace_pids, list)
3592 if (fpid->pid == pid)
3597 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3601 list_add(&fpid->list, &ftrace_pids);
3604 set_ftrace_pid_task(pid);
3606 ftrace_update_pid_func();
3607 ftrace_startup_enable(0);
3609 mutex_unlock(&ftrace_lock);
3613 if (pid != ftrace_swapper_pid)
3617 mutex_unlock(&ftrace_lock);
3621 static void ftrace_pid_reset(void)
3623 struct ftrace_pid *fpid, *safe;
3625 mutex_lock(&ftrace_lock);
3626 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3627 struct pid *pid = fpid->pid;
3629 clear_ftrace_pid_task(pid);
3631 list_del(&fpid->list);
3635 ftrace_update_pid_func();
3636 ftrace_startup_enable(0);
3638 mutex_unlock(&ftrace_lock);
3641 static void *fpid_start(struct seq_file *m, loff_t *pos)
3643 mutex_lock(&ftrace_lock);
3645 if (list_empty(&ftrace_pids) && (!*pos))
3648 return seq_list_start(&ftrace_pids, *pos);
3651 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3656 return seq_list_next(v, &ftrace_pids, pos);
3659 static void fpid_stop(struct seq_file *m, void *p)
3661 mutex_unlock(&ftrace_lock);
3664 static int fpid_show(struct seq_file *m, void *v)
3666 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3668 if (v == (void *)1) {
3669 seq_printf(m, "no pid\n");
3673 if (fpid->pid == ftrace_swapper_pid)
3674 seq_printf(m, "swapper tasks\n");
3676 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3681 static const struct seq_operations ftrace_pid_sops = {
3682 .start = fpid_start,
3689 ftrace_pid_open(struct inode *inode, struct file *file)
3693 if ((file->f_mode & FMODE_WRITE) &&
3694 (file->f_flags & O_TRUNC))
3697 if (file->f_mode & FMODE_READ)
3698 ret = seq_open(file, &ftrace_pid_sops);
3704 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3705 size_t cnt, loff_t *ppos)
3711 if (cnt >= sizeof(buf))
3714 if (copy_from_user(&buf, ubuf, cnt))
3720 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3721 * to clear the filter quietly.
3723 tmp = strstrip(buf);
3724 if (strlen(tmp) == 0)
3727 ret = strict_strtol(tmp, 10, &val);
3731 ret = ftrace_pid_add(val);
3733 return ret ? ret : cnt;
3737 ftrace_pid_release(struct inode *inode, struct file *file)
3739 if (file->f_mode & FMODE_READ)
3740 seq_release(inode, file);
3745 static const struct file_operations ftrace_pid_fops = {
3746 .open = ftrace_pid_open,
3747 .write = ftrace_pid_write,
3749 .llseek = seq_lseek,
3750 .release = ftrace_pid_release,
3753 static __init int ftrace_init_debugfs(void)
3755 struct dentry *d_tracer;
3757 d_tracer = tracing_init_dentry();
3761 ftrace_init_dyn_debugfs(d_tracer);
3763 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3764 NULL, &ftrace_pid_fops);
3766 ftrace_profile_debugfs(d_tracer);
3770 fs_initcall(ftrace_init_debugfs);
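/*
 * Illustrative use of the set_ftrace_pid file created above (pid 1234 is an
 * arbitrary example):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid	# trace only pid 1234
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid		# clear the pid filter
 */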
3773 * ftrace_kill - kill ftrace
3775 * This function should be used by panic code. It stops ftrace
3776 * but in a not so nice way: tracing is disabled immediately with no
3777 * cleanup, so it is meant for emergency use only.
3779 void ftrace_kill(void)
3781 ftrace_disabled = 1;
3783 clear_ftrace_function();
3787 * register_ftrace_function - register a function for profiling
3788 * @ops - ops structure that holds the function for profiling.
3790 * Register a function to be called by all functions in the kernel.
3793 * Note: @ops->func and all the functions it calls must be labeled
3794 * with "notrace", otherwise it will go into a recursive loop.
3797 int register_ftrace_function(struct ftrace_ops *ops)
3801 mutex_lock(&ftrace_lock);
3803 if (unlikely(ftrace_disabled))
3806 ret = __register_ftrace_function(ops);
3808 ret = ftrace_startup(ops, 0);
3812 mutex_unlock(&ftrace_lock);
3815 EXPORT_SYMBOL_GPL(register_ftrace_function);
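/*
 * Minimal registration sketch (hypothetical names, not kernel symbols),
 * following the note above: the callback is marked notrace so it is never
 * traced itself.
 */
#if 0	/* illustrative only */
static void notrace my_probe_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip is its call site */
}

static struct ftrace_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	return register_ftrace_function(&my_probe_ops);
}

static void __exit my_probe_exit(void)
{
	unregister_ftrace_function(&my_probe_ops);
}
#endif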
3818 * unregister_ftrace_function - unregister a function for profiling.
3819 * @ops - ops structure that holds the function to unregister
3821 * Unregister a function that was added to be called by ftrace profiling.
3823 int unregister_ftrace_function(struct ftrace_ops *ops)
3827 mutex_lock(&ftrace_lock);
3828 ret = __unregister_ftrace_function(ops);
3830 ftrace_shutdown(ops, 0);
3831 mutex_unlock(&ftrace_lock);
3835 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3838 ftrace_enable_sysctl(struct ctl_table *table, int write,
3839 void __user *buffer, size_t *lenp,
3844 mutex_lock(&ftrace_lock);
3846 if (unlikely(ftrace_disabled))
3849 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3851 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3854 last_ftrace_enabled = !!ftrace_enabled;
3856 if (ftrace_enabled) {
3858 ftrace_startup_sysctl();
3860 /* we are starting ftrace again */
3861 if (ftrace_ops_list != &ftrace_list_end) {
3862 if (ftrace_ops_list->next == &ftrace_list_end)
3863 ftrace_trace_function = ftrace_ops_list->func;
3865 ftrace_trace_function = ftrace_ops_list_func;
3869 /* stopping ftrace calls (just send to ftrace_stub) */
3870 ftrace_trace_function = ftrace_stub;
3872 ftrace_shutdown_sysctl();
3876 mutex_unlock(&ftrace_lock);
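/*
 * The handler above backs the ftrace_enabled sysctl, typically exposed as
 * /proc/sys/kernel/ftrace_enabled:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	# route calls to ftrace_stub
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	# restore registered callbacks
 */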
3880 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3882 static int ftrace_graph_active;
3883 static struct notifier_block ftrace_suspend_notifier;
3885 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3890 /* The callbacks that hook a function */
3891 trace_func_graph_ret_t ftrace_graph_return =
3892 (trace_func_graph_ret_t)ftrace_stub;
3893 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3895 /* Try to assign a return stack to each of up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3896 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3900 unsigned long flags;
3901 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3902 struct task_struct *g, *t;
3904 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3905 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3906 * sizeof(struct ftrace_ret_stack),
3908 if (!ret_stack_list[i]) {
3916 read_lock_irqsave(&tasklist_lock, flags);
3917 do_each_thread(g, t) {
3923 if (t->ret_stack == NULL) {
3924 atomic_set(&t->tracing_graph_pause, 0);
3925 atomic_set(&t->trace_overrun, 0);
3926 t->curr_ret_stack = -1;
3927 /* Make sure the tasks see the -1 first: */
3929 t->ret_stack = ret_stack_list[start++];
3931 } while_each_thread(g, t);
3934 read_unlock_irqrestore(&tasklist_lock, flags);
3936 for (i = start; i < end; i++)
3937 kfree(ret_stack_list[i]);
3942 ftrace_graph_probe_sched_switch(void *ignore,
3943 struct task_struct *prev, struct task_struct *next)
3945 unsigned long long timestamp;
3949 * Does the user want to count the time a function was asleep?
3950 * If so, do not update the time stamps.
3952 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3955 timestamp = trace_clock_local();
3957 prev->ftrace_timestamp = timestamp;
3959 /* only process tasks that we timestamped */
3960 if (!next->ftrace_timestamp)
3964 * Update all the counters in next to make up for the
3965 * time next was sleeping.
3967 timestamp -= next->ftrace_timestamp;
3969 for (index = next->curr_ret_stack; index >= 0; index--)
3970 next->ret_stack[index].calltime += timestamp;
3973 /* Allocate a return stack for each task */
3974 static int start_graph_tracing(void)
3976 struct ftrace_ret_stack **ret_stack_list;
3979 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3980 sizeof(struct ftrace_ret_stack *),
3983 if (!ret_stack_list)
3986 /* The boot CPU's init_task ret_stack will never be freed */
3987 for_each_online_cpu(cpu) {
3988 if (!idle_task(cpu)->ret_stack)
3989 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3993 ret = alloc_retstack_tasklist(ret_stack_list);
3994 } while (ret == -EAGAIN);
3997 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3999 pr_info("ftrace_graph: Couldn't activate tracepoint"
4000 " probe to kernel_sched_switch\n");
4003 kfree(ret_stack_list);
4008 * Hibernation protection.
4009 * The state of the current task is too unstable during
4010 * suspend/restore to disk. We want to protect against that.
4013 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4017 case PM_HIBERNATION_PREPARE:
4018 pause_graph_tracing();
4021 case PM_POST_HIBERNATION:
4022 unpause_graph_tracing();
4028 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4029 trace_func_graph_ent_t entryfunc)
4033 mutex_lock(&ftrace_lock);
4035 /* we currently allow only one tracer registered at a time */
4036 if (ftrace_graph_active) {
4041 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4042 register_pm_notifier(&ftrace_suspend_notifier);
4044 ftrace_graph_active++;
4045 ret = start_graph_tracing();
4047 ftrace_graph_active--;
4051 ftrace_graph_return = retfunc;
4052 ftrace_graph_entry = entryfunc;
4054 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4057 mutex_unlock(&ftrace_lock);
4061 void unregister_ftrace_graph(void)
4063 mutex_lock(&ftrace_lock);
4065 if (unlikely(!ftrace_graph_active))
4068 ftrace_graph_active--;
4069 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4070 ftrace_graph_entry = ftrace_graph_entry_stub;
4071 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4072 unregister_pm_notifier(&ftrace_suspend_notifier);
4073 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4076 mutex_unlock(&ftrace_lock);
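/*
 * Registration sketch for the function graph tracer (hypothetical names).
 * The entry callback decides, per call, whether the return side should be
 * recorded; the return callback sees the matching ftrace_graph_ret.
 */
#if 0	/* illustrative only */
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: record this call and hook its return */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called when the traced function returns */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}
#endif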
4079 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4082 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4084 atomic_set(&t->tracing_graph_pause, 0);
4085 atomic_set(&t->trace_overrun, 0);
4086 t->ftrace_timestamp = 0;
4087 /* make curr_ret_stack visible before we add the ret_stack */
4089 t->ret_stack = ret_stack;
4093 * Allocate a return stack for the idle task. This may be the first
4094 * time through, or it may be called again when a CPU is brought online.
4096 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4098 t->curr_ret_stack = -1;
4100 * The idle task has no parent; it either has its own
4101 * stack or no stack at all.
4104 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4106 if (ftrace_graph_active) {
4107 struct ftrace_ret_stack *ret_stack;
4109 ret_stack = per_cpu(idle_ret_stack, cpu);
4111 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4112 * sizeof(struct ftrace_ret_stack),
4116 per_cpu(idle_ret_stack, cpu) = ret_stack;
4118 graph_init_task(t, ret_stack);
4122 /* Allocate a return stack for newly created task */
4123 void ftrace_graph_init_task(struct task_struct *t)
4125 /* Make sure we do not use the parent ret_stack */
4126 t->ret_stack = NULL;
4127 t->curr_ret_stack = -1;
4129 if (ftrace_graph_active) {
4130 struct ftrace_ret_stack *ret_stack;
4132 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4133 * sizeof(struct ftrace_ret_stack),
4137 graph_init_task(t, ret_stack);
4141 void ftrace_graph_exit_task(struct task_struct *t)
4143 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4145 t->ret_stack = NULL;
4146 /* NULL must become visible to IRQs before we free it: */
4152 void ftrace_graph_stop(void)