ftrace: Use the rcu _notrace variants for rcu_dereference_raw() and friends
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b3fde6d7b7fc47683244a5a432ab35a543551499..6c508ff33c6206df8e028e1eab43e913565f927e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)       \
+       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
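
For statically defined ops, the new macro supplies a build-time mutex
initializer; the hunks below use it to annotate global_ops, control_ops and
the profile ops. A minimal sketch of the pattern (foo_ops and foo_func are
hypothetical):

	static struct ftrace_ops foo_ops = {
		.func	= foo_func,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
		INIT_REGEX_LOCK(foo_ops)
	};
	/* Under CONFIG_DYNAMIC_FTRACE the macro line expands to:
	 *	.regex_lock = __MUTEX_INITIALIZER(foo_ops.regex_lock),
	 * and to nothing otherwise, matching the empty #else definition above.
	 * Setting FTRACE_OPS_FL_INITIALIZED alongside it keeps the lazy
	 * ftrace_ops_init() path (added below) from re-initializing the mutex.
	 */
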
@@ -113,24 +120,34 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw(list);                 \
+       op = rcu_dereference_raw_notrace(list);                 \
        do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)                           \
-       while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+       while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
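
The two macros pair up as a do/while loop. A condensed sketch of a caller,
modeled on __ftrace_ops_list_func() in this file (the body is illustrative;
ip, parent_ip and regs come from the tracer hook):

	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* visits each ops until op->next reaches &ftrace_list_end */
		if (ftrace_ops_test(op, ip))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
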
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+               mutex_init(&ops->regex_lock);
+               ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+       }
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -486,7 +503,6 @@ struct ftrace_profile_stat {
 #define PROFILES_PER_PAGE                                      \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 
-static int ftrace_profile_bits __read_mostly;
 static int ftrace_profile_enabled __read_mostly;
 
 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
@@ -494,7 +510,8 @@ static DEFINE_MUTEX(ftrace_profile_lock);
 
 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 
-#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+#define FTRACE_PROFILE_HASH_BITS 10
+#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
 
 static void *
 function_stat_next(void *v, int idx)
@@ -676,7 +693,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 
        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 
-       for (i = 0; i < pages; i++) {
+       for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
@@ -724,13 +741,6 @@ static int ftrace_profile_init_cpu(int cpu)
        if (!stat->hash)
                return -ENOMEM;
 
-       if (!ftrace_profile_bits) {
-               size--;
-
-               for (; size; size >>= 1)
-                       ftrace_profile_bits++;
-       }
-
        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
@@ -763,13 +773,13 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
        struct hlist_head *hhd;
        unsigned long key;
 
-       key = hash_long(ip, ftrace_profile_bits);
+       key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];
 
        if (hlist_empty(hhd))
                return NULL;
 
-       hlist_for_each_entry_rcu(rec, hhd, node) {
+       hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }
@@ -782,7 +792,7 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 {
        unsigned long key;
 
-       key = hash_long(rec->ip, ftrace_profile_bits);
+       key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 }
 
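
Both call sites now derive the bucket identically, and at compile time: with
FTRACE_PROFILE_HASH_BITS == 10, hash_long() yields a key in [0, 1023] and the
table holds 1 << 10 == 1024 hlist heads, so a record inserted by
ftrace_add_profile() is always found again by ftrace_find_profiled_func() for
the same ip. The deleted ftrace_profile_bits loop computed that same bit count
at runtime from FTRACE_PROFILE_HASH_SIZE.
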
@@ -914,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1079,7 +1090,7 @@ struct ftrace_func_probe {
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
-       struct rcu_head         rcu;
+       struct list_head        free_list;
 };
 
 struct ftrace_func_entry {
@@ -1110,11 +1121,10 @@ static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
@@ -1155,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
        hhd = &hash->buckets[key];
 
-       hlist_for_each_entry_rcu(entry, hhd, hlist) {
+       hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
@@ -1254,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+       ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
 }
@@ -1329,7 +1340,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
-       unsigned long key;
        int size = src->count;
        int bits = 0;
        int ret;
@@ -1372,10 +1382,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
-                       if (bits > 0)
-                               key = hash_long(entry->ip, bits);
-                       else
-                               key = 0;
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
@@ -1416,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
        struct ftrace_hash *notrace_hash;
        int ret;
 
-       filter_hash = rcu_dereference_raw(ops->filter_hash);
-       notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+       filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+       notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2453,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
-                    !(rec->flags & ~FTRACE_FL_MASK))) {
+                    !(rec->flags & FTRACE_FL_ENABLED))) {
 
                        rec = NULL;
                        goto retry;
@@ -2636,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        int ret = 0;
 
+       ftrace_ops_init(ops);
+
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
@@ -2648,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                return -ENOMEM;
        }
 
+       iter->ops = ops;
+       iter->flags = flag;
+
+       mutex_lock(&ops->regex_lock);
+
        if (flag & FTRACE_ITER_NOTRACE)
                hash = ops->notrace_hash;
        else
                hash = ops->filter_hash;
 
-       iter->ops = ops;
-       iter->flags = flag;
-
        if (file->f_mode & FMODE_WRITE) {
-               mutex_lock(&ftrace_lock);
                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-               mutex_unlock(&ftrace_lock);
-
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_unlock;
                }
        }
 
-       mutex_lock(&ftrace_regex_lock);
-
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(iter->hash);
@@ -2689,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        } else
                file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+       mutex_unlock(&ops->regex_lock);
 
        return ret;
 }
@@ -2912,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
         * on the hash. rcu_read_lock is too dangerous here.
         */
        preempt_disable_notrace();
-       hlist_for_each_entry_rcu(entry, hhd, node) {
+       hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
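
The notrace preemption guards bracket the whole walk; schematically (the
enable side sits just past this hunk's context):

	preempt_disable_notrace();
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();

Disabling preemption holds off an RCU-sched grace period, which is what lets
the unregister path below free entries after synchronize_sched() rather than
through call_rcu_sched().
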
@@ -2922,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
+       .flags          = FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2931,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
        int ret;
        int i;
 
-       if (ftrace_probe_registered)
+       if (ftrace_probe_registered) {
+               /* still need to update the function call sites */
+               if (ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                return;
+       }
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2973,28 +2987,27 @@ static void __disable_ftrace_function_probe(void)
 }
 
 
-static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+static void ftrace_free_entry(struct ftrace_func_probe *entry)
 {
-       struct ftrace_func_probe *entry =
-               container_of(rhp, struct ftrace_func_probe, rcu);
-
        if (entry->ops->free)
-               entry->ops->free(&entry->data);
+               entry->ops->free(entry->ops, entry->ip, &entry->data);
        kfree(entry);
 }
 
-
 int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
        struct ftrace_func_probe *entry;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_hash *hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type, len, not;
        unsigned long key;
        int count = 0;
        char *search;
+       int ret;
 
        type = filter_parse_regex(glob, strlen(glob), &search, &not);
        len = strlen(search);
@@ -3003,10 +3016,20 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
+       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+       if (!hash) {
+               count = -ENOMEM;
+               goto out;
+       }
+
+       if (unlikely(ftrace_disabled)) {
+               count = -ENODEV;
+               goto out;
+       }
+
+       mutex_lock(&ftrace_lock);
 
        do_for_each_ftrace_rec(pg, rec) {
 
@@ -3030,14 +3053,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                 * for each function we find. We call the callback
                 * to give the caller an opportunity to do so.
                 */
-               if (ops->callback) {
-                       if (ops->callback(rec->ip, &entry->data) < 0) {
+               if (ops->init) {
+                       if (ops->init(ops, rec->ip, &entry->data) < 0) {
                                /* caller does not like this func */
                                kfree(entry);
                                continue;
                        }
                }
 
+               ret = enter_record(hash, rec, 0);
+               if (ret < 0) {
+                       kfree(entry);
+                       count = ret;
+                       goto out_unlock;
+               }
+
                entry->ops = ops;
                entry->ip = rec->ip;
 
@@ -3045,10 +3075,18 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
 
        } while_for_each_ftrace_rec();
+
+       ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+       if (ret < 0)
+               count = ret;
+
        __enable_ftrace_function_probe();
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
+ out:
+       mutex_unlock(&trace_probe_ops.regex_lock);
+       free_ftrace_hash(hash);
 
        return count;
 }
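
With the signature change visible above (init replaces the old callback, and
both init and free now receive the ops and the ip explicitly), a probe user
would look roughly like this (my_* names are hypothetical; the ops->func
prototype is unchanged):

	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
				  void **data)
	{
		/* runs from the function tracer for every matched ip */
	}

	static int my_probe_init(struct ftrace_probe_ops *ops,
				 unsigned long ip, void **data)
	{
		return 0;	/* a negative return skips this ip */
	}

	static void my_probe_free(struct ftrace_probe_ops *ops,
				  unsigned long ip, void **data)
	{
		/* release whatever my_probe_init attached to *data */
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func	= my_probe_func,
		.init	= my_probe_init,
		.free	= my_probe_free,
	};

	/* hook every function matching the glob */
	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
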
@@ -3062,7 +3100,12 @@ static void
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
 {
+       struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
+       struct ftrace_func_probe *p;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct list_head free_list;
+       struct ftrace_hash *hash;
        struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
@@ -3082,7 +3125,15 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
+
+       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+       if (!hash)
+               /* Hmm, should report this somehow */
+               goto out_unlock;
+
+       INIT_LIST_HEAD(&free_list);
+
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
 
@@ -3103,12 +3154,32 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                        continue;
                        }
 
+                       rec_entry = ftrace_lookup_ip(hash, entry->ip);
+                       /* It is possible more than one entry had this ip */
+                       if (rec_entry)
+                               free_hash_entry(hash, rec_entry);
+
                        hlist_del_rcu(&entry->node);
-                       call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
+                       list_add(&entry->free_list, &free_list);
                }
        }
+       mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
+       /*
+        * Remove after the disable is called. Otherwise, if the last
+        * probe is removed, a null hash means *all enabled*.
+        */
+       ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+       synchronize_sched();
+       list_for_each_entry_safe(entry, p, &free_list, free_list) {
+               list_del(&entry->free_list);
+               ftrace_free_entry(entry);
+       }
        mutex_unlock(&ftrace_lock);
+
+ out_unlock:
+       mutex_unlock(&trace_probe_ops.regex_lock);
+       free_ftrace_hash(hash);
 }
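
The ordering comment above matters because of how ftrace_ops_test() (earlier
hunk) treats hashes: an empty filter hash matches every ip. If the emptied
hash were moved in before __disable_ftrace_function_probe(), the still-live
trace_probe_ops would briefly trace everything. Hence the sequence: unhook
entries with hlist_del_rcu() onto free_list, disable the probe, commit the
shrunken hash, synchronize_sched() to wait out the preempt-disabled walkers
in function_trace_probe_call(), and only then free the entries.
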
 
 void
@@ -3217,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        if (!cnt)
                return 0;
 
-       mutex_lock(&ftrace_regex_lock);
-
-       ret = -ENODEV;
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;
 
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       /* iter->hash is a local copy, so we don't need regex_lock */
+
        parser = &iter->parser;
        read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3237,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                ret = ftrace_process_regex(iter->hash, parser->buffer,
                                           parser->idx, enable);
                trace_parser_clear(parser);
-               if (ret)
-                       goto out_unlock;
+               if (ret < 0)
+                       goto out;
        }
 
        ret = read;
-out_unlock:
-       mutex_unlock(&ftrace_regex_lock);
-
+ out:
        return ret;
 }
 
@@ -3296,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       mutex_lock(&ops->regex_lock);
+
        if (enable)
                orig_hash = &ops->filter_hash;
        else
                orig_hash = &ops->notrace_hash;
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-       if (!hash)
-               return -ENOMEM;
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out_regex_unlock;
+       }
 
-       mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3327,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ftrace_regex_lock);
+       mutex_unlock(&ops->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3353,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3377,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3395,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
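
These lazy ftrace_ops_init() calls are what keep a caller-supplied ops safe
even when it was defined without INIT_REGEX_LOCK. A minimal sketch of the
exported API (my_* names are hypothetical):

	static void my_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* invoked for every function left in my_ops' filter hash */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_func,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
		/* no INIT_REGEX_LOCK(): the first ftrace_set_filter() call
		 * runs ftrace_ops_init() and sets up regex_lock lazily */
	};

	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
	register_ftrace_function(&my_ops);
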
@@ -3485,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
        char *func;
 
+       ftrace_ops_init(ops);
+
        while (buf) {
                func = strsep(&buf, ",");
                ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3512,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
        int filter_hash;
        int ret;
 
-       mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
-
                seq_release(inode, file);
        } else
                iter = file->private_data;
@@ -3528,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
+       mutex_lock(&iter->ops->regex_lock);
+
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3545,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&ftrace_lock);
        }
+
+       mutex_unlock(&iter->ops->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
-       mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
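
Net effect of the regex_lock conversion on the filter-file lifecycle,
condensed (each file now serializes on its own ops instead of the old global
ftrace_regex_lock):

	open:    mutex_lock(&ops->regex_lock);
		 iter->hash = alloc_and_copy_ftrace_hash(...);	/* private copy */
		 mutex_unlock(&ops->regex_lock);

	write:   ftrace_process_regex(iter->hash, ...);		/* no lock needed */

	release: mutex_lock(&iter->ops->regex_lock);
		 ftrace_hash_move(iter->ops, ..., iter->hash);	/* commit copy */
		 mutex_unlock(&iter->ops->regex_lock);
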
 
@@ -3736,7 +3813,8 @@ out:
        if (fail)
                return -EINVAL;
 
-       ftrace_graph_filter_enabled = 1;
+       ftrace_graph_filter_enabled = !!(*idx);
+
        return 0;
 }
 
@@ -4086,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4140,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
-       .func = ftrace_ops_control_func,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .func   = ftrace_ops_control_func,
+       .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4499,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
        int ret = -1;
 
+       ftrace_ops_init(ops);
+
        mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);