ftrace: Use schedule_on_each_cpu() as a heavy synchronize_sched()
[firefly-linux-kernel-4.4.55.git] / kernel / trace / ftrace.c
index 6c508ff33c6206df8e028e1eab43e913565f927e..800a8a2fbddb34ec06381a0cd4cf881af965f7e1 100644 (file)
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+       /*
+        * This function is just a stub to implement a hard force
+        * of synchronize_sched(). This requires synchronizing
+        * tasks even in userspace and idle.
+        *
+        * Yes, function tracing is rude.
+        */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
+                        * Note synchronize_sched() is not enough, as we
+                        * use preempt_disable() to do RCU, but the function
+                        * tracer can be called where RCU is not active
+                        * (before user_exit()).
                         */
-                       synchronize_sched();
+                       schedule_on_each_cpu(ftrace_sync);
                        control_ops_free(ops);
                }
        } else
@@ -456,9 +471,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
+        *
+        * Again, normal synchronize_sched() is not good enough.
+        * We need to do a hard force of sched synchronization.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-               synchronize_sched();
+               schedule_on_each_cpu(ftrace_sync);
 
        return 0;
 }