tracing: Protect ftrace_trace_arrays list in trace_events.c
author	Alexander Z Lam <azl@google.com>
Tue, 2 Jul 2013 02:37:54 +0000 (19:37 -0700)
committer	Steven Rostedt <rostedt@goodmis.org>
Tue, 2 Jul 2013 03:30:08 +0000 (23:30 -0400)
There are multiple places where the ftrace_trace_arrays list is accessed in
trace_events.c without the trace_types_lock held. Take trace_types_lock around
those accesses (nesting it outside event_mutex), and make the lock visible
outside trace.c so trace_events.c can use it.
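As a minimal sketch of the locking pattern this change enforces (the helper
below is hypothetical and not part of the patch), any walk of
ftrace_trace_arrays now takes trace_types_lock first, with event_mutex nested
inside it:

	/* Sketch only: illustrates the lock ordering, not code from the patch */
	static struct trace_array *find_trace_array_sketch(const char *name)
	{
		struct trace_array *tr, *found = NULL;

		/* trace_types_lock protects the ftrace_trace_arrays list */
		mutex_lock(&trace_types_lock);
		mutex_lock(&event_mutex);
		list_for_each_entry(tr, &ftrace_trace_arrays, list) {
			if (tr->name && strcmp(tr->name, name) == 0) {
				found = tr;
				break;
			}
		}
		mutex_unlock(&event_mutex);
		mutex_unlock(&trace_types_lock);

		return found;
	}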

Link: http://lkml.kernel.org/r/1372732674-22726-1-git-send-email-azl@google.com
Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Alexander Z Lam <lambchop468@gmail.com>
Cc: stable@vger.kernel.org # 3.10
Signed-off-by: Alexander Z Lam <azl@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events.c

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e04e7119633d0bf131d97a097781cc054fc9d46a..e36da7ff59bf5312e77057df0cded1fe92077888 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -266,7 +266,7 @@ static struct tracer                *trace_types __read_mostly;
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
-static DEFINE_MUTEX(trace_types_lock);
+DEFINE_MUTEX(trace_types_lock);
 
 /*
  * serialize the access of the ring buffer
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a88939e666b7b74793c02bc3c60baab45cccc7b0..2c3cba59552d805e170641231d97f3c6e082d111 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -224,6 +224,8 @@ enum {
 
 extern struct list_head ftrace_trace_arrays;
 
+extern struct mutex trace_types_lock;
+
 /*
  * The global tracer (top) should be the first trace array added,
  * but we check the flag anyway.
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 5892470bc2ee587881a49bad3619a99b3851210e..35c6f23c71b2ea9fb727e9df5744af0b8f8f99a2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1008,6 +1008,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
        int ret;
 
        /* Make sure the system still exists */
+       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
@@ -1023,6 +1024,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
        }
  exit_loop:
        mutex_unlock(&event_mutex);
+       mutex_unlock(&trace_types_lock);
 
        if (!system)
                return -ENODEV;
@@ -1617,6 +1619,7 @@ static void __add_event_to_tracers(struct ftrace_event_call *call,
 int trace_add_event_call(struct ftrace_event_call *call)
 {
        int ret;
+       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
 
        ret = __register_event(call, NULL);
@@ -1624,11 +1627,13 @@ int trace_add_event_call(struct ftrace_event_call *call)
                __add_event_to_tracers(call, NULL);
 
        mutex_unlock(&event_mutex);
+       mutex_unlock(&trace_types_lock);
        return ret;
 }
 
 /*
- * Must be called under locking both of event_mutex and trace_event_sem.
+ * Must be called under locking of trace_types_lock, event_mutex and
+ * trace_event_sem.
  */
 static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
@@ -1640,11 +1645,13 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 /* Remove an event_call */
 void trace_remove_event_call(struct ftrace_event_call *call)
 {
+       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        down_write(&trace_event_sem);
        __trace_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&event_mutex);
+       mutex_unlock(&trace_types_lock);
 }
 
 #define for_each_event(event, start, end)                      \
@@ -1788,6 +1795,7 @@ static int trace_module_notify(struct notifier_block *self,
 {
        struct module *mod = data;
 
+       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
@@ -1798,6 +1806,7 @@ static int trace_module_notify(struct notifier_block *self,
                break;
        }
        mutex_unlock(&event_mutex);
+       mutex_unlock(&trace_types_lock);
 
        return 0;
 }