tracing: Fix recursive user stack trace
author	Steven Rostedt <srostedt@redhat.com>
Wed, 10 Nov 2010 11:56:12 +0000 (12:56 +0100)
committer	Steven Rostedt <rostedt@goodmis.org>
Sat, 13 Nov 2010 02:20:08 +0000 (21:20 -0500)
Taking a user stack trace can fault while reading the user stack.
The fault invokes the do_page_fault handler, which is itself traced;
that trace takes another user stack trace, which faults and calls
do_page_fault again, and so on ...

This causes unbounded recursion. Add a per-CPU recursion detector so
that a user stack trace already in progress on this CPU suppresses
any nested one.
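
A minimal user-space sketch of the same guard pattern, with a C11
thread-local flag standing in for the kernel's per-CPU counter (the
function names here are hypothetical, for illustration only):

	#include <stdio.h>

	/* Stand-in for the per-CPU counter: one flag per thread. */
	static _Thread_local int trace_recursion;

	static void trace_event(const char *what);

	/* Faulting path: in the kernel, do_page_fault() is itself traced. */
	static void fake_page_fault(void)
	{
		trace_event("page fault");	/* would recurse without the guard */
	}

	static void trace_event(const char *what)
	{
		if (trace_recursion)		/* already tracing: bail out */
			return;
		trace_recursion++;

		printf("tracing: %s\n", what);
		fake_page_fault();		/* reading the stack may fault */

		trace_recursion--;
	}

	int main(void)
	{
		trace_event("syscall");		/* the nested trace is suppressed */
		return 0;
	}

In the kernel the flag must be per-CPU rather than per-thread, and
preemption is disabled around the guarded region (as in the patch
below) so the task cannot migrate to another CPU between the
increment and the decrement.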

[ Resubmitted by Jiri Olsa ]

[ Eric Dumazet recommended using __this_cpu_* instead of __get_cpu_* ]

Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
LKML-Reference: <1289390172-9730-3-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/trace.c

index 82d9b8106cd078970ea0c7343e12ad2b6a6eed36..ee6a7339cf0e63d84076dc271b9b79144316b693 100644
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
        __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,16 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        if (unlikely(in_nmi()))
                return;
 
+       /*
+        * prevent recursion, since the user stack tracing may
+        * trigger other kernel events.
+        */
+       preempt_disable();
+       if (__this_cpu_read(user_stack_count))
+               goto out;
+
+       __this_cpu_inc(user_stack_count);
+
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
@@ -1319,6 +1331,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        save_stack_trace_user(&trace);
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
+
+       __this_cpu_dec(user_stack_count);
+
+ out:
+       preempt_enable();
 }
 
 #ifdef UNUSED