tracing: Have branch tracer use recursive field of task struct
author Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Tue, 7 Jul 2015 19:05:03 +0000 (15:05 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 3 Aug 2015 16:29:45 +0000 (09:29 -0700)
commit 6224beb12e190ff11f3c7d4bf50cb2922878f600 upstream.

Fengguang Wu's tests triggered a bug in the branch tracer's start-up
test when CONFIG_DEBUG_PREEMPT is set. That config adds debug logic
to the per-cpu accessors, and that logic calls back into the branch
tracer.

The branch tracer has its own recursion check, but it is implemented
with a per-cpu variable. If retrieving that per-cpu variable itself
calls back into the branch tracer, you can see how things will break.

Instead of using a per-cpu variable, use the trace_recursion field
of the current task struct. Simply set a bit when entering the branch
tracer and clear it when leaving. If the bit is already set on entry,
just skip the tracing (see the sketch below).
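
For illustration only, here is a minimal userspace sketch of the
same guard, with a thread-local flag standing in for the task
struct's trace_recursion bit (record_branch(), instrumented_helper()
and in_branch_trace are made-up names for the example, not kernel
APIs):

    #include <stdio.h>

    static __thread int in_branch_trace;    /* stand-in for the task struct bit */

    static void record_branch(int val, int expect);

    /* Pretend this helper is itself instrumented and re-enters the
     * tracer, the way the per-cpu debug code did under
     * CONFIG_DEBUG_PREEMPT. */
    static void instrumented_helper(void)
    {
            record_branch(1, 1);
    }

    static void record_branch(int val, int expect)
    {
            if (in_branch_trace)            /* bit already set: skip the tracing */
                    return;

            in_branch_trace = 1;            /* set the bit on entry */
            instrumented_helper();          /* the recursive call is now a no-op */
            printf("branch: val=%d expect=%d\n", val, expect);
            in_branch_trace = 0;            /* clear the bit on exit */
    }

    int main(void)
    {
            record_branch(0, 1);
            return 0;
    }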

There is also the lockdep case: local_irq_save() runs before the
recursion bit is set, and the instrumentation it triggers can call
back into this function. Changing it to raw_local_irq_save() protects
against that as well.
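
Putting both changes together, the guarded section of
probe_likely_condition() ends up looking roughly like this
(kernel-context fragment, not standalone code; it simply mirrors the
hunks below):

    unsigned long flags;

    raw_local_irq_save(flags);              /* no lockdep/irq-tracing hooks */
    current->trace_recursion |= TRACE_BRANCH_BIT;
    /* ... record the branch event into the ring buffer ... */
    current->trace_recursion &= ~TRACE_BRANCH_BIT;
    raw_local_irq_restore(flags);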

This prevents the recursion and the inevitable crash that follows.

Link: http://lkml.kernel.org/r/20150630141803.GA28071@wfg-t540p.sh.intel.com
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Tested-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/trace/trace.h
kernel/trace/trace_branch.c

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index aa0e736b72ac84d77a633ab8c83f8d1378883147..fe576073580ab28d31c108d87426c5d1286944bc 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -428,6 +428,7 @@ enum {
 
        TRACE_CONTROL_BIT,
 
+       TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index d594da0dc03ce53db530e238df1d410c4b89e29a..cb89197adf5c4e344a9d03f3b6df47fae6e03a66 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -37,9 +37,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
-       int cpu, pc;
+       int pc;
        const char *p;
 
+       if (current->trace_recursion & TRACE_BRANCH_BIT)
+               return;
+
        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
@@ -50,10 +53,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (unlikely(!tr))
                return;
 
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-       if (atomic_inc_return(&data->disabled) != 1)
+       raw_local_irq_save(flags);
+       current->trace_recursion |= TRACE_BRANCH_BIT;
+       data = this_cpu_ptr(tr->trace_buffer.data);
+       if (atomic_read(&data->disabled))
                goto out;
 
        pc = preempt_count();
@@ -82,8 +85,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                __buffer_unlock_commit(buffer, event);
 
  out:
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       current->trace_recursion &= ~TRACE_BRANCH_BIT;
+       raw_local_irq_restore(flags);
 }
 
 static inline