tracing/fastboot: use sched switch tracer from boot tracer
authorFrederic Weisbecker <fweisbec@gmail.com>
Fri, 31 Oct 2008 12:20:08 +0000 (13:20 +0100)
committerIngo Molnar <mingo@elte.hu>
Tue, 4 Nov 2008 16:14:06 +0000 (17:14 +0100)
Impact: enhance boot trace output with scheduling events

Use the sched_switch tracer from the boot tracer.

We can also trace scheduling events inside the initcalls.
Sched tracing is disabled after the initcall has finished and
then reenabled before the next one is started.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_sched_switch.c

index e4c40c868d67fb0f3028d5bf2ef3f806b23ab62f..50d7018163f6e6f06a7c25b88f3208fde37dea81 100644 (file)
@@ -3251,6 +3251,8 @@ __init static int tracer_alloc_buffers(void)
 
        register_tracer(&nop_trace);
 #ifdef CONFIG_BOOT_TRACER
+       /* We don't want to launch sched_switch tracer yet */
+       global_trace.ctrl = 0;
        register_tracer(&boot_tracer);
        current_trace = &boot_tracer;
        current_trace->init(&global_trace);
index 8465ad052707afe380e2fba0017c0f37fb1d5093..9911277b268bd07553082516b2349eed01656eb0 100644 (file)
@@ -49,6 +49,7 @@ struct ftrace_entry {
        unsigned long           parent_ip;
 };
 extern struct tracer boot_tracer;
+extern struct tracer sched_switch_trace; /* Used by the boot tracer */
 
 /*
  * Context switch trace entry - which task (and prio) we switched from/to:
index d104d5b464131f2e36c0967c9289f4cd331a75f3..6bbc8794a6df3d7cfe2074d3f3bf1a7485cd64d7 100644 (file)
@@ -27,10 +27,14 @@ void start_boot_trace(void)
 
 void enable_boot_trace(void)
 {
+       if (pre_initcalls_finished)
+               tracing_start_cmdline_record();
 }
 
 void disable_boot_trace(void)
 {
+       if (pre_initcalls_finished)
+               tracing_stop_cmdline_record();
 }
 
 void reset_boot_trace(struct trace_array *tr)
@@ -45,6 +49,8 @@ static void boot_trace_init(struct trace_array *tr)
 
        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);
+
+       sched_switch_trace.init(tr);
 }
 
 static void boot_trace_ctrl_update(struct trace_array *tr)
index 96620c714300a5a5eb3632eb7402a77d3a307c85..9d7bdac331dd262561055e926a913641b5f7bdfc 100644 (file)
@@ -127,6 +127,7 @@ static void tracing_start_sched_switch(void)
        long ref;
 
        mutex_lock(&tracepoint_mutex);
+       tracer_enabled = 1;
        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
@@ -138,6 +139,7 @@ static void tracing_stop_sched_switch(void)
        long ref;
 
        mutex_lock(&tracepoint_mutex);
+       tracer_enabled = 0;
        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
@@ -158,12 +160,10 @@ static void start_sched_trace(struct trace_array *tr)
 {
        sched_switch_reset(tr);
        tracing_start_cmdline_record();
-       tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-       tracer_enabled = 0;
        tracing_stop_cmdline_record();
 }
 
@@ -190,7 +190,7 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr)
                stop_sched_trace(tr);
 }
 
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
 {
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,