tracing: Use RING_BUFFER_ALL_CPUS for TRACE_PIPE_ALL_CPU
author: Steven Rostedt <srostedt@redhat.com>
Wed, 23 Jan 2013 20:22:59 +0000 (15:22 -0500)
committer: Steven Rostedt <rostedt@goodmis.org>
Fri, 15 Mar 2013 04:34:41 +0000 (00:34 -0400)
Both RING_BUFFER_ALL_CPUS and TRACE_PIPE_ALL_CPU are defined as
-1 and used to say that all the ring buffers are to be modified
or read (instead of just a single cpu, which would be >= 0).

There's no reason to keep TRACE_PIPE_ALL_CPU, as it has also started
to be used for more than what it was created for, and now that
the ring buffer code added a generic RING_BUFFER_ALL_CPUS define,
we can clean up the trace code to use that instead and remove
the TRACE_PIPE_ALL_CPU macro.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_kdb.c

index 932931897b8d2dd4d3544889e731191f4ba67346..59953aa288459aa4aea0164c14e6ed76a800ac67 100644 (file)
@@ -287,13 +287,13 @@ static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 
 static inline void trace_access_lock(int cpu)
 {
-       if (cpu == TRACE_PIPE_ALL_CPU) {
+       if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */
 
-               /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
+               /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);
 
                /* Secondly block other access to this @cpu ring buffer. */
@@ -303,7 +303,7 @@ static inline void trace_access_lock(int cpu)
 
 static inline void trace_access_unlock(int cpu)
 {
-       if (cpu == TRACE_PIPE_ALL_CPU) {
+       if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
@@ -1823,7 +1823,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
         * If we are in a per_cpu trace file, don't bother by iterating over
         * all cpu and peek directly.
         */
-       if (cpu_file > TRACE_PIPE_ALL_CPU) {
+       if (cpu_file > RING_BUFFER_ALL_CPUS) {
                if (ring_buffer_empty_cpu(buffer, cpu_file))
                        return NULL;
                ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
@@ -1983,7 +1983,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                iter->cpu = 0;
                iter->idx = -1;
 
-               if (cpu_file == TRACE_PIPE_ALL_CPU) {
+               if (cpu_file == RING_BUFFER_ALL_CPUS) {
                        for_each_tracing_cpu(cpu)
                                tracing_iter_reset(iter, cpu);
                } else
@@ -2291,7 +2291,7 @@ int trace_empty(struct trace_iterator *iter)
        int cpu;
 
        /* If we are looking at one CPU buffer, only check that one */
-       if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+       if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
                cpu = iter->cpu_file;
                buf_iter = trace_buffer_iter(iter, cpu);
                if (buf_iter) {
@@ -2533,7 +2533,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        if (!iter->snapshot)
                tracing_stop();
 
-       if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
+       if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter->buffer_iter[cpu] =
                                ring_buffer_read_prepare(iter->tr->buffer, cpu);
@@ -2617,7 +2617,7 @@ static int tracing_open(struct inode *inode, struct file *file)
            (file->f_flags & O_TRUNC)) {
                long cpu = (long) inode->i_private;
 
-               if (cpu == TRACE_PIPE_ALL_CPU)
+               if (cpu == RING_BUFFER_ALL_CPUS)
                        tracing_reset_online_cpus(&global_trace);
                else
                        tracing_reset(&global_trace, cpu);
@@ -5035,7 +5035,7 @@ static __init int tracer_init_debugfs(void)
                        NULL, &tracing_cpumask_fops);
 
        trace_create_file("trace", 0644, d_tracer,
-                       (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
+                       (void *) RING_BUFFER_ALL_CPUS, &tracing_fops);
 
        trace_create_file("available_tracers", 0444, d_tracer,
                        &global_trace, &show_traces_fops);
@@ -5055,7 +5055,7 @@ static __init int tracer_init_debugfs(void)
                        NULL, &tracing_readme_fops);
 
        trace_create_file("trace_pipe", 0444, d_tracer,
-                       (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
+                       (void *) RING_BUFFER_ALL_CPUS, &tracing_pipe_fops);
 
        trace_create_file("buffer_size_kb", 0644, d_tracer,
                        (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
@@ -5085,7 +5085,7 @@ static __init int tracer_init_debugfs(void)
 
 #ifdef CONFIG_TRACER_SNAPSHOT
        trace_create_file("snapshot", 0644, d_tracer,
-                         (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+                         (void *) RING_BUFFER_ALL_CPUS, &snapshot_fops);
 #endif
 
        create_trace_options_dir();
@@ -5162,7 +5162,7 @@ void trace_init_global_iter(struct trace_iterator *iter)
 {
        iter->tr = &global_trace;
        iter->trace = current_trace;
-       iter->cpu_file = TRACE_PIPE_ALL_CPU;
+       iter->cpu_file = RING_BUFFER_ALL_CPUS;
 }
 
 static void
@@ -5210,7 +5210,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
        switch (oops_dump_mode) {
        case DUMP_ALL:
-               iter.cpu_file = TRACE_PIPE_ALL_CPU;
+               iter.cpu_file = RING_BUFFER_ALL_CPUS;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
@@ -5219,7 +5219,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
-               iter.cpu_file = TRACE_PIPE_ALL_CPU;
+               iter.cpu_file = RING_BUFFER_ALL_CPUS;
        }
 
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
index 037f7eb03d6948bb1e351c11337571bae19c18a3..da09a037abcdd5828a2cc9fb175ede97c0360b2e 100644 (file)
@@ -453,8 +453,6 @@ static __always_inline void trace_clear_recursion(int bit)
        current->trace_recursion = val;
 }
 
-#define TRACE_PIPE_ALL_CPU     -1
-
 static inline struct ring_buffer_iter *
 trace_buffer_iter(struct trace_iterator *iter, int cpu)
 {
index 3c5c5dfea0b3be1740189be137d64b946a34c378..cc1dbdc5ee5d86229278c2c357f8935bcac102cc 100644 (file)
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
        iter.iter_flags |= TRACE_FILE_LAT_FMT;
        iter.pos = -1;
 
-       if (cpu_file == TRACE_PIPE_ALL_CPU) {
+       if (cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter.buffer_iter[cpu] =
                        ring_buffer_read_prepare(iter.tr->buffer, cpu);
@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv)
                    !cpu_online(cpu_file))
                        return KDB_BADINT;
        } else {
-               cpu_file = TRACE_PIPE_ALL_CPU;
+               cpu_file = RING_BUFFER_ALL_CPUS;
        }
 
        kdb_trap_printk++;