/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);
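/* serializes updates of the max latency record (and max_sequence) */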
static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define is_graph() (trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tr->max_latency)
			return 0;
	}
	return 1;
}
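/*
 * Called when a critical section ends: compute how long irqs/preemption
 * were off, and if that beats the current maximum (checked again under
 * max_trace_lock so two CPUs cannot race on the record), snapshot the
 * trace via update_max_tr_single().
 */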
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
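/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * record the timestamp and start address, and set tracing_cpu so the
 * function hooks know a measurement is in flight.
 */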
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
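/*
 * Mark the end of the critical section: clear tracing_cpu first, so a
 * nested "start" on this CPU cannot rearm the measurement, then hand
 * the section to check_critical_timing().
 */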
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/*
 * start and stop critical timings, used to suppress measurement across
 * sections (such as the idle loop) that legitimately run with irqs off.
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
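/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_*() entry
 * points and calls back into us through time_hardirqs_on/off(); without
 * it, this file provides the trace_hardirqs_*() entry points itself below.
 */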
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */
/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
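/*
 * Attach either the plain function tracer or the function graph tracer,
 * depending on 'graph'. 'set' tells whether TRACE_ITER_FUNCTION is being
 * explicitly turned on, as opposed to the tracer merely starting up.
 */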
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static int irqsoff_function_set(struct trace_array *tr, int set)
{
	if (set)
		register_irqsoff_function(tr, is_graph(), 1);
	else
		unregister_irqsoff_function(tr, is_graph());
	return 0;
}
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (mask & TRACE_ITER_FUNCTION)
		return irqsoff_function_set(tr, set);

	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(&tr->trace_buffer);

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph())))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph());

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
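/*
 * Three flavours share the machinery above: "irqsoff" times sections
 * with interrupts disabled, "preemptoff" times sections with preemption
 * disabled, and "preemptirqsoff" times sections where either is off.
 */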
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
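/*
 * Usage sketch (not part of this file): the tracers registered above are
 * driven entirely through tracefs. Assuming tracefs is mounted at
 * /sys/kernel/tracing, a typical session is:
 *
 *	echo 0 > tracing_on
 *	echo irqsoff > current_tracer
 *	echo 1 > tracing_on
 *	... run the workload ...
 *	echo 0 > tracing_on
 *	cat trace                  # longest irqs-off path seen so far
 *	cat tracing_max_latency    # its length, in microseconds
 *
 * Writing 0 to tracing_max_latency rearms the hunt for a new maximum.
 */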