/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from the function tracer, which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include "trace_output.h"

static bool kill_ftrace_graph;
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};
#define TRACE_GRAPH_INDENT      2
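/*
 * For illustration: with TRACE_GRAPH_INDENT == 2, a call printed at
 * depth 3 gets 3 * 2 == 6 leading spaces in the function-call column.
 */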
#define TRACE_GRAPH_PRINT_FLAT  0x80

static unsigned int max_depth;
static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        /* Use standard trace formatting rather than hierarchical */
        { TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
        { } /* Empty entry */
};
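/*
 * The options above are toggled at runtime through the trace_options
 * file in tracefs. A sketch of typical usage, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *      # echo function_graph > /sys/kernel/tracing/current_tracer
 *      # echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *      # echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * Prefixing an option with "no" clears it; otherwise it is set.
 */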
static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;
/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }
        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_
         * DEPTH) when the function graph tracer is used. To support
         * filtering out specific functions, it makes the index
         * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH),
         * so when ftrace sees a negative index it will ignore
         * the record. The index gets recovered when returning
         * from the filtered function by adding FTRACE_NOTRACE_
         * DEPTH back, and then it'll continue to record functions normally.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
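        /*
         * A worked example of the index arithmetic, assuming
         * FTRACE_NOTRACE_DEPTH is a huge constant (e.g. 0x10000):
         *
         *   curr_ret_stack ==  3             normal tracing at depth 3
         *   3 - 0x10000    == -65533         entering a notrace'd function;
         *                                    negative, so records are skipped
         *   -65533 + 0x10000 == 3            restored on return; tracing resumes
         */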
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }
#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif
        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
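/*
 * A rough sketch of how an architecture's return trampoline uses this
 * handler (illustrative pseudo-assembly, not any particular arch):
 *
 *      return_to_handler:
 *              <save scratch/return-value registers>
 *              call ftrace_return_to_handler   ; yields original ret address
 *              <move result into the return-address register>
 *              <restore scratch/return-value registers>
 *              jump to the original ret address
 */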
int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}
static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it when it is nested in an enabled function or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}
static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}
static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email.
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {
                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print msecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
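/*
 * A worked example: duration == 1432 (ns) leaves usecs_str "1" and
 * nsecs_rem 432, so the column reads "1.432 us" followed by padding
 * spaces up to the fixed row width.
 */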
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}
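/*
 * A leaf entry folds the entry/return pair into a single line, e.g.
 * (illustrative output):
 *
 *       1)   0.123 us    |    kfree();
 */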
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}
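/*
 * A nested (non-leaf) entry prints only the opening line, with the
 * duration column left blank, e.g. (illustrative output):
 *
 *       1)               |    schedule() {
 *
 * The matching "}" line is printed later by print_graph_return().
 */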
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it. Because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}
static enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (flags & TRACE_GRAPH_PRINT_FLAT)
                return TRACE_TYPE_UNHANDLED;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved at the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}
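/*
 * With the default flags the two header lines look roughly like
 * (illustrative, column widths approximate):
 *
 *      # CPU  DURATION                  FUNCTION CALLS
 *      # |     |   |                     |   |   |   |
 */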
static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (flags & TRACE_GRAPH_PRINT_FLAT) {
                trace_default_header(s);
                return;
        }

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warning("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}
static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
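/*
 * A sketch of userspace usage, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *      # echo 3 > /sys/kernel/tracing/max_graph_depth
 *      # cat /sys/kernel/tracing/max_graph_depth
 *      3
 *
 * A value of 0 (the default) removes the depth limit, since max_depth
 * is only checked when non-zero in trace_graph_entry().
 */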
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);
static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);