/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries    = STACK_TRACE_ENTRIES - 1,
        .entries        = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}
/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;
        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);
        /*
         * RCU may not be watching, make it see us.
         * The stack trace code uses rcu_sched.
         */
        rcu_irq_enter();

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;

        save_stack_trace(&stack_trace_max);
        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
        /*
         * Loop through all the entries. One of the entries may,
         * for some reason, be missing from the stack, so we may
         * have to account for that. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;
                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }
        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;
        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        rcu_irq_exit();
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}
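/*
 * A minimal sketch of what an arch-specific override of check_stack()
 * could look like, following the contract documented above. This is
 * illustration only; walk_arch_stack() is an assumed helper, not a real
 * kernel function:
 *
 *	void check_stack(unsigned long ip, unsigned long *stack)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		arch_spin_lock(&stack_trace_max_lock);
 *
 *		(walk the arch-specific stack layout here, filling in
 *		 stack_trace_index[], stack_trace_max and
 *		 stack_trace_max_size while holding the lock)
 *		walk_arch_stack(ip, stack);
 *
 *		arch_spin_unlock(&stack_trace_max_lock);
 *		local_irq_restore(flags);
 *	}
 */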
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable from this CPU */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        /*
         * ip points at the mcount/fentry call site; advance it to the
         * return address, which is what actually appears on the stack.
         */
        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
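/*
 * Once this ops is registered (see the register_ftrace_function() calls
 * below), ftrace invokes stack_trace_call() on entry to every traced
 * function; that callback is what drives the max-stack sampling above.
 */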
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);

        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or get hit by an NMI
         * after taking it, we would deadlock on the lock, so we also need
         * to increase the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}
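/*
 * Typical usage of the stack_max_size file (the path assumes tracefs is
 * mounted at the usual /sys/kernel/debug/tracing location):
 *
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *
 * Writing 0 resets the recorded maximum so a new deepest stack can be
 * captured.
 */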
static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&stack_trace_max_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};
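/*
 * Illustrative use of the stack_trace_filter file to limit which
 * functions the stack tracer checks (path and pattern are examples,
 * assuming the usual tracefs mount point):
 *
 *	# echo 'kmem_cache_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */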
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
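/*
 * Because __setup("stacktrace", ...) matches on the prefix, this handler
 * also receives "stacktrace_filter=<pattern>" from the kernel command
 * line, with str pointing at "_filter=<pattern>" (hence the strncmp
 * above). An illustrative command line:
 *
 *	stacktrace_filter=kmem_cache_*
 */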
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
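/*
 * Putting it together, a typical (illustrative) session once the kernel
 * has booted looks like:
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# cat /sys/kernel/debug/tracing/stack_trace
 *
 * which prints the deepest kernel stack observed so far in the same
 * Depth/Size/Location format produced by t_show() above.
 */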