/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

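/*
 * Measure how deep the current kernel stack is.  If it is deeper than
 * any depth seen so far, record the new maximum, save a fresh stack
 * trace, and work out how much stack each function in that trace used.
 */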
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

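/*
 * ftrace callback, invoked on entry to every traced function.  The
 * per-cpu trace_active counter keeps the callback from recursing into
 * itself while it measures the stack.
 */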
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

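/*
 * debugfs read handler for "stack_max_size": report the largest stack
 * depth recorded so far as decimal text.
 */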
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

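/*
 * debugfs write handler for "stack_max_size": parse a decimal value
 * from user space and store it as the new threshold under
 * max_stack_lock.
 */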
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

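/* Wire the read/write handlers above to the "stack_max_size" file. */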
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

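/*
 * seq_file iterator over the recorded stack trace.  Position 0 is the
 * header line; positions 1..nr_entries map to stack_dump_trace[] slots.
 */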
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

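/* Print the symbol that a saved return address resolves to. */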
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

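/* seq_file operations backing the "stack_trace" debugfs file. */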
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

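/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback when the value actually changes.
 */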
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);

	return ret;
}

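/* "stacktrace" on the kernel command line enables the tracer at boot. */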
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

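/*
 * Create the debugfs files and, if the tracer was enabled on the
 * command line, register the ftrace callback.
 */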
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);