/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/rbtree.h>
#include <linux/list.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "thread-stack.h"
#define CALL_PATH_BLOCK_SHIFT 8
#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
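/*
 * Call paths are allocated in blocks of CALL_PATH_BLOCK_SIZE entries so that
 * adding a node is usually just a pointer bump.  Blocks are chained on the
 * root's list and freed only when the whole call path tree is freed.
 */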
struct call_path_block {
	struct call_path cp[CALL_PATH_BLOCK_SIZE];
	struct list_head node;
};
/**
 * struct call_path_root - root of all call paths.
 * @call_path: root call path
 * @blocks: list of blocks to store call paths
 * @next: next free space
 * @sz: number of spaces
 */
struct call_path_root {
	struct call_path call_path;
	struct list_head blocks;
	unsigned int next;
	unsigned int sz;
};
/**
 * struct call_return_processor - provides a call-back to consume call-return
 *                                information.
 * @cpr: call path root
 * @process: call-back that accepts call/return information
 * @data: anonymous data for call-back
 */
struct call_return_processor {
	struct call_path_root *cpr;
	int (*process)(struct call_return *cr, void *data);
	void *data;
};
#define STACK_GROWTH 2048
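/* The stack array grows in increments of STACK_GROWTH entries via realloc. */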
/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
};
/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
};
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}
static struct thread_stack *thread_stack__new(struct thread *thread,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts;

	ts = zalloc(sizeof(struct thread_stack));
	if (!ts)
		return NULL;

	if (thread_stack__grow(ts)) {
		free(ts);
		return NULL;
	}

	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;

	return ts;
}
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}
static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used.  Or the perf context
	 * switch in the kernel which doesn't stop and start tracing in exactly
	 * the same code path.  When that happens the return address will be
	 * further down the stack.  If the return address is not found at all,
	 * we assume the opposite (i.e. this is a return for a call that wasn't
	 * seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}
static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}
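/*
 * Empty the stack, reporting each remaining entry to the call/return
 * processor as a call whose return was never seen (CALL_RETURN_NO_RETURN).
 */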
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}
int thread_stack__flush(struct thread *thread)
{
	if (thread->ts)
		return __thread_stack__flush(thread, thread->ts);

	return 0;
}
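/*
 * Maintain the simple ret_addr-only stack used by thread_stack__sample().
 * When a call_return_processor has been attached (i.e. thread_stack__process()
 * is in use), only the trace number is tracked here.
 */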
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	if (!thread)
		return -EINVAL;

	if (!thread->ts) {
		thread->ts = thread_stack__new(thread, NULL);
		if (!thread->ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		thread->ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes.  In that case
	 * the stack might be completely invalid.  Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (thread->ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(thread->ts, ret_addr);
	} else if (flags & PERF_IP_FLAG_RETURN) {
		if (!from_ip)
			return 0;
		thread_stack__pop(thread->ts, to_ip);
	}

	return 0;
}
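/*
 * A minimal sketch of the sampling-mode flow, assuming a trace decoder that
 * reports one branch at a time (surrounding variable names are illustrative):
 *
 *	thread_stack__event(thread, flags, from_ip, to_ip, insn_len, trace_nr);
 *	...
 *	thread_stack__sample(thread, chain, sz, sample->ip);
 */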
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
	if (!thread || !thread->ts)
		return;

	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}
}
void thread_stack__free(struct thread *thread)
{
	if (thread->ts) {
		__thread_stack__flush(thread, thread->ts);
		zfree(&thread->ts->stack);
		zfree(&thread->ts);
	}
}
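/*
 * Synthesize a callchain: the sampled ip first, then the saved return
 * addresses from the top of the stack downwards.
 */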
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
			  size_t sz, u64 ip)
{
	size_t i;

	if (!thread || !thread->ts)
		chain->nr = 1;
	else
		chain->nr = min(sz, thread->ts->cnt + 1);

	chain->ips[0] = ip;

	for (i = 1; i < chain->nr; i++)
		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}
static void call_path__init(struct call_path *cp, struct call_path *parent,
			    struct symbol *sym, u64 ip, bool in_kernel)
{
	cp->parent = parent;
	cp->sym = sym;
	cp->ip = sym ? 0 : ip;
	cp->db_id = 0;
	cp->in_kernel = in_kernel;
	RB_CLEAR_NODE(&cp->rb_node);
	cp->children = RB_ROOT;
}
static struct call_path_root *call_path_root__new(void)
{
	struct call_path_root *cpr;

	cpr = zalloc(sizeof(struct call_path_root));
	if (!cpr)
		return NULL;
	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
	INIT_LIST_HEAD(&cpr->blocks);
	return cpr;
}
static void call_path_root__free(struct call_path_root *cpr)
{
	struct call_path_block *pos, *n;

	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
		list_del(&pos->node);
		free(pos);
	}
	free(cpr);
}
static struct call_path *call_path__new(struct call_path_root *cpr,
					struct call_path *parent,
					struct symbol *sym, u64 ip,
					bool in_kernel)
{
	struct call_path_block *cpb;
	struct call_path *cp;
	size_t n;

	if (cpr->next < cpr->sz) {
		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
				      node);
	} else {
		cpb = zalloc(sizeof(struct call_path_block));
		if (!cpb)
			return NULL;
		list_add_tail(&cpb->node, &cpr->blocks);
		cpr->sz += CALL_PATH_BLOCK_SIZE;
	}

	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
	cp = &cpb->cp[n];

	call_path__init(cp, parent, sym, ip, in_kernel);

	return cp;
}
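/*
 * Each call path keeps its children in an rb-tree ordered by (sym, ip).
 * Symbols are compared by pointer, and ip only matters when there is no
 * symbol (call_path__init() zeroes ip whenever sym is set).
 */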
static struct call_path *call_path__findnew(struct call_path_root *cpr,
					    struct call_path *parent,
					    struct symbol *sym, u64 ip, u64 ks)
{
	struct rb_node **p;
	struct rb_node *node_parent = NULL;
	struct call_path *cp;
	bool in_kernel = ip >= ks;

	if (sym)
		ip = 0;

	if (!parent)
		return call_path__new(cpr, parent, sym, ip, in_kernel);

	p = &parent->children.rb_node;
	while (*p != NULL) {
		node_parent = *p;
		cp = rb_entry(node_parent, struct call_path, rb_node);

		if (cp->sym == sym && cp->ip == ip)
			return cp;

		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
	if (!cp)
		return NULL;

	rb_link_node(&cp->rb_node, node_parent, p);
	rb_insert_color(&cp->rb_node, &parent->children);

	return cp;
}
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}
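/*
 * Sketch of intended usage; the callback and data below are hypothetical,
 * not part of this file:
 *
 *	static int process_cr(struct call_return *cr, void *data)
 *	{
 *		// consume cr->cp, cr->call_time, cr->return_time, ...
 *		return 0;
 *	}
 *	...
 *	crp = call_return_processor__new(process_cr, data);
 *	// pass crp to thread_stack__process() for each branch sample
 *	call_return_processor__free(crp);
 */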
void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call)
{
	struct thread_stack_entry *tse;
	int err;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;

	return 0;
}
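/*
 * Pop entries until one whose ret_addr matches (a one-deep stack also matches
 * on the symbol alone); entries skipped on the way down are reported as calls
 * with no seen return.  Returns 1 when nothing matches, in which case the
 * caller treats the sample as a return with no call.
 */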
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}
static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
				     true);
}
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			if (!cp)
				return -ENOMEM;
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}
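/*
 * A 'trace end' pushes a pseudo entry (NULL sym, zero ip) so that time spent
 * outside the trace is still accounted to the enclosing call path; a
 * subsequent 'trace begin' pops it again.
 */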
static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->cp->sym == NULL && tse->cp->ip == 0) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}
static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false);
}
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts;
	int err = 0;

	if (ts) {
		if (!ts->crp) {
			/* Supersede thread_stack__event() */
			thread_stack__free(thread);
			thread->ts = thread_stack__new(thread, crp);
			if (!thread->ts)
				return -ENOMEM;
			ts = thread->ts;
			ts->comm = comm;
		}
	} else {
		thread->ts = thread_stack__new(thread, crp);
		if (!thread->ts)
			return -ENOMEM;
		ts = thread->ts;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
					   ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		if (!cp)
			return -ENOMEM;
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}