/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/rbtree.h>
#include <linux/list.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048
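/*
 * The stack grows in fixed STACK_GROWTH increments, so reallocation cost
 * stays amortized: for example, a call stack that reaches a depth of 2049
 * entries needs only two reallocations (to 2048, then 4096 entries).
 */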
/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
};
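/*
 * Note on usage (a summary, not upstream documentation): the thread stack
 * works in two mutually exclusive modes per thread. Without a
 * call_return_processor, thread_stack__event() records only return
 * addresses, and thread_stack__sample() can later synthesize a callchain
 * for any sample. With a call_return_processor, thread_stack__process()
 * additionally resolves call paths and emits a call_return record for
 * every matched call/return pair, superseding the first mode.
 */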
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);
	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;
	ts->stack = new_stack;
	ts->sz = new_sz;
	return 0;
}
static struct thread_stack *thread_stack__new(struct thread *thread,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts;

	ts = zalloc(sizeof(struct thread_stack));
	if (!ts)
		return NULL;
	if (thread_stack__grow(ts)) {
		free(ts);
		return NULL;
	}
	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;
	return ts;
}
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}
	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;
	return err;
}
static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used.  Or the perf context
	 * switch in the kernel which doesn't stop and start tracing in exactly
	 * the same code path.  When that happens the return address will be
	 * further down the stack.  If the return address is not found at all,
	 * we assume the opposite (i.e. this is a return for a call that wasn't
	 * seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}
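/*
 * For example, with return addresses [A, B, C] on the stack (C on top),
 * popping B removes both C and B: C is presumed to have exited without a
 * visible return (e.g. via longjmp). Popping an address that is not on
 * the stack at all leaves [A, B, C] untouched.
 */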
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	/* Pop consecutive 'trace end' entries from the top of the stack */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}
static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;
	return ts->stack[ts->cnt - 1].cp->in_kernel;
}
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}
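/*
 * Each matched pair reaches the consumer as one call_return carrying the
 * call path, entry/exit timestamps, branch count delta and external refs;
 * the db export path, for example, turns these into rows of a calls table.
 */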
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}
	return 0;
}
int thread_stack__flush(struct thread *thread)
{
	if (thread->ts)
		return __thread_stack__flush(thread, thread->ts);
	return 0;
}
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	if (!thread)
		return -EINVAL;

	if (!thread->ts) {
		thread->ts = thread_stack__new(thread, NULL);
		if (!thread->ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		thread->ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes.  In that case
	 * the stack might be completely invalid.  Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (thread->ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(thread->ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that.  Also, do not expect a call made
		 * when the trace ended, to return, so pop that.
		 */
		thread_stack__pop(thread->ts, to_ip);
		thread_stack__pop_trace_end(thread->ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(thread->ts, to_ip);
	}

	return 0;
}
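/*
 * Worked example: a CALL branch at 0x400100 with insn_len 5 pushes return
 * address 0x400105; the matching RETURN branch later arrives with
 * to_ip == 0x400105 and pops it. A hypothetical decoder loop would call
 * thread_stack__event() for every branch sample and thread_stack__sample()
 * whenever it wants a synthesized callchain attached to a sample.
 */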
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
	if (!thread || !thread->ts)
		return;

	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}
}
void thread_stack__free(struct thread *thread)
{
	if (thread->ts) {
		__thread_stack__flush(thread, thread->ts);
		zfree(&thread->ts->stack);
		zfree(&thread->ts);
	}
}
static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}
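/*
 * Synthesized chains use PERF_CONTEXT_* markers so consumers can tell
 * kernel frames from user frames; a chain crossing the boundary looks
 * like { PERF_CONTEXT_KERNEL, kip, PERF_CONTEXT_USER, uip1, uip2, ... }.
 */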
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!thread || !thread->ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
		ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}
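/*
 * A minimal usage sketch; my_process and my_data are hypothetical names:
 *
 *	static int my_process(struct call_return *cr, void *data)
 *	{
 *		... inspect cr->cp->sym, cr->call_time, cr->return_time ...
 *		return 0;
 *	}
 *
 *	crp = call_return_processor__new(my_process, my_data);
 *	... pass crp to thread_stack__process() for each branch sample ...
 *	call_return_processor__free(crp);
 */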
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;

	return 0;
}
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}
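/*
 * For example, with entries for A -> B -> C on the stack, a return that
 * matches B's saved ret_addr first emits a call_return flagged NO_RETURN
 * for C (its return was never seen), then a normal call_return for B.
 * A return value of 1 means no match; the caller falls back to
 * thread_stack__no_call_return().
 */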
static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
				     true, false);
}
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			if (!cp)
				return -ENOMEM;
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true, false);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true, false);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}
static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}
static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}
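/*
 * The 'trace end' entry is a placeholder (a NULL-symbol call path) marking
 * where tracing stopped; if the trace resumes with that entry still on
 * top, thread_stack__trace_begin() pops it again so it cannot be mistaken
 * for a real pending call.
 */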
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts;
	int err = 0;

	if (ts) {
		if (!ts->crp) {
			/* Supersede thread_stack__event() */
			thread_stack__free(thread);
			thread->ts = thread_stack__new(thread, crp);
			if (!thread->ts)
				return -ENOMEM;
			ts = thread->ts;
			ts->comm = comm;
		}
	} else {
		thread->ts = thread_stack__new(thread, crp);
		if (!thread->ts)
			return -ENOMEM;
		ts = thread->ts;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
					   ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		if (!cp)
			return -ENOMEM;
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}
size_t thread_stack__depth(struct thread *thread)
{
	if (!thread->ts)
		return 0;
	return thread->ts->cnt;
}
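/*
 * End-to-end sketch (the decoder is whichever caller produces branch
 * samples, e.g. the Intel PT code): resolve each sample's from/to
 * addresses, then either feed thread_stack__event() and use
 * thread_stack__sample() for callchains, or feed thread_stack__process()
 * with a call_return_processor to export call/return pairs.
 * thread_stack__free() flushes and releases everything when the thread
 * is discarded.
 */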