// SPDX-License-Identifier: GPL-2.0-only
/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "env.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"
#define STACK_GROWTH 2048

/*
 * State of retpoline detection.
 *
 * RETPOLINE_NONE: no retpoline detection
 * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
 * X86_RETPOLINE_DETECTED: x86 retpoline detected
 */
enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};
/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @insn_count: the instruction count when the entry was created
 * @cyc_count: the cycle count when the entry was created
 * @db_id: id used for db-export
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 * @non_call: a branch but not a 'call' to the start of a different symbol
 */
struct thread_stack_entry {
/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @insn_count: running instruction count
 * @cyc_count: running cycle count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @arr_sz: size of array if this is the first element of an array
 * @rstate: used to detect retpolines
 * @br_stack_rb: branch stack (ring buffer)
 * @br_stack_sz: maximum branch stack size
 * @br_stack_pos: current position in @br_stack_rb
 * @mispred_all: mark all branches as mispredicted
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	struct call_return_processor *crp;
	enum retpoline_state_t rstate;
	struct branch_stack *br_stack_rb;
	unsigned int br_stack_sz;
	unsigned int br_stack_pos;
/*
 * Assume pid == tid == 0 identifies the idle task as defined by
 * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
 * and therefore requires a stack for each cpu.
 */
static inline bool thread_stack__per_cpu(struct thread *thread)
	return !(thread__tid(thread) || thread__pid(thread));
static int thread_stack__grow(struct thread_stack *ts)
	struct thread_stack_entry *new_stack;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);

	ts->stack = new_stack;
static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
			      struct call_return_processor *crp,
			      bool callstack, unsigned int br_stack_sz)
	err = thread_stack__grow(ts);

		size_t sz = sizeof(struct branch_stack);

		sz += br_stack_sz * sizeof(struct branch_entry);
		ts->br_stack_rb = zalloc(sz);
		if (!ts->br_stack_rb)
		ts->br_stack_sz = br_stack_sz;

	if (thread__maps(thread) && maps__machine(thread__maps(thread))) {
		struct machine *machine = maps__machine(thread__maps(thread));
		const char *arch = perf_env__arch(machine->env);

		ts->kernel_start = machine__kernel_start(machine);
		if (!strcmp(arch, "x86"))
			ts->rstate = X86_RETPOLINE_POSSIBLE;
	} else {
		ts->kernel_start = 1ULL << 63;
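		/*
		 * Without machine info the exact kernel start is unknown, so
		 * the fallback above treats the upper half of a 64-bit address
		 * space (addresses with the top bit set) as kernel addresses.
		 */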
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
					      struct call_return_processor *crp,
					      bool callstack,
					      unsigned int br_stack_sz)
	struct thread_stack *ts = thread__ts(thread), *new_ts;
	unsigned int old_sz = ts ? ts->arr_sz : 0;
	unsigned int new_sz = 1;

	if (thread_stack__per_cpu(thread) && cpu > 0)
		new_sz = roundup_pow_of_two(cpu + 1);
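	/*
	 * E.g. for the idle task sampled on cpu 5, roundup_pow_of_two(6) == 8,
	 * so the per-cpu array is sized to hold stacks for cpus 0-7.
	 */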
	if (!ts || new_sz > old_sz) {
		new_ts = calloc(new_sz, sizeof(*ts));
		memcpy(new_ts, ts, old_sz * sizeof(*ts));
		new_ts->arr_sz = new_sz;
		free(thread__ts(thread));
		thread__set_ts(thread, new_ts);

	if (thread_stack__per_cpu(thread) && cpu > 0 &&
	    (unsigned int)cpu < ts->arr_sz)

	    thread_stack__init(ts, thread, crp, callstack, br_stack_sz))

static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
	struct thread_stack *ts = thread__ts(thread);

	if (!ts || (unsigned int)cpu >= ts->arr_sz)

static inline struct thread_stack *thread__stack(struct thread *thread,
	if (thread_stack__per_cpu(thread))
		return thread__cpu_stack(thread, cpu);

	return thread__ts(thread);
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
			pr_warning("Out of memory: discarding thread stack\n");

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;
static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
	/*
	 * In some cases there may be functions which are not seen to return,
	 * for example when setjmp / longjmp has been used, or when the perf
	 * context switch in the kernel doesn't stop and start tracing in
	 * exactly the same code path. When that happens the return address
	 * will be further down the stack. If the return address is not found
	 * at all, we assume the opposite (i.e. this is a return for a call
	 * that wasn't seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
static void thread_stack__pop_trace_end(struct thread_stack *ts)
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)

static bool thread_stack__in_kernel(struct thread_stack *ts)
	return ts->stack[ts->cnt - 1].cp->in_kernel;
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {

	tse = &ts->stack[idx];

	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.insn_count = ts->insn_count - tse->insn_count;
	cr.cyc_count = ts->cyc_count - tse->cyc_count;
	cr.db_id = tse->db_id;
	cr.call_ref = tse->ref;

		cr.flags |= CALL_RETURN_NO_CALL;
		cr.flags |= CALL_RETURN_NO_RETURN;
		cr.flags |= CALL_RETURN_NON_CALL;

	/*
	 * The parent db_id must be assigned before exporting the child. Note
	 * it is not possible to export the parent first because its
	 * information is not complete until its 'return' has been processed.
	 */
	parent_db_id = idx ? &(tse - 1)->db_id : NULL;

	return crp->process(&cr, parent_db_id, crp->data);
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
	struct call_return_processor *crp = ts->crp;

		ts->br_stack_pos = 0;
		ts->br_stack_rb->nr = 0;

		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
			pr_err("Error flushing thread stack!\n");

int thread_stack__flush(struct thread *thread)
	struct thread_stack *ts = thread__ts(thread);

	for (pos = 0; pos < ts->arr_sz; pos++) {
		int ret = __thread_stack__flush(thread, ts + pos);
static void thread_stack__update_br_stack(struct thread_stack *ts, u32 flags,
					  u64 from_ip, u64 to_ip)
	struct branch_stack *bs = ts->br_stack_rb;
	struct branch_entry *be;

	if (!ts->br_stack_pos)
		ts->br_stack_pos = ts->br_stack_sz;

	ts->br_stack_pos -= 1;
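	/*
	 * The ring buffer is filled from the end towards the start:
	 * br_stack_pos wraps from 0 back to br_stack_sz and is then
	 * decremented, so entries[br_stack_pos] is always the newest branch
	 * and older branches follow at increasing indexes.
	 */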
	be = &bs->entries[ts->br_stack_pos];

	be->flags.abort = !!(flags & PERF_IP_FLAG_TX_ABORT);
	be->flags.in_tx = !!(flags & PERF_IP_FLAG_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ts->mispred_all;

	if (bs->nr < ts->br_stack_sz)
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr, bool callstack,
			unsigned int br_stack_sz, bool mispred_all)
	struct thread_stack *ts = thread__stack(thread, cpu);

		ts = thread_stack__new(thread, cpu, NULL, callstack, br_stack_sz);
			pr_warning("Out of memory: no thread stack\n");
		ts->trace_nr = trace_nr;
		ts->mispred_all = mispred_all;

	/*
	 * When the trace is discontinuous, the trace_nr changes. In that case
	 * the stack might be completely invalid. Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != ts->trace_nr) {
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;

		thread_stack__update_br_stack(ts, flags, from_ip, to_ip);

	/*
	 * Stop here if thread_stack__process() is in use, or not recording call
	 * stack.
	 */
	if (ts->crp || !callstack)

	if (flags & PERF_IP_FLAG_CALL) {
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
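		/*
		 * A zero-length call is a 'call' to the next instruction,
		 * typically the idiom used to read the program counter for
		 * position-independent code. It is not a real function call,
		 * so no entry is pushed for it.
		 */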
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, a call made when the
		 * trace ended is not expected to return, so pop that.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (trace_nr != ts->trace_nr) {
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;

static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
	__thread_stack__flush(thread, ts);

	zfree(&ts->br_stack_rb);

static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
	unsigned int arr_sz = ts->arr_sz;

	__thread_stack__free(thread, ts);
	memset(ts, 0, sizeof(*ts));

void thread_stack__free(struct thread *thread)
	struct thread_stack *ts = thread__ts(thread);

	for (pos = 0; pos < ts->arr_sz; pos++)
		__thread_stack__free(thread, ts + pos);
	free(thread__ts(thread));
	thread__set_ts(thread, NULL);

static inline u64 callchain_context(u64 ip, u64 kernel_start)
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
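/*
 * PERF_CONTEXT_USER / PERF_CONTEXT_KERNEL are the special marker values that
 * perf interleaves into a callchain to indicate which privilege level the
 * addresses that follow belong to.
 */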
void thread_stack__sample(struct thread *thread, int cpu,
			  struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 context = callchain_context(ip, kernel_start);

	chain->ips[0] = context;

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			chain->ips[i++] = context;
			last_context = context;
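	/*
	 * The chain built above looks like, for example:
	 *   PERF_CONTEXT_KERNEL, ip, ret_addr1, PERF_CONTEXT_USER, ret_addr2, ...
	 * i.e. a new context marker is emitted whenever consecutive addresses
	 * cross the user/kernel boundary.
	 */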
/*
 * Hardware sample records, created some time after the event occurred, need to
 * have subsequent addresses removed from the call chain.
 */
void thread_stack__sample_late(struct thread *thread, int cpu,
			       struct ip_callchain *chain, size_t sz,
			       u64 sample_ip, u64 kernel_start)
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 sample_context = callchain_context(sample_ip, kernel_start);
	u64 last_context, context, ip;

	/*
	 * When tracing kernel space, kernel addresses occur at the top of the
	 * call chain after the event occurred but before tracing stopped.
	 * Remove them.
	 */
	for (j = 1; j <= ts->cnt; j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context == PERF_CONTEXT_USER ||
		    (context == sample_context && ip == sample_ip))

	last_context = sample_ip; /* Use sample_ip as an invalid context */

	for (; nr < sz && j <= ts->cnt; nr++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			chain->ips[nr++] = context;
			last_context = context;

		chain->ips[0] = sample_context;
		chain->ips[1] = sample_ip;
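		/*
		 * Fallback: nothing in the synthesized stack matched the
		 * sample ip, so the chain is reduced to just the sample
		 * context marker and the sample address.
		 */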
void thread_stack__br_sample(struct thread *thread, int cpu,
			     struct branch_stack *dst, unsigned int sz)
	struct thread_stack *ts = thread__stack(thread, cpu);
	const size_t bsz = sizeof(struct branch_entry);
	struct branch_stack *src;
	struct branch_entry *be;

	src = ts->br_stack_rb;

	dst->nr = min((unsigned int)src->nr, sz);

	be = &dst->entries[0];
	nr = min(ts->br_stack_sz - ts->br_stack_pos, (unsigned int)dst->nr);
	memcpy(be, &src->entries[ts->br_stack_pos], bsz * nr);

	if (src->nr >= ts->br_stack_sz) {
		be = &dst->entries[nr];
		nr = min(ts->br_stack_pos, sz);
		memcpy(be, &src->entries[0], bsz * nr);
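		/*
		 * Two copies because the source is a ring buffer whose newest
		 * entry is at br_stack_pos: first the entries from
		 * br_stack_pos to the end (newest first), then, if the buffer
		 * has wrapped, the remaining older entries from index 0.
		 */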
/* Start of user space branch entries */
static bool us_start(struct branch_entry *be, u64 kernel_start, bool *start)
	*start = be->to && be->to < kernel_start;

/*
 * Start of branch entries after the ip fell in between 2 branches, or user
 * space branch entries.
 */
static bool ks_start(struct branch_entry *be, u64 sample_ip, u64 kernel_start,
		     bool *start, struct branch_entry *nb)
	*start = (nb && sample_ip >= be->to && sample_ip <= nb->from) ||
		 be->from < kernel_start ||
		 (be->to && be->to < kernel_start);
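/*
 * "In between 2 branches" means the sample ip lies between the target of this
 * branch (be->to) and the source of the newer branch examined just before it
 * (nb->from), i.e. execution was in that straight-line stretch of code when
 * the sample was taken, so this is where the late branch stack should start.
 */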
/*
 * Hardware sample records, created some time after the event occurred, need to
 * have subsequent addresses removed from the branch stack.
 */
void thread_stack__br_sample_late(struct thread *thread, int cpu,
				  struct branch_stack *dst, unsigned int sz,
				  u64 ip, u64 kernel_start)
	struct thread_stack *ts = thread__stack(thread, cpu);
	struct branch_entry *d, *s, *spos, *ssz;
	struct branch_stack *src;

	src = ts->br_stack_rb;

	spos = &src->entries[ts->br_stack_pos];
	ssz = &src->entries[ts->br_stack_sz];

	d = &dst->entries[0];

	if (ip < kernel_start) {
		/*
		 * User space sample: start copying branch entries when the
		 * branch is in user space.
		 */
		for (s = spos; s < ssz && nr < sz; s++) {
			if (us_start(s, kernel_start, &start)) {

		if (src->nr >= ts->br_stack_sz) {
			for (s = &src->entries[0]; s < spos && nr < sz; s++) {
				if (us_start(s, kernel_start, &start)) {

		struct branch_entry *nb = NULL;

		/*
		 * Kernel space sample: start copying branch entries when the ip
		 * falls in between 2 branches (or the branch is in user space
		 * because then the start must have been missed).
		 */
		for (s = spos; s < ssz && nr < sz; s++) {
			if (ks_start(s, ip, kernel_start, &start, nb)) {

		if (src->nr >= ts->br_stack_sz) {
			for (s = &src->entries[0]; s < spos && nr < sz; s++) {
				if (ks_start(s, ip, kernel_start, &start, nb)) {
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
			   void *data)
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));

	crp->cpr = call_path_root__new();

	crp->process = process;
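/*
 * Typical usage (illustrative sketch only; the callback and variable names
 * below are hypothetical, not part of this file): create one processor and
 * hand it to thread_stack__process() for every branch sample, e.g.:
 *
 *	static int print_cr(struct call_return *cr, u64 *parent_db_id, void *data)
 *	{
 *		(void)parent_db_id; (void)data;
 *		return printf("call %#" PRIx64 " return %#" PRIx64 "\n",
 *			      cr->call_time, cr->return_time) < 0 ? -1 : 0;
 *	}
 *
 *	struct call_return_processor *crp = call_return_processor__new(print_cr, NULL);
 *	...
 *	thread_stack__process(thread, comm, sample, from_al, to_al, ref, crp);
 *	...
 *	call_return_processor__free(crp);
 */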
void call_return_processor__free(struct call_return_processor *crp)
		call_path_root__free(crp->cpr);

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
	struct thread_stack_entry *tse;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;

	tse->branch_count = ts->branch_count;
	tse->insn_count = ts->insn_count;
	tse->cyc_count = ts->cyc_count;

	tse->no_call = no_call;
	tse->trace_end = trace_end;
	tse->non_call = false;
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
	    !ts->stack[ts->cnt - 1].non_call) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
		size_t i = ts->cnt - 1;

			if (ts->stack[i].ret_addr != ret_addr ||
			    ts->stack[i].non_call)

			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,

			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;

	} else if (sample->addr) {

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,

static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample, u64 ref)
	u64 tm = sample->time;

	/* Return to userspace, so pop all kernel addresses */
	while (thread_stack__in_kernel(ts)) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,

static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *root = &cpr->call_path;
	struct symbol *fsym = from_al->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	u64 addr = sample->addr;
	u64 tm = sample->time;

	if (ip >= ks && addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);

		/* If the stack is empty, push the userspace address */
			cp = call_path__findnew(cpr, root, tsym, addr, ks);
			return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
	} else if (thread_stack__in_kernel(ts) && ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);

	parent = ts->stack[ts->cnt - 1].cp;

	if (parent->sym == from_al->sym) {
		/*
		 * At the bottom of the stack, assume the missing 'call' was
		 * before the trace started. So, pop the current symbol and push
		 * the 'to' symbol.
		 */
			err = thread_stack__call_return(thread, ts, --ts->cnt,

			cp = call_path__findnew(cpr, root, tsym, addr, ks);

			return thread_stack__push_cp(ts, addr, tm, ref, cp,
		/*
		 * Otherwise assume the 'return' is being used as a jump (e.g.
		 * retpoline) and just push the 'to' symbol.
		 */
		cp = call_path__findnew(cpr, parent, tsym, addr, ks);

		err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
			ts->stack[ts->cnt - 1].non_call = true;

	/*
	 * Assume 'parent' has not yet returned, so push 'to', and then push and
	 * pop 'from'.
	 */
	cp = call_path__findnew(cpr, parent, tsym, addr, ks);

	err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);

	cp = call_path__findnew(cpr, cp, fsym, ip, ks);

	err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);

	return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
	struct thread_stack_entry *tse;

	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
static bool is_x86_retpoline(const char *name)
	return strstr(name, "__x86_indirect_thunk_") == name;
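/*
 * i.e. the symbol name starts with the retpoline thunk prefix, e.g.
 * "__x86_indirect_thunk_rax".
 */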
/*
 * x86 retpoline functions pollute the call graph. This function removes them.
 * This does not handle function return thunks, nor is there any improvement
 * for the handling of inline thunks or extern thunks.
 */
static int thread_stack__x86_retpoline(struct thread_stack *ts,
				       struct perf_sample *sample,
				       struct addr_location *to_al)
	struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
	struct call_path_root *cpr = ts->crp->cpr;
	struct symbol *sym = tse->cp->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp;

	if (sym && is_x86_retpoline(sym->name)) {
		/*
		 * This is an x86 retpoline function. It pollutes the call
		 * graph by showing up everywhere there is an indirect branch,
		 * but does not itself mean anything. Here the top-of-stack is
		 * removed, by decrementing the stack count, and then further
		 * down, the resulting top-of-stack is replaced with the actual
		 * target. The result is that the retpoline functions will no
		 * longer appear in the call graph. Note this only affects the
		 * call graph, since all the original branches are left
		 * unchanged.
		 */
		sym = ts->stack[ts->cnt - 2].cp->sym;
		if (sym && sym == tsym && to_al->addr != tsym->start) {
			/*
			 * Target is back to the middle of the symbol we came
			 * from so assume it is an indirect jmp and forget it
			 * altogether.
			 */
	} else if (sym && sym == tsym) {
		/*
		 * Target is back to the symbol we came from so assume it is an
		 * indirect jmp and forget it altogether.
		 */

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
				sample->addr, ts->kernel_start);

	/* Replace the top-of-stack with the actual target */
	ts->stack[ts->cnt - 1].cp = cp;
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
	struct thread_stack *ts = thread__stack(thread, sample->cpu);
	enum retpoline_state_t rstate;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__reset(thread, ts);

		ts = thread_stack__new(thread, sample->cpu, crp, true, 0);

	rstate = ts->rstate;
	if (rstate == X86_RETPOLINE_DETECTED)
		ts->rstate = X86_RETPOLINE_POSSIBLE;
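	/*
	 * A retpoline detection is consumed here: X86_RETPOLINE_DETECTED only
	 * applies to the sample immediately following the call that triggered
	 * it, after which the state drops back to X86_RETPOLINE_POSSIBLE.
	 */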
	/* Flush stack on exec */
	if (ts->comm != comm && thread__pid(thread) == thread__tid(thread)) {
		err = __thread_stack__flush(thread, ts);

	/* If the stack is empty, put the current symbol on the stack */
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);

	ts->branch_count += 1;
	ts->insn_count += sample->insn_cnt;
	ts->cyc_count += sample->cyc_cnt;
	ts->last_time = sample->time;
	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;

		if (!sample->ip || !sample->addr)

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);

		/*
		 * A call to the same symbol, but not to the start of the
		 * symbol, may be the start of an x86 retpoline.
		 */
		if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
		    from_al->sym == to_al->sym &&
		    to_al->addr != to_al->sym->start)
			ts->rstate = X86_RETPOLINE_DETECTED;
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->addr) {
			u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
						 PERF_IP_FLAG_INTERRUPT;

			if (!(sample->flags & return_from_kernel))

			/* Pop kernel stack */
			return thread_stack__pop_ks(thread, ts, sample, ref);
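			/*
			 * A 'return' with no target address occurs, e.g., when
			 * only kernel space was traced: a syscall or interrupt
			 * return is leaving for untraced user space, so
			 * everything the kernel pushed is popped here.
			 */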
		/* x86 retpoline 'return' doesn't match the stack */
		if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
		    ts->stack[ts->cnt - 1].ret_addr != sample->addr)
			return thread_stack__x86_retpoline(ts, sample, to_al);
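		/*
		 * A retpoline thunk 'returns' to the original indirect-branch
		 * target rather than to the return address that was pushed by
		 * the call into the thunk, which is why the mismatch above
		 * identifies it.
		 */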
		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	} else if (sample->flags & PERF_IP_FLAG_BRANCH &&
		   from_al->sym != to_al->sym && to_al->sym &&
		   to_al->addr == to_al->sym->start) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;

		/*
		 * The compiler might optimize a call/ret combination by making
		 * it a jmp. Make that visible by recording on the stack a
		 * branch to the start of a different symbol. Note, that means
		 * when a ret pops the stack, all jmps must be popped off first.
		 */
		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
		err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
			ts->stack[ts->cnt - 1].non_call = true;

size_t thread_stack__depth(struct thread *thread, int cpu)
	struct thread_stack *ts = thread__stack(thread, cpu);