/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 * @arr_sz: size of array if this is the first element of an array
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
	unsigned int arr_sz;
};

/*
 * Assume pid == tid == 0 identifies the idle task as defined by
 * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
 * and therefore requires a stack for each cpu.
 */
static inline bool thread_stack__per_cpu(struct thread *thread)
{
	return !(thread->tid || thread->pid_);
}

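/*
 * Grow the entry array by STACK_GROWTH entries, preserving its contents.
 * Returns 0 on success or -ENOMEM if reallocation fails.
 */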
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

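/*
 * First-time initialization of a stack slot: allocate the entry array and
 * record the kernel start address and the call/return processor (if any).
 */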
static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
			      struct call_return_processor *crp)
{
	int err;

	err = thread_stack__grow(ts);
	if (err)
		return err;

	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;

	return 0;
}

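/*
 * Return the thread's stack, allocating it on first use. The idle task gets
 * one stack per cpu, so its array is reallocated whenever a higher cpu
 * number is seen.
 */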
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts, *new_ts;
	unsigned int old_sz = ts ? ts->arr_sz : 0;
	unsigned int new_sz = 1;

	if (thread_stack__per_cpu(thread) && cpu > 0)
		new_sz = roundup_pow_of_two(cpu + 1);

	if (!ts || new_sz > old_sz) {
		new_ts = calloc(new_sz, sizeof(*ts));
		if (!new_ts)
			return NULL;
		if (ts)
			memcpy(new_ts, ts, old_sz * sizeof(*ts));
		new_ts->arr_sz = new_sz;
		zfree(&thread->ts);
		thread->ts = new_ts;
		ts = new_ts;
	}

	if (thread_stack__per_cpu(thread) && cpu > 0 &&
	    (unsigned int)cpu < ts->arr_sz)
		ts += cpu;

	if (!ts->stack &&
	    thread_stack__init(ts, thread, crp))
		return NULL;

	return ts;
}

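/*
 * Look up the idle task's per-cpu stack slot for @cpu (clamped to 0 if
 * negative); returns NULL if the slot is out of range or not yet initialized.
 */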
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread->ts;

	if (cpu < 0)
		cpu = 0;

	if (!ts || (unsigned int)cpu >= ts->arr_sz)
		return NULL;

	ts += cpu;

	if (!ts->stack)
		return NULL;

	return ts;
}

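/*
 * Return the thread_stack to use: the per-cpu slot for the idle task,
 * otherwise the thread's single stack.
 */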
static inline struct thread_stack *thread__stack(struct thread *thread,
						 int cpu)
{
	if (!thread)
		return NULL;

	if (thread_stack__per_cpu(thread))
		return thread__cpu_stack(thread, cpu);

	return thread->ts;
}

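/*
 * Push a return address. If growing the array fails, the existing entries
 * are discarded, the new entry is still recorded, and -ENOMEM is returned.
 */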
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used. Or the perf context
	 * switch in the kernel which doesn't stop and start tracing in exactly
	 * the same code path. When that happens the return address will be
	 * further down the stack. If the return address is not found at all,
	 * we assume the opposite (i.e. this is a return for a call that wasn't
	 * seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

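/*
 * Pop 'trace end' entries off the top of the stack, since calls made just
 * before the trace ended are not expected to return.
 */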
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

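/*
 * Pass the completed call/return pair for the entry at @idx to the registered
 * call/return processor, flagging pairs whose call or return was not seen.
 */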
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}

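/*
 * Empty one stack, reporting each remaining entry to the call/return
 * processor (if there is one) as a call with no seen return.
 */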
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

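/*
 * Flush every stack slot in the thread's array (the idle task has one per
 * cpu). Returns the last error encountered, if any.
 */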
int thread_stack__flush(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;
	int err = 0;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++) {
			int ret = __thread_stack__flush(thread, ts + pos);

			if (ret)
				err = ret;
		}
	}

	return err;
}

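/*
 * Update the stack from a branch sample when no call/return processor is in
 * use: push the return address on a 'call', pop on a 'return', and tidy up
 * around trace begin/end. The resulting stack is what thread_stack__sample()
 * reports.
 */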
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!thread)
		return -EINVAL;

	if (!ts) {
		ts = thread_stack__new(thread, cpu, NULL);
		if (!ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes. In that case
	 * the stack might be completely invalid. Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, a call made just before
		 * the trace ended is not expected to return, so pop that too.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
	}

	return 0;
}

void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return;

	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}
}

static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
{
	__thread_stack__flush(thread, ts);
	zfree(&ts->stack);
}

static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
{
	unsigned int arr_sz = ts->arr_sz;

	__thread_stack__free(thread, ts);
	memset(ts, 0, sizeof(*ts));
	ts->arr_sz = arr_sz;
}

void thread_stack__free(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++)
			__thread_stack__free(thread, ts + pos);
		zfree(&thread->ts);
	}
}

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

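/*
 * Synthesize a callchain for a sample from the thread stack: the sample ip
 * first, then the stacked return addresses from innermost to outermost,
 * inserting PERF_CONTEXT_USER/KERNEL markers whenever the context changes.
 */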
void thread_stack__sample(struct thread *thread, int cpu,
			  struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

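/*
 * Like thread_stack__push() but also record the call path, timestamp,
 * external reference and branch count needed to later emit a call/return.
 */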
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (!cp)
		return -ENOMEM;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;

	return 0;
}

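/*
 * Pop entries down to the one matching @ret_addr (or, for a single-entry
 * stack, matching @sym), emitting a call/return for each. Entries skipped
 * over are flagged as having no seen return. Returns 1 if no match is found,
 * in which case the stack is left untouched.
 */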
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

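/*
 * Seed an empty stack with the current symbol so that subsequent call/return
 * pairs have a parent call path. The entry is marked no_call because the
 * call that led here was not seen.
 */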
static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
				     true, false);
}

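/*
 * Handle a 'return' for which no matching 'call' was seen. Kernel entries
 * are popped when returning to userspace, then a synthetic entry (marked
 * no_call) is pushed and popped so the return is still reported.
 */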
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true, false);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true, false);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}

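/*
 * On 'trace begin', if the entry on top of the stack was pushed when the
 * trace ended, report it as complete and pop it.
 */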
static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}

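/*
 * Process a branch sample with a call/return processor attached: maintain
 * the call stack from call/return/trace-begin/trace-end flags, superseding
 * thread_stack__event(), and emit a call/return for every function popped
 * off the stack.
 */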
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread__stack(thread, sample->cpu);
	int err = 0;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__reset(thread, ts);
		ts = NULL;
	}

	if (!ts) {
		ts = thread_stack__new(thread, sample->cpu, crp);
		if (!ts)
			return -ENOMEM;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}

size_t thread_stack__depth(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return 0;
	return ts->cnt;
}