perf arm64: Inject missing frames when using 'perf record --call-graph=fp'
[linux-2.6-block.git] tools/perf/util/machine.c

// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"
#include "arm64-frame-pointer-unwind-support.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return machine->vmlinux_map->dso;
}

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	maps__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point, whatever threads were
		 * left in the dead lists better have a reference count taken
		 * by whoever is using them. When those references are dropped
		 * and the count finally hits zero, thread__put() will see that
		 * it's not in the dead threads list and will not try to remove
		 * it from there, just calling thread__delete() straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

struct machine *machines__find_guest(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__find(machines, pid);

	if (!machine)
		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->maps)
		leader->maps = maps__new(machine);

	if (!leader->maps)
		goto out_err;

	if (th->maps == leader->maps)
		goto out_put;

	if (th->maps) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);
	}

	th->maps = maps__get(leader->maps);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

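/*
 * Illustrative scenario for the fixup above (explanatory comment, not
 * original to this file): a thread can be created from an event that only
 * carries a tid, leaving pid_ == -1, e.g. the guest thread created in
 * machine__init() via machine__findnew_thread(machine, -1, pid). When a
 * later event supplies the real pid, machine__update_thread_pid() promotes
 * the thread and makes it share its group leader's maps.
 */
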
/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

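/*
 * Illustrative note on the cache above (not original to this file): events
 * in a perf.data file tend to arrive in runs for the same tid, e.g. a burst
 * of samples from one task, so after the first lookup the remaining ones
 * are satisfied by a single pointer compare on last_match instead of an
 * rbtree walk. The wrappers only consult the cache under perf_singlethreaded,
 * because a bare shared pointer would race once multiple reader threads use
 * these paths.
 */
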
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize maps separately after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

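/*
 * Example usage (hypothetical caller, for illustration only): both lookup
 * functions above return the thread with its refcount bumped, so a typical
 * event handler pairs them with thread__put():
 *
 *	struct thread *th = machine__findnew_thread(machine, pid, tid);
 *
 *	if (th != NULL) {
 *		... resolve maps/symbols via th ...
 *		thread__put(th);
 *	}
 */
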
/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
struct thread *machine__idle_thread(struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);

	return thread;
}

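/*
 * Practical consequence (illustrative, not original to this file): idle
 * samples from every CPU all resolve to this single pid 0/tid 0 thread, so
 * a tool that needs per-CPU idle state has to key its own bookkeeping by
 * sample->cpu rather than by the returned thread, as the workarounds cited
 * above do.
 */
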
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
		dso->kernel = DSO_SPACE__KERNEL;
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
					    union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux_output_hw_id(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);

	if (!map) {
		struct dso *dso = dso__new(event->ksymbol.name);

		if (dso) {
			dso->kernel = DSO_SPACE__KERNEL;
			map = map__new2(0, dso);
			dso__put(dso);
		}

		if (!dso || !map)
			return -ENOMEM;

		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
			map->dso->binary_type = DSO_BINARY_TYPE__OOL;
			map->dso->data.file_size = event->ksymbol.len;
			dso__set_loaded(map->dso);
		}

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		maps__insert(&machine->kmaps, map);
		map__put(map);
		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
			dso__set_long_name(dso, "", false);
		}
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = maps__find(&machine->kmaps, event->ksymbol.addr);
	if (!map)
		return 0;

	if (map != machine->vmlinux_map)
		maps__remove(&machine->kmaps, map);
	else {
		sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
		if (sym)
			dso__delete_symbol(map->dso, sym);
	}

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, machine, stdout);

	if (!event->text_poke.new_len)
		return 0;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		return 0;
	}

	if (map && map->dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(map->dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}

	return 0;
}

static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	maps__insert(&machine->kmaps, map);

	/* Put the map here because maps__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

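/*
 * Note on the refcount convention above (explanatory, derived from the
 * code): both references taken locally are dropped before returning, so the
 * map returned by machine__addnew_module_map() is kept alive only by the
 * reference that machine->kmaps holds. Callers such as
 * machine__create_module() therefore use it immediately (e.g. to set
 * map->end) rather than stashing it without taking their own map__get().
 */
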
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_SPACE__KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_SPACE__KERNEL_GUEST);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

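/*
 * For example (illustrative values): on a typical x86_64 system,
 * /proc/kallsyms contains a line like
 *
 *	ffffffff81000000 T _text
 *
 * so the loop matches "_text" first, giving *start = 0xffffffff81000000
 * and *symbol_name = "_text", while the follow-up "_etext" lookup supplies
 * *end. Under kptr_restrict the file is treated as restricted and the
 * function returns early, leaving the caller's defaults untouched.
 */
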
int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	maps__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so look up all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct maps *kmaps = &machine->kmaps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_entry(kmaps, map) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = maps__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

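/*
 * Worked example (illustrative): with the constants above, the trampoline
 * page for cpu 0 is mapped at
 *
 *	0xfffffe0000000000 + 0 * 0x2c000 + 0x6000 = 0xfffffe0000006000
 *
 * and for cpu 1 at 0xfffffe0000000000 + 0x2c000 + 0x6000 =
 * 0xfffffe0000032000, i.e. one page per CPU, each backed by the same
 * vmlinux offset (pgoff) found via find_entry_trampoline().
 */
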
int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	maps__insert(&machine->kmaps, machine->vmlinux_map);
	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		maps__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so we
	 * need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

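/*
 * Note on the symtab_type++ trick above (explanatory): it relies on the
 * dso binary-type enum placing each compressed kmod type right after its
 * uncompressed counterpart (e.g. ..._SYSTEM_PATH_KMODULE followed by
 * ..._SYSTEM_PATH_KMODULE_COMP), so the increment flips a module dso to
 * its compressed flavour once the on-disk name shows a compression suffix.
 */
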
static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end = end;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	maps__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	maps__insert(&machine->kmaps, map);
	map__put(map);
}

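/*
 * Explanatory note: the remove/insert dance above is needed because
 * machine->kmaps keeps maps sorted by address; updating the kernel map's
 * start/end in place would leave it filed under its old range, so it is
 * taken out, resized via machine__set_kernel_mmap(), and reinserted, with
 * a temporary map__get()/map__put() pair keeping it alive in between.
 */
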
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume it's the last in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(xm->name);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	struct dso *kernel = machine__kernel_dso(machine);

	if (kernel == NULL)
		return -1;

	return machine__create_extra_kernel_map(machine, kernel, xm);
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      struct extra_kernel_map *xm,
					      struct build_id *bid)
{
	struct map *map;
	enum dso_space_type dso_space;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		dso_space = DSO_SPACE__KERNEL;
	else
		dso_space = DSO_SPACE__KERNEL_GUEST;

	is_kernel_mmap = memcmp(xm->name, machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (xm->name[0] == '/' ||
	    (!is_kernel_mmap && xm->name[0] == '[')) {
		map = machine__addnew_module_map(machine, xm->start,
						 xm->name);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + xm->end - xm->start;

		if (build_id__is_defined(bid))
			dso__set_build_id(map->dso, bid);

	} else if (is_kernel_mmap) {
		const char *symbol_name = (xm->name + strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = dso_space;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, xm->start, xm->end);

		if (build_id__is_defined(bid))
			dso__set_build_id(kernel, bid);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (xm->pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							xm->pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
		return machine__process_extra_kernel_map(machine, xm);
	}
	return 0;
out_problem:
	return -1;
}

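/*
 * Example of the dispatch above (illustrative names): with mmap_name
 * "[kernel.kallsyms]", an event called "[kernel.kallsyms]_text" takes the
 * is_kernel_mmap branch, leaving symbol_name pointing at "_text"; a path
 * such as "/lib/modules/<version>/kernel/fs/ext4/ext4.ko" takes the module
 * branch; and on x86_64 an entry-trampoline name is forwarded to
 * machine__process_extra_kernel_map().
 */
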
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct build_id __bid, *bid = NULL;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		bid = &__bid;
		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
	}

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		struct extra_kernel_map xm = {
			.start = event->mmap2.start,
			.end = event->mmap2.start + event->mmap2.len,
			.pgoff = event->mmap2.pgoff,
		};

		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       &dso_id, event->mmap2.prot,
		       event->mmap2.flags, bid,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

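/*
 * Behavioural note (hedged, based on the flag's semantics): when the kernel
 * sets PERF_RECORD_MISC_MMAP_BUILD_ID, e.g. for records produced with
 * 'perf record --buildid-mmap', the event itself carries the DSO's build
 * id, so the mapping can be matched to the exact binary even if the file
 * on disk has since been replaced, without a post-processing build-id pass.
 */
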
162f0bef 1823int machine__process_mmap_event(struct machine *machine, union perf_event *event,
473398a2 1824 struct perf_sample *sample)
b0a7d1a0 1825{
b0a7d1a0
ACM
1826 struct thread *thread;
1827 struct map *map;
0f476f2b 1828 u32 prot = 0;
b0a7d1a0
ACM
1829 int ret = 0;
1830
1831 if (dump_trace)
1832 perf_event__fprintf_mmap(event, stdout);
1833
473398a2
ACM
1834 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1835 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
031f112f
JO
1836 struct extra_kernel_map xm = {
1837 .start = event->mmap.start,
1838 .end = event->mmap.start + event->mmap.len,
1839 .pgoff = event->mmap.pgoff,
1840 };
1841
1842 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1ca6e802 1843 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
b0a7d1a0
ACM
1844 if (ret < 0)
1845 goto out_problem;
1846 return 0;
1847 }
1848
314add6b 1849 thread = machine__findnew_thread(machine, event->mmap.pid,
11c9abf2 1850 event->mmap.tid);
b0a7d1a0
ACM
1851 if (thread == NULL)
1852 goto out_problem;
bad40917 1853
3183f8ca 1854 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
0f476f2b 1855 prot = PROT_EXEC;
bad40917 1856
2a03068c 1857 map = map__new(machine, event->mmap.start,
b0a7d1a0 1858 event->mmap.len, event->mmap.pgoff,
1ca6e802 1859 NULL, prot, 0, NULL, event->mmap.filename, thread);
bad40917 1860
b0a7d1a0 1861 if (map == NULL)
b91fc39f 1862 goto out_problem_map;
b0a7d1a0 1863
8132a2a8
HK
1864 ret = thread__insert_map(thread, map);
1865 if (ret)
1866 goto out_problem_insert;
1867
b91fc39f 1868 thread__put(thread);
84c2cafa 1869 map__put(map);
b0a7d1a0
ACM
1870 return 0;
1871
8132a2a8
HK
1872out_problem_insert:
1873 map__put(map);
b91fc39f
ACM
1874out_problem_map:
1875 thread__put(thread);
b0a7d1a0
ACM
1876out_problem:
1877 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1878 return 0;
1879}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference.
	 * If this was the last reference, the thread__delete destructor is
	 * called and it removes the thread from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_CGROUP:
		ret = machine__process_cgroup_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	case PERF_RECORD_TEXT_POKE:
		ret = machine__process_text_poke(machine, event, sample); break;
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		ret = machine__process_aux_output_hw_id_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
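
/*
 * Illustrative sketch (not part of the original file): how a session loop
 * might hand one decoded event to the dispatcher above. The helper name and
 * the error reporting are hypothetical; machine, event and sample are assumed
 * to come from an already-parsed perf.data stream.
 */
static int example__deliver_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample)
{
	int err = machine__process_event(machine, event, sample);

	if (err)
		pr_debug("failed to process event type %u\n",
			 event->header.type);
	return err;
}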

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries,
	 * so we have to try each cpumode in turn until we find a match;
	 * otherwise the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = 0;
	ams->data_page_size = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr, u64 daddr_page_size)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = phys_addr;
	ams->data_page_size = daddr_page_size;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr,
			 sample->data_page_size);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct map_symbol ms;
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	al.srcline = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root, forgetting its
			 * callees.
			 */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = al.maps;
	ms.map = al.map;
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	return callchain_cursor_append(cursor, ip, &ms,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}
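
/*
 * Worked example (illustrative, not from the original source): a raw
 * callchain such as
 *
 *	{ PERF_CONTEXT_KERNEL, k1, k0, PERF_CONTEXT_USER, u1, u0 }
 *
 * is consumed by repeated add_callchain_ip() calls: the two context markers
 * only switch *cpumode and are not appended to the cursor, so the cursor
 * ends up with k1 and k0 resolved as kernel addresses, then u1 and u0
 * resolved as user addresses.
 */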

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
		bi[i].flags = entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
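
/*
 * Worked example for remove_loops() (illustrative, not from the original
 * source): given branch "from" addresses A B C A B C D, the second A hashes
 * to the same slot as the first, the windows l[0..2] and l[3..5] compare
 * equal, and one iteration of length off = 3 is folded away:
 *
 *	A B C A B C D  ->  A B C D
 *
 * save_iterations() credits the removed iteration's cycle counts to the
 * iterations entry of the following branch (D), so the collapsed loop is
 * still accounted for when the callchain is appended.
 */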

static int lbr_callchain_add_kernel_ip(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct perf_sample *sample,
				       struct symbol **parent,
				       struct addr_location *root_al,
				       u64 branch_from,
				       bool callee, int end)
{
	struct ip_callchain *chain = sample->callchain;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int err, i;

	if (callee) {
		for (i = 0; i < end + 1; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, chain->ips[i],
					       false, NULL, NULL, branch_from);
			if (err)
				return err;
		}
		return 0;
	}

	for (i = end; i >= 0; i--) {
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, chain->ips[i],
				       false, NULL, NULL, branch_from);
		if (err)
			return err;
	}

	return 0;
}

static void save_lbr_cursor_node(struct thread *thread,
				 struct callchain_cursor *cursor,
				 int idx)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;

	if (!lbr_stitch)
		return;

	if (cursor->pos == cursor->nr) {
		lbr_stitch->prev_lbr_cursor[idx].valid = false;
		return;
	}

	if (!cursor->curr)
		cursor->curr = cursor->first;
	else
		cursor->curr = cursor->curr->next;
	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
	       sizeof(struct callchain_cursor_node));

	lbr_stitch->prev_lbr_cursor[idx].valid = true;
	cursor->pos++;
}

static int lbr_callchain_add_lbr_ip(struct thread *thread,
				    struct callchain_cursor *cursor,
				    struct perf_sample *sample,
				    struct symbol **parent,
				    struct addr_location *root_al,
				    u64 *branch_from,
				    bool callee)
{
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int lbr_nr = lbr_stack->nr;
	struct branch_flags *flags;
	int err, i;
	u64 ip;

	/*
	 * The curr and pos fields are not used in a writing session. They are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed, so they can be used here to track the current cursor node.
	 */
	if (thread->lbr_stitch) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add LBR ip from the first entry's "to" address. */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;

		/*
		 * The number of cursor nodes has increased, so move the
		 * current cursor node along. There is no need to save the
		 * cursor node for entry 0, though: it is impossible to stitch
		 * the whole LBR stack of the previous sample.
		 */
		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add LBR ips from each entry's "from" address, one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add LBR ips from each entry's "from" address, one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	/* Add LBR ip from the first entry's "to" address. */
	ip = entries[0].to;
	flags = &entries[0].flags;
	*branch_from = entries[0].from;
	err = add_callchain_ip(thread, cursor, parent,
			       root_al, &cpumode, ip,
			       true, flags, NULL,
			       *branch_from);
	if (err)
		return err;

	return 0;
}
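
/*
 * Worked example (illustrative, not from the original source): with two LBR
 * entries e0 = {from A, to B} (newest) and e1 = {from C, to D}, the
 * callee-order path above appends B, A, C - the sampled location's return
 * target first, then the "from" side of each entry from newest to oldest -
 * while the caller-order path appends the reverse: C, A, B.
 */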

static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}

static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}

static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct branch_stack *cur_stack = cur->branch_stack;
	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
	struct branch_stack *prev_stack = prev->branch_stack;
	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	int i, j, nr_identical_branches = 0;
	struct stitch_list *stitch_node;
	u64 cur_base, distance;

	if (!cur_stack || !prev_stack)
		return false;

	/* Find the physical index of the base-of-stack for the current sample. */
	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;

	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
						     (max_lbr + prev_stack->hw_idx - cur_base);
	/* The previous sample has a shorter stack. Nothing can be stitched. */
	if (distance + 1 > prev_stack->nr)
		return false;

	/*
	 * Check if there are identical LBRs between the two samples.
	 * Identical LBRs must have the same from, to and flags values. Also,
	 * they have to be saved in the same LBR registers (same physical
	 * index).
	 *
	 * Start from the base-of-stack of the current sample.
	 */
	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
		if ((prev_entries[i].from != cur_entries[j].from) ||
		    (prev_entries[i].to != cur_entries[j].to) ||
		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
			break;
		nr_identical_branches++;
	}

	if (!nr_identical_branches)
		return false;

	/*
	 * Save the LBRs between the base-of-stack of the previous sample
	 * and the base-of-stack of the current sample into lbr_stitch->lists.
	 * These LBRs will be stitched later.
	 */
	for (i = prev_stack->nr - 1; i > (int)distance; i--) {

		if (!lbr_stitch->prev_lbr_cursor[i].valid)
			continue;

		stitch_node = get_stitch_node(thread);
		if (!stitch_node)
			return false;

		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
		       sizeof(struct callchain_cursor_node));

		if (callee)
			list_add(&stitch_node->node, &lbr_stitch->lists);
		else
			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
	}

	return true;
}
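
/*
 * Worked example for has_stitched_lbr() (illustrative numbers, not from the
 * original source): with max_lbr = 32, a current stack of nr = 8 entries and
 * hw_idx = 5 has its base-of-stack at physical index
 * cur_base = 32 - 8 + 5 + 1 = 30. If the previous sample had hw_idx = 3, the
 * distance wraps around: 32 + 3 - 30 = 5, so prev_entries[5] is compared
 * against the current base-of-stack entry cur_entries[7], and only the
 * deeper prev_entries[i] with i > 5 are candidates for stitching.
 */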

static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
	if (thread->lbr_stitch)
		return true;

	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
	if (!thread->lbr_stitch)
		goto err;

	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
	if (!thread->lbr_stitch->prev_lbr_cursor)
		goto free_lbr_stitch;

	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);

	return true;

free_lbr_stitch:
	zfree(&thread->lbr_stitch);
err:
	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
	thread->lbr_stitch_enable = false;
	return false;
}

/*
 * Resolve an LBR callstack chain sample.
 * Return:
 * 1 on success, i.e. LBR callchain information was obtained
 * 0 when no LBR callchain information is available, so fp should be tried
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack,
					unsigned int max_lbr)
{
	bool callee = (callchain_param.order == ORDER_CALLEE);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	struct lbr_stitch *lbr_stitch;
	bool stitched_lbr = false;
	u64 branch_from = 0;
	int err;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i == chain_nr)
		return 0;

	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
		lbr_stitch = thread->lbr_stitch;

		stitched_lbr = has_stitched_lbr(thread, sample,
						&lbr_stitch->prev_sample,
						max_lbr, callee);

		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
			list_replace_init(&lbr_stitch->lists,
					  &lbr_stitch->free_lists);
		}
		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
	}

	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}

	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ? err : 0;
}

static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}

static u64 get_leaf_frame_caller(struct perf_sample *sample,
				 struct thread *thread, int usr_idx)
{
	if (machine__normalized_is(thread->maps->machine, "arm64"))
		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
	else
		return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries, usr_idx;
	int skip_idx = -1;
	int first_call = 0;
	u64 leaf_frame_caller;

	if (chain)
		chain_nr = chain->nr;

	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		/*
		 * PERF_CONTEXT_USER allows us to locate where the user stack
		 * ends. Depending on callchain_param.order and the position of
		 * PERF_CONTEXT_USER, the index differs so that the missing
		 * frame is added at the right place.
		 */
		usr_idx = callchain_param.order == ORDER_CALLEE ? j - 2 : j - 1;

		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {

			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);

			/*
			 * Check that leaf_frame_caller != ip so that we do
			 * not add the same value twice.
			 */
			if (leaf_frame_caller && leaf_frame_caller != ip) {

				err = add_callchain_ip(thread, cursor, parent,
						       root_al, &cpumode, leaf_frame_caller,
						       false, NULL, NULL, 0);
				if (err)
					return (err < 0) ? err : 0;
			}
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		struct map_symbol ilist_ms = {
			.maps = ms->maps,
			.map = map,
			.sym = ilist->symbol,
		};
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
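
/*
 * Illustrative sketch (not part of the original file): how a consumer might
 * resolve one sample's callchain with the function above. The helper name,
 * the literal stack-depth cap of 127 and the elided addr_location setup are
 * all hypothetical; only the thread__resolve_callchain() signature is taken
 * from this file.
 */
static int example__resolve_one_sample(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct addr_location *root_al)
{
	struct symbol *parent = NULL;

	return thread__resolve_callchain(thread, cursor, evsel, sample,
					 &parent, root_al, 127);
}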

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}
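
/*
 * Illustrative sketch (not part of the original file): counting every thread
 * on a machine with the iterator above. The callback name and counting logic
 * are hypothetical; only the iterator signature is taken from this file.
 * A caller would do:
 *
 *	int nr = 0;
 *
 *	machine__for_each_thread(machine, example__count_thread, &nr);
 */
static int example__count_thread(struct thread *thread __maybe_unused, void *priv)
{
	int *nr = priv;

	(*nr)++;
	return 0;	/* a non-zero return would stop the iteration */
}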

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);

	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < nr_cpus; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= nr_cpus) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
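
/*
 * Illustrative sketch (not part of the original file): recording that
 * pid/tid 1234 is currently running on CPU 2 and reading the mapping back.
 * The helper name and the literal values are hypothetical; the two accessors
 * above lazily allocate and consult the per-CPU tid table.
 */
static void example__track_current_tid(struct machine *machine)
{
	if (machine__set_current_tid(machine, 2, 1234, 1234) == 0)
		pr_debug("cpu 2 is running tid %d\n",
			 machine__get_current_tid(machine, 2));
}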

/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() or
 * machine__normalized_is() if a normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

bool machine__normalized_is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__arch(machine->env), arch);
}
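
/*
 * Example (illustrative, not from the original source): on an aarch64 host
 * the raw uname arch is "aarch64", so machine__is(machine, "aarch64") is
 * true, while the normalized name is "arm64" and
 * machine__normalized_is(machine, "arm64") is the appropriate check, as used
 * by get_leaf_frame_caller() above.
 */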

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}
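
/*
 * Worked example (illustrative, not from the original source): with
 * kernel_start left at 1ULL << 63, an ip such as 0xffffffff81000000 lies at
 * or above kernel_start and is treated as a kernel address, while a user ip
 * such as 0x00007f0000001000 falls below it. On architectures other than
 * x86_64, kernel_start is lowered to the actual start of the kernel map once
 * that map is loaded.
 */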

u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}
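
/*
 * Example (illustrative, not from the original source): on a machine with a
 * single address space, a sample may carry cpumode == PERF_RECORD_MISC_USER
 * while its data address points into the kernel;
 * machine__addr_cpumode(machine, PERF_RECORD_MISC_USER, addr) then returns
 * PERF_RECORD_MISC_KERNEL, so the address is resolved against the kernel
 * maps rather than the user ones.
 */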

struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}

int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
	struct dso *pos;
	int err = 0;

	list_for_each_entry(pos, &machine->dsos.head, node) {
		if (fn(pos, machine, priv))
			err = -1;
	}
	return err;
}
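
/*
 * Illustrative sketch (not part of the original file): a machine__dso_t
 * callback that prints the short name of every dso on the machine. The
 * callback name and the printing are hypothetical; only the iterator above
 * is from this file. A caller would do:
 *
 *	machine__for_each_dso(machine, example__print_dso, NULL);
 */
static int example__print_dso(struct dso *dso,
			      struct machine *machine __maybe_unused,
			      void *priv __maybe_unused)
{
	printf("%s\n", dso->short_name);
	return 0;	/* a non-zero return is folded into an overall error */
}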