perf symbols: Use map->prot in place of type==MAP__FUNCTION
[linux-2.6-block.git] / tools / perf / util / machine.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
76b31a29 2#include <dirent.h>
a43783ae 3#include <errno.h>
fd20e811 4#include <inttypes.h>
1eae20c1 5#include <regex.h>
3f067dca 6#include "callchain.h"
b0a7d1a0
ACM
7#include "debug.h"
8#include "event.h"
3f067dca
ACM
9#include "evsel.h"
10#include "hist.h"
9d2f8e22
ACM
11#include "machine.h"
12#include "map.h"
3f067dca 13#include "sort.h"
69d2591a 14#include "strlist.h"
9d2f8e22 15#include "thread.h"
d027b640 16#include "vdso.h"
9d2f8e22 17#include <stdbool.h>
7a8ef4c4
ACM
18#include <sys/types.h>
19#include <sys/stat.h>
20#include <unistd.h>
3f067dca 21#include "unwind.h"
8b7bad58 22#include "linux/hash.h"
f3b3614a 23#include "asm/bug.h"
9d2f8e22 24
3d689ed6
ACM
25#include "sane_ctype.h"
26#include <symbol/kallsyms.h>
0f476f2b 27#include <linux/mman.h>
3d689ed6 28
b91fc39f
ACM
29static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
30
e167f995
ACM
/* Initialize a dsos container: empty linked list, empty rb-tree, fresh rwsem. */
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}
37
91e467bc
KL
/*
 * Reset every per-machine thread hash bucket: empty rb-tree of live threads,
 * empty dead list, cleared last-match cache, zero count, fresh lock.
 */
static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}
51
8c7f1bb3
JO
52static int machine__set_mmap_name(struct machine *machine)
53{
c192524e
JO
54 if (machine__is_host(machine))
55 machine->mmap_name = strdup("[kernel.kallsyms]");
56 else if (machine__is_default_guest(machine))
57 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
58 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
59 machine->pid) < 0)
60 machine->mmap_name = NULL;
8c7f1bb3
JO
61
62 return machine->mmap_name ? 0 : -ENOMEM;
63}
64
69d2591a
ACM
65int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
66{
81f981d7
JO
67 int err = -ENOMEM;
68
93b0ba3c 69 memset(machine, 0, sizeof(*machine));
11246c70 70 map_groups__init(&machine->kmaps, machine);
69d2591a 71 RB_CLEAR_NODE(&machine->rb_node);
3d39ac53 72 dsos__init(&machine->dsos);
69d2591a 73
91e467bc 74 machine__threads_init(machine);
69d2591a 75
d027b640 76 machine->vdso_info = NULL;
4cde998d 77 machine->env = NULL;
d027b640 78
69d2591a
ACM
79 machine->pid = pid;
80
14bd6d20 81 machine->id_hdr_size = 0;
caf8a0d0 82 machine->kptr_restrict_warned = false;
cfe1c414 83 machine->comm_exec = false;
fbe2af45 84 machine->kernel_start = 0;
611a5ce8 85
cc1121ab
MH
86 memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));
87
69d2591a
ACM
88 machine->root_dir = strdup(root_dir);
89 if (machine->root_dir == NULL)
90 return -ENOMEM;
91
8c7f1bb3
JO
92 if (machine__set_mmap_name(machine))
93 goto out;
94
69d2591a 95 if (pid != HOST_KERNEL_ID) {
1fcb8768 96 struct thread *thread = machine__findnew_thread(machine, -1,
314add6b 97 pid);
69d2591a
ACM
98 char comm[64];
99
100 if (thread == NULL)
81f981d7 101 goto out;
69d2591a
ACM
102
103 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
162f0bef 104 thread__set_comm(thread, comm, 0);
b91fc39f 105 thread__put(thread);
69d2591a
ACM
106 }
107
b9d266ba 108 machine->current_tid = NULL;
81f981d7 109 err = 0;
b9d266ba 110
81f981d7 111out:
8c7f1bb3 112 if (err) {
81f981d7 113 zfree(&machine->root_dir);
8c7f1bb3
JO
114 zfree(&machine->mmap_name);
115 }
69d2591a
ACM
116 return 0;
117}
118
8fb598e5
DA
119struct machine *machine__new_host(void)
120{
121 struct machine *machine = malloc(sizeof(*machine));
122
123 if (machine != NULL) {
124 machine__init(machine, "", HOST_KERNEL_ID);
125
126 if (machine__create_kernel_maps(machine) < 0)
127 goto out_delete;
128 }
129
130 return machine;
131out_delete:
132 free(machine);
133 return NULL;
134}
135
7d132caa
ACM
/*
 * Create a host machine whose kernel symbols come from /proc/kallsyms
 * rather than vmlinux/kcore. Returns NULL if the host machine cannot be
 * created or kallsyms loading finds no symbols.
 */
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
152
/*
 * Drop every dso from the container under the write lock: unlink from the
 * rb-tree and list, then drop the list's reference. DSOs still referenced
 * elsewhere survive until their last dso__put().
 */
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;	/* no longer indexed by this rb-tree */
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}
e8807844 168
d3a7c489
ACM
/* Tear down a dsos container: purge all entries, then destroy its lock. */
static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}
174
3f067dca
ACM
/*
 * Remove every thread from every hash bucket. The next node is fetched
 * before removal because __machine__remove_thread() erases the current
 * one from the rb-tree. lock=false: we already hold the bucket write lock.
 */
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}
193
69d2591a
ACM
/*
 * Release everything a machine owns (kernel maps, kmaps, dsos, vdso info,
 * name strings, per-bucket locks) but not the struct machine itself —
 * that is machine__delete()'s job. NULL-safe.
 */
void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		exit_rwsem(&threads->lock);
	}
}
214
/* Tear down and free a heap-allocated machine. NULL-safe. */
void machine__delete(struct machine *machine)
{
	if (machine == NULL)
		return;

	machine__exit(machine);
	free(machine);
}
222
876650e6
ACM
/* Initialize the host machine and an empty guest rb-tree. */
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}
228
/* Tear down the host machine. Guest machines are not yet released here. */
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
234
/*
 * Allocate, initialize and insert a new guest machine keyed by pid into
 * the guests rb-tree. Returns the new machine or NULL on allocation/init
 * failure. Duplicate pids are not checked here — equal keys go right.
 */
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
264
cfe1c414
AH
265void machines__set_comm_exec(struct machines *machines, bool comm_exec)
266{
267 struct rb_node *nd;
268
269 machines->host.comm_exec = comm_exec;
270
271 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
272 struct machine *machine = rb_entry(nd, struct machine, rb_node);
273
274 machine->comm_exec = comm_exec;
275 }
276}
277
/*
 * Look a machine up by pid. HOST_KERNEL_ID short-circuits to the host.
 * If the exact pid is absent, fall back to a machine with pid 0 seen
 * along the descent path (the "default guest"), else NULL.
 */
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
303
/*
 * Find a machine by pid, creating it if missing. For guest pids with a
 * configured guestmount, the guest's root dir must be readable; failures
 * are reported once per path (dedup'd via a static strlist) and yield NULL.
 */
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
337
876650e6
ACM
338void machines__process_guests(struct machines *machines,
339 machine__process_t process, void *data)
69d2591a
ACM
340{
341 struct rb_node *nd;
342
876650e6 343 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
69d2591a
ACM
344 struct machine *pos = rb_entry(nd, struct machine, rb_node);
345 process(pos, data);
346 }
347}
348
876650e6 349void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
69d2591a
ACM
350{
351 struct rb_node *node;
352 struct machine *machine;
353
876650e6
ACM
354 machines->host.id_hdr_size = id_hdr_size;
355
356 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
69d2591a
ACM
357 machine = rb_entry(node, struct machine, rb_node);
358 machine->id_hdr_size = id_hdr_size;
359 }
360
361 return;
362}
363
29ce3612
AH
364static void machine__update_thread_pid(struct machine *machine,
365 struct thread *th, pid_t pid)
366{
367 struct thread *leader;
368
369 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
370 return;
371
372 th->pid_ = pid;
373
374 if (th->pid_ == th->tid)
375 return;
376
b91fc39f 377 leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
29ce3612
AH
378 if (!leader)
379 goto out_err;
380
381 if (!leader->mg)
11246c70 382 leader->mg = map_groups__new(machine);
29ce3612
AH
383
384 if (!leader->mg)
385 goto out_err;
386
387 if (th->mg == leader->mg)
388 return;
389
390 if (th->mg) {
391 /*
392 * Maps are created from MMAP events which provide the pid and
393 * tid. Consequently there never should be any maps on a thread
394 * with an unknown pid. Just print an error if there are.
395 */
396 if (!map_groups__empty(th->mg))
397 pr_err("Discarding thread maps for %d:%d\n",
398 th->pid_, th->tid);
8e160b2e 399 map_groups__put(th->mg);
29ce3612
AH
400 }
401
402 th->mg = map_groups__get(leader->mg);
abd82868
ACM
403out_put:
404 thread__put(leader);
29ce3612 405 return;
29ce3612
AH
406out_err:
407 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
abd82868 408 goto out_put;
29ce3612
AH
409}
410
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		/* Cache miss: invalidate before walking the tree. */
		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}
487
b91fc39f
ACM
/* Lockless findnew (create=true); caller must hold the bucket write lock. */
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}
492
314add6b
AH
/*
 * Find or create a thread, taking the bucket write lock. The returned
 * thread holds a reference the caller must drop with thread__put().
 */
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}
504
d75e6097
JO
/*
 * Lookup-only variant (create=false) under the read lock. Returns NULL if
 * the thread is unknown; otherwise a referenced thread (caller puts it).
 */
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}
b0a7d1a0 516
cfe1c414
AH
517struct comm *machine__thread_exec_comm(struct machine *machine,
518 struct thread *thread)
519{
520 if (machine->comm_exec)
521 return thread__exec_comm(thread);
522 else
523 return thread__comm(thread);
524}
525
162f0bef
FW
/*
 * Handle PERF_RECORD_COMM: update (or create) the thread and set its comm.
 * An exec-flavoured comm also marks the machine as comm_exec capable.
 * Returns 0 on success, -1 if the thread lookup or comm update failed.
 */
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
551
f3b3614a
HB
552int machine__process_namespaces_event(struct machine *machine __maybe_unused,
553 union perf_event *event,
554 struct perf_sample *sample __maybe_unused)
555{
556 struct thread *thread = machine__findnew_thread(machine,
557 event->namespaces.pid,
558 event->namespaces.tid);
559 int err = 0;
560
561 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
562 "\nWARNING: kernel seems to support more namespaces than perf"
563 " tool.\nTry updating the perf tool..\n\n");
564
565 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
566 "\nWARNING: perf tool seems to support more namespaces than"
567 " the kernel.\nTry updating the kernel..\n\n");
568
569 if (dump_trace)
570 perf_event__fprintf_namespaces(event, stdout);
571
572 if (thread == NULL ||
573 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
574 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
575 err = -1;
576 }
577
578 thread__put(thread);
579
580 return err;
581}
582
/* Handle PERF_RECORD_LOST: just report the lost-event count. Always 0. */
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
590
c4937a91
KL
/* Handle PERF_RECORD_LOST_SAMPLES: report the lost-sample count. Always 0. */
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}
598
9f2de315
ACM
/*
 * Find or create the dso for a kernel module under the dsos write lock.
 * Returns a referenced dso (caller must dso__put()) or NULL on failure.
 */
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		/* NOTE(review): strdup() result is not NULL-checked here. */
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}
622
4a96f7a0
AH
/* Handle PERF_RECORD_AUX: optionally dump it; no state change. Always 0. */
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}
630
0ad21f68
AH
/* Handle PERF_RECORD_ITRACE_START: optionally dump it. Always 0. */
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}
638
0286039f
AH
/* Handle PERF_RECORD_SWITCH{,_CPU_WIDE}: optionally dump it. Always 0. */
int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
646
c03d5184
WN
647static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
648{
649 const char *dup_filename;
650
651 if (!filename || !dso || !dso->long_name)
652 return;
653 if (dso->long_name[0] != '[')
654 return;
655 if (!strchr(filename, '/'))
656 return;
657
658 dup_filename = strdup(filename);
659 if (!dup_filename)
660 return;
661
5dcf16df 662 dso__set_long_name(dso, dup_filename, true);
c03d5184
WN
663}
664
9f2de315
ACM
/*
 * Find or create the kernel map for a module starting at "start".
 * Ownership: the kmaps hold the map reference (we drop ours after insert)
 * and the dso reference from machine__findnew_module_dso() is dropped here.
 * Returns the (borrowed) map or NULL.
 */
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}
704
/* Print all dsos of the host and every guest; returns bytes written. */
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}
717
/*
 * Print build-ids of this machine's dsos, skipping those for which
 * skip(dso, parm) returns true. Returns bytes written.
 */
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}
723
/* Print build-ids for host and all guests, with the same skip filter. */
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}
736
/*
 * Print the candidate vmlinux locations: the build-id cache entry first
 * (when the kernel dso has a build id), then the vmlinux search path.
 * Returns bytes written.
 */
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	/* Index offset keeps numbering contiguous after the [0] cache entry. */
	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
756
757size_t machine__fprintf(struct machine *machine, FILE *fp)
758{
3f067dca 759 struct rb_node *nd;
91e467bc
KL
760 size_t ret;
761 int i;
3f067dca 762
91e467bc
KL
763 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
764 struct threads *threads = &machine->threads[i];
0a7c74ea
ACM
765
766 down_read(&threads->lock);
d2c11034 767
91e467bc 768 ret = fprintf(fp, "Threads: %u\n", threads->nr);
3f067dca 769
91e467bc
KL
770 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
771 struct thread *pos = rb_entry(nd, struct thread, rb_node);
3f067dca 772
91e467bc
KL
773 ret += thread__fprintf(pos, fp);
774 }
b91fc39f 775
0a7c74ea 776 up_read(&threads->lock);
91e467bc 777 }
3f067dca
ACM
778 return ret;
779}
780
/*
 * Find or create the kernel dso for this machine, honouring any
 * user-supplied vmlinux name (host or default-guest), and read the
 * running kernel's build id if not yet known. Returns NULL on failure.
 */
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
806
/* Carries a start address for lookups; users are not visible in this chunk. */
struct process_args {
	u64 start;
};
810
15a0a870
AH
811static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
812 size_t bufsz)
813{
814 if (machine__is_default_guest(machine))
815 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
816 else
817 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
818}
819
a93f0e55
SQ
/* Candidate kernel start symbols, tried in order. NULL-terminated. */
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	/* kptr_restrict etc. may hide addresses: treat as "nothing to do". */
	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}
854
1fb87b8e
JO
/*
 * (Re)create the vmlinux maps, one per map type, all backed by the given
 * kernel dso, and insert them into the machine's kmaps. Uses the identity
 * ip mapping since the kernel is not relocated in these maps.
 */
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;

	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(0, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}
885
/*
 * Remove and release the vmlinux maps of every type. The shared
 * ref_reloc_sym is freed only once (on the MAP__FUNCTION pass) and
 * merely cleared on the others.
 */
void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
915
876650e6 916int machines__create_guest_kernel_maps(struct machines *machines)
3f067dca
ACM
917{
918 int ret = 0;
919 struct dirent **namelist = NULL;
920 int i, items = 0;
921 char path[PATH_MAX];
922 pid_t pid;
923 char *endp;
924
925 if (symbol_conf.default_guest_vmlinux_name ||
926 symbol_conf.default_guest_modules ||
927 symbol_conf.default_guest_kallsyms) {
928 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
929 }
930
931 if (symbol_conf.guestmount) {
932 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
933 if (items <= 0)
934 return -ENOENT;
935 for (i = 0; i < items; i++) {
936 if (!isdigit(namelist[i]->d_name[0])) {
937 /* Filter out . and .. */
938 continue;
939 }
940 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
941 if ((*endp != '\0') ||
942 (endp == namelist[i]->d_name) ||
943 (errno == ERANGE)) {
944 pr_debug("invalid directory (%s). Skipping.\n",
945 namelist[i]->d_name);
946 continue;
947 }
948 sprintf(path, "%s/%s/proc/kallsyms",
949 symbol_conf.guestmount,
950 namelist[i]->d_name);
951 ret = access(path, R_OK);
952 if (ret) {
953 pr_debug("Can't access file %s\n", path);
954 goto failure;
955 }
956 machines__create_kernel_maps(machines, pid);
957 }
958failure:
959 free(namelist);
960 }
961
962 return ret;
963}
964
/*
 * Destroy the host's kernel maps, then unlink and delete every guest
 * machine. The next node is saved before rb_erase() invalidates pos.
 */
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
979
876650e6 980int machines__create_kernel_maps(struct machines *machines, pid_t pid)
3f067dca
ACM
981{
982 struct machine *machine = machines__findnew(machines, pid);
983
984 if (machine == NULL)
985 return -1;
986
987 return machine__create_kernel_maps(machine);
988}
989
/*
 * Load kernel symbols from a kallsyms-format file into the kernel map.
 * Returns the number of symbols loaded (<= 0 on failure/none).
 */
int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
1008
/*
 * Load kernel symbols by searching the vmlinux path. Returns the number
 * of symbols loaded (<= 0 on failure/none).
 */
int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, map->type);

	return ret;
}
1019
3f067dca
ACM
1020static char *get_kernel_version(const char *root_dir)
1021{
1022 char version[PATH_MAX];
1023 FILE *file;
1024 char *name, *tmp;
1025 const char *prefix = "Linux version ";
1026
1027 sprintf(version, "%s/proc/version", root_dir);
1028 file = fopen(version, "r");
1029 if (!file)
1030 return NULL;
1031
1032 version[0] = '\0';
1033 tmp = fgets(version, sizeof(version), file);
1034 fclose(file);
1035
1036 name = strstr(version, prefix);
1037 if (!name)
1038 return NULL;
1039 name += strlen(prefix);
1040 tmp = strchr(name, ' ');
1041 if (tmp)
1042 *tmp = '\0';
1043
1044 return strdup(name);
1045}
1046
bb58a8a4
JO
/* True if the dso's symtab came from a kernel module (host or guest). */
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
1052
/*
 * If a map named after module m exists, point its dso at the on-disk
 * path, refresh the build id, and account for kmod compression in the
 * symtab type. Missing map is not an error (returns 0).
 */
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * Full name could reveal us kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}
1078
/*
 * Recursively walk a /lib/modules/<version> tree, wiring every .ko file
 * it finds to the matching module map via map_groups__set_module_path().
 * depth guards against following the top-level source/build symlinks.
 * Returns 0 on success or the first error encountered.
 */
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/*sshfs might return bad dent->d_type, so we have to stat*/
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			/* Only actual kernel modules get their path wired up. */
			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
1137
/*
 * Resolve the running kernel version and walk
 * <root_dir>/lib/modules/<version> to attach on-disk paths to module
 * maps. Returns -1 if the version cannot be determined.
 */
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
203d8a4a
SSG
/*
 * Weak default: architectures that need to adjust a module's text start
 * address (e.g. to skip prologue data) override this. The default is a
 * no-op that accepts the address as-is.
 */
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}
3f067dca 1158
9ad4652b
TR
/*
 * modules__parse() callback: create (or find) a module map covering
 * [start, start+size) and fetch its build-id. 'arg' is the owning
 * struct machine. Returns 0 on success, -1 on failure.
 */
static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	/* Give the architecture a chance to adjust the text start */
	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
1177
/*
 * Populate module maps from /proc/modules (or the configured guest
 * module list) and then try to attach on-disk paths. Failing to set
 * paths is tolerated: maps without paths are still usable.
 */
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	/* Respect kptr_restrict-style restrictions on the source file */
	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	/* Path resolution is best-effort; keep going without it */
	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
1203
1fb87b8e
JO
/*
 * Set [start, end) on every per-type kernel (vmlinux) map of this
 * machine.
 */
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = start;
		machine->vmlinux_maps[i]->end = end;

		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (start == 0 && end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}
1221
3f067dca
ACM
/*
 * Create the kernel and module maps for this machine, then, if the
 * running kernel's start address can be determined (kallsyms), rebase
 * the kernel map onto the real address and clamp its end at the first
 * module map. Returns 0 on success, -1 on failure.
 */
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	/* Module map failures are tolerated: symbols just stay coarser */
	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}

		/* we have a real start address now, so re-order the kmaps */
		map = machine__kernel_map(machine);

		/* Hold a ref across the remove/insert cycle */
		map__get(map);
		map_groups__remove(&machine->kmaps, map);

		/* assume it's the last in the kmaps */
		machine__set_kernel_mmap(machine, addr, ~0ULL);

		map_groups__insert(&machine->kmaps, map);
		map__put(map);
	}

	/* update end address of the kernel map using adjacent module address */
	map = map__next(machine__kernel_map(machine));
	if (map)
		machine__set_kernel_mmap(machine, addr, map->start);

	return 0;
}
1274
8e0cf965
AH
/*
 * Did any of this machine's dsos come from /proc/kcore? If so the
 * kcore maps are authoritative and synthesized MMAP events are ignored.
 */
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
1286
b0a7d1a0
ACM
/*
 * Handle a kernel-space MMAP event: either a module mapping (filename
 * starts with '/' or a non-kernel '[...]' name) or the kernel image
 * itself, in which case the vmlinux maps are (re)created and the ref
 * reloc symbol recorded. Returns 0 on success, -1 on failure.
 */
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	/*
	 * Compare without the trailing char of mmap_name so both the
	 * plain name and suffixed variants match.
	 */
	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we adding this dso to the
			 * linked list.
			 *
			 * However we don't really need passing correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;


			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}
1395
/*
 * Handle a PERF_RECORD_MMAP2 event: kernel mmaps are delegated to
 * machine__process_kernel_mmap_event(); user mmaps create a map and
 * insert it into the owning thread. Always returns 0 — problems are
 * logged and the event skipped, so processing of the stream continues.
 */
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	/* Data mappings become variable maps, code mappings function maps */
	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	/* References are dropped in reverse acquisition order */
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
1454
/*
 * Handle a legacy PERF_RECORD_MMAP event. Unlike MMAP2 there is no
 * prot/flags payload, so executable mappings are assumed PROT_EXEC.
 * Always returns 0 — problems are logged and the event skipped.
 */
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else {
		type = MAP__FUNCTION;
		/* No prot in the legacy event; code maps are assumed exec */
		prot = PROT_EXEC;
	}

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			0, 0, 0, 0, prot, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
1512
/*
 * Unlink a thread from the machine's rb-tree, park it on the dead list
 * and drop the tree's reference. 'lock' selects whether this function
 * takes the threads lock itself or the caller already holds it.
 */
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	/* Invalidate the lookup cache if it points at this thread */
	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}
1536
b91fc39f
ACM
1537void machine__remove_thread(struct machine *machine, struct thread *th)
1538{
1539 return __machine__remove_thread(machine, th, true);
1540}
1541
162f0bef
FW
/*
 * Handle PERF_RECORD_FORK: tear down any stale thread with the child's
 * tid, (re)create child and parent threads, and copy parent state into
 * the child via thread__fork(). Returns -1 on failure, 0 otherwise.
 */
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	/* thread__put tolerates NULL, so both paths are safe here */
	thread__put(thread);
	thread__put(parent);

	return err;
}
1590
162f0bef
FW
/*
 * Handle PERF_RECORD_EXIT: mark the thread as exited (it is kept
 * around so late samples can still resolve). Always returns 0.
 */
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}
1608
162f0bef
FW
/*
 * Dispatch a perf event to its per-type handler. Returns the handler's
 * result, or -1 for event types this function does not handle.
 */
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
3f067dca 1645
b21484f1 1646static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
3f067dca 1647{
a7c3899c 1648 if (!regexec(regex, sym->name, 0, NULL, 0))
3f067dca 1649 return 1;
3f067dca
ACM
1650 return 0;
1651}
1652
/*
 * Resolve a branch-stack address to map/symbol without a cpumode hint,
 * filling in the addr_map_symbol. phys_addr is not available here and
 * is zeroed.
 */
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}
1675
/*
 * Resolve a data-access address to map/symbol using the given cpumode
 * 'm', carrying through the sampled physical address.
 */
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}
1692
e80faac0
ACM
/*
 * Build a mem_info for a memory sample: resolve the instruction and
 * data addresses and copy the data-source descriptor. Returns a new
 * mem_info (caller owns the reference) or NULL on allocation failure.
 */
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
1708
40a342cd
MW
/*
 * Get the source line for a callchain entry, caching results in the
 * dso's srclines tree. Returns NULL when srclines are not wanted
 * (CCKEY_FUNCTION) or no map is available; the returned string is
 * owned by the cache, not the caller.
 */
static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		/* Cache miss: compute from objdump-relative address and insert */
		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}
1728
c4ee0625
JY
/* Loop-iteration info collected while removing loops from an LBR stack. */
struct iterations {
	int nr_loop_iter;	/* number of collapsed loop iterations */
	u64 cycles;		/* cycles accumulated over those iterations */
};
1733
/*
 * Resolve one callchain address and append it to the cursor.
 *
 * When 'cpumode' is NULL the cpumode is probed per-address; otherwise
 * PERF_CONTEXT_* marker values update *cpumode and are consumed without
 * being appended. Returns 0 on success, 1 when the chain is corrupted
 * (cursor reset), or a negative error from callchain_cursor_append().
 */
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		/* Context markers switch cpumode for subsequent entries */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		         symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}
1807
644f2df2
ACM
/*
 * Resolve every entry of a sample's branch stack into branch_info
 * (from/to map+symbol plus flags). Returns a calloc'ed array of
 * bs->nr entries the caller must free, or NULL on allocation failure.
 */
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}
1825
c4ee0625
JY
1826static void save_iterations(struct iterations *iter,
1827 struct branch_entry *be, int nr)
1828{
1829 int i;
1830
1831 iter->nr_loop_iter = nr;
1832 iter->cycles = 0;
1833
1834 for (i = 0; i < nr; i++)
1835 iter->cycles += be[i].flags.cycles;
1836}
1837
8b7bad58
AK
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/*
 * Remove loops: collapse repeated runs of branch entries (detected via
 * a small hash of 'from' addresses) into a single occurrence, recording
 * the collapsed iteration count/cycles in 'iter'. Compacts 'l' and
 * 'iter' in place and returns the new entry count.
 */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	/* Indices are stored in unsigned char slots, so depth must fit */
	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				/* j = entries remaining after the loop body */
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
						l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
1889
384b6055
KL
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success get LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	/* Find where the kernel part of the chain ends */
	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				/* kernel ips first, then LBR user entries */
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				/* caller order: LBR entries first, reversed */
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
1982
/*
 * Resolve a sample's callchain into the cursor, optionally merging in
 * LBR callstack data and interleaved branch-stack entries. Returns 0
 * on success or a negative error code.
 */
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	/* Prefer the LBR callstack when the event recorded one */
	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		/* Append each branch as a to/from pair of cursor entries */
		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		/* Context markers do not count against max_stack */
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
2117
11ea2515
MW
2118static int append_inlines(struct callchain_cursor *cursor,
2119 struct map *map, struct symbol *sym, u64 ip)
2120{
2121 struct inline_node *inline_node;
2122 struct inline_list *ilist;
2123 u64 addr;
b38775cf 2124 int ret = 1;
11ea2515
MW
2125
2126 if (!symbol_conf.inline_name || !map || !sym)
b38775cf 2127 return ret;
11ea2515
MW
2128
2129 addr = map__rip_2objdump(map, ip);
2130
2131 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2132 if (!inline_node) {
2133 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2134 if (!inline_node)
b38775cf 2135 return ret;
11ea2515
MW
2136 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2137 }
2138
2139 list_for_each_entry(ilist, &inline_node->val, list) {
b38775cf
MW
2140 ret = callchain_cursor_append(cursor, ip, map,
2141 ilist->symbol, false,
2142 NULL, 0, 0, 0, ilist->srcline);
11ea2515
MW
2143
2144 if (ret != 0)
2145 return ret;
2146 }
2147
b38775cf 2148 return ret;
11ea2515
MW
2149}
2150
3f067dca
ACM
2151static int unwind_entry(struct unwind_entry *entry, void *arg)
2152{
2153 struct callchain_cursor *cursor = arg;
40a342cd 2154 const char *srcline = NULL;
b49a8fe5
NK
2155
2156 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2157 return 0;
40a342cd 2158
11ea2515
MW
2159 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2160 return 0;
2161
40a342cd 2162 srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
3f067dca 2163 return callchain_cursor_append(cursor, entry->ip,
410024db 2164 entry->map, entry->sym,
40a342cd 2165 false, NULL, 0, 0, 0, srcline);
3f067dca
ACM
2166}
2167
9919a65e
CP
2168static int thread__resolve_callchain_unwind(struct thread *thread,
2169 struct callchain_cursor *cursor,
2170 struct perf_evsel *evsel,
2171 struct perf_sample *sample,
2172 int max_stack)
3f067dca 2173{
3f067dca
ACM
2174 /* Can we do dwarf post unwind? */
2175 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2176 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
2177 return 0;
2178
2179 /* Bail out if nothing was captured. */
2180 if ((!sample->user_regs.regs) ||
2181 (!sample->user_stack.size))
2182 return 0;
2183
91d7b2de 2184 return unwind__get_entries(unwind_entry, cursor,
352ea45a 2185 thread, sample, max_stack);
9919a65e 2186}
3f067dca 2187
9919a65e
CP
2188int thread__resolve_callchain(struct thread *thread,
2189 struct callchain_cursor *cursor,
2190 struct perf_evsel *evsel,
2191 struct perf_sample *sample,
2192 struct symbol **parent,
2193 struct addr_location *root_al,
2194 int max_stack)
2195{
2196 int ret = 0;
2197
914eb9ca 2198 callchain_cursor_reset(cursor);
9919a65e
CP
2199
2200 if (callchain_param.order == ORDER_CALLEE) {
2201 ret = thread__resolve_callchain_sample(thread, cursor,
2202 evsel, sample,
2203 parent, root_al,
2204 max_stack);
2205 if (ret)
2206 return ret;
2207 ret = thread__resolve_callchain_unwind(thread, cursor,
2208 evsel, sample,
2209 max_stack);
2210 } else {
2211 ret = thread__resolve_callchain_unwind(thread, cursor,
2212 evsel, sample,
2213 max_stack);
2214 if (ret)
2215 return ret;
2216 ret = thread__resolve_callchain_sample(thread, cursor,
2217 evsel, sample,
2218 parent, root_al,
2219 max_stack);
2220 }
2221
2222 return ret;
3f067dca 2223}
35feee19
DA
2224
2225int machine__for_each_thread(struct machine *machine,
2226 int (*fn)(struct thread *thread, void *p),
2227 void *priv)
2228{
91e467bc 2229 struct threads *threads;
35feee19
DA
2230 struct rb_node *nd;
2231 struct thread *thread;
2232 int rc = 0;
91e467bc 2233 int i;
35feee19 2234
91e467bc
KL
2235 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2236 threads = &machine->threads[i];
2237 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
2238 thread = rb_entry(nd, struct thread, rb_node);
2239 rc = fn(thread, priv);
2240 if (rc != 0)
2241 return rc;
2242 }
35feee19 2243
91e467bc
KL
2244 list_for_each_entry(thread, &threads->dead, node) {
2245 rc = fn(thread, priv);
2246 if (rc != 0)
2247 return rc;
2248 }
35feee19
DA
2249 }
2250 return rc;
2251}
58d925dc 2252
a5499b37
AH
2253int machines__for_each_thread(struct machines *machines,
2254 int (*fn)(struct thread *thread, void *p),
2255 void *priv)
2256{
2257 struct rb_node *nd;
2258 int rc = 0;
2259
2260 rc = machine__for_each_thread(&machines->host, fn, priv);
2261 if (rc != 0)
2262 return rc;
2263
2264 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
2265 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2266
2267 rc = machine__for_each_thread(machine, fn, priv);
2268 if (rc != 0)
2269 return rc;
2270 }
2271 return rc;
2272}
2273
a33fbd56 2274int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
602ad878 2275 struct target *target, struct thread_map *threads,
9d9cad76 2276 perf_event__handler_t process, bool data_mmap,
340b47f5
KL
2277 unsigned int proc_map_timeout,
2278 unsigned int nr_threads_synthesize)
58d925dc 2279{
602ad878 2280 if (target__has_task(target))
9d9cad76 2281 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
602ad878 2282 else if (target__has_cpu(target))
340b47f5
KL
2283 return perf_event__synthesize_threads(tool, process,
2284 machine, data_mmap,
2285 proc_map_timeout,
2286 nr_threads_synthesize);
58d925dc
ACM
2287 /* command specified */
2288 return 0;
2289}
b9d266ba
AH
2290
2291pid_t machine__get_current_tid(struct machine *machine, int cpu)
2292{
2293 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
2294 return -1;
2295
2296 return machine->current_tid[cpu];
2297}
2298
2299int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2300 pid_t tid)
2301{
2302 struct thread *thread;
2303
2304 if (cpu < 0)
2305 return -EINVAL;
2306
2307 if (!machine->current_tid) {
2308 int i;
2309
2310 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
2311 if (!machine->current_tid)
2312 return -ENOMEM;
2313 for (i = 0; i < MAX_NR_CPUS; i++)
2314 machine->current_tid[i] = -1;
2315 }
2316
2317 if (cpu >= MAX_NR_CPUS) {
2318 pr_err("Requested CPU %d too large. ", cpu);
2319 pr_err("Consider raising MAX_NR_CPUS\n");
2320 return -EINVAL;
2321 }
2322
2323 machine->current_tid[cpu] = tid;
2324
2325 thread = machine__findnew_thread(machine, pid, tid);
2326 if (!thread)
2327 return -ENOMEM;
2328
2329 thread->cpu = cpu;
b91fc39f 2330 thread__put(thread);
b9d266ba
AH
2331
2332 return 0;
2333}
fbe2af45
AH
2334
2335int machine__get_kernel_start(struct machine *machine)
2336{
a5e813c6 2337 struct map *map = machine__kernel_map(machine);
fbe2af45
AH
2338 int err = 0;
2339
2340 /*
2341 * The only addresses above 2^63 are kernel addresses of a 64-bit
2342 * kernel. Note that addresses are unsigned so that on a 32-bit system
2343 * all addresses including kernel addresses are less than 2^32. In
2344 * that case (32-bit system), if the kernel mapping is unknown, all
2345 * addresses will be assumed to be in user space - see
2346 * machine__kernel_ip().
2347 */
2348 machine->kernel_start = 1ULL << 63;
2349 if (map) {
be39db9f 2350 err = map__load(map);
4b1303d0 2351 if (!err)
fbe2af45
AH
2352 machine->kernel_start = map->start;
2353 }
2354 return err;
2355}
aa7cc2ae
ACM
2356
/* Look up @filename in the machine's dso list, creating the dso if missing. */
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}
c3168b0d
ACM
2361
2362char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2363{
2364 struct machine *machine = vmachine;
2365 struct map *map;
128cde33 2366 struct symbol *sym = machine__find_kernel_function(machine, *addrp, &map);
c3168b0d
ACM
2367
2368 if (sym == NULL)
2369 return NULL;
2370
2371 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2372 *addrp = map->unmap_ip(map, sym->start);
2373 return sym->name;
2374}