perf machine: Fix machine__destroy_kernel_maps to drop vmlinux_maps references
tools/perf/util/machine.c
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	pthread_rwlock_init(&dsos->lock, NULL);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

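	/*
	 * The list holds a reference on each dso, taken when it was added;
	 * dropping it here frees the dso unless someone else still holds one.
	 */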
	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

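/*
 * Find a guest machine by pid, adding it if it does not exist yet.
 * When a guestmount directory is configured, the guest's root dir is
 * expected at <guestmount>/<pid>; unreadable guests are reported once
 * (via the "seen" strlist) and then skipped.
 */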
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

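/*
 * When a thread was first seen with an unknown pid (a tid-only sample),
 * fix up its pid once it becomes known and join the thread-group
 * leader's map groups, so that address lookups share the leader's maps.
 */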
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
	}

	return th;
}

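/*
 * Callers of __machine__findnew_thread() must hold machine->threads_lock.
 * The wrappers below take the lock themselves and also grab a reference
 * that the caller is expected to drop with thread__put().
 */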
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = thread__get(__machine__findnew_thread(machine, pid, tid));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

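/*
 * Create one kernel map per map type (functions, variables), backed by the
 * kernel dso and using the identity function for map_ip/unmap_ip, since
 * vmlinux addresses are absolute.
 */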
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

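		/*
		 * Drop the reference vmlinux_maps holds on the map, taken
		 * when it was created in __machine__create_kernel_maps()
		 * (per the commit subject, destroy must put these
		 * references).
		 */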
		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

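/*
 * Kernel and module mmap events are routed here instead of to a thread's
 * map groups: a filename starting with '/' (or a non-kernel '[' name)
 * becomes a module map, while a "[kernel.kallsyms]" style mmap (re)creates
 * the vmlinux maps, fixes up their length and anchors the ref_reloc_sym
 * used to relocate kallsyms.
 */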
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {
			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However, we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine), NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(atomic_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
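		/*
		 * An ip at or above PERF_CONTEXT_MAX is not an address but a
		 * context marker: it switches the cpumode used to resolve
		 * the callchain entries that follow it.
		 */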
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
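/*
 * The scheme, in brief: hash each branch's "from" address into a small
 * table; when the same "from" shows up again, check whether the entries
 * since its first occurrence repeat verbatim and, if so, collapse the
 * cycle with a memmove(). Hash collisions are not chained ("no collision
 * handling for now"), so some loops may be missed.
 */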
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve an LBR callstack chain sample.
 * Return:
 *   1 on success (got LBR callchain information)
 *   0 when no LBR callchain information is available, so the caller
 *     should fall back to the frame-pointer (fp) chain
 *   negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is no longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

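/*
 * Takes the machine as an opaque pointer, presumably so the signature can
 * be used as a generic function-resolver callback; returns the symbol
 * name, rewrites *addrp to the (unmapped) start address of the symbol and
 * reports the containing module, if any, via *modp.
 */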
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map, NULL);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}