1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/capability.h>
8 #include <linux/kernel.h>
9 #include <linux/mman.h>
10 #include <linux/string.h>
11 #include <linux/time64.h>
12 #include <sys/types.h>
14 #include <sys/param.h>
22 #include "util.h" // lsdir()
28 #include "map_symbol.h"
29 #include "mem-events.h"
33 #include "namespaces.h"
36 #include <linux/ctype.h>
37 #include <linux/zalloc.h>
41 #include <symbol/kallsyms.h>
42 #include <sys/utsname.h>
44 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
46 static bool symbol__is_idle(const char *name);
48 int vmlinux_path__nr_entries;
51 struct symbol_conf symbol_conf = {
54 .try_vmlinux_path = true,
56 .demangle_kernel = false,
57 .cumulate_callchain = true,
58 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
59 .show_hist_headers = true,
66 struct map_list_node {
67 struct list_head node;
71 static struct map_list_node *map_list_node__new(void)
73 return malloc(sizeof(struct map_list_node));
76 static enum dso_binary_type binary_type_symtab[] = {
77 DSO_BINARY_TYPE__KALLSYMS,
78 DSO_BINARY_TYPE__GUEST_KALLSYMS,
79 DSO_BINARY_TYPE__JAVA_JIT,
80 DSO_BINARY_TYPE__DEBUGLINK,
81 DSO_BINARY_TYPE__BUILD_ID_CACHE,
82 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
83 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
84 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
85 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
86 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
87 DSO_BINARY_TYPE__GUEST_KMODULE,
88 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
89 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
90 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
91 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
92 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
93 DSO_BINARY_TYPE__NOT_FOUND,
96 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
98 static bool symbol_type__filter(char symbol_type)
100 symbol_type = toupper(symbol_type);
101 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
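/*
 * Illustrative note (not from the original source): the type letters
 * follow nm(1) conventions, e.g. in the kallsyms line
 *   ffffffff81000000 T _text
 * 'T'/'t' is text, 'W'/'w' weak, 'D'/'d' initialized data, 'B'/'b' BSS;
 * the toupper() above folds the lowercase (local) variants into one check.
 */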
104 static int prefix_underscores_count(const char *str)
106 const char *tail = str;
114 const char * __weak arch__normalize_symbol_name(const char *name)
119 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
121 return strcmp(namea, nameb);
124 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
127 return strncmp(namea, nameb, n);
130 int __weak arch__choose_best_symbol(struct symbol *syma,
131 struct symbol *symb __maybe_unused)
133 /* Avoid "SyS" kernel syscall aliases */
134 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
136 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
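/*
 * Example of the aliasing handled above (illustrative, not from the
 * original source): SYSCALL_DEFINE wrappers used to emit both "sys_read"
 * and "SyS_read" for the same address; preferring the non-"SyS" spelling
 * keeps the readable name in reports.
 */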
142 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
148 /* Prefer a symbol with non zero length */
149 a = syma->end - syma->start;
150 b = symb->end - symb->start;
151 if ((b == 0) && (a > 0))
153 else if ((a == 0) && (b > 0))
156 /* Prefer a non weak symbol over a weak one */
157 a = syma->binding == STB_WEAK;
158 b = symb->binding == STB_WEAK;
164 /* Prefer a global symbol over a non global one */
165 a = syma->binding == STB_GLOBAL;
166 b = symb->binding == STB_GLOBAL;
172 /* Prefer a symbol with less underscores */
173 a = prefix_underscores_count(syma->name);
174 b = prefix_underscores_count(symb->name);
180 /* Choose the symbol with the longest name */
181 na = strlen(syma->name);
182 nb = strlen(symb->name);
188 return arch__choose_best_symbol(syma, symb);
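/*
 * Worked example of the tie-break order above (hypothetical names): for
 * two symbols at the same address, a non-zero-length, global "foo" beats
 * a zero-length, weak "__foo" on the very first rule; name length is only
 * consulted if every earlier rule ties.
 */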
191 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
194 struct symbol *curr, *next;
196 if (symbol_conf.allow_aliases)
199 nd = rb_first_cached(symbols);
202 curr = rb_entry(nd, struct symbol, rb_node);
204 nd = rb_next(&curr->rb_node);
208 next = rb_entry(nd, struct symbol, rb_node);
209 if (curr->start != next->start)
212 if (choose_best_symbol(curr, next) == SYMBOL_A) {
213 if (next->type == STT_GNU_IFUNC)
214 curr->ifunc_alias = true;
215 rb_erase_cached(&next->rb_node, symbols);
216 symbol__delete(next);
219 if (curr->type == STT_GNU_IFUNC)
220 next->ifunc_alias = true;
221 nd = rb_next(&curr->rb_node);
222 rb_erase_cached(&curr->rb_node, symbols);
223 symbol__delete(curr);
228 /* Update zero-sized symbols using the address of the next symbol */
229 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
231 struct rb_node *nd, *prevnd = rb_first_cached(symbols);
232 struct symbol *curr, *prev;
237 curr = rb_entry(prevnd, struct symbol, rb_node);
239 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
241 curr = rb_entry(nd, struct symbol, rb_node);
244 * On some architectures the kernel text segment start is located at
245 * some low memory address, while modules are located at high
246 * memory addresses (or vice versa). The gap between the end of the
247 * kernel text segment and the beginning of the first module's text
248 * segment can be very big. Therefore do not fill this gap and do
249 * not assign it to the kernel dso map (kallsyms).
251 * Also BPF code can be allocated separately from text segments
252 * and modules. So the last entry in a module should not fill
255 * In kallsyms, module symbols are identified by a '[' character, as in:
257 * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
259 if (prev->end == prev->start) {
260 const char *prev_mod;
261 const char *curr_mod;
264 prev->end = curr->start;
268 prev_mod = strchr(prev->name, '[');
269 curr_mod = strchr(curr->name, '[');
271 /* Last kernel/module symbol mapped to end of page */
272 if (!prev_mod != !curr_mod)
273 prev->end = roundup(prev->end + 4096, 4096);
274 /* Last symbol in the previous module */
275 else if (prev_mod && strcmp(prev_mod, curr_mod))
276 prev->end = roundup(prev->end + 4096, 4096);
278 prev->end = curr->start;
280 pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
281 __func__, prev->name, prev->end);
286 if (curr->end == curr->start)
287 curr->end = roundup(curr->start, 4096) + 4096;
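/*
 * Illustration of the module-boundary case above (addresses hypothetical):
 *   ffffffffc0a000f0 t last_sym       [snd_hda_codec_hdmi]
 *   ffffffffc0b00000 t first_sym      [snd_hda_codec_generic]
 * last_sym must not be extended to first_sym's start, so its end is
 * rounded up to the next page boundary instead.
 */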
290 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
292 size_t namelen = strlen(name) + 1;
293 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
294 sizeof(*sym) + namelen));
298 if (symbol_conf.priv_size) {
299 if (symbol_conf.init_annotation) {
300 struct annotation *notes = (void *)sym;
301 annotation__init(notes);
303 sym = ((void *)sym) + symbol_conf.priv_size;
307 sym->end = len ? start + len : start;
309 sym->binding = binding;
310 sym->namelen = namelen - 1;
312 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
313 __func__, name, start, sym->end);
314 memcpy(sym->name, name, namelen);
319 void symbol__delete(struct symbol *sym)
321 if (symbol_conf.priv_size) {
322 if (symbol_conf.init_annotation) {
323 struct annotation *notes = symbol__annotation(sym);
325 annotation__exit(notes);
328 free(((void *)sym) - symbol_conf.priv_size);
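/*
 * Layout sketch (derived from symbol__new()/symbol__delete() above):
 *
 *   [ priv: symbol_conf.priv_size ][ struct symbol ][ name bytes ... ]
 *                                   ^ pointer handed to callers
 *
 * which is why free() rewinds by priv_size before releasing the block.
 */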
331 void symbols__delete(struct rb_root_cached *symbols)
334 struct rb_node *next = rb_first_cached(symbols);
337 pos = rb_entry(next, struct symbol, rb_node);
338 next = rb_next(&pos->rb_node);
339 rb_erase_cached(&pos->rb_node, symbols);
344 void __symbols__insert(struct rb_root_cached *symbols,
345 struct symbol *sym, bool kernel)
347 struct rb_node **p = &symbols->rb_root.rb_node;
348 struct rb_node *parent = NULL;
349 const u64 ip = sym->start;
351 bool leftmost = true;
354 const char *name = sym->name;
356 * ppc64 uses function descriptors and appends a '.' to the
357 * start of every instruction address. Remove it.
361 sym->idle = symbol__is_idle(name);
366 s = rb_entry(parent, struct symbol, rb_node);
374 rb_link_node(&sym->rb_node, parent, p);
375 rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
378 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
380 __symbols__insert(symbols, sym, false);
383 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
390 n = symbols->rb_root.rb_node;
393 struct symbol *s = rb_entry(n, struct symbol, rb_node);
397 else if (ip > s->end || (ip == s->end && ip != s->start))
406 static struct symbol *symbols__first(struct rb_root_cached *symbols)
408 struct rb_node *n = rb_first_cached(symbols);
411 return rb_entry(n, struct symbol, rb_node);
416 static struct symbol *symbols__last(struct rb_root_cached *symbols)
418 struct rb_node *n = rb_last(&symbols->rb_root);
421 return rb_entry(n, struct symbol, rb_node);
426 static struct symbol *symbols__next(struct symbol *sym)
428 struct rb_node *n = rb_next(&sym->rb_node);
431 return rb_entry(n, struct symbol, rb_node);
436 static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
438 const struct symbol *lhs = *((const struct symbol **)vlhs);
439 const struct symbol *rhs = *((const struct symbol **)vrhs);
441 return strcmp(lhs->name, rhs->name);
444 static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
447 struct symbol **result;
448 size_t i = 0, size = 0;
450 for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
453 result = malloc(sizeof(*result) * size);
457 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
458 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
462 qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
467 int symbol__match_symbol_name(const char *name, const char *str,
468 enum symbol_tag_include includes)
470 const char *versioning;
472 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
473 (versioning = strstr(name, "@@"))) {
474 int len = strlen(str);
476 if (len < versioning - name)
477 len = versioning - name;
479 return arch__compare_symbol_names_n(name, str, len);
481 return arch__compare_symbol_names(name, str);
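/*
 * Example (glibc-style versioned symbol): with
 * SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, looking up "malloc" matches
 * "malloc@@GLIBC_2.2.5" because the comparison length stops at the "@@"
 * default-version tag.
 */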
484 static struct symbol *symbols__find_by_name(struct symbol *symbols[],
487 enum symbol_tag_include includes,
490 size_t i, lower = 0, upper = symbols_len;
491 struct symbol *s = NULL;
494 *found_idx = SIZE_MAX;
499 while (lower < upper) {
502 i = (lower + upper) / 2;
503 cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);
516 if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
517 /* return first symbol that has same name (if any) */
519 struct symbol *tmp = symbols[i - 1];
521 if (!arch__compare_symbol_names(tmp->name, s->name)) {
529 assert(!found_idx || !s || s == symbols[*found_idx]);
533 void dso__reset_find_symbol_cache(struct dso *dso)
535 dso->last_find_result.addr = 0;
536 dso->last_find_result.symbol = NULL;
539 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
541 __symbols__insert(&dso->symbols, sym, dso->kernel);
543 /* update the symbol cache if necessary */
544 if (dso->last_find_result.addr >= sym->start &&
545 (dso->last_find_result.addr < sym->end ||
546 sym->start == sym->end)) {
547 dso->last_find_result.symbol = sym;
551 void dso__delete_symbol(struct dso *dso, struct symbol *sym)
553 rb_erase_cached(&sym->rb_node, &dso->symbols);
555 dso__reset_find_symbol_cache(dso);
558 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
560 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
561 dso->last_find_result.addr = addr;
562 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
565 return dso->last_find_result.symbol;
568 struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
570 return symbols__find(&dso->symbols, addr);
573 struct symbol *dso__first_symbol(struct dso *dso)
575 return symbols__first(&dso->symbols);
578 struct symbol *dso__last_symbol(struct dso *dso)
580 return symbols__last(&dso->symbols);
583 struct symbol *dso__next_symbol(struct symbol *sym)
585 return symbols__next(sym);
588 struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
590 if (*idx + 1 >= dso->symbol_names_len)
594 return dso->symbol_names[*idx];
598 * Returns the first symbol that matches @name.
600 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
602 struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
603 name, SYMBOL_TAG_INCLUDE__NONE, idx);
605 s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
606 name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
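#if 0
/*
 * Hedged usage sketch (not compiled; "dso" and the symbol name are
 * placeholders): walk all same-named aliases by pairing the lookup above
 * with dso__next_symbol_by_name().
 */
size_t idx;
struct symbol *sym = dso__find_symbol_by_name(dso, "schedule", &idx);

while (sym && !strcmp(sym->name, "schedule")) {
	/* ... consume sym ... */
	sym = dso__next_symbol_by_name(dso, &idx);
}
#endif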
610 void dso__sort_by_name(struct dso *dso)
612 mutex_lock(&dso->lock);
613 if (!dso__sorted_by_name(dso)) {
616 dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len);
617 if (dso->symbol_names) {
618 dso->symbol_names_len = len;
619 dso__set_sorted_by_name(dso);
622 mutex_unlock(&dso->lock);
626 * Parse a run of hex characters into *long_val.
627 * Return the number of characters processed.
629 static int hex2u64(const char *ptr, u64 *long_val)
633 *long_val = strtoull(ptr, &p, 16);
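/* Example: hex2u64("c0a000f0 T foo", &v) sets v = 0xc0a000f0 and returns 8. */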
639 int modules__parse(const char *filename, void *arg,
640 int (*process_module)(void *arg, const char *name,
641 u64 start, u64 size))
648 file = fopen(filename, "r");
658 line_len = getline(&line, &n, file);
671 line[--line_len] = '\0'; /* \n */
673 sep = strrchr(line, 'x');
677 hex2u64(sep + 1, &start);
679 sep = strchr(line, ' ');
685 scnprintf(name, sizeof(name), "[%s]", line);
687 size = strtoul(sep + 1, &endptr, 0);
688 if (*endptr != ' ' && *endptr != '\t')
691 err = process_module(arg, name, start, size);
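/*
 * Sample /proc/modules line driving the parsing above (illustrative):
 *   snd_hda_codec_hdmi 77824 1 - Live 0xffffffffc1930000
 * yields name "[snd_hda_codec_hdmi]", size 77824, and start taken from the
 * last "0x..." field (found via strrchr(line, 'x')).
 */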
702 * These are symbols in the kernel image, so make sure that
703 * sym is from a kernel DSO.
705 static bool symbol__is_idle(const char *name)
707 const char * const idle_symbols[] = {
708 "acpi_idle_do_entry",
709 "acpi_processor_ffh_cstate_enter",
721 "mwait_idle_with_hints",
722 "mwait_idle_with_hints.constprop.0",
724 "ppc64_runlatch_off",
725 "pseries_dedicated_idle_sleep",
731 static struct strlist *idle_symbols_list;
733 if (idle_symbols_list)
734 return strlist__has_entry(idle_symbols_list, name);
736 idle_symbols_list = strlist__new(NULL, NULL);
738 for (i = 0; idle_symbols[i]; i++)
739 strlist__add(idle_symbols_list, idle_symbols[i]);
741 return strlist__has_entry(idle_symbols_list, name);
744 static int map__process_kallsym_symbol(void *arg, const char *name,
745 char type, u64 start)
748 struct dso *dso = arg;
749 struct rb_root_cached *root = &dso->symbols;
751 if (!symbol_type__filter(type))
754 /* Ignore local symbols for ARM modules */
759 * module symbols are not sorted so we add all
760 * symbols, setting length to 0, and rely on
761 * symbols__fixup_end() to fix it up.
763 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
767 * We will pass the symbols to the filter later, in
768 * map__split_kallsyms, when we have split the maps per module
770 __symbols__insert(root, sym, !strchr(name, '['));
776 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
777 * so that we can in the next step set the symbol ->end address and then
778 * call kernel_maps__split_kallsyms.
780 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
782 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
785 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
789 struct rb_root_cached old_root = dso->symbols;
790 struct rb_root_cached *root = &dso->symbols;
791 struct rb_node *next = rb_first_cached(root);
796 *root = RB_ROOT_CACHED;
799 struct map *curr_map;
800 struct dso *curr_map_dso;
803 pos = rb_entry(next, struct symbol, rb_node);
804 next = rb_next(&pos->rb_node);
806 rb_erase_cached(&pos->rb_node, &old_root);
807 RB_CLEAR_NODE(&pos->rb_node);
808 module = strchr(pos->name, '\t');
812 curr_map = maps__find(kmaps, pos->start);
818 curr_map_dso = map__dso(curr_map);
819 pos->start -= map__start(curr_map) - map__pgoff(curr_map);
820 if (pos->end > map__end(curr_map))
821 pos->end = map__end(curr_map);
823 pos->end -= map__start(curr_map) - map__pgoff(curr_map);
824 symbols__insert(&curr_map_dso->symbols, pos);
829 /* Symbols have been adjusted */
830 dso->adjust_symbols = 1;
836 * Split the symbols into maps, making sure there are no overlaps, i.e. the
837 * kernel range is broken into several maps, named [kernel].N, as we don't have
838 * the original ELF section names vmlinux has.
840 static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
841 struct map *initial_map)
843 struct machine *machine;
844 struct map *curr_map = map__get(initial_map);
846 int count = 0, moved = 0;
847 struct rb_root_cached *root = &dso->symbols;
848 struct rb_node *next = rb_first_cached(root);
849 int kernel_range = 0;
855 machine = maps__machine(kmaps);
857 x86_64 = machine__is(machine, "x86_64");
862 pos = rb_entry(next, struct symbol, rb_node);
863 next = rb_next(&pos->rb_node);
865 module = strchr(pos->name, '\t');
867 struct dso *curr_map_dso;
869 if (!symbol_conf.use_modules)
873 curr_map_dso = map__dso(curr_map);
874 if (strcmp(curr_map_dso->short_name, module)) {
875 if (!RC_CHK_EQUAL(curr_map, initial_map) &&
876 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
877 machine__is_default_guest(machine)) {
879 * We assume all symbols of a module are
880 * contiguous in kallsyms, so curr_map
881 * points to a module and all its
882 * symbols are in its kmap. Mark it as
885 dso__set_loaded(curr_map_dso);
889 curr_map = maps__find_by_name(kmaps, module);
890 if (curr_map == NULL) {
891 pr_debug("%s/proc/{kallsyms,modules} "
892 "inconsistency while looking "
893 "for \"%s\" module!\n",
894 machine->root_dir, module);
895 curr_map = map__get(initial_map);
898 curr_map_dso = map__dso(curr_map);
899 if (curr_map_dso->loaded &&
900 !machine__is_default_guest(machine))
904 * So that we look just like we get from .ko files,
905 * i.e. not prelinked, relative to initial_map->start.
907 pos->start = map__map_ip(curr_map, pos->start);
908 pos->end = map__map_ip(curr_map, pos->end);
909 } else if (x86_64 && is_entry_trampoline(pos->name)) {
911 * These symbols are not needed anymore since the
912 * trampoline maps refer to the text section and its
913 * symbols instead. Avoid having to deal with
914 * relocations, and the assumption that the first symbol
915 * is the start of kernel text, by simply removing the
916 * symbols at this point.
919 } else if (!RC_CHK_EQUAL(curr_map, initial_map)) {
920 char dso_name[PATH_MAX];
924 /* Kernel was relocated at boot time */
931 curr_map = map__get(initial_map);
935 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
936 snprintf(dso_name, sizeof(dso_name),
940 snprintf(dso_name, sizeof(dso_name),
944 ndso = dso__new(dso_name);
949 ndso->kernel = dso->kernel;
951 curr_map = map__new2(pos->start, ndso);
952 if (curr_map == NULL) {
957 map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
958 if (maps__insert(kmaps, curr_map)) {
965 /* Kernel was relocated at boot time */
970 if (!RC_CHK_EQUAL(curr_map, initial_map)) {
971 struct dso *curr_map_dso = map__dso(curr_map);
973 rb_erase_cached(&pos->rb_node, root);
974 symbols__insert(&curr_map_dso->symbols, pos);
981 rb_erase_cached(&pos->rb_node, root);
985 if (!RC_CHK_EQUAL(curr_map, initial_map) &&
986 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
987 machine__is_default_guest(maps__machine(kmaps))) {
988 dso__set_loaded(map__dso(curr_map));
991 return count + moved;
994 bool symbol__restricted_filename(const char *filename,
995 const char *restricted_filename)
997 bool restricted = false;
999 if (symbol_conf.kptr_restrict) {
1000 char *r = realpath(filename, NULL);
1003 restricted = strcmp(r, restricted_filename) == 0;
1012 struct module_info {
1013 struct rb_node rb_node;
1018 static void add_module(struct module_info *mi, struct rb_root *modules)
1020 struct rb_node **p = &modules->rb_node;
1021 struct rb_node *parent = NULL;
1022 struct module_info *m;
1024 while (*p != NULL) {
1026 m = rb_entry(parent, struct module_info, rb_node);
1027 if (strcmp(mi->name, m->name) < 0)
1030 p = &(*p)->rb_right;
1032 rb_link_node(&mi->rb_node, parent, p);
1033 rb_insert_color(&mi->rb_node, modules);
1036 static void delete_modules(struct rb_root *modules)
1038 struct module_info *mi;
1039 struct rb_node *next = rb_first(modules);
1042 mi = rb_entry(next, struct module_info, rb_node);
1043 next = rb_next(&mi->rb_node);
1044 rb_erase(&mi->rb_node, modules);
1050 static struct module_info *find_module(const char *name,
1051 struct rb_root *modules)
1053 struct rb_node *n = modules->rb_node;
1056 struct module_info *m;
1059 m = rb_entry(n, struct module_info, rb_node);
1060 cmp = strcmp(name, m->name);
1072 static int __read_proc_modules(void *arg, const char *name, u64 start,
1073 u64 size __maybe_unused)
1075 struct rb_root *modules = arg;
1076 struct module_info *mi;
1078 mi = zalloc(sizeof(struct module_info));
1082 mi->name = strdup(name);
1090 add_module(mi, modules);
1095 static int read_proc_modules(const char *filename, struct rb_root *modules)
1097 if (symbol__restricted_filename(filename, "/proc/modules"))
1100 if (modules__parse(filename, modules, __read_proc_modules)) {
1101 delete_modules(modules);
1108 int compare_proc_modules(const char *from, const char *to)
1110 struct rb_root from_modules = RB_ROOT;
1111 struct rb_root to_modules = RB_ROOT;
1112 struct rb_node *from_node, *to_node;
1113 struct module_info *from_m, *to_m;
1116 if (read_proc_modules(from, &from_modules))
1119 if (read_proc_modules(to, &to_modules))
1120 goto out_delete_from;
1122 from_node = rb_first(&from_modules);
1123 to_node = rb_first(&to_modules);
1128 from_m = rb_entry(from_node, struct module_info, rb_node);
1129 to_m = rb_entry(to_node, struct module_info, rb_node);
1131 if (from_m->start != to_m->start ||
1132 strcmp(from_m->name, to_m->name))
1135 from_node = rb_next(from_node);
1136 to_node = rb_next(to_node);
1139 if (!from_node && !to_node)
1142 delete_modules(&to_modules);
1144 delete_modules(&from_modules);
1149 static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
1151 struct rb_root *modules = data;
1152 struct module_info *mi;
1155 if (!__map__is_kmodule(old_map))
1158 dso = map__dso(old_map);
1159 /* Module must be in memory at the same address */
1160 mi = find_module(dso->short_name, modules);
1161 if (!mi || mi->start != map__start(old_map))
1167 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
1169 struct rb_root modules = RB_ROOT;
1172 err = read_proc_modules(filename, &modules);
1176 err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);
1178 delete_modules(&modules);
1183 * If kallsyms is referenced by name then we look for filename in the same directory.
1186 static bool filename_from_kallsyms_filename(char *filename,
1187 const char *base_name,
1188 const char *kallsyms_filename)
1192 strcpy(filename, kallsyms_filename);
1193 name = strrchr(filename, '/');
1199 if (!strcmp(name, "kallsyms")) {
1200 strcpy(name, base_name);
1207 static int validate_kcore_modules(const char *kallsyms_filename,
1210 struct maps *kmaps = map__kmaps(map);
1211 char modules_filename[PATH_MAX];
1216 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1220 if (do_validate_kcore_modules(modules_filename, kmaps))
1226 static int validate_kcore_addresses(const char *kallsyms_filename,
1229 struct kmap *kmap = map__kmap(map);
1234 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1237 if (kallsyms__get_function_start(kallsyms_filename,
1238 kmap->ref_reloc_sym->name, &start))
1240 if (start != kmap->ref_reloc_sym->addr)
1244 return validate_kcore_modules(kallsyms_filename, map);
1247 struct kcore_mapfn_data {
1249 struct list_head maps;
1252 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1254 struct kcore_mapfn_data *md = data;
1255 struct map_list_node *list_node = map_list_node__new();
1260 list_node->map = map__new2(start, md->dso);
1261 if (!list_node->map) {
1266 map__set_end(list_node->map, map__start(list_node->map) + len);
1267 map__set_pgoff(list_node->map, pgoff);
1269 list_add(&list_node->node, &md->maps);
1274 static bool remove_old_maps(struct map *map, void *data)
1276 const struct map *map_to_save = data;
1279 * We need to preserve eBPF maps even if they are covered by kcore,
1280 * because we need to access eBPF dso for source data.
1282 return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
1285 static int dso__load_kcore(struct dso *dso, struct map *map,
1286 const char *kallsyms_filename)
1288 struct maps *kmaps = map__kmaps(map);
1289 struct kcore_mapfn_data md;
1290 struct map *replacement_map = NULL;
1291 struct machine *machine;
1294 char kcore_filename[PATH_MAX];
1300 machine = maps__machine(kmaps);
1302 /* This function requires that the map is the kernel map */
1303 if (!__map__is_kernel(map))
1306 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1310 /* Modules and kernel must be present at their original addresses */
1311 if (validate_kcore_addresses(kallsyms_filename, map))
1315 INIT_LIST_HEAD(&md.maps);
1317 fd = open(kcore_filename, O_RDONLY);
1319 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1324 /* Read new maps into temporary lists */
1325 err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
1329 dso->is_64_bit = is_64_bit;
1331 if (list_empty(&md.maps)) {
1336 /* Remove old maps */
1337 maps__remove_maps(kmaps, remove_old_maps, map);
1338 machine->trampolines_mapped = false;
1340 /* Find the kernel map using the '_stext' symbol */
1341 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1342 u64 replacement_size = 0;
1343 struct map_list_node *new_node;
1345 list_for_each_entry(new_node, &md.maps, node) {
1346 struct map *new_map = new_node->map;
1347 u64 new_size = map__size(new_map);
1349 if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
1353 * On some architectures, ARM64 for example, the kernel
1354 * text can get allocated inside of the vmalloc segment.
1355 * Select the smallest matching segment, in case stext
1356 * falls within more than one in the list.
1358 if (!replacement_map || new_size < replacement_size) {
1359 replacement_map = new_map;
1360 replacement_size = new_size;
1365 if (!replacement_map)
1366 replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
1369 while (!list_empty(&md.maps)) {
1370 struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
1371 struct map *new_map = new_node->map;
1373 list_del_init(&new_node->node);
1375 if (RC_CHK_EQUAL(new_map, replacement_map)) {
1376 struct map *map_ref;
1378 map__set_start(map, map__start(new_map));
1379 map__set_end(map, map__end(new_map));
1380 map__set_pgoff(map, map__pgoff(new_map));
1381 map__set_mapping_type(map, map__mapping_type(new_map));
1382 /* Ensure maps are correctly ordered */
1383 map_ref = map__get(map);
1384 maps__remove(kmaps, map_ref);
1385 err = maps__insert(kmaps, map_ref);
1392 * Merge kcore map into existing maps,
1393 * and ensure that current maps (eBPF) stay intact.
1396 if (maps__merge_in(kmaps, new_map)) {
1404 if (machine__is(machine, "x86_64")) {
1408 * If one of the corresponding symbols is there, assume the
1409 * entry trampoline maps are too.
1411 if (!kallsyms__get_function_start(kallsyms_filename,
1412 ENTRY_TRAMPOLINE_NAME,
1414 machine->trampolines_mapped = true;
1418 * Set the data type and long name so that kcore can be read via
1419 * dso__data_read_addr().
1421 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1422 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1424 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1425 dso__set_long_name(dso, strdup(kcore_filename), true);
1429 if (map__prot(map) & PROT_EXEC)
1430 pr_debug("Using %s for kernel object code\n", kcore_filename);
1432 pr_debug("Using %s for kernel data\n", kcore_filename);
1437 while (!list_empty(&md.maps)) {
1438 struct map_list_node *list_node;
1440 list_node = list_entry(md.maps.next, struct map_list_node, node);
1441 list_del_init(&list_node->node);
1442 map__zput(list_node->map);
1450 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1451 * delta based on the relocation reference symbol.
1453 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1457 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1460 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1463 *delta = addr - kmap->ref_reloc_sym->addr;
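/*
 * Example (hypothetical KASLR slide): if the reference symbol was
 * recorded at 0xffffffff81000000 but kallsyms now reports
 * 0xffffffff82000000, *delta is 0x1000000 and every parsed symbol is
 * shifted by it in maps__split_kallsyms().
 */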
1467 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1468 struct map *map, bool no_kcore)
1470 struct kmap *kmap = map__kmap(map);
1473 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1476 if (!kmap || !kmap->kmaps)
1479 if (dso__load_all_kallsyms(dso, filename) < 0)
1482 if (kallsyms__delta(kmap, filename, &delta))
1485 symbols__fixup_end(&dso->symbols, true);
1486 symbols__fixup_duplicate(&dso->symbols);
1488 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1489 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1491 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1493 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1494 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
1496 return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
1499 int dso__load_kallsyms(struct dso *dso, const char *filename,
1502 return __dso__load_kallsyms(dso, filename, map, false);
1505 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1512 file = fopen(map_path, "r");
1516 while (!feof(file)) {
1521 line_len = getline(&line, &n, file);
1528 line[--line_len] = '\0'; /* \n */
1530 len = hex2u64(line, &start);
1533 if (len + 2 >= line_len)
1536 len += hex2u64(line + len, &size);
1539 if (len + 2 >= line_len)
1542 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1545 goto out_delete_line;
1547 symbols__insert(&dso->symbols, sym);
1562 #ifdef HAVE_LIBBFD_SUPPORT
1563 #define PACKAGE 'perf'
1566 static int bfd_symbols__cmpvalue(const void *a, const void *b)
1568 const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
1570 if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
1571 return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
1573 return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
1576 static int bfd2elf_binding(asymbol *symbol)
1578 if (symbol->flags & BSF_WEAK)
1580 if (symbol->flags & BSF_GLOBAL)
1582 if (symbol->flags & BSF_LOCAL)
1587 int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
1590 long symbols_size, symbols_count, i;
1592 asymbol **symbols, *sym;
1593 struct symbol *symbol;
1597 abfd = bfd_openr(debugfile, NULL);
1601 if (!bfd_check_format(abfd, bfd_object)) {
1602 pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1607 if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1610 symbols_size = bfd_get_symtab_upper_bound(abfd);
1611 if (symbols_size == 0) {
1616 if (symbols_size < 0)
1619 symbols = malloc(symbols_size);
1623 symbols_count = bfd_canonicalize_symtab(abfd, symbols);
1624 if (symbols_count < 0)
1627 section = bfd_get_section_by_name(abfd, ".text");
1629 for (i = 0; i < symbols_count; ++i) {
1630 if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
1631 !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
1634 if (i < symbols_count) {
1635 /* PE symbol values are only 4 bytes wide, so take the high bits from .text */
1636 dso->text_offset = section->vma - (u32)section->vma;
1637 dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
1638 dso->text_end = (section->vma - dso->text_offset) + section->size;
1640 dso->text_offset = section->vma - section->filepos;
1641 dso->text_end = section->filepos + section->size;
1645 qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
1647 #ifdef bfd_get_section
1648 #define bfd_asymbol_section bfd_get_section
1650 for (i = 0; i < symbols_count; ++i) {
1652 section = bfd_asymbol_section(sym);
1653 if (bfd2elf_binding(sym) < 0)
1656 while (i + 1 < symbols_count &&
1657 bfd_asymbol_section(symbols[i + 1]) == section &&
1658 bfd2elf_binding(symbols[i + 1]) < 0)
1661 if (i + 1 < symbols_count &&
1662 bfd_asymbol_section(symbols[i + 1]) == section)
1663 len = symbols[i + 1]->value - sym->value;
1665 len = section->size - sym->value;
1667 start = bfd_asymbol_value(sym) - dso->text_offset;
1668 symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
1669 bfd_asymbol_name(sym));
1673 symbols__insert(&dso->symbols, symbol);
1675 #ifdef bfd_get_section
1676 #undef bfd_asymbol_section
1679 symbols__fixup_end(&dso->symbols, false);
1680 symbols__fixup_duplicate(&dso->symbols);
1681 dso->adjust_symbols = 1;
1692 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1693 enum dso_binary_type type)
1696 case DSO_BINARY_TYPE__JAVA_JIT:
1697 case DSO_BINARY_TYPE__DEBUGLINK:
1698 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1699 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1700 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1701 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1702 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1703 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1704 return !kmod && dso->kernel == DSO_SPACE__USER;
1706 case DSO_BINARY_TYPE__KALLSYMS:
1707 case DSO_BINARY_TYPE__VMLINUX:
1708 case DSO_BINARY_TYPE__KCORE:
1709 return dso->kernel == DSO_SPACE__KERNEL;
1711 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1712 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1713 case DSO_BINARY_TYPE__GUEST_KCORE:
1714 return dso->kernel == DSO_SPACE__KERNEL_GUEST;
1716 case DSO_BINARY_TYPE__GUEST_KMODULE:
1717 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1718 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1719 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1721 * kernel modules know their symtab type - it's set when
1722 * creating a module dso in machine__addnew_module_map().
1724 return kmod && dso->symtab_type == type;
1726 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1727 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1730 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1731 case DSO_BINARY_TYPE__BPF_IMAGE:
1732 case DSO_BINARY_TYPE__OOL:
1733 case DSO_BINARY_TYPE__NOT_FOUND:
1739 /* Checks for the existence of the perf-<pid>.map file in two different
1740 * locations. First, if the process is in a separate mount namespace, check in
1741 * that namespace using the pid of the innermost pid namespace. If it's not in a
1742 * namespace, or the file can't be found there, try in the mount namespace of
1743 * the tracing process using our view of its pid.
1745 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1746 struct nsinfo **nsip)
1748 struct nscookie nsc;
1750 struct nsinfo *nnsi;
1755 if (nsinfo__need_setns(nsi)) {
1756 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
1757 nsinfo__mountns_enter(nsi, &nsc);
1758 rc = access(filebuf, R_OK);
1759 nsinfo__mountns_exit(&nsc);
1764 nnsi = nsinfo__copy(nsi);
1768 nsinfo__clear_need_setns(nnsi);
1769 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
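/*
 * Example (hypothetical pids): a JIT seen as pid 1234 on the host but
 * tgid 1 inside its pid namespace writes /tmp/perf-1.map in its own
 * mount namespace; the first probe above looks there, this fallback
 * checks /tmp/perf-1234.map from our mount namespace.
 */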
1777 int dso__load(struct dso *dso, struct map *map)
1782 struct machine *machine = NULL;
1783 char *root_dir = (char *) "";
1785 struct symsrc ss_[2];
1786 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1789 struct build_id bid;
1790 struct nscookie nsc;
1791 char newmapname[PATH_MAX];
1792 const char *map_path = dso->long_name;
1794 mutex_lock(&dso->lock);
1795 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1797 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1798 sizeof(newmapname), &dso->nsinfo) == 0)) {
1799 map_path = newmapname;
1803 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1805 /* check again under the dso->lock */
1806 if (dso__loaded(dso)) {
1811 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1812 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1813 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1814 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1816 if (dso->kernel && !kmod) {
1817 if (dso->kernel == DSO_SPACE__KERNEL)
1818 ret = dso__load_kernel_sym(dso, map);
1819 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1820 ret = dso__load_guest_kernel_sym(dso, map);
1822 machine = maps__machine(map__kmaps(map));
1823 if (machine__is(machine, "x86_64"))
1824 machine__map_x86_64_entry_trampolines(machine, dso);
1828 dso->adjust_symbols = 0;
1831 ret = dso__load_perf_map(map_path, dso);
1832 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1833 DSO_BINARY_TYPE__NOT_FOUND;
1838 root_dir = machine->root_dir;
1840 name = malloc(PATH_MAX);
1845 * Read the build id if possible. This is required for
1846 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1848 if (!dso->has_build_id &&
1849 is_regular_file(dso->long_name)) {
1850 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1851 if (filename__read_build_id(name, &bid) > 0)
1852 dso__set_build_id(dso, &bid);
1856 * Iterate over candidate debug images.
1857 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1858 * and/or opd section) for processing.
1860 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1861 struct symsrc *ss = &ss_[ss_pos];
1862 bool next_slot = false;
1868 enum dso_binary_type symtab_type = binary_type_symtab[i];
1870 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1871 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1873 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1876 if (dso__read_binary_type_filename(dso, symtab_type,
1877 root_dir, name, PATH_MAX))
1881 nsinfo__mountns_exit(&nsc);
1883 is_reg = is_regular_file(name);
1884 if (!is_reg && errno == ENOENT && dso->nsinfo) {
1885 char *new_name = dso__filename_with_chroot(dso, name);
1887 is_reg = is_regular_file(new_name);
1888 strlcpy(name, new_name, PATH_MAX);
1893 #ifdef HAVE_LIBBFD_SUPPORT
1895 bfdrc = dso__load_bfd_symbols(dso, name);
1897 if (is_reg && bfdrc < 0)
1898 sirc = symsrc__init(ss, dso, name, symtab_type);
1901 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1908 if (!is_reg || sirc < 0)
1911 if (!syms_ss && symsrc__has_symtab(ss)) {
1914 if (!dso->symsrc_filename)
1915 dso->symsrc_filename = strdup(name);
1918 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1926 if (syms_ss && runtime_ss)
1929 symsrc__destroy(ss);
1934 if (!runtime_ss && !syms_ss)
1937 if (runtime_ss && !syms_ss) {
1938 syms_ss = runtime_ss;
1941 /* We'll have to hope for the best */
1942 if (!runtime_ss && syms_ss)
1943 runtime_ss = syms_ss;
1946 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1953 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1958 for (; ss_pos > 0; ss_pos--)
1959 symsrc__destroy(&ss_[ss_pos - 1]);
1962 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1965 dso__set_loaded(dso);
1966 mutex_unlock(&dso->lock);
1967 nsinfo__mountns_exit(&nsc);
1972 int dso__load_vmlinux(struct dso *dso, struct map *map,
1973 const char *vmlinux, bool vmlinux_allocated)
1977 char symfs_vmlinux[PATH_MAX];
1978 enum dso_binary_type symtab_type;
1980 if (vmlinux[0] == '/')
1981 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1983 symbol__join_symfs(symfs_vmlinux, vmlinux);
1985 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1986 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1988 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1990 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1994 * dso__load_sym() may copy 'dso' which will result in the copies having
1995 * an incorrect long name unless we set it here first.
1997 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1998 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1999 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
2001 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
2003 err = dso__load_sym(dso, map, &ss, &ss, 0);
2004 symsrc__destroy(&ss);
2007 dso__set_loaded(dso);
2008 pr_debug("Using %s for symbols\n", symfs_vmlinux);
2014 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
2017 char *filename = NULL;
2019 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
2020 vmlinux_path__nr_entries + 1);
2022 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
2023 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
2028 if (!symbol_conf.ignore_vmlinux_buildid)
2029 filename = dso__build_id_filename(dso, NULL, 0, false);
2030 if (filename != NULL) {
2031 err = dso__load_vmlinux(dso, map, filename, true);
2040 static bool visible_dir_filter(const char *name, struct dirent *d)
2042 if (d->d_type != DT_DIR)
2044 return lsdir_no_dot_filter(name, d);
2047 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
2049 char kallsyms_filename[PATH_MAX];
2051 struct strlist *dirs;
2052 struct str_node *nd;
2054 dirs = lsdir(dir, visible_dir_filter);
2058 strlist__for_each_entry(nd, dirs) {
2059 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
2060 "%s/%s/kallsyms", dir, nd->s);
2061 if (!validate_kcore_addresses(kallsyms_filename, map)) {
2062 strlcpy(dir, kallsyms_filename, dir_sz);
2068 strlist__delete(dirs);
2074 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
2075 * since access(R_OK) only checks with the real UID/GID while open() uses the effective
2076 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
2078 static bool filename__readable(const char *file)
2080 int fd = open(file, O_RDONLY);
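/*
 * Example (assumed setup): with euid 0 but a non-root real UID,
 * access("/proc/kcore", R_OK) fails while open() succeeds, so probing
 * with open() avoids a false negative.
 */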
2087 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
2089 struct build_id bid;
2090 char sbuild_id[SBUILD_ID_SIZE];
2091 bool is_host = false;
2092 char path[PATH_MAX];
2094 if (!dso->has_build_id) {
2096 * Last resort, if we don't have a build-id and couldn't find
2097 * any vmlinux file, try the running kernel kallsyms table.
2102 if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
2103 is_host = dso__build_id_equal(dso, &bid);
2105 /* Try a fast path for /proc/kallsyms if possible */
2108 * Do not check the build-id cache, unless we know we cannot use
2109 * /proc/kcore or the module maps don't match /proc/kallsyms.
2110 * To check readability of /proc/kcore, do not use access(R_OK)
2111 * since /proc/kcore requires CAP_SYS_RAWIO to read, and access(R_OK) can't check that.
2114 if (filename__readable("/proc/kcore") &&
2115 !validate_kcore_addresses("/proc/kallsyms", map))
2119 build_id__sprintf(&dso->bid, sbuild_id);
2121 /* Find kallsyms in build-id cache with kcore */
2122 scnprintf(path, sizeof(path), "%s/%s/%s",
2123 buildid_dir, DSO__NAME_KCORE, sbuild_id);
2125 if (!find_matching_kcore(map, path, sizeof(path)))
2126 return strdup(path);
2128 /* Use current /proc/kallsyms if possible */
2131 return strdup("/proc/kallsyms");
2134 /* Finally, find a cache of kallsyms */
2135 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
2136 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
2141 return strdup(path);
2144 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
2147 const char *kallsyms_filename = NULL;
2148 char *kallsyms_allocated_filename = NULL;
2149 char *filename = NULL;
2152 * Step 1: if the user specified a kallsyms or vmlinux filename, use
2153 * it and only it, reporting errors to the user if it cannot be used.
2155 * For instance, try to analyse an ARM perf.data file _without_ a
2156 * build-id, or if the user specifies the wrong path to the right
2157 * vmlinux file, obviously we can't fall back to another vmlinux (an
2158 * x86_64 one, on the machine where analysis is being performed, say),
2159 * or worse, /proc/kallsyms.
2161 * If the specified file _has_ a build-id and there is a build-id
2162 * section in the perf.data file, we will still do the expected
2163 * validation in dso__load_vmlinux and will bail out if they don't match.
2166 if (symbol_conf.kallsyms_name != NULL) {
2167 kallsyms_filename = symbol_conf.kallsyms_name;
2171 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
2172 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2176 * Before checking on common vmlinux locations, check if it's
2177 * stored as a standard build-id binary (not kallsyms) under the .debug cache.
2180 if (!symbol_conf.ignore_vmlinux_buildid)
2181 filename = __dso__build_id_filename(dso, NULL, 0, false, false);
2182 if (filename != NULL) {
2183 err = dso__load_vmlinux(dso, map, filename, true);
2189 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2190 err = dso__load_vmlinux_path(dso, map);
2195 /* do not try local files if a symfs was given */
2196 if (symbol_conf.symfs[0] != 0)
2199 kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2200 if (!kallsyms_allocated_filename)
2203 kallsyms_filename = kallsyms_allocated_filename;
2206 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2208 pr_debug("Using %s for symbols\n", kallsyms_filename);
2209 free(kallsyms_allocated_filename);
2211 if (err > 0 && !dso__is_kcore(dso)) {
2212 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2213 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2214 map__fixup_start(map);
2215 map__fixup_end(map);
2221 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2224 const char *kallsyms_filename;
2225 struct machine *machine = maps__machine(map__kmaps(map));
2226 char path[PATH_MAX];
2228 if (machine->kallsyms_filename) {
2229 kallsyms_filename = machine->kallsyms_filename;
2230 } else if (machine__is_default_guest(machine)) {
2232 * if the user specified a vmlinux filename, use it and only
2233 * it, reporting errors to the user if it cannot be used.
2234 * Otherwise use the guest kallsyms file the user supplied on the command line.
2236 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2237 err = dso__load_vmlinux(dso, map,
2238 symbol_conf.default_guest_vmlinux_name,
2243 kallsyms_filename = symbol_conf.default_guest_kallsyms;
2244 if (!kallsyms_filename)
2247 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2248 kallsyms_filename = path;
2251 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2253 pr_debug("Using %s for symbols\n", kallsyms_filename);
2254 if (err > 0 && !dso__is_kcore(dso)) {
2255 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2256 dso__set_long_name(dso, machine->mmap_name, false);
2257 map__fixup_start(map);
2258 map__fixup_end(map);
2264 static void vmlinux_path__exit(void)
2266 while (--vmlinux_path__nr_entries >= 0)
2267 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2268 vmlinux_path__nr_entries = 0;
2270 zfree(&vmlinux_path);
2273 static const char * const vmlinux_paths[] = {
2278 static const char * const vmlinux_paths_upd[] = {
2280 "/usr/lib/debug/boot/vmlinux-%s",
2281 "/lib/modules/%s/build/vmlinux",
2282 "/usr/lib/debug/lib/modules/%s/vmlinux",
2283 "/usr/lib/debug/boot/vmlinux-%s.debug"
2286 static int vmlinux_path__add(const char *new_entry)
2288 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2289 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2291 ++vmlinux_path__nr_entries;
2296 static int vmlinux_path__init(struct perf_env *env)
2300 char *kernel_version;
2303 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2304 ARRAY_SIZE(vmlinux_paths_upd)));
2305 if (vmlinux_path == NULL)
2308 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2309 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2312 /* only try kernel version if no symfs was given */
2313 if (symbol_conf.symfs[0] != 0)
2317 kernel_version = env->os_release;
2319 if (uname(&uts) < 0)
2322 kernel_version = uts.release;
2325 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2326 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2327 if (vmlinux_path__add(bf) < 0)
2334 vmlinux_path__exit();
2338 int setup_list(struct strlist **list, const char *list_str,
2339 const char *list_name)
2341 if (list_str == NULL)
2344 *list = strlist__new(list_str, NULL);
2346 pr_err("problems parsing %s list\n", list_name);
2350 symbol_conf.has_filter = true;
2354 int setup_intlist(struct intlist **list, const char *list_str,
2355 const char *list_name)
2357 if (list_str == NULL)
2360 *list = intlist__new(list_str);
2362 pr_err("problems parsing %s list\n", list_name);
2368 static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
2370 struct str_node *pos, *tmp;
2376 *addr_list = intlist__new(NULL);
2380 strlist__for_each_entry_safe(pos, tmp, sym_list) {
2382 val = strtoul(pos->s, &sep, 16);
2383 if (errno || (sep == pos->s))
2387 end = pos->s + strlen(pos->s) - 1;
2388 while (end >= sep && isspace(*end))
2395 err = intlist__add(*addr_list, val);
2399 strlist__remove(sym_list, pos);
2404 intlist__delete(*addr_list);
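/*
 * Example (hypothetical filter): a sym_list of "0xffffffff81000000,do_sys_open"
 * moves the hex entry into addr_list and leaves "do_sys_open" behind for
 * name-based matching.
 */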
2411 static bool symbol__read_kptr_restrict(void)
2414 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2419 if (fgets(line, sizeof(line), fp) != NULL)
2420 value = perf_cap__capable(CAP_SYSLOG) ?
2427 /* Per kernel/kallsyms.c:
2428 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
2430 if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
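/*
 * Recap of the /proc/sys/kernel/kptr_restrict values probed above:
 * 0 = addresses visible (subject to perf_event_paranoid), 1 = hidden
 * unless the reader has CAP_SYSLOG, 2 = always hidden.
 */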
2436 int symbol__annotation_init(void)
2438 if (symbol_conf.init_annotation)
2441 if (symbol_conf.initialized) {
2442 pr_err("Annotation needs to be init before symbol__init()\n");
2446 symbol_conf.priv_size += sizeof(struct annotation);
2447 symbol_conf.init_annotation = true;
2451 int symbol__init(struct perf_env *env)
2455 if (symbol_conf.initialized)
2458 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2462 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2465 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2466 pr_err("'.' is the only non valid --field-separator argument\n");
2470 if (setup_list(&symbol_conf.dso_list,
2471 symbol_conf.dso_list_str, "dso") < 0)
2474 if (setup_list(&symbol_conf.comm_list,
2475 symbol_conf.comm_list_str, "comm") < 0)
2476 goto out_free_dso_list;
2478 if (setup_intlist(&symbol_conf.pid_list,
2479 symbol_conf.pid_list_str, "pid") < 0)
2480 goto out_free_comm_list;
2482 if (setup_intlist(&symbol_conf.tid_list,
2483 symbol_conf.tid_list_str, "tid") < 0)
2484 goto out_free_pid_list;
2486 if (setup_list(&symbol_conf.sym_list,
2487 symbol_conf.sym_list_str, "symbol") < 0)
2488 goto out_free_tid_list;
2490 if (symbol_conf.sym_list &&
2491 setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
2492 goto out_free_sym_list;
2494 if (setup_list(&symbol_conf.bt_stop_list,
2495 symbol_conf.bt_stop_list_str, "symbol") < 0)
2496 goto out_free_sym_list;
2499 * A symfs path of "/" is identical to "",
2500 * so reset it here for simplicity.
2502 symfs = realpath(symbol_conf.symfs, NULL);
2504 symfs = symbol_conf.symfs;
2505 if (strcmp(symfs, "/") == 0)
2506 symbol_conf.symfs = "";
2507 if (symfs != symbol_conf.symfs)
2508 free((void *)symfs);
2510 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2512 symbol_conf.initialized = true;
2516 strlist__delete(symbol_conf.sym_list);
2517 intlist__delete(symbol_conf.addr_list);
2519 intlist__delete(symbol_conf.tid_list);
2521 intlist__delete(symbol_conf.pid_list);
2523 strlist__delete(symbol_conf.comm_list);
2525 strlist__delete(symbol_conf.dso_list);
2529 void symbol__exit(void)
2531 if (!symbol_conf.initialized)
2533 strlist__delete(symbol_conf.bt_stop_list);
2534 strlist__delete(symbol_conf.sym_list);
2535 strlist__delete(symbol_conf.dso_list);
2536 strlist__delete(symbol_conf.comm_list);
2537 intlist__delete(symbol_conf.tid_list);
2538 intlist__delete(symbol_conf.pid_list);
2539 intlist__delete(symbol_conf.addr_list);
2540 vmlinux_path__exit();
2541 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2542 symbol_conf.bt_stop_list = NULL;
2543 symbol_conf.initialized = false;
2546 int symbol__config_symfs(const struct option *opt __maybe_unused,
2547 const char *dir, int unset __maybe_unused)
2552 symbol_conf.symfs = strdup(dir);
2553 if (symbol_conf.symfs == NULL)
2556 /* skip the locally configured cache if a symfs is given, and
2557 * configure the buildid dir as symfs/.debug
2559 ret = asprintf(&bf, "%s/%s", dir, ".debug");
2563 set_buildid_dir(bf);
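/* Example: "--symfs /mnt/sysroot" makes the buildid dir /mnt/sysroot/.debug. */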
2569 struct mem_info *mem_info__get(struct mem_info *mi)
2572 refcount_inc(&mi->refcnt);
2576 void mem_info__put(struct mem_info *mi)
2578 if (mi && refcount_dec_and_test(&mi->refcnt)) {
2579 addr_map_symbol__exit(&mi->iaddr);
2580 addr_map_symbol__exit(&mi->daddr);
2585 struct mem_info *mem_info__new(void)
2587 struct mem_info *mi = zalloc(sizeof(*mi));
2590 refcount_set(&mi->refcnt, 1);
2595 * Checks that user-supplied kernel symbol files are accessible, because
2596 * the default mechanism for accessing ELF files fails silently, i.e. if
2597 * debug syms for a build ID aren't found, perf carries on normally. When
2598 * they are user supplied we should assume that the user doesn't want it to fail silently.
2601 int symbol__validate_sym_arguments(void)
2603 if (symbol_conf.vmlinux_name &&
2604 access(symbol_conf.vmlinux_name, R_OK)) {
2605 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
2608 if (symbol_conf.kallsyms_name &&
2609 access(symbol_conf.kallsyms_name, R_OK)) {
2610 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);