// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/param.h>
#include "util.h" // lsdir()
#include "map_symbol.h"
#include "mem-events.h"
#include "namespaces.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

int vmlinux_path__nr_entries;

struct symbol_conf symbol_conf = {
	.try_vmlinux_path = true,
	.demangle_kernel = false,
	.cumulate_callchain = true,
	.time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers = true,
};

struct map_list_node {
	struct list_head node;
	struct map *map;
};

static struct map_list_node *map_list_node__new(void)
{
	return malloc(sizeof(struct map_list_node));
}
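/*
 * Candidate symbol sources, in the order dso__load() probes them: the
 * first type that yields a usable symbol table wins, and
 * DSO_BINARY_TYPE__NOT_FOUND terminates the walk.
 */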
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
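/*
 * kallsyms and nm tag each symbol with a type letter: 'T' text, 'W'
 * weak, 'D' initialized data, 'B' BSS; lowercase means local. Fold
 * case first so local symbols pass the same filter as global ones.
 */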
static bool symbol_type__filter(char symbol_type)
{
	symbol_type = toupper(symbol_type);
	return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non-zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non-weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non-global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer the symbol with fewer leading underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	return arch__choose_best_symbol(syma, symb);
}

void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first_cached(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		if (!nd)
			break;

		next = rb_entry(nd, struct symbol, rb_node);
		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			if (next->type == STT_GNU_IFUNC)
				curr->ifunc_alias = true;
			rb_erase_cached(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			if (curr->type == STT_GNU_IFUNC)
				next->ifunc_alias = true;
			nd = rb_next(&curr->rb_node);
			rb_erase_cached(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

/* Update zero-sized symbols using the address of the next symbol */
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		/*
		 * On some architectures the kernel text segment starts at a
		 * low memory address, while modules sit at high addresses
		 * (or vice versa). The gap between the end of the kernel
		 * text segment and the beginning of the first module's text
		 * segment can be very big. Therefore do not fill this gap
		 * and do not assign it to the kernel dso map (kallsyms).
		 *
		 * Also BPF code can be allocated separately from text
		 * segments and modules, so the last entry in a module should
		 * not fill the gap either.
		 *
		 * In kallsyms, module symbols are marked with a bracketed
		 * module name, as in:
		 *   ffffffffc1937000 T hdmi_driver_init  [snd_hda_codec_hdmi]
		 */
		if (prev->end == prev->start) {
			const char *prev_mod;
			const char *curr_mod;

			if (!is_kallsyms) {
				prev->end = curr->start;
				continue;
			}

			prev_mod = strchr(prev->name, '[');
			curr_mod = strchr(curr->name, '[');

			/* Last kernel/module symbol mapped to end of page */
			if (!prev_mod != !curr_mod)
				prev->end = roundup(prev->end + 4096, 4096);
			/* Last symbol in the previous module */
			else if (prev_mod && strcmp(prev_mod, curr_mod))
				prev->end = roundup(prev->end + 4096, 4096);
			else
				prev->end = curr->start;

			pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
				  __func__, prev->name, prev->end);
		}
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}
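/*
 * Allocation layout assumed by symbol__new()/symbol__delete() below:
 * when symbol_conf.priv_size is non-zero, per-symbol private data
 * (e.g. struct annotation) lives in front of the symbol in a single
 * allocation:
 *
 *	[ priv_size bytes | struct symbol | name (NUL-terminated) ]
 *
 * symbol__new() returns the pointer advanced past the private area, so
 * symbol__delete() must step back priv_size bytes to free the original
 * allocation.
 */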
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;

			annotation__init(notes);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = symbol__annotation(sym);

			annotation__exit(notes);
		}
	}
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and prepends a '.' to
		 * the name of every function entry point. Skip it.
		 */
		if (name[0] == '.')
			name++;

		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_first_cached(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_last(&symbols->rb_root);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
{
	const struct symbol *lhs = *((const struct symbol **)vlhs);
	const struct symbol *rhs = *((const struct symbol **)vrhs);

	return strcmp(lhs->name, rhs->name);
}

static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
{
	struct rb_node *nd;
	struct symbol **result;
	size_t i = 0, size = 0;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
		size++;

	result = malloc(sizeof(*result) * size);
	if (!result)
		return NULL;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		result[i++] = pos;
	}
	qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
	*len = size;
	return result;
}
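/*
 * Glibc-style versioned symbols carry their default version after a
 * "@@" marker, e.g. "malloc@@GLIBC_2.2.5". With
 * SYMBOL_TAG_INCLUDE__DEFAULT_ONLY we compare up to the longer of the
 * query string and the unversioned part, so a lookup for "malloc"
 * still matches the default-versioned entry.
 */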
int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}
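/*
 * Binary search over the array built by symbols__sort_by_name(); on a
 * hit, walk left so that *found_idx points at the first entry whose
 * name compares equal, since aliases sort next to each other.
 */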
static struct symbol *symbols__find_by_name(struct symbol *symbols[],
					    size_t symbols_len,
					    const char *name,
					    enum symbol_tag_include includes,
					    size_t *found_idx)
{
	size_t i, lower = 0, upper = symbols_len;
	struct symbol *s = NULL;

	if (found_idx)
		*found_idx = SIZE_MAX;

	if (!symbols_len)
		return NULL;

	while (lower < upper) {
		int cmp;

		i = (lower + upper) / 2;
		cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);

		if (cmp > 0)
			upper = i;
		else if (cmp < 0)
			lower = i + 1;
		else {
			if (found_idx)
				*found_idx = i;
			s = symbols[i];
			break;
		}
	}
	if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
		/* return first symbol that has same name (if any) */
		for (; i > 0; i--) {
			struct symbol *tmp = symbols[i - 1];

			if (!arch__compare_symbol_names(tmp->name, s->name)) {
				if (found_idx)
					*found_idx = i - 1;
				s = tmp;
			} else
				break;
		}
	}
	assert(!found_idx || !s || s == symbols[*found_idx]);
	return s;
}

void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso__set_last_find_result_addr(dso, 0);
	dso__set_last_find_result_symbol(dso, NULL);
}

void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(dso__symbols(dso), sym, dso__kernel(dso));

	/* update the symbol cache if necessary */
	if (dso__last_find_result_addr(dso) >= sym->start &&
	    (dso__last_find_result_addr(dso) < sym->end ||
	     sym->start == sym->end)) {
		dso__set_last_find_result_symbol(dso, sym);
	}
}

void dso__delete_symbol(struct dso *dso, struct symbol *sym)
{
	rb_erase_cached(&sym->rb_node, dso__symbols(dso));
	symbol__delete(sym);
	dso__reset_find_symbol_cache(dso);
}
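/*
 * dso__find_symbol() memoizes a single (address, symbol) pair: perf
 * frequently resolves the same hot address back to back, so the
 * one-entry cache short-circuits the rb-tree walk. Anything that
 * mutates the tree must refresh or reset the cached result.
 */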
struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) {
		dso__set_last_find_result_addr(dso, addr);
		dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr));
	}

	return dso__last_find_result_symbol(dso);
}

struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
{
	return symbols__find(dso__symbols(dso), addr);
}

struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(dso__symbols(dso));
}

struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(dso__symbols(dso));
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
	if (*idx + 1 >= dso__symbol_names_len(dso))
		return NULL;

	++*idx;
	return dso__symbol_names(dso)[*idx];
}

/*
 * Returns the first symbol that matches @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
	struct symbol *s = symbols__find_by_name(dso__symbol_names(dso),
						 dso__symbol_names_len(dso),
						 name, SYMBOL_TAG_INCLUDE__NONE, idx);
	if (!s)
		s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso),
					  name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
	return s;
}

void dso__sort_by_name(struct dso *dso)
{
	mutex_lock(dso__lock(dso));
	if (!dso__sorted_by_name(dso)) {
		size_t len;

		dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len));
		if (dso__symbol_names(dso)) {
			dso__set_symbol_names_len(dso, len);
			dso__set_sorted_by_name(dso);
		}
	}
	mutex_unlock(dso__lock(dso));
}

/*
 * While we find nice hex chars, build a long_val.
 * Returns the number of chars processed.
 */
static int hex2u64(const char *ptr, u64 *long_val)
{
	char *p;

	*long_val = strtoull(ptr, &p, 16);

	return p - ptr;
}
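/*
 * modules__parse() expects /proc/modules-style lines, roughly:
 *
 *   snd_hda_codec_hdmi 94208 1 - Live 0xffffffffc1937000
 *
 * i.e. "name size refcount deps state address". The parser keys off
 * the last 'x' for the load address and the first space for the module
 * name, which it wraps in brackets ("[snd_hda_codec_hdmi]") to match
 * the kallsyms module annotation.
 */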
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}
/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"acpi_idle_do_entry",
		"acpi_processor_ffh_cstate_enter",
		"mwait_idle_with_hints",
		"mwait_idle_with_hints.constprop.0",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	int i;
	static struct strlist *idle_symbols_list;

	if (idle_symbols_list)
		return strlist__has_entry(idle_symbols_list, name);

	idle_symbols_list = strlist__new(NULL, NULL);

	for (i = 0; idle_symbols[i]; i++)
		strlist__add(idle_symbols_list, idle_symbols[i]);

	return strlist__has_entry(idle_symbols_list, name);
}
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = dso__symbols(dso);

	if (!symbol_type__filter(type))
		return 0;

	/* Ignore local symbols for ARM modules */
	if (name[0] == '$')
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (!sym)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached *root = dso__symbols(dso);
	struct rb_root_cached old_root = *root;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		struct map *curr_map;
		struct dso *curr_map_dso;
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = maps__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}
		curr_map_dso = map__dso(curr_map);
		pos->start -= map__start(curr_map) - map__pgoff(curr_map);
		if (pos->end > map__end(curr_map))
			pos->end = map__end(curr_map);
		if (pos->end)
			pos->end -= map__start(curr_map) - map__pgoff(curr_map);
		symbols__insert(dso__symbols(curr_map_dso), pos);
		++count;
		map__put(curr_map);
	}

	/* Symbols have been adjusted */
	dso__set_adjust_symbols(dso, true);

	return count;
}
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
 */
static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
				struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = map__get(initial_map);
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = dso__symbols(dso);
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = maps__machine(kmaps);

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			struct dso *curr_map_dso;

			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';
			curr_map_dso = map__dso(curr_map);
			if (strcmp(dso__short_name(curr_map_dso), module)) {
				if (!RC_CHK_EQUAL(curr_map, initial_map) &&
				    dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * contiguous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map_dso);
				}

				map__zput(curr_map);
				curr_map = maps__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map__get(initial_map);
					goto discard_symbol;
				}
				curr_map_dso = map__dso(curr_map);
				if (dso__loaded(curr_map_dso) &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = map__map_ip(curr_map, pos->start);
			pos->end = map__map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and its
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (!RC_CHK_EQUAL(curr_map, initial_map)) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d", kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d", kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL) {
				map__zput(curr_map);
				return -1;
			}

			dso__set_kernel(ndso, dso__kernel(dso));

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
			if (maps__insert(kmaps, curr_map)) {
				map__zput(curr_map);
				dso__put(ndso);
				return -1;
			}
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}

		if (!RC_CHK_EQUAL(curr_map, initial_map)) {
			struct dso *curr_map_dso = map__dso(curr_map);

			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(dso__symbols(curr_map_dso), pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (!RC_CHK_EQUAL(curr_map, initial_map) &&
	    dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
	    machine__is_default_guest(maps__machine(kmaps))) {
		dso__set_loaded(map__dso(curr_map));
	}
	map__put(curr_map);
	return count + moved;
}
bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);

	while (from_node && to_node) {
		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}
static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
{
	struct rb_root *modules = data;
	struct module_info *mi;
	struct dso *dso;

	if (!__map__is_kmodule(old_map))
		return 0;

	dso = map__dso(old_map);
	/* Module must be in memory at the same address */
	mi = find_module(dso__short_name(dso), modules);
	if (!mi || mi->start != map__start(old_map))
		return -EINVAL;

	return 0;
}

static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
	struct rb_root modules = RB_ROOT;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);

	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct maps *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map_list_node *list_node = map_list_node__new();

	if (!list_node)
		return -ENOMEM;

	list_node->map = map__new2(start, md->dso);
	if (!list_node->map) {
		free(list_node);
		return -ENOMEM;
	}

	map__set_end(list_node->map, map__start(list_node->map) + len);
	map__set_pgoff(list_node->map, pgoff);

	list_add(&list_node->node, &md->maps);

	return 0;
}

static bool remove_old_maps(struct map *map, void *data)
{
	const struct map *map_to_save = data;

	/*
	 * We need to preserve eBPF maps even if they are covered by kcore,
	 * because we need to access eBPF dso for source data.
	 */
	return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}
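/*
 * Rough flow of dso__load_kcore(): validate that kallsyms matches the
 * running kernel (ref_reloc_sym and module addresses), read the kcore
 * program headers into a temporary list, drop the old maps, pick the
 * segment containing '_stext' as the new kernel map, then merge the
 * remaining kcore maps in around any preserved eBPF maps.
 */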
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct maps *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *map_ref, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = maps__machine(kmaps);

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso__set_is_64_bit(dso, is_64_bit);

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	maps__remove_maps(kmaps, remove_old_maps, map);
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		u64 replacement_size = 0;
		struct map_list_node *new_node;

		list_for_each_entry(new_node, &md.maps, node) {
			struct map *new_map = new_node->map;
			u64 new_size = map__size(new_map);

			if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
				continue;

			/*
			 * On some architectures, ARM64 for example, the kernel
			 * text can get allocated inside of the vmalloc segment.
			 * Select the smallest matching segment, in case stext
			 * falls within more than one in the list.
			 */
			if (!replacement_map || new_size < replacement_size) {
				replacement_map = new_map;
				replacement_size = new_size;
			}
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;

	/*
	 * Update addresses of vmlinux map. Re-insert it to ensure maps are
	 * correctly ordered. Do this before using maps__merge_in() for the
	 * remaining maps so vmlinux gets split if necessary.
	 */
	map_ref = map__get(map);
	maps__remove(kmaps, map_ref);

	map__set_start(map_ref, map__start(replacement_map));
	map__set_end(map_ref, map__end(replacement_map));
	map__set_pgoff(map_ref, map__pgoff(replacement_map));
	map__set_mapping_type(map_ref, map__mapping_type(replacement_map));

	err = maps__insert(kmaps, map_ref);
	map__put(map_ref);
	if (err)
		goto out_err;

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
		struct map *new_map = new_node->map;

		list_del_init(&new_node->node);

		/* skip if replacement_map, already inserted above */
		if (!RC_CHK_EQUAL(new_map, replacement_map)) {
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (maps__merge_in(kmaps, new_map)) {
				err = -EINVAL;
				goto out_err;
			}
		}
		free(new_node);
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE);
	else
		dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE);
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map__prot(map) & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		struct map_list_node *list_node;

		list_node = list_entry(md.maps.next, struct map_list_node, node);
		list_del_init(&list_node->node);
		map__zput(list_node->map);
		free(list_node);
	}
	close(fd);
	return err;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(dso__symbols(dso), true);
	symbols__fixup_duplicate(dso__symbols(dso));

	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
	else
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS);

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}
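/*
 * A perf map file (/tmp/perf-<pid>.map) is plain text, one symbol per
 * line, roughly:
 *
 *   <start-hex> <size-hex> <symbol name>
 *
 * e.g. "40001000 40 some_jitted_function" (a made-up name for
 * illustration). dso__load_perf_map() parses exactly that triple.
 */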
static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(dso__symbols(dso), sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE "perf"
#include <bfd.h>

static int bfd_symbols__cmpvalue(const void *a, const void *b)
{
	const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;

	if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
		return bfd_asymbol_value(as) - bfd_asymbol_value(bs);

	return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
}

static int bfd2elf_binding(asymbol *symbol)
{
	if (symbol->flags & BSF_WEAK)
		return STB_WEAK;
	if (symbol->flags & BSF_GLOBAL)
		return STB_GLOBAL;
	if (symbol->flags & BSF_LOCAL)
		return STB_LOCAL;
	return -1;
}
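/*
 * dso__load_bfd_symbols() is the non-ELF (e.g. PE/COFF) fallback: it
 * canonicalizes the BFD symbol table, sorts it by value, and derives
 * each symbol's length from the next symbol in the same section (or
 * the section end for the last one).
 */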
int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
{
	int err = -1;
	long symbols_size, symbols_count, i;
	asection *section;
	asymbol **symbols, *sym;
	struct symbol *symbol;
	bfd *abfd;
	u64 start, len;

	abfd = bfd_openr(debugfile, NULL);
	if (!abfd)
		return -1;

	if (!bfd_check_format(abfd, bfd_object)) {
		pr_debug2("%s: cannot read %s bfd file.\n", __func__,
			  dso__long_name(dso));
		goto out_close;
	}

	if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
		goto out_close;

	symbols_size = bfd_get_symtab_upper_bound(abfd);
	if (symbols_size == 0) {
		bfd_close(abfd);
		return 0;
	}

	if (symbols_size < 0)
		goto out_close;

	symbols = malloc(symbols_size);
	if (!symbols)
		goto out_close;

	symbols_count = bfd_canonicalize_symtab(abfd, symbols);
	if (symbols_count < 0)
		goto out_free;

	section = bfd_get_section_by_name(abfd, ".text");
	if (section) {
		for (i = 0; i < symbols_count; ++i) {
			if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
			    !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
				break;
		}
		if (i < symbols_count) {
			/* PE symbol values are only 4 bytes, so use the .text high bits */
			dso->text_offset = section->vma - (u32)section->vma;
			dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
			dso->text_end = (section->vma - dso->text_offset) + section->size;
		} else {
			dso->text_offset = section->vma - section->filepos;
			dso->text_end = section->filepos + section->size;
		}
	}

	qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);

#ifdef bfd_get_section
#define bfd_asymbol_section bfd_get_section
#endif
	for (i = 0; i < symbols_count; ++i) {
		sym = symbols[i];
		section = bfd_asymbol_section(sym);
		if (bfd2elf_binding(sym) < 0)
			continue;

		while (i + 1 < symbols_count &&
		       bfd_asymbol_section(symbols[i + 1]) == section &&
		       bfd2elf_binding(symbols[i + 1]) < 0)
			i++;

		if (i + 1 < symbols_count &&
		    bfd_asymbol_section(symbols[i + 1]) == section)
			len = symbols[i + 1]->value - sym->value;
		else
			len = section->size - sym->value;

		start = bfd_asymbol_value(sym) - dso->text_offset;
		symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
				     bfd_asymbol_name(sym));
		if (!symbol)
			goto out_free;

		symbols__insert(dso__symbols(dso), symbol);
	}
#ifdef bfd_get_section
#undef bfd_asymbol_section
#endif

	symbols__fixup_end(dso__symbols(dso), false);
	symbols__fixup_duplicate(dso__symbols(dso));
	dso__set_adjust_symbols(dso, true);

	err = 0;
out_free:
	free(symbols);
out_close:
	bfd_close(abfd);
	return err;
}
#endif
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso__kernel(dso) == DSO_SPACE__USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso__kernel(dso) == DSO_SPACE__KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__addnew_module_map().
		 */
		return kmod && dso__symtab_type(dso) == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		return false;
	}

	return false;
}

/*
 * Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is in a separate mount namespace, check
 * in that namespace using the pid of the innermost pid namespace. If it's
 * not in a namespace, or the file can't be found there, try in the mount
 * namespace of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsinfo__need_setns(nsi)) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nsinfo__clear_need_setns(nnsi);
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}
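/*
 * dso__load() drives everything above: take the dso lock, handle the
 * special cases (kernel DSOs go through kallsyms/vmlinux, /tmp/perf-*
 * maps through dso__load_perf_map()), then walk binary_type_symtab[]
 * collecting up to two symbol sources: one carrying the symtab proper
 * and one runtime image for section layout and PLT synthesis, and
 * finally hand both to dso__load_sym().
 */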
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine = NULL;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	struct build_id bid;
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso__long_name(dso);

	mutex_lock(dso__lock(dso));
	perfmap = strncmp(dso__name(dso), "/tmp/perf-", 10) == 0;
	if (perfmap) {
		if (dso__nsinfo(dso) &&
		    (dso__find_perf_map(newmapname, sizeof(newmapname),
					dso__nsinfo_ptr(dso)) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	kmod = dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	if (dso__kernel(dso) && !kmod) {
		if (dso__kernel(dso) == DSO_SPACE__KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
			ret = dso__load_guest_kernel_sym(dso, map);

		machine = maps__machine(map__kmaps(map));
		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso__set_adjust_symbols(dso, false);

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso__set_symtab_type(dso, ret > 0
				? DSO_BINARY_TYPE__JAVA_JIT
				: DSO_BINARY_TYPE__NOT_FOUND);
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso__has_build_id(dso) &&
	    is_regular_file(dso__long_name(dso))) {
		__symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
		if (filename__read_build_id(name, &bid) > 0)
			dso__set_build_id(dso, &bid);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int bfdrc = -1;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) {
			char *new_name = dso__filename_with_chroot(dso, name);

			if (new_name) {
				is_reg = is_regular_file(new_name);
				strlcpy(name, new_name, PATH_MAX);
				free(new_name);
			}
		}

#ifdef HAVE_LIBBFD_SUPPORT
		if (is_reg)
			bfdrc = dso__load_bfd_symbols(dso, name);
#endif
		if (is_reg && bfdrc < 0)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);

		if (bfdrc == 0) {
			ret = 0;
			break;
		}

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso__symsrc_filename(dso))
				dso__set_symsrc_filename(dso, strdup(name));
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	mutex_unlock(dso__lock(dso));
	nsinfo__mountns_exit(&nsc);

	return ret;
}
/*
 * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
 * it returns an error.
 */
int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
		if (vmlinux_allocated)
			free((char *) vmlinux);
		return err;
	}

	/*
	 * dso__load_sym() may copy 'dso' which will result in the copies having
	 * an incorrect long name unless we set it here first.
	 */
	dso__set_long_name(dso, vmlinux, vmlinux_allocated);
	if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX);
	else
		dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX);

	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		dso__set_loaded(dso);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
	int i, err = 0;
	char *filename = NULL;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
		if (err > 0)
			goto out;
	}

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			goto out;
	}
out:
	return err;
}

static bool visible_dir_filter(const char *name, struct dirent *d)
{
	if (d->d_type != DT_DIR)
		return false;
	return lsdir_no_dot_filter(name, d);
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}

/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with the real UID/GID but open() uses the
 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);

	if (fd < 0)
		return false;
	close(fd);
	return true;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	struct build_id bid;
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso__has_build_id(dso)) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
		is_host = dso__build_id_equal(dso, &bid);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso__bid(dso), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	char *filename = NULL;

	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	/*
	 * Before checking on common vmlinux locations, check if it's
	 * stored as standard build id binary (not kallsyms) under
	 * .debug cache.
	 */
	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = __dso__build_id_filename(dso, NULL, 0, false, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			return err;
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS);
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename;
	struct machine *machine = maps__machine(map__kmaps(map));
	char path[PATH_MAX];

	if (machine->kallsyms_filename) {
		kallsyms_filename = machine->kallsyms_filename;
	} else if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use the guest kallsyms file specified by the user on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}
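/*
 * vmlinux_path__init() first adds the fixed candidates above, then, if
 * no symfs was given, expands each template in vmlinux_paths_upd with
 * the kernel version, e.g. "/boot/vmlinux-%s" becoming something like
 * "/boot/vmlinux-6.8.0" for a matching running kernel.
 */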
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env)
		kernel_version = env->os_release;
	else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}
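/*
 * setup_addrlist() splits numeric entries out of the --symbols list:
 * any entry that parses fully as a hex address (e.g. "c000" or
 * "0xffffffff81000000") is moved to addr_list, leaving only real
 * symbol names in sym_list.
 */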
static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
{
	struct str_node *pos, *tmp;
	unsigned long val;
	char *sep;
	const char *end;
	int i = 0, err;

	*addr_list = intlist__new(NULL);
	if (!*addr_list)
		return -1;

	strlist__for_each_entry_safe(pos, tmp, sym_list) {
		errno = 0;
		val = strtoul(pos->s, &sep, 16);
		if (errno || (sep == pos->s))
			continue;

		if (*sep != '\0') {
			end = pos->s + strlen(pos->s) - 1;
			while (end >= sep && isspace(*end))
				end--;

			if (end >= sep)
				continue;
		}

		err = intlist__add(*addr_list, val);
		if (err)
			break;

		strlist__remove(sym_list, pos);
		i++;
	}

	if (i == 0) {
		intlist__delete(*addr_list);
		*addr_list = NULL;
	}

	return 0;
}
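/*
 * /proc/sys/kernel/kptr_restrict values: 0 leaves kernel addresses
 * visible (subject to permissions), 1 hides them from users lacking
 * CAP_SYSLOG, and 2 hides them from everyone. The helper below folds
 * perf_event_paranoid() into the same boolean.
 */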
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = perf_cap__capable(CAP_SYSLOG) ?
					(atoi(line) >= 2) :
					(atoi(line) != 0);

		fclose(fp);
	}

	/* Per kernel/kallsyms.c:
	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be init before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (symbol_conf.sym_list &&
	    setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
		goto out_free_sym_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to "";
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
	intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	intlist__delete(symbol_conf.addr_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/* skip the locally configured cache if a symfs is given, and
	 * configure the buildid dir as symfs/.debug
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

/*
 * Checks that user supplied symbol kernel files are accessible because
 * the default mechanism for accessing elf files fails silently. i.e. if
 * debug syms for a build ID aren't found perf carries on normally. When
 * they are user supplied we should assume that the user doesn't want to
 * silently fail.
 */
int symbol__validate_sym_arguments(void)
{
	if (symbol_conf.vmlinux_name &&
	    access(symbol_conf.vmlinux_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
		return -EINVAL;
	}
	if (symbol_conf.kallsyms_name &&
	    access(symbol_conf.kallsyms_name, R_OK)) {
		pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
		return -EINVAL;
	}

	return 0;
}