1 // SPDX-License-Identifier: GPL-2.0
19 #include "util/copyfile.h"
20 #include <linux/ctype.h>
21 #include <linux/kernel.h>
22 #include <linux/zalloc.h>
23 #include <linux/string.h>
24 #include <symbol/kallsyms.h>
25 #include <internal/lib.h>
/*
 * NOTE(review): this listing is elided - the embedded original line numbers
 * jump, so source lines are missing between consecutive entries throughout
 * the file. Comments below describe only what is visible.
 */
27 #ifdef HAVE_LIBBFD_SUPPORT
28 #define PACKAGE 'perf'
32 #if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
/* Demangler option flags (libiberty cplus_demangle-style). */
34 #define DMGL_PARAMS (1 << 0) /* Include function args */
35 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
/* Fallback e_machine values for toolchains whose elf.h predates them. */
40 #define EM_AARCH64 183 /* ARM 64 bit */
44 #define EM_LOONGARCH 258
/* st_other visibility extraction, provided when libelf does not define it. */
47 #ifndef ELF32_ST_VISIBILITY
48 #define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
51 /* For ELF64 the definitions are the same. */
52 #ifndef ELF64_ST_VISIBILITY
53 #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
56 /* How to extract information held in the st_other field. */
57 #ifndef GELF_ST_VISIBILITY
58 #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
/* Generic note header; the Elf64 layout is used for both classes here. */
61 typedef Elf64_Nhdr GElf_Nhdr;
/*
 * Fallback elf_getphdrnum() for old libelf: reads the program header count
 * via the ELF header. NOTE(review): body lines are elided from this listing.
 */
64 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
65 static int elf_getphdrnum(Elf *elf, size_t *dst)
70 ehdr = gelf_getehdr(elf, &gehdr);
80 #ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
/* Stub for libelf <= 0.140: only reports the missing feature and fails. */
81 static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
83 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
/* GNU build-id note type, for headers that do not define it. */
88 #ifndef NT_GNU_BUILD_ID
89 #define NT_GNU_BUILD_ID 3
93 * elf_symtab__for_each_symbol - iterate thru all the symbols
95 * @syms: struct elf_symtab instance to iterate
97 * @sym: GElf_Sym iterator
/* NOTE(review): the loop-termination line (presumably idx < nr_syms) is elided. */
99 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
100 for (idx = 0, gelf_getsym(syms, idx, &sym);\
102 idx++, gelf_getsym(syms, idx, &sym))
/* Symbol type bits (STT_*) extracted from st_info. */
104 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
106 return GELF_ST_TYPE(sym->st_info);
/* Symbol visibility bits (STV_*) extracted from st_other. */
109 static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
111 return GELF_ST_VISIBILITY(sym->st_other);
/* STT_GNU_IFUNC may be absent from older elf.h headers. */
114 #ifndef STT_GNU_IFUNC
115 #define STT_GNU_IFUNC 10
/* Defined function or ifunc symbol (not an undefined reference). */
118 static inline int elf_sym__is_function(const GElf_Sym *sym)
120 return (elf_sym__type(sym) == STT_FUNC ||
121 elf_sym__type(sym) == STT_GNU_IFUNC) &&
123 sym->st_shndx != SHN_UNDEF;
/* Defined data-object symbol. */
126 static inline bool elf_sym__is_object(const GElf_Sym *sym)
128 return elf_sym__type(sym) == STT_OBJECT &&
130 sym->st_shndx != SHN_UNDEF;
/* Untyped label in a real section, excluding hidden/internal visibility. */
133 static inline int elf_sym__is_label(const GElf_Sym *sym)
135 return elf_sym__type(sym) == STT_NOTYPE &&
137 sym->st_shndx != SHN_UNDEF &&
138 sym->st_shndx != SHN_ABS &&
139 elf_sym__visibility(sym) != STV_HIDDEN &&
140 elf_sym__visibility(sym) != STV_INTERNAL;
/* Keep only functions and data objects when walking a symtab. */
143 static bool elf_sym__filter(GElf_Sym *sym)
145 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
/* Symbol name: st_name is an offset into the string-table data buffer. */
148 static inline const char *elf_sym__name(const GElf_Sym *sym,
149 const Elf_Data *symstrs)
151 return symstrs->d_buf + sym->st_name;
/* Section name: sh_name is an offset into the section string table. */
154 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
155 const Elf_Data *secstrs)
157 return secstrs->d_buf + shdr->sh_name;
/* Heuristic: any section whose name contains "text" is treated as text. */
160 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
161 const Elf_Data *secstrs)
163 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
/* Heuristic: any section whose name contains "data" is treated as data. */
166 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
167 const Elf_Data *secstrs)
169 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
/* Keep only text and data sections. */
172 static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
174 return elf_sec__is_text(shdr, secstrs) ||
175 elf_sec__is_data(shdr, secstrs);
/*
 * Walk sections and return the index of the one whose [sh_addr, sh_addr +
 * sh_size) range contains @addr. NOTE(review): the return statements and
 * not-found path are elided from this listing.
 */
178 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
184 while ((sec = elf_nextscn(elf, sec)) != NULL) {
185 gelf_getshdr(sec, &shdr);
187 if ((addr >= shdr.sh_addr) &&
188 (addr < (shdr.sh_addr + shdr.sh_size)))
/*
 * Find a section by name; fills in @shp and, presumably via @idx, the
 * section index (elided here - TODO confirm). Returns the Elf_Scn or,
 * on the elided paths, NULL.
 */
197 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
198 GElf_Shdr *shp, const char *name, size_t *idx)
203 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
204 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
207 while ((sec = elf_nextscn(elf, sec)) != NULL) {
210 gelf_getshdr(sec, shp);
211 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
212 if (str && !strcmp(name, str)) {
/*
 * Open @filename as an ELF and report whether it contains a section named
 * @sec. NOTE(review): error handling and cleanup lines are elided.
 */
223 bool filename__has_section(const char *filename, const char *sec)
231 fd = open(filename, O_RDONLY);
235 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
239 if (gelf_getehdr(elf, &ehdr) == NULL)
242 found = !!elf_section_by_name(elf, &ehdr, &shdr, sec, NULL);
/*
 * Find the PT_LOAD program header whose [p_vaddr, p_vaddr + max(p_memsz,
 * p_filesz)) range covers @vaddr, storing it in @phdr.
 */
251 static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
256 if (elf_getphdrnum(elf, &phdrnum))
259 for (i = 0; i < phdrnum; i++) {
260 if (gelf_getphdr(elf, i, phdr) == NULL)
263 if (phdr->p_type != PT_LOAD)
/* Use the larger of memory and file size to bound the segment. */
266 sz = max(phdr->p_memsz, phdr->p_filesz);
270 if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
274 /* Not found any valid program header */
/*
 * Symbol index for relocation @idx, honouring the optional sorted index
 * array, and handling both RELA and REL forms (the REL/RELA branch
 * condition itself is elided from this listing).
 */
287 static u32 get_rel_symidx(struct rel_info *ri, u32 idx)
289 idx = ri->sorted ? ri->sorted[idx] : idx;
291 gelf_getrela(ri->reldata, idx, &ri->rela);
292 return GELF_R_SYM(ri->rela.r_info);
294 gelf_getrel(ri->reldata, idx, &ri->rel);
295 return GELF_R_SYM(ri->rel.r_info);
/* r_offset of relocation @x, for RELA or REL data. */
298 static u64 get_rel_offset(struct rel_info *ri, u32 x)
303 gelf_getrela(ri->reldata, x, &rela);
304 return rela.r_offset;
308 gelf_getrel(ri->reldata, x, &rel);
/* qsort_r comparator: order relocation indices by their r_offset. */
313 static int rel_cmp(const void *a, const void *b, void *r)
315 struct rel_info *ri = r;
316 u64 a_offset = get_rel_offset(ri, *(const u32 *)a);
317 u64 b_offset = get_rel_offset(ri, *(const u32 *)b);
319 return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0);
/*
 * Build ri->sorted: an index array over the relocation entries, sorted by
 * r_offset via rel_cmp(). The calloc-failure check and the identity
 * initialization of each element are elided from this listing.
 */
322 static int sort_rel(struct rel_info *ri)
324 size_t sz = sizeof(ri->sorted[0]);
327 ri->sorted = calloc(ri->nr_entries, sz);
330 for (i = 0; i < ri->nr_entries; i++)
332 qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri);
337 * For x86_64, the GNU linker is putting IFUNC information in the relocation
/* Only x86_64 RELA entries of type R_X86_64_IRELATIVE can carry an ifunc addend. */
340 static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
342 return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
343 GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
/*
 * Resolve an IRELATIVE relocation's addend (a virtual address) to a symbol
 * name, writing "<name>@plt" into @buf. Converts vaddr to file offset via
 * the covering PT_LOAD segment before the symbol lookup.
 */
346 static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
347 struct rel_info *ri, char *buf, size_t buf_sz)
349 u64 addr = ri->rela.r_addend;
353 if (!addend_may_be_ifunc(ehdr, ri))
356 if (elf_read_program_header(elf, addr, &phdr))
359 addr -= phdr.p_vaddr - phdr.p_offset;
361 sym = dso__find_symbol_nocache(dso, addr);
363 /* Expecting the address to be an IFUNC or IFUNC alias */
364 if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
367 snprintf(buf, buf_sz, "%s@plt", sym->name);
/* Release rel_info resources (body elided from this listing). */
372 static void exit_rel(struct rel_info *ri)
/*
 * Per-architecture PLT header and entry sizes. The switch-case labels
 * (which e_machine each size pair belongs to) are elided from this listing,
 * so the arch for each pair below cannot be confirmed here.
 */
377 static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
378 u64 *plt_header_size, u64 *plt_entry_size)
380 switch (ehdr->e_machine) {
382 *plt_header_size = 20;
383 *plt_entry_size = 12;
386 *plt_header_size = 32;
387 *plt_entry_size = 16;
390 *plt_header_size = 32;
391 *plt_entry_size = 16;
394 *plt_header_size = 48;
395 *plt_entry_size = 12;
398 *plt_header_size = 128;
399 *plt_entry_size = 32;
403 *plt_entry_size = shdr_plt->sh_entsize;
404 /* Size is 8 or 16, if not, assume alignment indicates size */
405 if (*plt_entry_size != 8 && *plt_entry_size != 16)
406 *plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
407 *plt_header_size = *plt_entry_size;
409 default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */
410 *plt_header_size = shdr_plt->sh_entsize;
411 *plt_entry_size = shdr_plt->sh_entsize;
416 pr_debug("Missing PLT entry size for %s\n", dso__long_name(dso));
/* True for 32- and 64-bit x86. */
420 static bool machine_is_x86(GElf_Half e_machine)
422 return e_machine == EM_386 || e_machine == EM_X86_64;
/*
 * Working state for resolving .plt.got entries via .rela.dyn:
 * section data pointers plus a sorted (offset, sym_idx) array.
 */
430 struct rela_dyn_info {
432 Elf_Data *plt_got_data;
434 struct rela_dyn *sorted;
435 Elf_Data *dynsym_data;
436 Elf_Data *dynstr_data;
437 Elf_Data *rela_dyn_data;
/* Release rela_dyn_info resources (body elided from this listing). */
440 static void exit_rela_dyn(struct rela_dyn_info *di)
/* bsearch/qsort comparator: order rela_dyn records by offset. */
445 static int cmp_offset(const void *a, const void *b)
447 const struct rela_dyn *va = a;
448 const struct rela_dyn *vb = b;
450 return va->offset < vb->offset ? -1 : (va->offset > vb->offset ? 1 : 0);
/*
 * Extract (offset, sym_idx) pairs from .rela.dyn and sort them by offset
 * so get_plt_got_name() can bsearch them. A filter condition on which
 * entries are kept (n vs i) is elided from this listing.
 */
453 static int sort_rela_dyn(struct rela_dyn_info *di)
457 di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0]));
461 /* Get data for sorting: the offset and symbol index */
462 for (i = 0, n = 0; i < di->nr_entries; i++) {
466 gelf_getrela(di->rela_dyn_data, i, &rela);
467 sym_idx = GELF_R_SYM(rela.r_info);
469 di->sorted[n].sym_idx = sym_idx;
470 di->sorted[n].offset = rela.r_offset;
477 qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset);
/*
 * Populate @di with the Elf_Data of .plt.got (from @scn), .rela.dyn, the
 * dynsym section it links to, and that section's string table; then
 * (per the trailing comment) sort the entries into offset order.
 */
482 static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn)
484 GElf_Shdr rela_dyn_shdr;
487 di->plt_got_data = elf_getdata(scn, NULL);
489 scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL);
490 if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize)
493 di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize;
494 di->rela_dyn_data = elf_getdata(scn, NULL);
/* sh_link of .rela.dyn names the associated symbol table section. */
496 scn = elf_getscn(elf, rela_dyn_shdr.sh_link)
497 if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link)
500 di->dynsym_data = elf_getdata(scn, NULL);
501 di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
503 if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data)
506 /* Sort into offset order */
510 /* Get instruction displacement from a plt entry for x86_64 */
511 static u32 get_x86_64_plt_disp(const u8 *p)
513 u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
/* Skip an optional leading endbr64 instruction. */
517 if (!memcmp(p, endbr64, sizeof(endbr64)))
518 n += sizeof(endbr64);
519 /* Skip bnd prefix */
522 /* jmp with 4-byte displacement */
523 if (p[n] == 0xff && p[n + 1] == 0x25) {
527 /* Also add offset from start of entry to end of instruction */
/* Displacement is a little-endian 32-bit immediate following ff 25. */
528 memcpy(&disp, p + n, sizeof(disp));
529 return n + 4 + le32toh(disp);
/*
 * Name a .plt.got entry at byte offset @i: decode the jmp displacement in
 * the entry, compute the GOT slot offset it targets, look that offset up
 * in the sorted .rela.dyn records, and format the (possibly demangled)
 * symbol as "<name>@plt" into @buf.
 */
534 static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
535 struct rela_dyn_info *di,
536 char *buf, size_t buf_sz)
538 struct rela_dyn vi, *vr;
539 const char *sym_name;
548 disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i);
552 /* Compute target offset of the .plt.got entry */
553 vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
555 /* Find that offset in .rela.dyn (sorted by offset) */
556 vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset);
560 /* Get the associated symbol */
561 gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
562 sym_name = elf_sym__name(&sym, di->dynstr_data);
563 demangled = dso__demangle_sym(di->dso, /*kmodule=*/0, sym_name);
564 if (demangled != NULL)
565 sym_name = demangled;
567 snprintf(buf, buf_sz, "%s@plt", sym_name);
/*
 * Create one STT_FUNC symbol per .plt.got entry. Entries that cannot be
 * resolved through .rela.dyn get a fallback "offset_<off>@plt" name.
 * Only x86_64 gets real name resolution (get_rela_dyn_info).
 */
576 static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
578 char *buf, size_t buf_sz)
580 struct rela_dyn_info di = { .dso = dso };
587 scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL);
588 if (!scn || !shdr.sh_entsize)
591 if (ehdr->e_machine == EM_X86_64)
592 get_rela_dyn_info(elf, ehdr, &di, scn);
594 for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) {
595 if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
596 snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
597 sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
600 symbols__insert(dso__symbols(dso), sym);
609  * We need to check if we have a .dynsym, so that we can handle the
610 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
611 * .dynsym or .symtab).
612 * And always look at the original dso, not at debuginfo packages, that
613 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
/*
 * NOTE(review): heavily elided fragment - goto labels, error paths and
 * several conditionals are missing from this listing.
 */
615 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
619 u64 plt_offset, plt_header_size, plt_entry_size;
620 GElf_Shdr shdr_plt, plt_sec_shdr;
621 struct symbol *f, *plt_sym;
622 GElf_Shdr shdr_rel_plt, shdr_dynsym;
623 Elf_Data *syms, *symstrs;
624 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
626 char sympltname[1024];
628 int nr = 0, err = -1;
629 struct rel_info ri = { .is_rela = false };
635 if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL))
639 * A symbol from a previous section (e.g. .init) can have been expanded
640 * by symbols__fixup_end() to overlap .plt. Truncate it before adding
641 * a symbol for .plt header.
643 f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset);
644 if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
645 f->end = shdr_plt.sh_offset;
647 if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size))
650 /* Add a symbol for .plt header */
651 plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
654 symbols__insert(dso__symbols(dso), plt_sym);
656 /* Only x86 has .plt.got */
657 if (machine_is_x86(ehdr.e_machine) &&
658 dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname)))
661 /* Only x86 has .plt.sec */
662 if (machine_is_x86(ehdr.e_machine) &&
663 elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) {
664 if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size))
666 /* Extend .plt symbol to entire .plt */
667 plt_sym->end = plt_sym->start + shdr_plt.sh_size;
668 /* Use .plt.sec offset */
669 plt_offset = plt_sec_shdr.sh_offset;
672 plt_offset = shdr_plt.sh_offset;
/* Locate .plt relocations; a second section name is tried (names elided). */
676 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
678 if (scn_plt_rel == NULL) {
679 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
681 if (scn_plt_rel == NULL)
685 if (shdr_rel_plt.sh_type != SHT_RELA &&
686 shdr_rel_plt.sh_type != SHT_REL)
689 if (!shdr_rel_plt.sh_link)
/* sh_link selects which symbol table the relocations refer to. */
692 if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
693 scn_dynsym = ss->dynsym;
694 shdr_dynsym = ss->dynshdr;
695 } else if (shdr_rel_plt.sh_link == ss->symtab_idx) {
697 * A static executable can have a .plt due to IFUNCs, in which
698 * case .symtab is used not .dynsym.
700 scn_dynsym = ss->symtab;
701 shdr_dynsym = ss->symshdr;
710 * Fetch the relocation section to find the idxes to the GOT
711 * and the symbols in the .dynsym they refer to.
713 ri.reldata = elf_getdata(scn_plt_rel, NULL);
717 syms = elf_getdata(scn_dynsym, NULL);
721 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
722 if (scn_symstrs == NULL)
725 symstrs = elf_getdata(scn_symstrs, NULL);
729 if (symstrs->d_size == 0)
732 ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
734 ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA;
738 * Assume a .plt with the same number of entries as the number
739 * of relocation entries is not lazy and does not have a header.
741 if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
742 dso__delete_symbol(dso, plt_sym);
744 plt_offset += plt_header_size;
748 * x86 doesn't insert IFUNC relocations in .plt order, so sort to get
751 if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri))
/* One synthesized "<sym>@plt" per relocation entry, in PLT order. */
754 for (idx = 0; idx < ri.nr_entries; idx++) {
755 const char *elf_name = NULL;
756 char *demangled = NULL;
758 gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
760 elf_name = elf_sym__name(&sym, symstrs);
761 demangled = dso__demangle_sym(dso, /*kmodule=*/0, elf_name);
763 elf_name = demangled;
765 snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name);
766 else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname)))
767 snprintf(sympltname, sizeof(sympltname),
768 "offset_%#" PRIx64 "@plt", plt_offset);
771 f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname);
775 plt_offset += plt_entry_size;
776 symbols__insert(dso__symbols(dso), f);
785 pr_debug("%s: problems reading %s PLT info.\n",
786 __func__, dso__long_name(dso));
791 * Align offset to 4 bytes as needed for note name and descriptor data.
793 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
/*
 * Scan the note sections of @elf for an NT_GNU_BUILD_ID note and copy its
 * descriptor into @bf (up to @size bytes, zero-padding the remainder).
 * NOTE(review): several loop-advance and return lines are elided here.
 */
795 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
805 if (size < BUILD_ID_SIZE)
812 if (gelf_getehdr(elf, &ehdr) == NULL) {
813 pr_err("%s: cannot get elf header.\n", __func__);
818 * Check following sections for notes:
819 * '.note.gnu.build-id'
821 * '.note' (VDSO specific)
824 sec = elf_section_by_name(elf, &ehdr, &shdr,
825 ".note.gnu.build-id", NULL);
829 sec = elf_section_by_name(elf, &ehdr, &shdr,
834 sec = elf_section_by_name(elf, &ehdr, &shdr,
843 data = elf_getdata(sec, NULL);
/* Walk the 4-byte-aligned note records inside the section data. */
848 while (ptr < (data->d_buf + data->d_size)) {
849 GElf_Nhdr *nhdr = ptr;
850 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
851 descsz = NOTE_ALIGN(nhdr->n_descsz);
854 ptr += sizeof(*nhdr);
857 if (nhdr->n_type == NT_GNU_BUILD_ID &&
858 nhdr->n_namesz == sizeof("GNU")) {
859 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
860 size_t sz = min(size, descsz);
862 memset(bf + sz, 0, size - sz);
874 #ifdef HAVE_LIBBFD_BUILDID_SUPPORT
/*
 * BFD variant: let libbfd extract the build-id; zero-pad bid->data and
 * return the id length (also stored in bid->size).
 */
876 static int read_build_id(const char *filename, struct build_id *bid)
878 size_t size = sizeof(bid->data);
882 abfd = bfd_openr(filename, NULL);
886 if (!bfd_check_format(abfd, bfd_object)) {
887 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
891 if (!abfd->build_id || abfd->build_id->size > size)
894 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
895 memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
896 err = bid->size = abfd->build_id->size;
903 #else // HAVE_LIBBFD_BUILDID_SUPPORT
/* libelf variant: open the file and delegate to elf_read_build_id(). */
905 static int read_build_id(const char *filename, struct build_id *bid)
907 size_t size = sizeof(bid->data);
911 if (size < BUILD_ID_SIZE)
914 fd = open(filename, O_RDONLY);
918 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
920 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
924 err = elf_read_build_id(elf, bid->data, size);
935 #endif // HAVE_LIBBFD_BUILDID_SUPPORT
/*
 * Public entry: parse @filename as a possible kernel module path and, if
 * compressed, decompress to a temp file before reading the build-id.
 * NOTE(review): the branch that reads from the decompressed fd vs. the
 * original path is elided from this listing.
 */
937 int filename__read_build_id(const char *filename, struct build_id *bid)
939 struct kmod_path m = { .name = NULL, };
946 err = kmod_path__parse(&m, filename);
953 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error);
955 pr_debug("Failed to decompress (error %d) %s\n",
963 err = read_build_id(filename, bid);
/*
 * Read a raw note stream (e.g. /sys/kernel/notes) and extract the
 * NT_GNU_BUILD_ID descriptor into bid->data, zero-padding the remainder.
 * Non-matching notes are skipped by reading past name+desc.
 */
970 int sysfs__read_build_id(const char *filename, struct build_id *bid)
972 size_t size = sizeof(bid->data);
975 fd = open(filename, O_RDONLY);
982 size_t namesz, descsz;
984 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
/* Name and descriptor are 4-byte aligned in the note stream. */
987 namesz = NOTE_ALIGN(nhdr.n_namesz);
988 descsz = NOTE_ALIGN(nhdr.n_descsz);
989 if (nhdr.n_type == NT_GNU_BUILD_ID &&
990 nhdr.n_namesz == sizeof("GNU")) {
991 if (read(fd, bf, namesz) != (ssize_t)namesz)
993 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
994 size_t sz = min(descsz, size);
995 if (read(fd, bid->data, sz) == (ssize_t)sz) {
996 memset(bid->data + sz, 0, size - sz);
1001 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
/* Skip an uninteresting note, truncating if larger than the scratch buf. */
1004 int n = namesz + descsz;
1006 if (n > (int)sizeof(bf)) {
1008 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
1009 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
1011 if (read(fd, bf, n) != n)
1020 #ifdef HAVE_LIBBFD_SUPPORT
/* BFD variant: copy the .gnu_debuglink section contents into @debuglink. */
1022 int filename__read_debuglink(const char *filename, char *debuglink,
1029 abfd = bfd_openr(filename, NULL);
1033 if (!bfd_check_format(abfd, bfd_object)) {
1034 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
1038 section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
1042 if (section->size > size)
1045 if (!bfd_get_section_contents(abfd, section, debuglink, 0,
/* libelf variant: locate .gnu_debuglink and copy its leading string. */
1058 int filename__read_debuglink(const char *filename, char *debuglink,
1069 fd = open(filename, O_RDONLY);
1073 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1075 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
1080 if (ek != ELF_K_ELF)
1083 if (gelf_getehdr(elf, &ehdr) == NULL) {
1084 pr_err("%s: cannot get elf header.\n", __func__);
1088 sec = elf_section_by_name(elf, &ehdr, &shdr,
1089 ".gnu_debuglink", NULL);
1093 data = elf_getdata(sec, NULL);
1097 /* the start of this section is a zero-terminated string */
/* NOTE(review): strncpy does not NUL-terminate if the section string fills
 * @size; relies on the section's own terminator fitting - worth confirming. */
1098 strncpy(debuglink, data->d_buf, size);
/* A symsrc with a .dynsym or .opd section may describe a runtime image. */
1112 bool symsrc__possibly_runtime(struct symsrc *ss)
1114 return ss->dynsym || ss->opdsec;
/* True when a full .symtab was found at init time. */
1117 bool symsrc__has_symtab(struct symsrc *ss)
1119 return ss->symtab != NULL;
/* Release symsrc resources (body elided from this listing). */
1122 void symsrc__destroy(struct symsrc *ss)
1129 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
1132 * Usually vmlinux is an ELF file with type ET_EXEC for most
1133 * architectures; except Arm64 kernel is linked with option
1134 * '-share', so need to check type ET_DYN.
1136 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
1137 ehdr.e_type == ET_DYN;
/*
 * Extract the MiniDebugInfo from a .gnu_debugdata section: wrap the raw
 * section bytes in a FILE via fmemopen(), LZMA-decompress into an unlinked
 * temp file, and reopen that as an Elf handle. On success returns the
 * embedded Elf; the temp fd is presumably returned via @fd_ret (the
 * assignment line is elided - TODO confirm). All failures set
 * *dso__load_errno(dso).
 */
1140 static Elf *read_gnu_debugdata(struct dso *dso, Elf *elf, const char *name, int *fd_ret)
1149 char temp_filename[] = "/tmp/perf.gnu_debugdata.elf.XXXXXX";
1152 if (gelf_getehdr(elf, &ehdr) == NULL) {
1153 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
1154 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1158 scn = elf_section_by_name(elf, &ehdr, &shdr, ".gnu_debugdata", &shndx);
1160 *dso__load_errno(dso) = -ENOENT;
1164 if (shdr.sh_type == SHT_NOBITS) {
1165 pr_debug("%s: .gnu_debugdata of ELF file %s has no data.\n", __func__, name);
1166 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1170 scn_data = elf_rawdata(scn, NULL);
1172 pr_debug("%s: error reading .gnu_debugdata of %s: %s\n", __func__,
1173 name, elf_errmsg(-1));
1174 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1178 wrapped = fmemopen(scn_data->d_buf, scn_data->d_size, "r");
1180 pr_debug("%s: fmemopen: %s\n", __func__, strerror(errno));
1181 *dso__load_errno(dso) = -errno;
1185 temp_fd = mkstemp(temp_filename);
1187 pr_debug("%s: mkstemp: %s\n", __func__, strerror(errno));
1188 *dso__load_errno(dso) = -errno;
/* Unlink immediately: the fd keeps the file alive, path never leaks. */
1192 unlink(temp_filename);
1194 ret = lzma_decompress_stream_to_file(wrapped, temp_fd);
1197 *dso__load_errno(dso) = -errno;
1202 elf_embedded = elf_begin(temp_fd, PERF_ELF_C_READ_MMAP, NULL);
1203 if (!elf_embedded) {
1204 pr_debug("%s: error reading .gnu_debugdata of %s: %s\n", __func__,
1205 name, elf_errmsg(-1));
1206 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1210 pr_debug("%s: using .gnu_debugdata of %s\n", __func__, name);
1212 return elf_embedded;
/*
 * Open @name, validate it as an ELF for @dso (byte-order, build-id match),
 * and cache the .symtab/.dynsym/.opd sections plus their headers in @ss.
 * Handles compressed kmodules and embedded .gnu_debugdata images.
 * NOTE(review): error labels and several cleanup lines are elided here.
 */
1215 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
1216 enum dso_binary_type type)
1222 if (dso__needs_decompress(dso)) {
1223 fd = dso__decompress_kmodule_fd(dso, name);
1227 type = dso__symtab_type(dso);
1229 fd = open(name, O_RDONLY);
1231 *dso__load_errno(dso) = errno;
1236 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1238 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
1239 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
/* Swap in the embedded MiniDebugInfo image when requested. */
1243 if (type == DSO_BINARY_TYPE__GNU_DEBUGDATA) {
1245 Elf *embedded = read_gnu_debugdata(dso, elf, name, &new_fd);
1256 if (gelf_getehdr(elf, &ehdr) == NULL) {
1257 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1258 pr_debug("%s: cannot get elf header.\n", __func__);
1262 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
1263 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INTERNAL_ERROR;
1267 /* Always reject images with a mismatched build-id: */
1268 if (dso__has_build_id(dso) && !symbol_conf.ignore_vmlinux_buildid) {
1269 u8 build_id[BUILD_ID_SIZE];
1270 struct build_id bid;
1273 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
1275 *dso__load_errno(dso) = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
1279 build_id__init(&bid, build_id, size);
1280 if (!dso__build_id_equal(dso, &bid)) {
1281 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
1282 *dso__load_errno(dso) = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
1287 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
/* Cache sections, rejecting ones whose sh_type is not the expected kind. */
1290 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
1292 if (ss->symshdr.sh_type != SHT_SYMTAB)
1296 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
1298 if (ss->dynshdr.sh_type != SHT_DYNSYM)
1302 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
1304 if (ss->opdshdr.sh_type != SHT_PROGBITS)
1307 if (dso__kernel(dso) == DSO_SPACE__USER)
1308 ss->adjust_symbols = true;
1310 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
1312 ss->name = strdup(name);
1314 *dso__load_errno(dso) = errno;
/* Section flags mark an allocated, executable (text-like) section. */
1332 static bool is_exe_text(int flags)
1334 return (flags & (SHF_ALLOC | SHF_EXECINSTR)) == (SHF_ALLOC | SHF_EXECINSTR);
1338 * Some executable module sections like .noinstr.text might be laid out with
1339 * .text so they can use the same mapping (memory address to file offset).
1340 * Check if that is the case. Refer to kernel layout_sections(). Return the
1343 static u64 max_text_section(Elf *elf, GElf_Ehdr *ehdr)
1345 Elf_Scn *sec = NULL;
1349 /* Doesn't work for some arch */
1350 if (ehdr->e_machine == EM_PARISC ||
1351 ehdr->e_machine == EM_ALPHA)
1354 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
1355 if (!elf_rawdata(elf_getscn(elf, ehdr->e_shstrndx), NULL))
1358 while ((sec = elf_nextscn(elf, sec)) != NULL) {
1361 if (!gelf_getshdr(sec, &shdr))
1364 if (!is_exe_text(shdr.sh_flags))
1367 /* .init and .exit sections are not placed with .text */
1368 sec_name = elf_strptr(elf, ehdr->e_shstrndx, shdr.sh_name);
1370 strstarts(sec_name, ".init") ||
1371 strstarts(sec_name, ".exit"))
1374 /* Must be next to previous, assumes .text is first */
1375 if (offs && PERF_ALIGN(offs, shdr.sh_addralign ?: 1) != shdr.sh_offset)
/* Track the running end offset of the contiguous text run. */
1378 offs = shdr.sh_offset + shdr.sh_size;
1385 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
1386 * @kmap: kernel maps and relocation reference symbol
1388 * This function returns %true if we are dealing with the kernel maps and the
1389 * relocation reference symbol has not yet been found. Otherwise %false is
1392 static bool ref_reloc_sym_not_found(struct kmap *kmap)
1394 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
1395 !kmap->ref_reloc_sym->unrelocated_addr;
1399 * ref_reloc - kernel relocation offset.
1400 * @kmap: kernel maps and relocation reference symbol
1402 * This function returns the offset of kernel addresses as determined by using
1403 * the relocation reference symbol i.e. if the kernel has not been relocated
1404 * then the return value is zero.
1406 static u64 ref_reloc(struct kmap *kmap)
1408 if (kmap && kmap->ref_reloc_sym &&
1409 kmap->ref_reloc_sym->unrelocated_addr)
1410 return kmap->ref_reloc_sym->addr -
1411 kmap->ref_reloc_sym->unrelocated_addr;
/* Weak no-op hook; architectures may override to tweak synthesized symbols. */
1415 void __weak arch__sym_update(struct symbol *s __maybe_unused,
1416 GElf_Sym *sym __maybe_unused) { }
/*
 * Route a kernel/module symbol to the right map/dso: remap the kernel
 * .text map, reuse the .text mapping for co-located module sections, or
 * create a per-section "<dso><section>" map+dso on demand, updating
 * *curr_dsop to the dso the symbol should be inserted into.
 * NOTE(review): elided fragment - error labels and some conditionals
 * (e.g. around the maps__remove/insert re-ordering) are missing here.
 */
1418 static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
1419 GElf_Sym *sym, GElf_Shdr *shdr,
1420 struct maps *kmaps, struct kmap *kmap,
1421 struct dso **curr_dsop,
1422 const char *section_name,
1423 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel,
1424 u64 max_text_sh_offset)
1426 struct dso *curr_dso = *curr_dsop;
1427 struct map *curr_map;
1428 char dso_name[PATH_MAX];
1430 /* Adjust symbol to map to file offset */
1431 if (adjust_kernel_syms)
1432 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
1434 if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0)
1437 if (strcmp(section_name, ".text") == 0) {
1439 * The initial kernel mapping is based on
1440 * kallsyms and identity maps. Overwrite it to
1441 * map to the kernel dso.
1443 if (*remap_kernel && dso__kernel(dso) && !kmodule) {
1444 *remap_kernel = false;
1445 map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
1446 map__set_end(map, map__start(map) + shdr->sh_size);
1447 map__set_pgoff(map, shdr->sh_offset);
1448 map__set_mapping_type(map, MAPPING_TYPE__DSO);
1449 /* Ensure maps are correctly ordered */
1452 struct map *tmp = map__get(map);
1454 maps__remove(kmaps, map);
1455 err = maps__insert(kmaps, map);
1463 * The initial module mapping is based on
1464 * /proc/modules mapped to offset zero.
1465 * Overwrite it to map to the module dso.
1467 if (*remap_kernel && kmodule) {
1468 *remap_kernel = false;
1469 map__set_pgoff(map, shdr->sh_offset);
1472 dso__put(*curr_dsop);
1473 *curr_dsop = dso__get(dso);
1481 * perf does not record module section addresses except for .text, but
1482 * some sections can use the same mapping as .text.
1484 if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) &&
1485 shdr->sh_offset <= max_text_sh_offset) {
1486 dso__put(*curr_dsop);
1487 *curr_dsop = dso__get(dso);
/* Otherwise give the section its own "<dso><section>" map and dso. */
1491 snprintf(dso_name, sizeof(dso_name), "%s%s", dso__short_name(dso), section_name);
1493 curr_map = maps__find_by_name(kmaps, dso_name);
1494 if (curr_map == NULL) {
1495 u64 start = sym->st_value;
1498 start += map__start(map) + shdr->sh_offset;
1500 curr_dso = dso__new(dso_name);
1501 if (curr_dso == NULL)
1503 dso__set_kernel(curr_dso, dso__kernel(dso));
1504 RC_CHK_ACCESS(curr_dso)->long_name = dso__long_name(dso);
1505 RC_CHK_ACCESS(curr_dso)->long_name_len = dso__long_name_len(dso);
1506 dso__set_binary_type(curr_dso, dso__binary_type(dso));
1507 dso__set_adjust_symbols(curr_dso, dso__adjust_symbols(dso));
1508 curr_map = map__new2(start, curr_dso);
1509 if (curr_map == NULL) {
1513 if (dso__kernel(curr_dso))
1514 map__kmap(curr_map)->kmaps = kmaps;
1516 if (adjust_kernel_syms) {
1517 map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
1518 map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
1519 map__set_pgoff(curr_map, shdr->sh_offset);
1521 map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
1523 dso__set_symtab_type(curr_dso, dso__symtab_type(dso));
1524 if (maps__insert(kmaps, curr_map))
1526 dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
1527 dso__set_loaded(curr_dso);
1528 dso__put(*curr_dsop);
1529 *curr_dsop = curr_dso;
/* Existing per-section map: just switch to its dso. */
1531 dso__put(*curr_dsop);
1532 *curr_dsop = dso__get(map__dso(curr_map));
1540 dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1541 struct symsrc *runtime_ss, int kmodule, int dynsym)
1543 struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL;
1544 struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
1545 struct dso *curr_dso = NULL;
1546 Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
1552 Elf_Data *syms, *opddata = NULL;
1554 Elf_Scn *sec, *sec_strndx;
1557 bool remap_kernel = false, adjust_kernel_syms = false;
1558 u64 max_text_sh_offset = 0;
1564 ehdr = syms_ss->ehdr;
1566 sec = syms_ss->dynsym;
1567 shdr = syms_ss->dynshdr;
1569 sec = syms_ss->symtab;
1570 shdr = syms_ss->symshdr;
1573 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
1575 dso__set_text_offset(dso, tshdr.sh_addr - tshdr.sh_offset);
1576 dso__set_text_end(dso, tshdr.sh_offset + tshdr.sh_size);
1579 if (runtime_ss->opdsec)
1580 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
1582 syms = elf_getdata(sec, NULL);
1586 sec = elf_getscn(elf, shdr.sh_link);
1590 symstrs = elf_getdata(sec, NULL);
1591 if (symstrs == NULL)
1594 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
1595 if (sec_strndx == NULL)
1598 secstrs_run = elf_getdata(sec_strndx, NULL);
1599 if (secstrs_run == NULL)
1602 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
1603 if (sec_strndx == NULL)
1606 secstrs_sym = elf_getdata(sec_strndx, NULL);
1607 if (secstrs_sym == NULL)
1610 nr_syms = shdr.sh_size / shdr.sh_entsize;
1612 memset(&sym, 0, sizeof(sym));
1615 * The kernel relocation symbol is needed in advance in order to adjust
1616 * kernel maps correctly.
1618 if (ref_reloc_sym_not_found(kmap)) {
1619 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1620 const char *elf_name = elf_sym__name(&sym, symstrs);
1622 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
1624 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
1625 map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
1631 * Handle any relocation of vdso necessary because older kernels
1632 * attempted to prelink vdso to its virtual address.
1634 if (dso__is_vdso(dso))
1635 map__set_reloc(map, map__start(map) - dso__text_offset(dso));
1637 dso__set_adjust_symbols(dso, runtime_ss->adjust_symbols || ref_reloc(kmap));
1639 * Initial kernel and module mappings do not map to the dso.
1642 if (dso__kernel(dso)) {
1643 remap_kernel = true;
1644 adjust_kernel_syms = dso__adjust_symbols(dso);
1647 if (kmodule && adjust_kernel_syms)
1648 max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr);
1650 curr_dso = dso__get(dso);
1651 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1653 const char *elf_name = elf_sym__name(&sym, symstrs);
1654 char *demangled = NULL;
1655 int is_label = elf_sym__is_label(&sym);
1656 const char *section_name;
1657 bool used_opd = false;
1659 if (!is_label && !elf_sym__filter(&sym))
1662 /* Reject ARM ELF "mapping symbols": these aren't unique and
1663 * don't identify functions, so will confuse the profile
1665 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
1666 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
1667 && (elf_name[2] == '\0' || elf_name[2] == '.'))
1671 /* Reject RISCV ELF "mapping symbols" */
1672 if (ehdr.e_machine == EM_RISCV) {
1673 if (elf_name[0] == '$' && strchr("dx", elf_name[1]))
1677 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
1678 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
1679 u64 *opd = opddata->d_buf + offset;
1680 sym.st_value = DSO__SWAP(dso, u64, *opd);
1681 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
1687 * When loading symbols in a data mapping, ABS symbols (which
1688 * has a value of SHN_ABS in its st_shndx) failed at
1689 * elf_getscn(). And it marks the loading as a failure so
1690 * already loaded symbols cannot be fixed up.
1692 * I'm not sure what should be done. Just ignore them for now.
1695 if (sym.st_shndx == SHN_ABS)
1698 sec = elf_getscn(syms_ss->elf, sym.st_shndx);
1702 gelf_getshdr(sec, &shdr);
1705 * If the attribute bit SHF_ALLOC is not set, the section
1706 * doesn't occupy memory during process execution.
1707 * E.g. ".gnu.warning.*" section is used by linker to generate
1708 * warnings when calling deprecated functions, the symbols in
1709 * the section aren't loaded to memory during process execution,
1712 if (!(shdr.sh_flags & SHF_ALLOC))
1715 secstrs = secstrs_sym;
1718 * We have to fallback to runtime when syms' section header has
1719 * NOBITS set. NOBITS results in file offset (sh_offset) not
1720 * being incremented. So sh_offset used below has different
1721 * values for syms (invalid) and runtime (valid).
1723 if (shdr.sh_type == SHT_NOBITS) {
1724 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
1728 gelf_getshdr(sec, &shdr);
1729 secstrs = secstrs_run;
1732 if (is_label && !elf_sec__filter(&shdr, secstrs))
1735 section_name = elf_sec__name(&shdr, secstrs);
1737 /* On ARM, symbols for thumb functions have 1 added to
1738 * the symbol address as a flag - remove it */
1739 if ((ehdr.e_machine == EM_ARM) &&
1740 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
1744 if (dso__kernel(dso)) {
1745 if (dso__process_kernel_symbol(dso, map, &sym, &shdr,
1746 kmaps, kmap, &curr_dso,
1751 max_text_sh_offset))
1753 } else if ((used_opd && runtime_ss->adjust_symbols) ||
1754 (!used_opd && syms_ss->adjust_symbols)) {
1757 if (elf_read_program_header(runtime_ss->elf,
1758 (u64)sym.st_value, &phdr)) {
1759 pr_debug4("%s: failed to find program header for "
1760 "symbol: %s st_value: %#" PRIx64 "\n",
1761 __func__, elf_name, (u64)sym.st_value);
1762 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1763 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
1764 __func__, (u64)sym.st_value, (u64)shdr.sh_addr,
1765 (u64)shdr.sh_offset);
1767 * Fail to find program header, let's rollback
1768 * to use shdr.sh_addr and shdr.sh_offset to
1769 * calibrate symbol's file address, though this
1770 * is not necessary for normal C ELF file, we
1771 * still need to handle java JIT symbols in this
1774 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1776 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1777 "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
1778 __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
1779 (u64)phdr.p_offset);
1780 sym.st_value -= phdr.p_vaddr - phdr.p_offset;
1784 demangled = dso__demangle_sym(dso, kmodule, elf_name);
1785 if (demangled != NULL)
1786 elf_name = demangled;
1788 f = symbol__new(sym.st_value, sym.st_size,
1789 GELF_ST_BIND(sym.st_info),
1790 GELF_ST_TYPE(sym.st_info), elf_name);
1795 arch__sym_update(f, &sym);
1797 __symbols__insert(dso__symbols(curr_dso), f, dso__kernel(dso));
1803 * For misannotated, zeroed, ASM function sizes.
1806 symbols__fixup_end(dso__symbols(dso), false);
1807 symbols__fixup_duplicate(dso__symbols(dso));
1810 * We need to fixup this here too because we create new
1811 * maps here, for things like vsyscall sections.
1813 maps__fixup_end(kmaps);
/*
 * dso__load_sym - load ELF symbols for @dso via dso__load_sym_internal().
 * Loads from the symtab in @syms_ss, falling back to dynsym; for
 * .gnu_debugdata also loads the runtime file's dynsym.
 * NOTE(review): interior lines are elided in this capture; comments cover
 * only the visible code.
 */
1822 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1823 struct symsrc *runtime_ss, int kmodule)
/* Record where the symbols came from and basic ELF properties. */
1828 dso__set_symtab_type(dso, syms_ss->type);
1829 dso__set_is_64_bit(dso, syms_ss->is_64_bit);
1830 dso__set_rel(dso, syms_ss->ehdr.e_type == ET_REL);
1833 * Modules may already have symbols from kallsyms, but those symbols
1834 * have the wrong values for the dso maps, so remove them.
1836 if (kmodule && syms_ss->symtab)
1837 symbols__delete(dso__symbols(dso));
1839 if (!syms_ss->symtab) {
1841 * If the vmlinux is stripped, fail so we will fall back
1842 * to using kallsyms. The vmlinux runtime symbols aren't
1845 if (dso__kernel(dso))
/* Load the primary symbol table (symtab or its substitute). */
1848 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
/* Also load dynamic symbols when present. */
1855 if (syms_ss->dynsym) {
1856 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1864 * The .gnu_debugdata is a special situation: it contains a symbol
1865 * table, but the runtime file may also contain dynsym entries which are
1866 * not present there. We need to load both.
1868 if (syms_ss->type == DSO_BINARY_TYPE__GNU_DEBUGDATA && runtime_ss->dynsym) {
1869 err = dso__load_sym_internal(dso, map, runtime_ss, runtime_ss,
/*
 * elf_read_maps - iterate the program headers of @elf and invoke @mapfn
 * for each loadable, executable (and readable) segment.
 * NOTE(review): some lines are elided here; 'exe' handling between the
 * PF_X/PF_R checks is not fully visible.
 */
1879 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1886 if (elf_getphdrnum(elf, &phdrnum))
1889 for (i = 0; i < phdrnum; i++) {
1890 if (gelf_getphdr(elf, i, &phdr) == NULL)
/* Only loadable segments describe mapped memory. */
1892 if (phdr.p_type != PT_LOAD)
1895 if (!(phdr.p_flags & PF_X))
1898 if (!(phdr.p_flags & PF_R))
/* Map only the file-backed portion (exclude .bss-style tail). */
1901 sz = min(phdr.p_memsz, phdr.p_filesz);
1904 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
/*
 * file__read_maps - open @fd as ELF, report its class via @is_64_bit and
 * walk its executable load segments with elf_read_maps().
 */
1911 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1917 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1922 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1924 err = elf_read_maps(elf, exe, mapfn, data);
/*
 * dso__type_fd - classify the ELF at @fd as 64-bit, x32 (EM_X86_64 with
 * ELFCLASS32) or 32-bit; DSO__TYPE_UNKNOWN when not an ELF file.
 */
1930 enum dso_type dso__type_fd(int fd)
1932 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1937 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1942 if (ek != ELF_K_ELF)
1945 if (gelf_getclass(elf) == ELFCLASS64) {
1946 dso_type = DSO__TYPE_64BIT;
1950 if (gelf_getehdr(elf, &ehdr) == NULL)
/* 32-bit class with an x86-64 machine is the x32 ABI. */
1953 if (ehdr.e_machine == EM_X86_64)
1954 dso_type = DSO__TYPE_X32BIT;
1956 dso_type = DSO__TYPE_32BIT;
/*
 * copy_bytes - copy @len bytes from @from at @from_offs to @to at
 * @to_offs using a page_size bounce buffer.
 * NOTE(review): loop structure and error paths are elided in this capture.
 */
1963 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1968 char *buf = malloc(page_size);
1973 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1976 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1983 /* Use read because mmap won't work on proc files */
1984 r = read(from, buf, n);
1990 r = write(to, buf, n);
/*
 * kcore__open - open @filename (typically /proc/kcore) read-only and
 * initialise the struct kcore fd, elf handle, class and ehdr.
 */
2011 static int kcore__open(struct kcore *kcore, const char *filename)
2015 kcore->fd = open(filename, O_RDONLY);
2016 if (kcore->fd == -1)
2019 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
2023 kcore->elfclass = gelf_getclass(kcore->elf);
2024 if (kcore->elfclass == ELFCLASSNONE)
2027 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
/* Error path: tear down the libelf handle. */
2034 elf_end(kcore->elf);
/*
 * kcore__init - create a new output kcore file at @filename for writing
 * (mkstemp on one path, O_CREAT|O_EXCL on the other; the selecting
 * condition is elided in this capture) with a fresh ELF header of
 * @elfclass.
 */
2040 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
2043 kcore->elfclass = elfclass;
2046 kcore->fd = mkstemp(filename);
2048 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
2049 if (kcore->fd == -1)
2052 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
2056 if (!gelf_newehdr(kcore->elf, elfclass))
2059 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
/* Error path: release the libelf handle. */
2064 elf_end(kcore->elf);
/* kcore__close - release the libelf handle (fd close is elided here). */
2071 static void kcore__close(struct kcore *kcore)
2073 elf_end(kcore->elf);
/*
 * kcore__copy_hdr - build @to's ELF header from @from's, with @count
 * program headers and no section headers, then allocate the phdr table.
 */
2077 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
2079 GElf_Ehdr *ehdr = &to->ehdr;
2080 GElf_Ehdr *kehdr = &from->ehdr;
/* Carry over identity and machine info from the source kcore. */
2082 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
2083 ehdr->e_type = kehdr->e_type;
2084 ehdr->e_machine = kehdr->e_machine;
2085 ehdr->e_version = kehdr->e_version;
2088 ehdr->e_flags = kehdr->e_flags;
2089 ehdr->e_phnum = count;
/* The copy has no section headers at all. */
2090 ehdr->e_shentsize = 0;
2092 ehdr->e_shstrndx = 0;
/* Sizes depend on the ELF class of the source file. */
2094 if (from->elfclass == ELFCLASS32) {
2095 ehdr->e_phoff = sizeof(Elf32_Ehdr);
2096 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
2097 ehdr->e_phentsize = sizeof(Elf32_Phdr);
2099 ehdr->e_phoff = sizeof(Elf64_Ehdr);
2100 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
2101 ehdr->e_phentsize = sizeof(Elf64_Phdr);
2104 if (!gelf_update_ehdr(to->elf, ehdr))
2107 if (!gelf_newphdr(to->elf, count))
2113 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
2118 .p_flags = PF_R | PF_W | PF_X,
2124 .p_align = page_size,
2127 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
/* kcore__write - flush the constructed ELF to disk; returns its size or <0. */
2133 static off_t kcore__write(struct kcore *kcore)
2135 return elf_update(kcore->elf, ELF_C_WRITE);
/*
 * Bookkeeping for kcore_copy(): the struct headers of phdr_data and
 * sym_data are elided in this capture; kcore_copy_info collects symbol
 * boundaries plus lists of segments (phdrs) and trampoline symbols (syms).
 */
2143 struct list_head node;
2144 struct phdr_data *remaps;
2149 struct list_head node;
2152 struct kcore_copy_info {
2158 u64 first_module_symbol;
2159 u64 last_module_symbol;
2161 struct list_head phdrs;
2162 struct list_head syms;
/* Iterate every collected phdr_data in a kcore_copy_info. */
2165 #define kcore_copy__for_each_phdr(k, p) \
2166 list_for_each_entry((p), &(k)->phdrs, node)
/* phdr_data__new - allocate a zeroed phdr_data (field init elided here). */
2168 static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
2170 struct phdr_data *p = zalloc(sizeof(*p));
/*
 * kcore_copy_info__addnew - allocate a phdr_data and append it to
 * @kci->phdrs; returns the new entry (NULL check elided in this capture).
 */
2181 static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
2185 struct phdr_data *p = phdr_data__new(addr, len, offset);
2188 list_add_tail(&p->node, &kci->phdrs);
/* kcore_copy__free_phdrs - unlink and free every phdr_data in @kci. */
2193 static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
2195 struct phdr_data *p, *tmp;
2197 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
2198 list_del_init(&p->node);
/*
 * kcore_copy__new_sym - allocate a sym_data and append it to @kci->syms
 * (addr assignment and NULL check elided in this capture).
 */
2203 static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
2206 struct sym_data *s = zalloc(sizeof(*s));
2210 list_add_tail(&s->node, &kci->syms);
/* kcore_copy__free_syms - unlink and free every sym_data in @kci. */
2216 static void kcore_copy__free_syms(struct kcore_copy_info *kci)
2218 struct sym_data *s, *tmp;
2220 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
2221 list_del_init(&s->node);
/*
 * kcore_copy__process_kallsyms - kallsyms__parse() callback: track the
 * lowest/highest function symbols for kernel and modules, note _stext /
 * _etext (bodies elided here), and record entry trampolines.
 */
2226 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
2229 struct kcore_copy_info *kci = arg;
2231 if (!kallsyms__is_function(type))
/* A '[' in the name marks a module symbol in kallsyms output. */
2234 if (strchr(name, '[')) {
2235 if (!kci->first_module_symbol || start < kci->first_module_symbol)
2236 kci->first_module_symbol = start;
2237 if (start > kci->last_module_symbol)
2238 kci->last_module_symbol = start;
2242 if (!kci->first_symbol || start < kci->first_symbol)
2243 kci->first_symbol = start;
2245 if (!kci->last_symbol || start > kci->last_symbol)
2246 kci->last_symbol = start;
2248 if (!strcmp(name, "_stext")) {
2253 if (!strcmp(name, "_etext")) {
/* Entry trampolines get their own sym_data entries for mapping. */
2258 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
/*
 * kcore_copy__parse_kallsyms - parse "<dir>/kallsyms" into @kci unless
 * access is restricted (e.g. kptr_restrict).
 */
2264 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
2267 char kallsyms_filename[PATH_MAX];
2269 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
2271 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
2274 if (kallsyms__parse(kallsyms_filename, kci,
2275 kcore_copy__process_kallsyms) < 0)
/*
 * kcore_copy__process_modules - modules__parse() callback: remember the
 * lowest module load address.
 */
2281 static int kcore_copy__process_modules(void *arg,
2282 const char *name __maybe_unused,
2283 u64 start, u64 size __maybe_unused)
2285 struct kcore_copy_info *kci = arg;
2287 if (!kci->first_module || start < kci->first_module)
2288 kci->first_module = start;
/*
 * kcore_copy__parse_modules - parse "<dir>/modules" into @kci unless
 * access is restricted.
 */
2293 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
2296 char modules_filename[PATH_MAX];
2298 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
2300 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
2303 if (modules__parse(modules_filename, kci,
2304 kcore_copy__process_modules) < 0)
/*
 * kcore_copy__map - if [s, e) begins inside segment [start, end), record
 * a phdr_data covering the overlap; file offset derives from @pgoff.
 */
2310 static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
2311 u64 pgoff, u64 s, u64 e)
2315 if (s < start || s >= end)
2318 offset = (s - start) + pgoff;
/* Clamp the length to the segment end. */
2319 len = e < end ? e - s : end - s;
2321 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
/*
 * kcore_copy__read_map - elf_read_maps() callback: intersect each kcore
 * load segment with the kernel text, the module range, and each recorded
 * (trampoline) symbol page.
 * NOTE(review): 'len' used for symbol pages appears to be the segment
 * length parameter here — the per-symbol length line is elided; confirm
 * against the full source.
 */
2324 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
2326 struct kcore_copy_info *kci = data;
2327 u64 end = start + len;
2328 struct sym_data *sdat;
2330 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
2333 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
2334 kci->last_module_symbol))
2337 list_for_each_entry(sdat, &kci->syms, node) {
2338 u64 s = round_down(sdat->addr, page_size);
2340 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
/* kcore_copy__read_maps - collect segment intersections from @elf's phdrs. */
2347 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
2349 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
/*
 * kcore_copy__find_remaps - find the phdr containing _stext (the kernel
 * map) and mark every other phdr whose file range lies within it as a
 * remap of the kernel (assignment of p->remaps is elided here).
 */
2355 static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
2357 struct phdr_data *p, *k = NULL;
2363 /* Find phdr that corresponds to the kernel map (contains stext) */
2364 kcore_copy__for_each_phdr(kci, p) {
2365 u64 pend = p->addr + p->len - 1;
2367 if (p->addr <= kci->stext && pend >= kci->stext) {
2376 kend = k->offset + k->len;
2378 /* Find phdrs that remap the kernel */
2379 kcore_copy__for_each_phdr(kci, p) {
2380 u64 pend = p->offset + p->len;
2385 if (p->offset >= k->offset && pend <= kend)
/*
 * kcore_copy__layout - assign relative output offsets (p->rel) to each
 * phdr; remapped phdrs reuse their kernel phdr's position (non-remap
 * offset accounting is elided in this capture).
 */
2390 static void kcore_copy__layout(struct kcore_copy_info *kci)
2392 struct phdr_data *p;
2395 kcore_copy__find_remaps(kci);
2397 kcore_copy__for_each_phdr(kci, p) {
2405 kcore_copy__for_each_phdr(kci, p) {
2406 struct phdr_data *k = p->remaps;
2409 p->rel = p->offset - k->offset + k->rel;
/*
 * kcore_copy__calc_maps - parse kallsyms/modules from @dir, round the
 * kernel text and module boundaries to page granularity, then read the
 * kcore phdrs and lay out the copy.
 */
2413 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
2416 if (kcore_copy__parse_kallsyms(kci, dir))
2419 if (kcore_copy__parse_modules(kci, dir))
/* Kernel text start: _stext if found, else the lowest function symbol. */
2423 kci->stext = round_down(kci->stext, page_size);
2425 kci->stext = round_down(kci->first_symbol, page_size);
/* Kernel text end: _etext, or last symbol plus a slack page. */
2428 kci->etext = round_up(kci->etext, page_size);
2429 } else if (kci->last_symbol) {
2430 kci->etext = round_up(kci->last_symbol, page_size);
2431 kci->etext += page_size;
2434 if (kci->first_module_symbol &&
2435 (!kci->first_module || kci->first_module_symbol < kci->first_module))
2436 kci->first_module = kci->first_module_symbol;
2438 kci->first_module = round_down(kci->first_module, page_size);
2440 if (kci->last_module_symbol) {
2441 kci->last_module_symbol = round_up(kci->last_module_symbol,
2443 kci->last_module_symbol += page_size;
2446 if (!kci->stext || !kci->etext)
2449 if (kci->first_module && !kci->last_module_symbol)
2452 if (kcore_copy__read_maps(kci, elf))
2455 kcore_copy__layout(kci);
/*
 * kcore_copy__copy_file - copy "<from_dir>/<name>" to "<to_dir>/<name>"
 * with mode 0400.
 */
2460 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
2463 char from_filename[PATH_MAX];
2464 char to_filename[PATH_MAX];
2466 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2467 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2469 return copyfile_mode(from_filename, to_filename, 0400);
/* kcore_copy__unlink - remove "<dir>/<name>". */
2472 static int kcore_copy__unlink(const char *dir, const char *name)
2474 char filename[PATH_MAX];
2476 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
2478 return unlink(filename);
/*
 * kcore_copy__compare_fds - compare two open files page by page
 * (loop structure and cleanup elided in this capture).
 */
2481 static int kcore_copy__compare_fds(int from, int to)
2489 buf_from = malloc(page_size);
2490 buf_to = malloc(page_size);
2491 if (!buf_from || !buf_to)
2495 /* Use read because mmap won't work on proc files */
2496 ret = read(from, buf_from, page_size);
2505 if (readn(to, buf_to, len) != (int)len)
2508 if (memcmp(buf_from, buf_to, len))
/*
 * kcore_copy__compare_files - open both paths read-only and compare
 * their contents via kcore_copy__compare_fds().
 */
2519 static int kcore_copy__compare_files(const char *from_filename,
2520 const char *to_filename)
2522 int from, to, err = -1;
2524 from = open(from_filename, O_RDONLY);
2528 to = open(to_filename, O_RDONLY);
2530 goto out_close_from;
2532 err = kcore_copy__compare_fds(from, to);
/*
 * kcore_copy__compare_file - compare "<from_dir>/<name>" against
 * "<to_dir>/<name>".
 */
2540 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
2543 char from_filename[PATH_MAX];
2544 char to_filename[PATH_MAX];
2546 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2547 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2549 return kcore_copy__compare_files(from_filename, to_filename);
2553 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
2554 * @from_dir: from directory
2555 * @to_dir: to directory
2557 * This function copies kallsyms, modules and kcore files from one directory to
2558 * another. kallsyms and modules are copied entirely. Only code segments are
2559 * copied from kcore. It is assumed that two segments suffice: one for the
2560 * kernel proper and one for all the modules. The code segments are determined
2561 * from kallsyms and modules files. The kernel map starts at _stext or the
2562 * lowest function symbol, and ends at _etext or the highest function symbol.
2563 * The module map starts at the lowest module address and ends at the highest
2564 * module symbol. Start addresses are rounded down to the nearest page. End
2565 * addresses are rounded up to the nearest page. An extra page is added to the
2566 * highest kernel symbol and highest module symbol to, hopefully, encompass that
2567 * symbol too. Because it contains only code sections, the resulting kcore is
2568 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
2569 * is not the same for the kernel map and the modules map. That happens because
2570 * the data is copied adjacently whereas the original kcore has gaps. Finally,
2571 * kallsyms file is compared with its copy to check that modules have not been
2572 * loaded or unloaded while the copies were taking place.
2574 * Return: %0 on success, %-1 on failure.
2576 int kcore_copy(const char *from_dir, const char *to_dir)
2579 struct kcore extract;
2580 int idx = 0, err = -1;
2582 struct kcore_copy_info kci = { .stext = 0, };
2583 char kcore_filename[PATH_MAX];
2584 char extract_filename[PATH_MAX];
2585 struct phdr_data *p;
2587 INIT_LIST_HEAD(&kci.phdrs);
2588 INIT_LIST_HEAD(&kci.syms);
/* Copy the companion text files first; they seed the map calculation. */
2590 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
2593 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
2594 goto out_unlink_kallsyms;
2596 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
2597 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
2599 if (kcore__open(&kcore, kcore_filename))
2600 goto out_unlink_modules;
/* Work out which code segments to extract and where they will land. */
2602 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
2603 goto out_kcore_close;
2605 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
2606 goto out_kcore_close;
2608 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
2609 goto out_extract_close;
/* Data starts on the first page boundary after ehdr + phdr table. */
2611 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
2612 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
2613 offset = round_up(offset, page_size);
2615 kcore_copy__for_each_phdr(&kci, p) {
2616 off_t offs = p->rel + offset;
2618 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
2619 goto out_extract_close;
/* Write the headers; they must not spill past the data offset. */
2622 sz = kcore__write(&extract);
2623 if (sz < 0 || sz > offset)
2624 goto out_extract_close;
2626 kcore_copy__for_each_phdr(&kci, p) {
2627 off_t offs = p->rel + offset;
2631 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
2632 goto out_extract_close;
/* Re-compare kallsyms to detect module load/unload during the copy. */
2635 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
2636 goto out_extract_close;
2641 kcore__close(&extract);
2643 unlink(extract_filename);
2645 kcore__close(&kcore);
2648 kcore_copy__unlink(to_dir, "modules");
2649 out_unlink_kallsyms:
2651 kcore_copy__unlink(to_dir, "kallsyms");
2653 kcore_copy__free_phdrs(&kci);
2654 kcore_copy__free_syms(&kci);
/*
 * kcore_extract__create - extract the single region described by @kce
 * from its kcore file into a temporary ELF (PERF_KCORE_EXTRACT template)
 * with one program header. Cleanup label structure is elided here.
 */
2659 int kcore_extract__create(struct kcore_extract *kce)
2662 struct kcore extract;
2664 int idx = 0, err = -1;
2665 off_t offset = page_size, sz;
2667 if (kcore__open(&kcore, kce->kcore_filename))
2670 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
2671 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
2672 goto out_kcore_close;
2674 if (kcore__copy_hdr(&kcore, &extract, count))
2675 goto out_extract_close;
2677 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
2678 goto out_extract_close;
/* Headers must fit before the page-aligned data offset. */
2680 sz = kcore__write(&extract);
2681 if (sz < 0 || sz > offset)
2682 goto out_extract_close;
2684 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
2685 goto out_extract_close;
2690 kcore__close(&extract);
2692 unlink(kce->extract_filename);
2694 kcore__close(&kcore);
/* kcore_extract__delete - remove the temporary extract file. */
2699 void kcore_extract__delete(struct kcore_extract *kce)
2701 unlink(kce->extract_filename);
2704 #ifdef HAVE_GELF_GETNOTE_SUPPORT
/*
 * sdt_adjust_loc - rebase the SDT probe location by the difference
 * between the recorded base address and the actual .stapsdt.base file
 * offset (prelink compensation); 32/64-bit selector is elided here.
 */
2706 static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
2712 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
2713 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
2714 tmp->addr.a32[SDT_NOTE_IDX_BASE];
2716 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
2717 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
2718 tmp->addr.a64[SDT_NOTE_IDX_BASE];
/*
 * sdt_adjust_refctr - convert the semaphore (reference counter) address
 * from a virtual address to a file offset using the probes section's
 * addr/offset delta; a zero refctr means no semaphore.
 */
2721 static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2727 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2728 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2729 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2730 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2734 * populate_sdt_note : Parse raw data and identify SDT note
2735 * @elf: elf of the opened file
2736 * @data: raw data of a section with description offset applied
2737 * @len: note description size
2738 * @type: type of the note
2739 * @sdt_notes: List to add the SDT note
2741 * Responsible for parsing the @data in section .note.stapsdt in @elf and
2742 * if it's an SDT note, it appends to @sdt_notes list.
2744 static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2745 struct list_head *sdt_notes)
2747 const char *provider, *name, *args;
2748 struct sdt_note *tmp = NULL;
/* Note descriptor starts with NR_ADDR addresses (loc, base, refctr). */
2754 Elf64_Addr a64[NR_ADDR];
2755 Elf32_Addr a32[NR_ADDR];
2759 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2760 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2761 .d_off = 0, .d_align = 0
2764 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2765 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2769 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2775 INIT_LIST_HEAD(&tmp->note_list);
/* Need room for the addresses plus at least provider/name/args NULs. */
2777 if (len < dst.d_size + 3)
2780 /* Translation from file representation to memory representation */
2781 if (gelf_xlatetom(*elf, &dst, &src,
2782 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2783 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2787 /* Populate the fields of sdt_note */
2788 provider = data + dst.d_size;
2790 name = (const char *)memchr(provider, '\0', data + len - provider);
2794 tmp->provider = strdup(provider);
2795 if (!tmp->provider) {
2799 tmp->name = strdup(name);
2805 args = memchr(name, '\0', data + len - name);
2808 * There is no argument if:
2809 * - We reached the end of the note;
2810 * - There is not enough room to hold a potential string;
2811 * - The argument string is empty or just contains ':'.
2813 if (args == NULL || data + len - args < 2 ||
2814 args[1] == ':' || args[1] == '\0')
2817 tmp->args = strdup(++args);
/* Copy the three translated addresses in the file's native width. */
2824 if (gelf_getclass(*elf) == ELFCLASS32) {
2825 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2828 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2832 if (!gelf_getehdr(*elf, &ehdr)) {
2833 pr_debug("%s : cannot get elf header.\n", __func__);
2838 /* Adjust the prelink effect :
2839 * Find out the .stapsdt.base section.
2840 * This scn will help us to handle prelinking (if present).
2841 * Compare the retrieved file offset of the base section with the
2842 * base address in the description of the SDT note. If it's different,
2843 * then accordingly, adjust the note location.
2845 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2846 sdt_adjust_loc(tmp, shdr.sh_offset);
2848 /* Adjust reference counter offset */
2849 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2850 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
2852 list_add_tail(&tmp->note_list, sdt_notes);
/* Error path: free partially built note. */
2860 zfree(&tmp->provider);
2868 * construct_sdt_notes_list : constructs a list of SDT notes
2869 * @elf : elf to look into
2870 * @sdt_notes : empty list_head
2872 * Scans the sections in 'elf' for the section
2873 .note.stapsdt. It then calls populate_sdt_note to find
2874 * out the SDT events and populates the 'sdt_notes'.
2876 static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2879 Elf_Scn *scn = NULL;
2882 size_t shstrndx, next;
2884 size_t name_off, desc_off, offset;
2887 if (gelf_getehdr(elf, &ehdr) == NULL) {
2891 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2896 /* Look for the required section */
2897 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
/* SDT notes live in a non-allocated SHT_NOTE section. */
2903 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2908 data = elf_getdata(scn, NULL);
2910 /* Get the SDT notes */
2911 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2912 &desc_off)) > 0; offset = next) {
2913 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2914 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2915 sizeof(SDT_NOTE_NAME))) {
2916 /* Check the type of the note */
2917 if (nhdr.n_type != SDT_NOTE_TYPE)
2920 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2921 nhdr.n_descsz, sdt_notes);
/* No notes found is treated as an error (value elided here). */
2926 if (list_empty(sdt_notes))
2934 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2935 * @head : empty list_head
2936 * @target : file to find SDT notes from
2938 * This opens the file, initializes
2939 * the ELF and then calls construct_sdt_notes_list.
2941 int get_sdt_note_list(struct list_head *head, const char *target)
2946 fd = open(target, O_RDONLY);
2950 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2955 ret = construct_sdt_notes_list(elf, head);
2963 * cleanup_sdt_note_list : free the sdt notes' list
2964 * @sdt_notes: sdt notes' list
2966 * Free up the SDT notes in @sdt_notes.
2967 * Returns the number of SDT notes free'd.
2969 int cleanup_sdt_note_list(struct list_head *sdt_notes)
2971 struct sdt_note *tmp, *pos;
2974 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2975 list_del_init(&pos->note_list);
/* Free owned strings (name/args frees are elided in this capture). */
2978 zfree(&pos->provider);
2986 * sdt_notes__get_count: Counts the number of sdt events
2987 * @start: list_head to sdt_notes list
2989 * Returns the number of SDT notes in a list
2991 int sdt_notes__get_count(struct list_head *start)
2993 struct sdt_note *sdt_ptr;
/* Count entries (accumulator declaration/return elided here). */
2996 list_for_each_entry(sdt_ptr, start, note_list)
3002 void symbol__elf_init(void)
3004 elf_version(EV_CURRENT);