| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | #include <asm/bug.h> |
| 3 | #include <linux/kernel.h> |
| 4 | #include <linux/string.h> |
| 5 | #include <linux/zalloc.h> |
| 6 | #include <sys/time.h> |
| 7 | #include <sys/resource.h> |
| 8 | #include <sys/types.h> |
| 9 | #include <sys/stat.h> |
| 10 | #include <unistd.h> |
| 11 | #include <errno.h> |
| 12 | #include <fcntl.h> |
| 13 | #include <stdlib.h> |
| 14 | #ifdef HAVE_LIBBPF_SUPPORT |
| 15 | #include <bpf/libbpf.h> |
| 16 | #include "bpf-event.h" |
| 17 | #include "bpf-utils.h" |
| 18 | #endif |
| 19 | #include "compress.h" |
| 20 | #include "env.h" |
| 21 | #include "namespaces.h" |
| 22 | #include "path.h" |
| 23 | #include "map.h" |
| 24 | #include "symbol.h" |
| 25 | #include "srcline.h" |
| 26 | #include "dso.h" |
| 27 | #include "dsos.h" |
| 28 | #include "machine.h" |
| 29 | #include "auxtrace.h" |
| 30 | #include "util.h" /* O_CLOEXEC for older systems */ |
| 31 | #include "debug.h" |
| 32 | #include "string2.h" |
| 33 | #include "vdso.h" |
| 34 | #include "annotate-data.h" |
| 35 | |
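|  | /* |
|  |  * Each entry is an snprintf() format that receives (dso_dir, debuglink) in |
|  |  * dso__read_binary_type_filename() below. As an illustration (the paths |
|  |  * are hypothetical), with dso_dir "/usr/bin" and a .gnu_debuglink value |
|  |  * of "ls.debug" the candidates expand, in order, to: |
|  |  * |
|  |  *   "%.0s%s"              -> "ls.debug" |
|  |  *   "%s/%s"               -> "/usr/bin/ls.debug" |
|  |  *   "%s/.debug/%s"        -> "/usr/bin/.debug/ls.debug" |
|  |  *   "/usr/lib/debug%s/%s" -> "/usr/lib/debug/usr/bin/ls.debug" |
|  |  * |
|  |  * The first candidate that is a regular file wins. |
|  |  */ |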
| 36 | static const char * const debuglink_paths[] = { |
| 37 | "%.0s%s", |
| 38 | "%s/%s", |
| 39 | "%s/.debug/%s", |
| 40 | "/usr/lib/debug%s/%s" |
| 41 | }; |
| 42 | |
| 43 | void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi) |
| 44 | { |
| 45 | nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo); |
| 46 | RC_CHK_ACCESS(dso)->nsinfo = nsi; |
| 47 | } |
| 48 | |
| 49 | char dso__symtab_origin(const struct dso *dso) |
| 50 | { |
| 51 | static const char origin[] = { |
| 52 | [DSO_BINARY_TYPE__KALLSYMS] = 'k', |
| 53 | [DSO_BINARY_TYPE__VMLINUX] = 'v', |
| 54 | [DSO_BINARY_TYPE__JAVA_JIT] = 'j', |
| 55 | [DSO_BINARY_TYPE__DEBUGLINK] = 'l', |
| 56 | [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', |
| 57 | [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', |
| 58 | [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', |
| 59 | [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', |
| 60 | [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', |
| 61 | [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', |
| 62 | [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', |
| 63 | [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', |
| 64 | [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', |
| 65 | [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm', |
| 66 | [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', |
| 67 | [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', |
| 68 | [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M', |
| 69 | [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', |
| 70 | [DSO_BINARY_TYPE__GNU_DEBUGDATA] = 'n', |
| 71 | }; |
| 72 | |
| 73 | if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) |
| 74 | return '!'; |
| 75 | return origin[dso__symtab_type(dso)]; |
| 76 | } |
| 77 | |
| 78 | bool dso__is_object_file(const struct dso *dso) |
| 79 | { |
| 80 | switch (dso__binary_type(dso)) { |
| 81 | case DSO_BINARY_TYPE__KALLSYMS: |
| 82 | case DSO_BINARY_TYPE__GUEST_KALLSYMS: |
| 83 | case DSO_BINARY_TYPE__JAVA_JIT: |
| 84 | case DSO_BINARY_TYPE__BPF_PROG_INFO: |
| 85 | case DSO_BINARY_TYPE__BPF_IMAGE: |
| 86 | case DSO_BINARY_TYPE__OOL: |
| 87 | return false; |
| 88 | case DSO_BINARY_TYPE__VMLINUX: |
| 89 | case DSO_BINARY_TYPE__GUEST_VMLINUX: |
| 90 | case DSO_BINARY_TYPE__DEBUGLINK: |
| 91 | case DSO_BINARY_TYPE__BUILD_ID_CACHE: |
| 92 | case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: |
| 93 | case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: |
| 94 | case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: |
| 95 | case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: |
| 96 | case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: |
| 97 | case DSO_BINARY_TYPE__GNU_DEBUGDATA: |
| 98 | case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: |
| 99 | case DSO_BINARY_TYPE__GUEST_KMODULE: |
| 100 | case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: |
| 101 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: |
| 102 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: |
| 103 | case DSO_BINARY_TYPE__KCORE: |
| 104 | case DSO_BINARY_TYPE__GUEST_KCORE: |
| 105 | case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: |
| 106 | case DSO_BINARY_TYPE__NOT_FOUND: |
| 107 | default: |
| 108 | return true; |
| 109 | } |
| 110 | } |
| 111 | |
| 112 | int dso__read_binary_type_filename(const struct dso *dso, |
| 113 | enum dso_binary_type type, |
| 114 | char *root_dir, char *filename, size_t size) |
| 115 | { |
| 116 | char build_id_hex[SBUILD_ID_SIZE]; |
| 117 | int ret = 0; |
| 118 | size_t len; |
| 119 | |
| 120 | switch (type) { |
| 121 | case DSO_BINARY_TYPE__DEBUGLINK: |
| 122 | { |
| 123 | const char *last_slash; |
| 124 | char dso_dir[PATH_MAX]; |
| 125 | char symfile[PATH_MAX]; |
| 126 | unsigned int i; |
| 127 | |
| 128 | len = __symbol__join_symfs(filename, size, dso__long_name(dso)); |
| 129 | last_slash = filename + len; |
| 130 | while (last_slash != filename && *last_slash != '/') |
| 131 | last_slash--; |
| 132 | |
| 133 | strncpy(dso_dir, filename, last_slash - filename); |
| 134 | dso_dir[last_slash-filename] = '\0'; |
| 135 | |
| 136 | if (!is_regular_file(filename)) { |
| 137 | ret = -1; |
| 138 | break; |
| 139 | } |
| 140 | |
| 141 | ret = filename__read_debuglink(filename, symfile, PATH_MAX); |
| 142 | if (ret) |
| 143 | break; |
| 144 | |
| 145 | /* Check predefined locations where debug file might reside */ |
| 146 | ret = -1; |
| 147 | for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) { |
| 148 | snprintf(filename, size, |
| 149 | debuglink_paths[i], dso_dir, symfile); |
| 150 | if (is_regular_file(filename)) { |
| 151 | ret = 0; |
| 152 | break; |
| 153 | } |
| 154 | } |
| 155 | |
| 156 | break; |
| 157 | } |
| 158 | case DSO_BINARY_TYPE__BUILD_ID_CACHE: |
| 159 | if (dso__build_id_filename(dso, filename, size, false) == NULL) |
| 160 | ret = -1; |
| 161 | break; |
| 162 | |
| 163 | case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: |
| 164 | if (dso__build_id_filename(dso, filename, size, true) == NULL) |
| 165 | ret = -1; |
| 166 | break; |
| 167 | |
| 168 | case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: |
| 169 | len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); |
| 170 | snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso)); |
| 171 | break; |
| 172 | |
| 173 | case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: |
| 174 | len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); |
| 175 | snprintf(filename + len, size - len, "%s", dso__long_name(dso)); |
| 176 | break; |
| 177 | |
| 178 | case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: |
| 179 | /* |
| 180 | * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in |
| 181 | * /usr/lib/debug/lib when it is expected to be in |
| 182 | * /usr/lib/debug/usr/lib |
| 183 | */ |
| 184 | if (strlen(dso__long_name(dso)) < 9 || |
| 185 | strncmp(dso__long_name(dso), "/usr/lib/", 9)) { |
| 186 | ret = -1; |
| 187 | break; |
| 188 | } |
| 189 | len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); |
| 190 | snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4); |
| 191 | break; |
| 192 | |
| 193 | case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: |
| 194 | { |
| 195 | const char *last_slash; |
| 196 | size_t dir_size; |
| 197 | |
| 198 | last_slash = dso__long_name(dso) + dso__long_name_len(dso); |
| 199 | while (last_slash != dso__long_name(dso) && *last_slash != '/') |
| 200 | last_slash--; |
| 201 | |
| 202 | len = __symbol__join_symfs(filename, size, ""); |
| 203 | dir_size = last_slash - dso__long_name(dso) + 2; |
| 204 | if (dir_size > (size - len)) { |
| 205 | ret = -1; |
| 206 | break; |
| 207 | } |
| 208 | len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso)); |
| 209 | len += scnprintf(filename + len , size - len, ".debug%s", |
| 210 | last_slash); |
| 211 | break; |
| 212 | } |
| 213 | |
| 214 | case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: |
| 215 | if (!dso__has_build_id(dso)) { |
| 216 | ret = -1; |
| 217 | break; |
| 218 | } |
| 219 | |
| 220 | build_id__snprintf(dso__bid(dso), build_id_hex, sizeof(build_id_hex)); |
| 221 | len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); |
| 222 | snprintf(filename + len, size - len, "%.2s/%s.debug", |
| 223 | build_id_hex, build_id_hex + 2); |
| 224 | break; |
| 225 | |
| 226 | case DSO_BINARY_TYPE__VMLINUX: |
| 227 | case DSO_BINARY_TYPE__GUEST_VMLINUX: |
| 228 | case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: |
| 229 | case DSO_BINARY_TYPE__GNU_DEBUGDATA: |
| 230 | __symbol__join_symfs(filename, size, dso__long_name(dso)); |
| 231 | break; |
| 232 | |
| 233 | case DSO_BINARY_TYPE__GUEST_KMODULE: |
| 234 | case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: |
| 235 | path__join3(filename, size, symbol_conf.symfs, |
| 236 | root_dir, dso__long_name(dso)); |
| 237 | break; |
| 238 | |
| 239 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: |
| 240 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: |
| 241 | __symbol__join_symfs(filename, size, dso__long_name(dso)); |
| 242 | break; |
| 243 | |
| 244 | case DSO_BINARY_TYPE__KCORE: |
| 245 | case DSO_BINARY_TYPE__GUEST_KCORE: |
| 246 | snprintf(filename, size, "%s", dso__long_name(dso)); |
| 247 | break; |
| 248 | |
| 249 | default: |
| 250 | case DSO_BINARY_TYPE__KALLSYMS: |
| 251 | case DSO_BINARY_TYPE__GUEST_KALLSYMS: |
| 252 | case DSO_BINARY_TYPE__JAVA_JIT: |
| 253 | case DSO_BINARY_TYPE__BPF_PROG_INFO: |
| 254 | case DSO_BINARY_TYPE__BPF_IMAGE: |
| 255 | case DSO_BINARY_TYPE__OOL: |
| 256 | case DSO_BINARY_TYPE__NOT_FOUND: |
| 257 | ret = -1; |
| 258 | break; |
| 259 | } |
| 260 | |
| 261 | return ret; |
| 262 | } |
| 263 | |
| 264 | enum { |
| 265 | COMP_ID__NONE = 0, |
| 266 | }; |
| 267 | |
| 268 | static const struct { |
| 269 | const char *fmt; |
| 270 | int (*decompress)(const char *input, int output); |
| 271 | bool (*is_compressed)(const char *input); |
| 272 | } compressions[] = { |
| 273 | [COMP_ID__NONE] = { .fmt = NULL, }, |
| 274 | #ifdef HAVE_ZLIB_SUPPORT |
| 275 | { "gz", gzip_decompress_to_file, gzip_is_compressed }, |
| 276 | #endif |
| 277 | #ifdef HAVE_LZMA_SUPPORT |
| 278 | { "xz", lzma_decompress_to_file, lzma_is_compressed }, |
| 279 | #endif |
| 280 | { NULL, NULL, NULL }, |
| 281 | }; |
| 282 | |
| 283 | static int is_supported_compression(const char *ext) |
| 284 | { |
| 285 | unsigned i; |
| 286 | |
| 287 | for (i = 1; compressions[i].fmt; i++) { |
| 288 | if (!strcmp(ext, compressions[i].fmt)) |
| 289 | return i; |
| 290 | } |
| 291 | return COMP_ID__NONE; |
| 292 | } |
| 293 | |
| 294 | bool is_kernel_module(const char *pathname, int cpumode) |
| 295 | { |
| 296 | struct kmod_path m; |
| 297 | int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK; |
| 298 | |
| 299 | WARN_ONCE(mode != cpumode, |
| 300 | "Internal error: passing unmasked cpumode (%x) to is_kernel_module", |
| 301 | cpumode); |
| 302 | |
| 303 | switch (mode) { |
| 304 | case PERF_RECORD_MISC_USER: |
| 305 | case PERF_RECORD_MISC_HYPERVISOR: |
| 306 | case PERF_RECORD_MISC_GUEST_USER: |
| 307 | return false; |
| 308 | /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ |
| 309 | default: |
| 310 | if (kmod_path__parse(&m, pathname)) { |
| 311 | pr_err("Failed to check whether %s is a kernel module or not. Assume it is.\n", |
| 312 | pathname); |
| 313 | return true; |
| 314 | } |
| 315 | } |
| 316 | |
| 317 | return m.kmod; |
| 318 | } |
| 319 | |
| 320 | bool dso__needs_decompress(struct dso *dso) |
| 321 | { |
| 322 | return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || |
| 323 | dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; |
| 324 | } |
| 325 | |
| 326 | int filename__decompress(const char *name, char *pathname, |
| 327 | size_t len, int comp, int *err) |
| 328 | { |
| 329 | char tmpbuf[] = KMOD_DECOMP_NAME; |
| 330 | int fd = -1; |
| 331 | |
| 332 | /* |
| 333 | * We have a proper compression id for the DSO and yet the file |
| 334 | * behind 'name' can still be a plain uncompressed object. |
| 335 | * |
| 336 | * The reason lies in how we open DSO object files: we try all |
| 337 | * possible 'debug' objects until we find the data. So even if |
| 338 | * the DSO is represented by a 'krava.xz' module, we can end up |
| 339 | * here opening a '~/.debug/....23432432/debug' file which is |
| 340 | * not compressed. |
| 341 | * |
| 342 | * To keep this transparent, we detect this case and return a |
| 343 | * file descriptor to the uncompressed file. |
| 344 | */ |
| 345 | if (!compressions[comp].is_compressed(name)) |
| 346 | return open(name, O_RDONLY); |
| 347 | |
| 348 | fd = mkstemp(tmpbuf); |
| 349 | if (fd < 0) { |
| 350 | *err = errno; |
| 351 | return -1; |
| 352 | } |
| 353 | |
| 354 | if (compressions[comp].decompress(name, fd)) { |
| 355 | *err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; |
| 356 | close(fd); |
| 357 | fd = -1; |
| 358 | } |
| 359 | |
| 360 | if (!pathname || (fd < 0)) |
| 361 | unlink(tmpbuf); |
| 362 | |
| 363 | if (pathname && (fd >= 0)) |
| 364 | strlcpy(pathname, tmpbuf, len); |
| 365 | |
| 366 | return fd; |
| 367 | } |
| 368 | |
| 369 | static int decompress_kmodule(struct dso *dso, const char *name, |
| 370 | char *pathname, size_t len) |
| 371 | { |
| 372 | if (!dso__needs_decompress(dso)) |
| 373 | return -1; |
| 374 | |
| 375 | if (dso__comp(dso) == COMP_ID__NONE) |
| 376 | return -1; |
| 377 | |
| 378 | return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso)); |
| 379 | } |
| 380 | |
| 381 | int dso__decompress_kmodule_fd(struct dso *dso, const char *name) |
| 382 | { |
| 383 | return decompress_kmodule(dso, name, NULL, 0); |
| 384 | } |
| 385 | |
| 386 | int dso__decompress_kmodule_path(struct dso *dso, const char *name, |
| 387 | char *pathname, size_t len) |
| 388 | { |
| 389 | int fd = decompress_kmodule(dso, name, pathname, len); |
| 390 | |
| 391 | close(fd); |
| 392 | return fd >= 0 ? 0 : -1; |
| 393 | } |
| 394 | |
| 395 | /* |
| 396 | * Parses the kernel module specified in @path and updates |
| 397 | * the @m argument as follows: |
| 398 | * |
| 399 | *    @comp - compression id if @path has a supported compression suffix, |
| 400 | *            COMP_ID__NONE otherwise |
| 401 | *    @kmod - true if @path has a '.ko' suffix in the right position, |
| 402 | *            false otherwise |
| 403 | *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base |
| 404 | *            name of the kernel module without suffixes, otherwise the |
| 405 | *            strdup-ed base name of @path |
| 406 | * |
| 407 | * Returns 0 if there's no strdup error, -ENOMEM otherwise. |
| 410 | */ |
| 411 | int __kmod_path__parse(struct kmod_path *m, const char *path, |
| 412 | bool alloc_name) |
| 413 | { |
| 414 | const char *name = strrchr(path, '/'); |
| 415 | const char *ext = strrchr(path, '.'); |
| 416 | bool is_simple_name = false; |
| 417 | |
| 418 | memset(m, 0x0, sizeof(*m)); |
| 419 | name = name ? name + 1 : path; |
| 420 | |
| 421 | /* |
| 422 | * '.' is also a valid character for a module name. For example, |
| 423 | * [aaa.bbb] is a valid module name, so '[' should take priority |
| 424 | * over the '.ko' suffix. |
| 425 | * |
| 426 | * The kernel names come from machine__mmap_name. Such names |
| 427 | * belong to the kernel itself, not to a kernel module. |
| 428 | */ |
| 429 | if (name[0] == '[') { |
| 430 | is_simple_name = true; |
| 431 | if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || |
| 432 | (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || |
| 433 | (strncmp(name, "[vdso]", 6) == 0) || |
| 434 | (strncmp(name, "[vdso32]", 8) == 0) || |
| 435 | (strncmp(name, "[vdsox32]", 9) == 0) || |
| 436 | (strncmp(name, "[vsyscall]", 10) == 0)) { |
| 437 | m->kmod = false; |
| 438 | |
| 439 | } else |
| 440 | m->kmod = true; |
| 441 | } |
| 442 | |
| 443 | /* No extension, just return name. */ |
| 444 | if ((ext == NULL) || is_simple_name) { |
| 445 | if (alloc_name) { |
| 446 | m->name = strdup(name); |
| 447 | return m->name ? 0 : -ENOMEM; |
| 448 | } |
| 449 | return 0; |
| 450 | } |
| 451 | |
| 452 | m->comp = is_supported_compression(ext + 1); |
| 453 | if (m->comp > COMP_ID__NONE) |
| 454 | ext -= 3; |
| 455 | |
| 456 | /* Check .ko extension only if there's enough name left. */ |
| 457 | if (ext > name) |
| 458 | m->kmod = !strncmp(ext, ".ko", 3); |
| 459 | |
| 460 | if (alloc_name) { |
| 461 | if (m->kmod) { |
| 462 | if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1) |
| 463 | return -ENOMEM; |
| 464 | } else { |
| 465 | if (asprintf(&m->name, "%s", name) == -1) |
| 466 | return -ENOMEM; |
| 467 | } |
| 468 | |
| 469 | strreplace(m->name, '-', '_'); |
| 470 | } |
| 471 | |
| 472 | return 0; |
| 473 | } |
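|  | /* |
|  |  * Illustrative example (the module path is hypothetical, and the xz case |
|  |  * assumes HAVE_LZMA_SUPPORT is built in): for |
|  |  * |
|  |  *   path = "/lib/modules/6.1.0/kernel/crypto/sha256-generic.ko.xz" |
|  |  * |
|  |  * and alloc_name = true, the parser yields m->comp set to the "xz" |
|  |  * compression id, m->kmod = true and m->name = "[sha256_generic]" |
|  |  * ('-' replaced by '_'). A simple name such as "[vdso]" instead yields |
|  |  * m->kmod = false and m->name = "[vdso]". |
|  |  */ |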
| 474 | |
| 475 | void dso__set_module_info(struct dso *dso, struct kmod_path *m, |
| 476 | struct machine *machine) |
| 477 | { |
| 478 | if (machine__is_host(machine)) |
| 479 | dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE); |
| 480 | else |
| 481 | dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE); |
| 482 | |
| 483 | /* _KMODULE_COMP should be next to _KMODULE */ |
| 484 | if (m->kmod && m->comp) { |
| 485 | dso__set_symtab_type(dso, dso__symtab_type(dso) + 1); |
| 486 | dso__set_comp(dso, m->comp); |
| 487 | } |
| 488 | |
| 489 | dso__set_is_kmod(dso); |
| 490 | dso__set_short_name(dso, strdup(m->name), true); |
| 491 | } |
| 492 | |
| 493 | /* |
| 494 | * Global list of open DSOs and the counter. |
| 495 | */ |
| 496 | struct mutex _dso__data_open_lock; |
| 497 | static LIST_HEAD(dso__data_open); |
| 498 | static long dso__data_open_cnt GUARDED_BY(_dso__data_open_lock); |
| 499 | |
| 500 | static void dso__data_open_lock_init(void) |
| 501 | { |
| 502 | mutex_init(&_dso__data_open_lock); |
| 503 | } |
| 504 | |
| 505 | static struct mutex *dso__data_open_lock(void) LOCK_RETURNED(_dso__data_open_lock) |
| 506 | { |
| 507 | static pthread_once_t data_open_lock_once = PTHREAD_ONCE_INIT; |
| 508 | |
| 509 | pthread_once(&data_open_lock_once, dso__data_open_lock_init); |
| 510 | |
| 511 | return &_dso__data_open_lock; |
| 512 | } |
| 513 | |
| 514 | static void dso__list_add(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 515 | { |
| 516 | list_add_tail(&dso__data(dso)->open_entry, &dso__data_open); |
| 517 | #ifdef REFCNT_CHECKING |
| 518 | dso__data(dso)->dso = dso__get(dso); |
| 519 | #endif |
| 520 | /* Assume the dso is part of dsos, hence the optional reference count above. */ |
| 521 | assert(dso__dsos(dso)); |
| 522 | dso__data_open_cnt++; |
| 523 | } |
| 524 | |
| 525 | static void dso__list_del(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 526 | { |
| 527 | list_del_init(&dso__data(dso)->open_entry); |
| 528 | #ifdef REFCNT_CHECKING |
| 529 | mutex_unlock(dso__data_open_lock()); |
| 530 | dso__put(dso__data(dso)->dso); |
| 531 | mutex_lock(dso__data_open_lock()); |
| 532 | #endif |
| 533 | WARN_ONCE(dso__data_open_cnt <= 0, |
| 534 | "DSO data fd counter out of bounds."); |
| 535 | dso__data_open_cnt--; |
| 536 | } |
| 537 | |
| 538 | static void close_first_dso(void); |
| 539 | |
| 540 | static int do_open(char *name) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 541 | { |
| 542 | int fd; |
| 543 | char sbuf[STRERR_BUFSIZE]; |
| 544 | |
| 545 | do { |
| 546 | fd = open(name, O_RDONLY|O_CLOEXEC); |
| 547 | if (fd >= 0) |
| 548 | return fd; |
| 549 | |
| 550 | pr_debug("dso open failed: %s\n", |
| 551 | str_error_r(errno, sbuf, sizeof(sbuf))); |
| 552 | if (!dso__data_open_cnt || errno != EMFILE) |
| 553 | break; |
| 554 | |
| 555 | close_first_dso(); |
| 556 | } while (1); |
| 557 | |
| 558 | return -1; |
| 559 | } |
| 560 | |
| 561 | char *dso__filename_with_chroot(const struct dso *dso, const char *filename) |
| 562 | { |
| 563 | return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename); |
| 564 | } |
| 565 | |
| 566 | static int __open_dso(struct dso *dso, struct machine *machine) |
| 567 | EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 568 | { |
| 569 | int fd = -EINVAL; |
| 570 | char *root_dir = (char *)""; |
| 571 | char *name = malloc(PATH_MAX); |
| 572 | bool decomp = false; |
| 573 | |
| 574 | if (!name) |
| 575 | return -ENOMEM; |
| 576 | |
| 577 | mutex_lock(dso__lock(dso)); |
| 578 | if (machine) |
| 579 | root_dir = machine->root_dir; |
| 580 | |
| 581 | if (dso__read_binary_type_filename(dso, dso__binary_type(dso), |
| 582 | root_dir, name, PATH_MAX)) |
| 583 | goto out; |
| 584 | |
| 585 | if (!is_regular_file(name)) { |
| 586 | char *new_name; |
| 587 | |
| 588 | if (errno != ENOENT || dso__nsinfo(dso) == NULL) |
| 589 | goto out; |
| 590 | |
| 591 | new_name = dso__filename_with_chroot(dso, name); |
| 592 | if (!new_name) |
| 593 | goto out; |
| 594 | |
| 595 | free(name); |
| 596 | name = new_name; |
| 597 | } |
| 598 | |
| 599 | if (dso__needs_decompress(dso)) { |
| 600 | char newpath[KMOD_DECOMP_LEN]; |
| 601 | size_t len = sizeof(newpath); |
| 602 | |
| 603 | if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { |
| 604 | fd = -(*dso__load_errno(dso)); |
| 605 | goto out; |
| 606 | } |
| 607 | |
| 608 | decomp = true; |
| 609 | strcpy(name, newpath); |
| 610 | } |
| 611 | |
| 612 | fd = do_open(name); |
| 613 | |
| 614 | if (decomp) |
| 615 | unlink(name); |
| 616 | |
| 617 | out: |
| 618 | mutex_unlock(dso__lock(dso)); |
| 619 | free(name); |
| 620 | return fd; |
| 621 | } |
| 622 | |
| 623 | static void check_data_close(void); |
| 624 | |
| 625 | /** |
| 626 | * open_dso - Open DSO data file |
| 627 | * @dso: dso object |
| 628 | * @machine: machine object |
| 629 | * |
| 630 | * Open @dso's data file descriptor and update the list/count of open DSO objects. |
| 631 | */ |
| 632 | static int open_dso(struct dso *dso, struct machine *machine) |
| 633 | EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 634 | { |
| 635 | int fd; |
| 636 | struct nscookie nsc; |
| 637 | |
| 638 | if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) { |
| 639 | mutex_lock(dso__lock(dso)); |
| 640 | nsinfo__mountns_enter(dso__nsinfo(dso), &nsc); |
| 641 | mutex_unlock(dso__lock(dso)); |
| 642 | } |
| 643 | fd = __open_dso(dso, machine); |
| 644 | if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) |
| 645 | nsinfo__mountns_exit(&nsc); |
| 646 | |
| 647 | if (fd >= 0) { |
| 648 | dso__list_add(dso); |
| 649 | /* |
| 650 | * Check if we crossed the allowed number |
| 651 | * of opened DSOs and close one if needed. |
| 652 | */ |
| 653 | check_data_close(); |
| 654 | } |
| 655 | |
| 656 | return fd; |
| 657 | } |
| 658 | |
| 659 | static void close_data_fd(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 660 | { |
| 661 | if (dso__data(dso)->fd >= 0) { |
| 662 | close(dso__data(dso)->fd); |
| 663 | dso__data(dso)->fd = -1; |
| 664 | dso__data(dso)->file_size = 0; |
| 665 | dso__list_del(dso); |
| 666 | } |
| 667 | } |
| 668 | |
| 669 | /** |
| 670 | * close_dso - Close DSO data file |
| 671 | * @dso: dso object |
| 672 | * |
| 673 | * Close @dso's data file descriptor and update the |
| 674 | * list/count of open DSO objects. |
| 675 | */ |
| 676 | static void close_dso(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 677 | { |
| 678 | close_data_fd(dso); |
| 679 | } |
| 680 | |
| 681 | static void close_first_dso(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 682 | { |
| 683 | struct dso_data *dso_data; |
| 684 | struct dso *dso; |
| 685 | |
| 686 | dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry); |
| 687 | #ifdef REFCNT_CHECKING |
| 688 | dso = dso_data->dso; |
| 689 | #else |
| 690 | dso = container_of(dso_data, struct dso, data); |
| 691 | #endif |
| 692 | close_dso(dso); |
| 693 | } |
| 694 | |
| 695 | static rlim_t get_fd_limit(void) |
| 696 | { |
| 697 | struct rlimit l; |
| 698 | rlim_t limit = 0; |
| 699 | |
| 700 | /* Allow half of the current open fd limit. */ |
| 701 | if (getrlimit(RLIMIT_NOFILE, &l) == 0) { |
| 702 | if (l.rlim_cur == RLIM_INFINITY) |
| 703 | limit = l.rlim_cur; |
| 704 | else |
| 705 | limit = l.rlim_cur / 2; |
| 706 | } else { |
| 707 | pr_err("failed to get fd limit\n"); |
| 708 | limit = 1; |
| 709 | } |
| 710 | |
| 711 | return limit; |
| 712 | } |
| 713 | |
| 714 | static rlim_t fd_limit; |
| 715 | |
| 716 | /* |
| 717 | * Used only by tests/dso-data.c to reset the environment |
| 718 | * for tests. We do not expect this to change during |
| 719 | * standard runtime. |
| 720 | */ |
| 721 | void reset_fd_limit(void) |
| 722 | { |
| 723 | fd_limit = 0; |
| 724 | } |
| 725 | |
| 726 | static bool may_cache_fd(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 727 | { |
| 728 | if (!fd_limit) |
| 729 | fd_limit = get_fd_limit(); |
| 730 | |
| 731 | if (fd_limit == RLIM_INFINITY) |
| 732 | return true; |
| 733 | |
| 734 | return fd_limit > (rlim_t) dso__data_open_cnt; |
| 735 | } |
| 736 | |
| 737 | /* |
| 738 | * Check whether we crossed the allowed limit of open dso file |
| 739 | * descriptors and, if so, close the least recently opened one. |
| 740 | * The limit is half of RLIMIT_NOFILE. |
| 741 | */ |
| 742 | static void check_data_close(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 743 | { |
| 744 | bool cache_fd = may_cache_fd(); |
| 745 | |
| 746 | if (!cache_fd) |
| 747 | close_first_dso(); |
| 748 | } |
| 749 | |
| 750 | /** |
| 751 | * dso__data_close - Close DSO data file |
| 752 | * @dso: dso object |
| 753 | * |
| 754 | * External interface to close @dso's data file descriptor. |
| 755 | */ |
| 756 | void dso__data_close(struct dso *dso) |
| 757 | { |
| 758 | mutex_lock(dso__data_open_lock()); |
| 759 | close_dso(dso); |
| 760 | mutex_unlock(dso__data_open_lock()); |
| 761 | } |
| 762 | |
| 763 | static void try_to_open_dso(struct dso *dso, struct machine *machine) |
| 764 | EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock) |
| 765 | { |
| 766 | enum dso_binary_type binary_type_data[] = { |
| 767 | DSO_BINARY_TYPE__BUILD_ID_CACHE, |
| 768 | DSO_BINARY_TYPE__SYSTEM_PATH_DSO, |
| 769 | DSO_BINARY_TYPE__NOT_FOUND, |
| 770 | }; |
| 771 | int i = 0; |
| 772 | struct dso_data *dso_data = dso__data(dso); |
| 773 | |
| 774 | if (dso_data->fd >= 0) |
| 775 | return; |
| 776 | |
| 777 | if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) { |
| 778 | dso_data->fd = open_dso(dso, machine); |
| 779 | goto out; |
| 780 | } |
| 781 | |
| 782 | do { |
| 783 | dso__set_binary_type(dso, binary_type_data[i++]); |
| 784 | |
| 785 | dso_data->fd = open_dso(dso, machine); |
| 786 | if (dso_data->fd >= 0) |
| 787 | goto out; |
| 788 | |
| 789 | } while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND); |
| 790 | out: |
| 791 | if (dso_data->fd >= 0) |
| 792 | dso_data->status = DSO_DATA_STATUS_OK; |
| 793 | else |
| 794 | dso_data->status = DSO_DATA_STATUS_ERROR; |
| 795 | } |
| 796 | |
| 797 | /** |
| 798 | * dso__data_get_fd - Get dso's data file descriptor |
| 799 | * @dso: dso object |
| 800 | * @machine: machine object |
| 801 | * @fd: output for the file descriptor |
| 802 | * |
| 803 | * External interface to find the dso's file, open it and store its file |
| 804 | * descriptor in @fd. Returns true on success; pair each successful call with dso__data_put_fd(). |
| 805 | */ |
| 806 | bool dso__data_get_fd(struct dso *dso, struct machine *machine, int *fd) |
| 807 | { |
| 808 | *fd = -1; |
| 809 | if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) |
| 810 | return false; |
| 811 | |
| 812 | mutex_lock(dso__data_open_lock()); |
| 813 | |
| 814 | try_to_open_dso(dso, machine); |
| 815 | |
| 816 | *fd = dso__data(dso)->fd; |
| 817 | if (*fd >= 0) |
| 818 | return true; |
| 819 | |
| 820 | mutex_unlock(dso__data_open_lock()); |
| 821 | return false; |
| 822 | } |
| 823 | |
| 824 | void dso__data_put_fd(struct dso *dso __maybe_unused) |
| 825 | { |
| 826 | mutex_unlock(dso__data_open_lock()); |
| 827 | } |
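|  | /* |
|  |  * Sketch of how the two calls above pair up (caller-side error handling |
|  |  * elided): |
|  |  * |
|  |  *   int fd; |
|  |  * |
|  |  *   if (dso__data_get_fd(dso, machine, &fd)) { |
|  |  *           ... use fd while the data open lock is held ... |
|  |  *           dso__data_put_fd(dso); |
|  |  *   } |
|  |  * |
|  |  * dso__data_get_fd() returns true only with the lock still held, so every |
|  |  * successful call must be balanced by dso__data_put_fd(). |
|  |  */ |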
| 828 | |
| 829 | bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) |
| 830 | { |
| 831 | u32 flag = 1 << by; |
| 832 | |
| 833 | if (dso__data(dso)->status_seen & flag) |
| 834 | return true; |
| 835 | |
| 836 | dso__data(dso)->status_seen |= flag; |
| 837 | |
| 838 | return false; |
| 839 | } |
| 840 | |
| 841 | #ifdef HAVE_LIBBPF_SUPPORT |
| 842 | static ssize_t bpf_read(struct dso *dso, u64 offset, char *data) |
| 843 | { |
| 844 | struct bpf_prog_info_node *node; |
| 845 | ssize_t size = DSO__DATA_CACHE_SIZE; |
| 846 | struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso); |
| 847 | u64 len; |
| 848 | u8 *buf; |
| 849 | |
| 850 | node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id); |
| 851 | if (!node || !node->info_linear) { |
| 852 | dso__data(dso)->status = DSO_DATA_STATUS_ERROR; |
| 853 | return -1; |
| 854 | } |
| 855 | |
| 856 | len = node->info_linear->info.jited_prog_len; |
| 857 | buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns; |
| 858 | |
| 859 | if (offset >= len) |
| 860 | return -1; |
| 861 | |
| 862 | size = (ssize_t)min(len - offset, (u64)size); |
| 863 | memcpy(data, buf + offset, size); |
| 864 | return size; |
| 865 | } |
| 866 | |
| 867 | static int bpf_size(struct dso *dso) |
| 868 | { |
| 869 | struct bpf_prog_info_node *node; |
| 870 | struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso); |
| 871 | |
| 872 | node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id); |
| 873 | if (!node || !node->info_linear) { |
| 874 | dso__data(dso)->status = DSO_DATA_STATUS_ERROR; |
| 875 | return -1; |
| 876 | } |
| 877 | |
| 878 | dso__data(dso)->file_size = node->info_linear->info.jited_prog_len; |
| 879 | return 0; |
| 880 | } |
| 881 | #endif // HAVE_LIBBPF_SUPPORT |
| 882 | |
| 883 | static void |
| 884 | dso_cache__free(struct dso *dso) |
| 885 | { |
| 886 | struct rb_root *root = &dso__data(dso)->cache; |
| 887 | struct rb_node *next = rb_first(root); |
| 888 | |
| 889 | mutex_lock(dso__lock(dso)); |
| 890 | while (next) { |
| 891 | struct dso_cache *cache; |
| 892 | |
| 893 | cache = rb_entry(next, struct dso_cache, rb_node); |
| 894 | next = rb_next(&cache->rb_node); |
| 895 | rb_erase(&cache->rb_node, root); |
| 896 | free(cache); |
| 897 | } |
| 898 | mutex_unlock(dso__lock(dso)); |
| 899 | } |
| 900 | |
| 901 | static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset) |
| 902 | { |
| 903 | const struct rb_root *root = &dso__data(dso)->cache; |
| 904 | struct rb_node * const *p = &root->rb_node; |
| 905 | const struct rb_node *parent = NULL; |
| 906 | struct dso_cache *cache; |
| 907 | |
| 908 | while (*p != NULL) { |
| 909 | u64 end; |
| 910 | |
| 911 | parent = *p; |
| 912 | cache = rb_entry(parent, struct dso_cache, rb_node); |
| 913 | end = cache->offset + DSO__DATA_CACHE_SIZE; |
| 914 | |
| 915 | if (offset < cache->offset) |
| 916 | p = &(*p)->rb_left; |
| 917 | else if (offset >= end) |
| 918 | p = &(*p)->rb_right; |
| 919 | else |
| 920 | return cache; |
| 921 | } |
| 922 | |
| 923 | return NULL; |
| 924 | } |
| 925 | |
| 926 | static struct dso_cache * |
| 927 | dso_cache__insert(struct dso *dso, struct dso_cache *new) |
| 928 | { |
| 929 | struct rb_root *root = &dso__data(dso)->cache; |
| 930 | struct rb_node **p = &root->rb_node; |
| 931 | struct rb_node *parent = NULL; |
| 932 | struct dso_cache *cache; |
| 933 | u64 offset = new->offset; |
| 934 | |
| 935 | mutex_lock(dso__lock(dso)); |
| 936 | while (*p != NULL) { |
| 937 | u64 end; |
| 938 | |
| 939 | parent = *p; |
| 940 | cache = rb_entry(parent, struct dso_cache, rb_node); |
| 941 | end = cache->offset + DSO__DATA_CACHE_SIZE; |
| 942 | |
| 943 | if (offset < cache->offset) |
| 944 | p = &(*p)->rb_left; |
| 945 | else if (offset >= end) |
| 946 | p = &(*p)->rb_right; |
| 947 | else |
| 948 | goto out; |
| 949 | } |
| 950 | |
| 951 | rb_link_node(&new->rb_node, parent, p); |
| 952 | rb_insert_color(&new->rb_node, root); |
| 953 | |
| 954 | cache = NULL; |
| 955 | out: |
| 956 | mutex_unlock(dso__lock(dso)); |
| 957 | return cache; |
| 958 | } |
| 959 | |
| 960 | static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data, |
| 961 | u64 size, bool out) |
| 962 | { |
| 963 | u64 cache_offset = offset - cache->offset; |
| 964 | u64 cache_size = min(cache->size - cache_offset, size); |
| 965 | |
| 966 | if (out) |
| 967 | memcpy(data, cache->data + cache_offset, cache_size); |
| 968 | else |
| 969 | memcpy(cache->data + cache_offset, data, cache_size); |
| 970 | return cache_size; |
| 971 | } |
| 972 | |
| 973 | static ssize_t file_read(struct dso *dso, struct machine *machine, |
| 974 | u64 offset, char *data) |
| 975 | { |
| 976 | ssize_t ret; |
| 977 | |
| 978 | mutex_lock(dso__data_open_lock()); |
| 979 | |
| 980 | /* |
| 981 | * dso__data(dso)->fd might be closed if another thread opened another |
| 982 | * file (dso) due to the open file limit (RLIMIT_NOFILE). |
| 983 | */ |
| 984 | try_to_open_dso(dso, machine); |
| 985 | |
| 986 | if (dso__data(dso)->fd < 0) { |
| 987 | dso__data(dso)->status = DSO_DATA_STATUS_ERROR; |
| 988 | ret = -errno; |
| 989 | goto out; |
| 990 | } |
| 991 | |
| 992 | ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset); |
| 993 | out: |
| 994 | mutex_unlock(dso__data_open_lock()); |
| 995 | return ret; |
| 996 | } |
| 997 | |
| 998 | static struct dso_cache *dso_cache__populate(struct dso *dso, |
| 999 | struct machine *machine, |
| 1000 | u64 offset, ssize_t *ret) |
| 1001 | { |
| 1002 | u64 cache_offset = offset & DSO__DATA_CACHE_MASK; |
| 1003 | struct dso_cache *cache; |
| 1004 | struct dso_cache *old; |
| 1005 | |
| 1006 | cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); |
| 1007 | if (!cache) { |
| 1008 | *ret = -ENOMEM; |
| 1009 | return NULL; |
| 1010 | } |
| 1011 | #ifdef HAVE_LIBBPF_SUPPORT |
| 1012 | if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) |
| 1013 | *ret = bpf_read(dso, cache_offset, cache->data); |
| 1014 | else |
| 1015 | #endif |
| 1016 | if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL) |
| 1017 | *ret = DSO__DATA_CACHE_SIZE; |
| 1018 | else |
| 1019 | *ret = file_read(dso, machine, cache_offset, cache->data); |
| 1020 | |
| 1021 | if (*ret <= 0) { |
| 1022 | free(cache); |
| 1023 | return NULL; |
| 1024 | } |
| 1025 | |
| 1026 | cache->offset = cache_offset; |
| 1027 | cache->size = *ret; |
| 1028 | |
| 1029 | old = dso_cache__insert(dso, cache); |
| 1030 | if (old) { |
| 1031 | /* we lost the race, use the existing entry */ |
| 1032 | free(cache); |
| 1033 | cache = old; |
| 1034 | } |
| 1035 | |
| 1036 | return cache; |
| 1037 | } |
| 1038 | |
| 1039 | static struct dso_cache *dso_cache__find(struct dso *dso, |
| 1040 | struct machine *machine, |
| 1041 | u64 offset, |
| 1042 | ssize_t *ret) |
| 1043 | { |
| 1044 | struct dso_cache *cache = __dso_cache__find(dso, offset); |
| 1045 | |
| 1046 | return cache ? cache : dso_cache__populate(dso, machine, offset, ret); |
| 1047 | } |
| 1048 | |
| 1049 | static ssize_t dso_cache_io(struct dso *dso, struct machine *machine, |
| 1050 | u64 offset, u8 *data, ssize_t size, bool out) |
| 1051 | { |
| 1052 | struct dso_cache *cache; |
| 1053 | ssize_t ret = 0; |
| 1054 | |
| 1055 | cache = dso_cache__find(dso, machine, offset, &ret); |
| 1056 | if (!cache) |
| 1057 | return ret; |
| 1058 | |
| 1059 | return dso_cache__memcpy(cache, offset, data, size, out); |
| 1060 | } |
| 1061 | |
| 1062 | /* |
| 1063 | * Reads and caches dso data in DSO__DATA_CACHE_SIZE sized chunks |
| 1064 | * kept in the rb_tree. Any read of already cached data is served |
| 1065 | * from the cache. Writes update the cache only, not the backing file. |
| 1066 | */ |
| 1067 | static ssize_t cached_io(struct dso *dso, struct machine *machine, |
| 1068 | u64 offset, u8 *data, ssize_t size, bool out) |
| 1069 | { |
| 1070 | ssize_t r = 0; |
| 1071 | u8 *p = data; |
| 1072 | |
| 1073 | do { |
| 1074 | ssize_t ret; |
| 1075 | |
| 1076 | ret = dso_cache_io(dso, machine, offset, p, size, out); |
| 1077 | if (ret < 0) |
| 1078 | return ret; |
| 1079 | |
| 1080 | /* Reached EOF, return what we have. */ |
| 1081 | if (!ret) |
| 1082 | break; |
| 1083 | |
| 1084 | BUG_ON(ret > size); |
| 1085 | |
| 1086 | r += ret; |
| 1087 | p += ret; |
| 1088 | offset += ret; |
| 1089 | size -= ret; |
| 1090 | |
| 1091 | } while (size); |
| 1092 | |
| 1093 | return r; |
| 1094 | } |
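|  | /* |
|  |  * Sketch of how a read is split across chunks, assuming the usual 4096-byte |
|  |  * DSO__DATA_CACHE_SIZE: a 64-byte read at offset 4064 is served as 32 bytes |
|  |  * from the chunk at offset 0 followed by 32 bytes from the chunk at offset |
|  |  * 4096, each chunk being populated on first use via file_read() (or |
|  |  * bpf_read() for BPF program DSOs). |
|  |  */ |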
| 1095 | |
| 1096 | static int file_size(struct dso *dso, struct machine *machine) |
| 1097 | { |
| 1098 | int ret = 0; |
| 1099 | struct stat st; |
| 1100 | char sbuf[STRERR_BUFSIZE]; |
| 1101 | |
| 1102 | mutex_lock(dso__data_open_lock()); |
| 1103 | |
| 1104 | /* |
| 1105 | * dso__data(dso)->fd might be closed if another thread opened another |
| 1106 | * file (dso) due to the open file limit (RLIMIT_NOFILE). |
| 1107 | */ |
| 1108 | try_to_open_dso(dso, machine); |
| 1109 | |
| 1110 | if (dso__data(dso)->fd < 0) { |
| 1111 | ret = -errno; |
| 1112 | dso__data(dso)->status = DSO_DATA_STATUS_ERROR; |
| 1113 | goto out; |
| 1114 | } |
| 1115 | |
| 1116 | if (fstat(dso__data(dso)->fd, &st) < 0) { |
| 1117 | ret = -errno; |
| 1118 | pr_err("dso cache fstat failed: %s\n", |
| 1119 | str_error_r(errno, sbuf, sizeof(sbuf))); |
| 1120 | dso__data(dso)->status = DSO_DATA_STATUS_ERROR; |
| 1121 | goto out; |
| 1122 | } |
| 1123 | dso__data(dso)->file_size = st.st_size; |
| 1124 | |
| 1125 | out: |
| 1126 | mutex_unlock(dso__data_open_lock()); |
| 1127 | return ret; |
| 1128 | } |
| 1129 | |
| 1130 | int dso__data_file_size(struct dso *dso, struct machine *machine) |
| 1131 | { |
| 1132 | if (dso__data(dso)->file_size) |
| 1133 | return 0; |
| 1134 | |
| 1135 | if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) |
| 1136 | return -1; |
| 1137 | #ifdef HAVE_LIBBPF_SUPPORT |
| 1138 | if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) |
| 1139 | return bpf_size(dso); |
| 1140 | #endif |
| 1141 | return file_size(dso, machine); |
| 1142 | } |
| 1143 | |
| 1144 | /** |
| 1145 | * dso__data_size - Return dso data size |
| 1146 | * @dso: dso object |
| 1147 | * @machine: machine object |
| 1148 | * |
| 1149 | * Return: dso data size |
| 1150 | */ |
| 1151 | off_t dso__data_size(struct dso *dso, struct machine *machine) |
| 1152 | { |
| 1153 | if (dso__data_file_size(dso, machine)) |
| 1154 | return -1; |
| 1155 | |
| 1156 | /* For now just assume the dso data size is close to the file size */ |
| 1157 | return dso__data(dso)->file_size; |
| 1158 | } |
| 1159 | |
| 1160 | static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, |
| 1161 | u64 offset, u8 *data, ssize_t size, |
| 1162 | bool out) |
| 1163 | { |
| 1164 | if (dso__data_file_size(dso, machine)) |
| 1165 | return -1; |
| 1166 | |
| 1167 | /* Check the offset sanity. */ |
| 1168 | if (offset > dso__data(dso)->file_size) |
| 1169 | return -1; |
| 1170 | |
| 1171 | if (offset + size < offset) |
| 1172 | return -1; |
| 1173 | |
| 1174 | return cached_io(dso, machine, offset, data, size, out); |
| 1175 | } |
| 1176 | |
| 1177 | /** |
| 1178 | * dso__data_read_offset - Read data from dso file offset |
| 1179 | * @dso: dso object |
| 1180 | * @machine: machine object |
| 1181 | * @offset: file offset |
| 1182 | * @data: buffer to store data |
| 1183 | * @size: size of the @data buffer |
| 1184 | * |
| 1185 | * External interface to read data from a dso file offset. Opens the |
| 1186 | * dso data file and uses cached_io to get the data. |
| 1187 | */ |
| 1188 | ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, |
| 1189 | u64 offset, u8 *data, ssize_t size) |
| 1190 | { |
| 1191 | if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) |
| 1192 | return -1; |
| 1193 | |
| 1194 | return data_read_write_offset(dso, machine, offset, data, size, true); |
| 1195 | } |
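|  | /* |
|  |  * Caller-side sketch (offset and buffer size are arbitrary example values): |
|  |  * |
|  |  *   u8 buf[64]; |
|  |  *   ssize_t n = dso__data_read_offset(dso, machine, 0x400, buf, sizeof(buf)); |
|  |  * |
|  |  * n is the number of bytes copied (possibly short near EOF) or negative on |
|  |  * error; opening and caching of the underlying file is handled internally. |
|  |  */ |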
| 1196 | |
| 1197 | uint16_t dso__e_machine(struct dso *dso, struct machine *machine) |
| 1198 | { |
| 1199 | uint16_t e_machine = EM_NONE; |
| 1200 | int fd; |
| 1201 | |
| 1202 | switch (dso__binary_type(dso)) { |
| 1203 | case DSO_BINARY_TYPE__KALLSYMS: |
| 1204 | case DSO_BINARY_TYPE__GUEST_KALLSYMS: |
| 1205 | case DSO_BINARY_TYPE__VMLINUX: |
| 1206 | case DSO_BINARY_TYPE__GUEST_VMLINUX: |
| 1207 | case DSO_BINARY_TYPE__GUEST_KMODULE: |
| 1208 | case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: |
| 1209 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: |
| 1210 | case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: |
| 1211 | case DSO_BINARY_TYPE__KCORE: |
| 1212 | case DSO_BINARY_TYPE__GUEST_KCORE: |
| 1213 | case DSO_BINARY_TYPE__BPF_PROG_INFO: |
| 1214 | case DSO_BINARY_TYPE__BPF_IMAGE: |
| 1215 | case DSO_BINARY_TYPE__OOL: |
| 1216 | case DSO_BINARY_TYPE__JAVA_JIT: |
| 1217 | return EM_HOST; |
| 1218 | case DSO_BINARY_TYPE__DEBUGLINK: |
| 1219 | case DSO_BINARY_TYPE__BUILD_ID_CACHE: |
| 1220 | case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: |
| 1221 | case DSO_BINARY_TYPE__GNU_DEBUGDATA: |
| 1222 | case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: |
| 1223 | case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: |
| 1224 | case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: |
| 1225 | case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: |
| 1226 | case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: |
| 1227 | case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: |
| 1228 | break; |
| 1229 | case DSO_BINARY_TYPE__NOT_FOUND: |
| 1230 | default: |
| 1231 | return EM_NONE; |
| 1232 | } |
| 1233 | |
| 1234 | mutex_lock(dso__data_open_lock()); |
| 1235 | |
| 1236 | /* |
| 1237 | * dso__data(dso)->fd might be closed if another thread opened another |
| 1238 | * file (dso) due to the open file limit (RLIMIT_NOFILE). |
| 1239 | */ |
| 1240 | try_to_open_dso(dso, machine); |
| 1241 | fd = dso__data(dso)->fd; |
| 1242 | if (fd >= 0) { |
| 1243 | _Static_assert(offsetof(Elf32_Ehdr, e_machine) == 18, "Unexpected offset"); |
| 1244 | _Static_assert(offsetof(Elf64_Ehdr, e_machine) == 18, "Unexpected offset"); |
| 1245 | if (dso__needs_swap(dso) == DSO_SWAP__UNSET) { |
| 1246 | unsigned char eidata; |
| 1247 | |
| 1248 | if (pread(fd, &eidata, sizeof(eidata), EI_DATA) == sizeof(eidata)) |
| 1249 | dso__swap_init(dso, eidata); |
| 1250 | } |
| 1251 | if (dso__needs_swap(dso) != DSO_SWAP__UNSET && |
| 1252 | pread(fd, &e_machine, sizeof(e_machine), 18) == sizeof(e_machine)) |
| 1253 | e_machine = DSO__SWAP(dso, uint16_t, e_machine); |
| 1254 | } |
| 1255 | mutex_unlock(dso__data_open_lock()); |
| 1256 | return e_machine; |
| 1257 | } |
| 1258 | |
| 1259 | /** |
| 1260 | * dso__data_read_addr - Read data from dso address |
| 1261 | * @dso: dso object |
| 1262 | * @machine: machine object |
| 1263 | * @addr: virtual memory address |
| 1264 | * @data: buffer to store data |
| 1265 | * @size: size of the @data buffer |
| 1266 | * |
| 1267 | * External interface to read data from dso address. |
| 1268 | */ |
| 1269 | ssize_t dso__data_read_addr(struct dso *dso, struct map *map, |
| 1270 | struct machine *machine, u64 addr, |
| 1271 | u8 *data, ssize_t size) |
| 1272 | { |
| 1273 | u64 offset = map__map_ip(map, addr); |
| 1274 | |
| 1275 | return dso__data_read_offset(dso, machine, offset, data, size); |
| 1276 | } |
| 1277 | |
| 1278 | /** |
| 1279 | * dso__data_write_cache_offs - Write data to dso data cache at file offset |
| 1280 | * @dso: dso object |
| 1281 | * @machine: machine object |
| 1282 | * @offset: file offset |
| 1283 | * @data: buffer to write |
| 1284 | * @size: size of the @data buffer |
| 1285 | * |
| 1286 | * Write into the dso file data cache, but do not change the file itself. |
| 1287 | */ |
| 1288 | ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine, |
| 1289 | u64 offset, const u8 *data_in, ssize_t size) |
| 1290 | { |
| 1291 | u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */ |
| 1292 | |
| 1293 | if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) |
| 1294 | return -1; |
| 1295 | |
| 1296 | return data_read_write_offset(dso, machine, offset, data, size, false); |
| 1297 | } |
| 1298 | |
| 1299 | /** |
| 1300 | * dso__data_write_cache_addr - Write data to dso data cache at dso address |
| 1301 | * @dso: dso object |
| 1302 | * @machine: machine object |
| 1303 | * @addr: virtual memory address |
| 1304 | * @data: buffer to write |
| 1305 | * @size: size of the @data buffer |
| 1306 | * |
| 1307 | * External interface to write into the dso file data cache, but do not change |
| 1308 | * the file itself. |
| 1309 | */ |
| 1310 | ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map, |
| 1311 | struct machine *machine, u64 addr, |
| 1312 | const u8 *data, ssize_t size) |
| 1313 | { |
| 1314 | u64 offset = map__map_ip(map, addr); |
| 1315 | |
| 1316 | return dso__data_write_cache_offs(dso, machine, offset, data, size); |
| 1317 | } |
| 1318 | |
| 1319 | struct map *dso__new_map(const char *name) |
| 1320 | { |
| 1321 | struct map *map = NULL; |
| 1322 | struct dso *dso = dso__new(name); |
| 1323 | |
| 1324 | if (dso) { |
| 1325 | map = map__new2(0, dso); |
| 1326 | dso__put(dso); |
| 1327 | } |
| 1328 | |
| 1329 | return map; |
| 1330 | } |
| 1331 | |
| 1332 | struct dso *machine__findnew_kernel(struct machine *machine, const char *name, |
| 1333 | const char *short_name, int dso_type) |
| 1334 | { |
| 1335 | /* |
| 1336 | * The kernel dso could be created by build_id processing. |
| 1337 | */ |
| 1338 | struct dso *dso = machine__findnew_dso(machine, name); |
| 1339 | |
| 1340 | /* |
| 1341 | * We need to run this in all cases, since during the build_id |
| 1342 | * processing we had no idea this was the kernel dso. |
| 1343 | */ |
| 1344 | if (dso != NULL) { |
| 1345 | dso__set_short_name(dso, short_name, false); |
| 1346 | dso__set_kernel(dso, dso_type); |
| 1347 | } |
| 1348 | |
| 1349 | return dso; |
| 1350 | } |
| 1351 | |
| 1352 | static void __dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated) |
| 1353 | { |
| 1354 | if (dso__long_name_allocated(dso)) |
| 1355 | free((char *)dso__long_name(dso)); |
| 1356 | |
| 1357 | RC_CHK_ACCESS(dso)->long_name = name; |
| 1358 | RC_CHK_ACCESS(dso)->long_name_len = strlen(name); |
| 1359 | dso__set_long_name_allocated(dso, name_allocated); |
| 1360 | } |
| 1361 | |
| 1362 | static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated) |
| 1363 | { |
| 1364 | struct dsos *dsos = dso__dsos(dso); |
| 1365 | |
| 1366 | if (name == NULL) |
| 1367 | return; |
| 1368 | |
| 1369 | if (dsos) { |
| 1370 | /* |
| 1371 | * Renaming the dso non-atomically would break the sorted dsos collection, |
| 1372 | * so take the write lock and mark it as unsorted. |
| 1373 | */ |
| 1374 | down_write(&dsos->lock); |
| 1375 | __dso__set_long_name_id(dso, name, name_allocated); |
| 1376 | dsos->sorted = false; |
| 1377 | up_write(&dsos->lock); |
| 1378 | } else { |
| 1379 | __dso__set_long_name_id(dso, name, name_allocated); |
| 1380 | } |
| 1381 | } |
| 1382 | |
| 1383 | static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b) |
| 1384 | { |
| 1385 | if (a->mmap2_valid && b->mmap2_valid) { |
| 1386 | if (a->maj > b->maj) return -1; |
| 1387 | if (a->maj < b->maj) return 1; |
| 1388 | |
| 1389 | if (a->min > b->min) return -1; |
| 1390 | if (a->min < b->min) return 1; |
| 1391 | |
| 1392 | if (a->ino > b->ino) return -1; |
| 1393 | if (a->ino < b->ino) return 1; |
| 1394 | } |
| 1395 | if (a->mmap2_ino_generation_valid && b->mmap2_ino_generation_valid) { |
| 1396 | if (a->ino_generation > b->ino_generation) return -1; |
| 1397 | if (a->ino_generation < b->ino_generation) return 1; |
| 1398 | } |
| 1399 | if (build_id__is_defined(&a->build_id) && build_id__is_defined(&b->build_id)) { |
| 1400 | if (a->build_id.size != b->build_id.size) |
| 1401 | return a->build_id.size < b->build_id.size ? -1 : 1; |
| 1402 | return memcmp(a->build_id.data, b->build_id.data, a->build_id.size); |
| 1403 | } |
| 1404 | return 0; |
| 1405 | } |
| 1406 | |
| 1407 | const struct dso_id dso_id_empty = { |
| 1408 | { |
| 1409 | .maj = 0, |
| 1410 | .min = 0, |
| 1411 | .ino = 0, |
| 1412 | .ino_generation = 0, |
| 1413 | }, |
| 1414 | .mmap2_valid = false, |
| 1415 | .mmap2_ino_generation_valid = false, |
| 1416 | { |
| 1417 | .size = 0, |
| 1418 | } |
| 1419 | }; |
| 1420 | |
| 1421 | void __dso__improve_id(struct dso *dso, const struct dso_id *id) |
| 1422 | { |
| 1423 | struct dsos *dsos = dso__dsos(dso); |
| 1424 | struct dso_id *dso_id = dso__id(dso); |
| 1425 | bool changed = false; |
| 1426 | |
| 1427 | /* dsos write lock held by caller. */ |
| 1428 | |
| 1429 | if (id->mmap2_valid && !dso_id->mmap2_valid) { |
| 1430 | dso_id->maj = id->maj; |
| 1431 | dso_id->min = id->min; |
| 1432 | dso_id->ino = id->ino; |
| 1433 | dso_id->mmap2_valid = true; |
| 1434 | changed = true; |
| 1435 | } |
| 1436 | if (id->mmap2_ino_generation_valid && !dso_id->mmap2_ino_generation_valid) { |
| 1437 | dso_id->ino_generation = id->ino_generation; |
| 1438 | dso_id->mmap2_ino_generation_valid = true; |
| 1439 | changed = true; |
| 1440 | } |
| 1441 | if (build_id__is_defined(&id->build_id) && !build_id__is_defined(&dso_id->build_id)) { |
| 1442 | dso_id->build_id = id->build_id; |
| 1443 | changed = true; |
| 1444 | } |
| 1445 | if (changed && dsos) |
| 1446 | dsos->sorted = false; |
| 1447 | } |
| 1448 | |
| 1449 | int dso_id__cmp(const struct dso_id *a, const struct dso_id *b) |
| 1450 | { |
| 1451 | if (a == &dso_id_empty || b == &dso_id_empty) { |
| 1452 | /* There is no valid data to compare, so treat the dsos as identical. */ |
| 1453 | return 0; |
| 1454 | } |
| 1455 | |
| 1456 | return __dso_id__cmp(a, b); |
| 1457 | } |
| 1458 | |
| 1459 | int dso__cmp_id(struct dso *a, struct dso *b) |
| 1460 | { |
| 1461 | return __dso_id__cmp(dso__id(a), dso__id(b)); |
| 1462 | } |
| 1463 | |
| 1464 | void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) |
| 1465 | { |
| 1466 | dso__set_long_name_id(dso, name, name_allocated); |
| 1467 | } |
| 1468 | |
| 1469 | static void __dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) |
| 1470 | { |
| 1471 | if (dso__short_name_allocated(dso)) |
| 1472 | free((char *)dso__short_name(dso)); |
| 1473 | |
| 1474 | RC_CHK_ACCESS(dso)->short_name = name; |
| 1475 | RC_CHK_ACCESS(dso)->short_name_len = strlen(name); |
| 1476 | dso__set_short_name_allocated(dso, name_allocated); |
| 1477 | } |
| 1478 | |
| 1479 | void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) |
| 1480 | { |
| 1481 | struct dsos *dsos = dso__dsos(dso); |
| 1482 | |
| 1483 | if (name == NULL) |
| 1484 | return; |
| 1485 | |
| 1486 | if (dsos) { |
| 1487 | /* |
| 1488 | * Renaming the dso non-atomically would break the sorted dsos collection, |
| 1489 | * so take the write lock and mark it as unsorted. |
| 1490 | */ |
| 1491 | down_write(&dsos->lock); |
| 1492 | __dso__set_short_name(dso, name, name_allocated); |
| 1493 | dsos->sorted = false; |
| 1494 | up_write(&dsos->lock); |
| 1495 | } else { |
| 1496 | __dso__set_short_name(dso, name, name_allocated); |
| 1497 | } |
| 1498 | } |
| 1499 | |
| 1500 | int dso__name_len(const struct dso *dso) |
| 1501 | { |
| 1502 | if (!dso) |
| 1503 | return strlen("[unknown]"); |
| 1504 | if (verbose > 0) |
| 1505 | return dso__long_name_len(dso); |
| 1506 | |
| 1507 | return dso__short_name_len(dso); |
| 1508 | } |
| 1509 | |
| 1510 | bool dso__loaded(const struct dso *dso) |
| 1511 | { |
| 1512 | return RC_CHK_ACCESS(dso)->loaded; |
| 1513 | } |
| 1514 | |
| 1515 | bool dso__sorted_by_name(const struct dso *dso) |
| 1516 | { |
| 1517 | return RC_CHK_ACCESS(dso)->sorted_by_name; |
| 1518 | } |
| 1519 | |
| 1520 | void dso__set_sorted_by_name(struct dso *dso) |
| 1521 | { |
| 1522 | RC_CHK_ACCESS(dso)->sorted_by_name = true; |
| 1523 | } |
| 1524 | |
| 1525 | struct dso *dso__new_id(const char *name, const struct dso_id *id) |
| 1526 | { |
| 1527 | RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1); |
| 1528 | struct dso *res; |
| 1529 | struct dso_data *data; |
| 1530 | |
| 1531 | if (!dso) |
| 1532 | return NULL; |
| 1533 | |
| 1534 | if (ADD_RC_CHK(res, dso)) { |
| 1535 | strcpy(dso->name, name); |
| 1536 | if (id) |
| 1537 | dso->id = *id; |
| 1538 | dso__set_long_name_id(res, dso->name, false); |
| 1539 | dso__set_short_name(res, dso->name, false); |
| 1540 | dso->symbols = RB_ROOT_CACHED; |
| 1541 | dso->symbol_names = NULL; |
| 1542 | dso->symbol_names_len = 0; |
| 1543 | dso->inlined_nodes = RB_ROOT_CACHED; |
| 1544 | dso->srclines = RB_ROOT_CACHED; |
| 1545 | dso->data_types = RB_ROOT; |
| 1546 | dso->global_vars = RB_ROOT; |
| 1547 | dso->data.fd = -1; |
| 1548 | dso->data.status = DSO_DATA_STATUS_UNKNOWN; |
| 1549 | dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; |
| 1550 | dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; |
| 1551 | dso->is_64_bit = (sizeof(void *) == 8); |
| 1552 | dso->loaded = 0; |
| 1553 | dso->rel = 0; |
| 1554 | dso->sorted_by_name = 0; |
| 1555 | dso->has_srcline = 1; |
| 1556 | dso->a2l_fails = 1; |
| 1557 | dso->kernel = DSO_SPACE__USER; |
| 1558 | dso->is_kmod = 0; |
| 1559 | dso->needs_swap = DSO_SWAP__UNSET; |
| 1560 | dso->comp = COMP_ID__NONE; |
| 1561 | mutex_init(&dso->lock); |
| 1562 | refcount_set(&dso->refcnt, 1); |
| 1563 | data = &dso->data; |
| 1564 | data->cache = RB_ROOT; |
| 1565 | data->fd = -1; |
| 1566 | data->status = DSO_DATA_STATUS_UNKNOWN; |
| 1567 | INIT_LIST_HEAD(&data->open_entry); |
| 1568 | #ifdef REFCNT_CHECKING |
| 1569 | data->dso = NULL; /* Set when on the open_entry list. */ |
| 1570 | #endif |
| 1571 | } |
| 1572 | return res; |
| 1573 | } |
| 1574 | |
| 1575 | struct dso *dso__new(const char *name) |
| 1576 | { |
| 1577 | return dso__new_id(name, NULL); |
| 1578 | } |
| 1579 | |
| 1580 | void dso__delete(struct dso *dso) |
| 1581 | { |
| 1582 | if (dso__dsos(dso)) |
| 1583 | pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso)); |
| 1584 | |
| 1585 | /* free inlines first, as they reference symbols */ |
| 1586 | inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes); |
| 1587 | srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines); |
| 1588 | symbols__delete(&RC_CHK_ACCESS(dso)->symbols); |
| 1589 | RC_CHK_ACCESS(dso)->symbol_names_len = 0; |
| 1590 | zfree(&RC_CHK_ACCESS(dso)->symbol_names); |
| 1591 | annotated_data_type__tree_delete(dso__data_types(dso)); |
| 1592 | global_var_type__tree_delete(dso__global_vars(dso)); |
| 1593 | |
| 1594 | if (RC_CHK_ACCESS(dso)->short_name_allocated) { |
| 1595 | zfree((char **)&RC_CHK_ACCESS(dso)->short_name); |
| 1596 | RC_CHK_ACCESS(dso)->short_name_allocated = false; |
| 1597 | } |
| 1598 | |
| 1599 | if (RC_CHK_ACCESS(dso)->long_name_allocated) { |
| 1600 | zfree((char **)&RC_CHK_ACCESS(dso)->long_name); |
| 1601 | RC_CHK_ACCESS(dso)->long_name_allocated = false; |
| 1602 | } |
| 1603 | |
| 1604 | dso__data_close(dso); |
| 1605 | auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache); |
| 1606 | dso_cache__free(dso); |
| 1607 | dso__free_a2l(dso); |
| 1608 | dso__free_symsrc_filename(dso); |
| 1609 | nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo); |
| 1610 | mutex_destroy(dso__lock(dso)); |
| 1611 | RC_CHK_FREE(dso); |
| 1612 | } |
| 1613 | |
| 1614 | struct dso *dso__get(struct dso *dso) |
| 1615 | { |
| 1616 | struct dso *result; |
| 1617 | |
| 1618 | if (RC_CHK_GET(result, dso)) |
| 1619 | refcount_inc(&RC_CHK_ACCESS(dso)->refcnt); |
| 1620 | |
| 1621 | return result; |
| 1622 | } |
| 1623 | |
| 1624 | void dso__put(struct dso *dso) |
| 1625 | { |
| 1626 | #ifdef REFCNT_CHECKING |
| 1627 | if (dso && dso__data(dso) && refcount_read(&RC_CHK_ACCESS(dso)->refcnt) == 2) |
| 1628 | dso__data_close(dso); |
| 1629 | #endif |
| 1630 | if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt)) |
| 1631 | dso__delete(dso); |
| 1632 | else |
| 1633 | RC_CHK_PUT(dso); |
| 1634 | } |
| 1635 | |
| 1636 | int dso__swap_init(struct dso *dso, unsigned char eidata) |
| 1637 | { |
| 1638 | static unsigned int const endian = 1; |
| 1639 | |
| 1640 | dso__set_needs_swap(dso, DSO_SWAP__NO); |
| 1641 | |
| 1642 | switch (eidata) { |
| 1643 | case ELFDATA2LSB: |
| 1644 | /* We are big endian, DSO is little endian. */ |
| 1645 | if (*(unsigned char const *)&endian != 1) |
| 1646 | dso__set_needs_swap(dso, DSO_SWAP__YES); |
| 1647 | break; |
| 1648 | |
| 1649 | case ELFDATA2MSB: |
| 1650 | /* We are little endian, DSO is big endian. */ |
| 1651 | if (*(unsigned char const *)&endian != 0) |
| 1652 | dso__set_needs_swap(dso, DSO_SWAP__YES); |
| 1653 | break; |
| 1654 | |
| 1655 | default: |
| 1656 | pr_err("unrecognized DSO data encoding %d\n", eidata); |
| 1657 | return -EINVAL; |
| 1658 | } |
| 1659 | |
| 1660 | return 0; |
| 1661 | } |
| 1662 | |
| 1663 | void dso__set_build_id(struct dso *dso, const struct build_id *bid) |
| 1664 | { |
| 1665 | dso__id(dso)->build_id = *bid; |
| 1666 | } |
| 1667 | |
| 1668 | bool dso__build_id_equal(const struct dso *dso, const struct build_id *bid) |
| 1669 | { |
| 1670 | const struct build_id *dso_bid = dso__bid(dso); |
| 1671 | |
| 1672 | if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) { |
| 1673 | /* |
| 1674 | * For backward compatibility, allow a build-id to have |
| 1675 | * trailing zeros. |
| 1676 | */ |
| 1677 | return !memcmp(dso_bid->data, bid->data, bid->size) && |
| 1678 | !memchr_inv(&dso_bid->data[bid->size], 0, |
| 1679 | dso_bid->size - bid->size); |
| 1680 | } |
| 1681 | |
| 1682 | return dso_bid->size == bid->size && |
| 1683 | memcmp(dso_bid->data, bid->data, dso_bid->size) == 0; |
| 1684 | } |
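|  | /* |
|  |  * For example, a 16-byte MD5 build-id that was stored zero-padded to the |
|  |  * 20-byte BUILD_ID_SIZE still compares equal to the unpadded 16-byte id |
|  |  * read from the file, since only the trailing padding differs. |
|  |  */ |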
| 1685 | |
| 1686 | void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) |
| 1687 | { |
| 1688 | char path[PATH_MAX]; |
| 1689 | struct build_id bid = { .size = 0, }; |
| 1690 | |
| 1691 | if (machine__is_default_guest(machine)) |
| 1692 | return; |
| 1693 | sprintf(path, "%s/sys/kernel/notes", machine->root_dir); |
| 1694 | sysfs__read_build_id(path, &bid); |
| 1695 | dso__set_build_id(dso, &bid); |
| 1696 | } |
| 1697 | |
| 1698 | int dso__kernel_module_get_build_id(struct dso *dso, |
| 1699 | const char *root_dir) |
| 1700 | { |
| 1701 | char filename[PATH_MAX]; |
| 1702 | struct build_id bid = { .size = 0, }; |
| 1703 | /* |
| 1704 | * kernel module short names are of the form "[module]" and |
| 1705 | * we need just "module" here. |
| 1706 | */ |
| 1707 | const char *name = dso__short_name(dso) + 1; |
| 1708 | |
| 1709 | snprintf(filename, sizeof(filename), |
| 1710 | "%s/sys/module/%.*s/notes/.note.gnu.build-id", |
| 1711 | root_dir, (int)strlen(name) - 1, name); |
| 1712 | |
| 1713 | sysfs__read_build_id(filename, &bid); |
| 1714 | dso__set_build_id(dso, &bid); |
| 1715 | return 0; |
| 1716 | } |
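|  | /* |
|  |  * For example (hypothetical module), a dso with the short name "[btrfs]" |
|  |  * on the host (empty @root_dir) gets its build-id from |
|  |  * "/sys/module/btrfs/notes/.note.gnu.build-id". |
|  |  */ |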
| 1717 | |
| 1718 | static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) |
| 1719 | { |
| 1720 | char sbuild_id[SBUILD_ID_SIZE]; |
| 1721 | |
| 1722 | build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id)); |
| 1723 | return fprintf(fp, "%s", sbuild_id); |
| 1724 | } |
| 1725 | |
| 1726 | size_t dso__fprintf(struct dso *dso, FILE *fp) |
| 1727 | { |
| 1728 | struct rb_node *nd; |
| 1729 | size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso)); |
| 1730 | |
| 1731 | if (dso__short_name(dso) != dso__long_name(dso)) |
| 1732 | ret += fprintf(fp, "%s, ", dso__long_name(dso)); |
| 1733 | ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); |
| 1734 | ret += dso__fprintf_buildid(dso, fp); |
| 1735 | ret += fprintf(fp, ")\n"); |
| 1736 | for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) { |
| 1737 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
| 1738 | ret += symbol__fprintf(pos, fp); |
| 1739 | } |
| 1740 | |
| 1741 | return ret; |
| 1742 | } |
| 1743 | |
| 1744 | enum dso_type dso__type(struct dso *dso, struct machine *machine) |
| 1745 | { |
| 1746 | int fd = -1; |
| 1747 | enum dso_type type = DSO__TYPE_UNKNOWN; |
| 1748 | |
| 1749 | if (dso__data_get_fd(dso, machine, &fd)) { |
| 1750 | type = dso__type_fd(fd); |
| 1751 | dso__data_put_fd(dso); |
| 1752 | } |
| 1753 | |
| 1754 | return type; |
| 1755 | } |
| 1756 | |
| 1757 | int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) |
| 1758 | { |
| 1759 | int idx, errnum = *dso__load_errno(dso); |
| 1760 | /* |
| 1761 | * This must have the same ordering as the enum dso_load_errno. |
| 1762 | */ |
| 1763 | static const char *dso_load__error_str[] = { |
| 1764 | "Internal tools/perf/ library error", |
| 1765 | "Invalid ELF file", |
| 1766 | "Can not read build id", |
| 1767 | "Mismatching build id", |
| 1768 | "Decompression failure", |
| 1769 | }; |
| 1770 | |
| 1771 | BUG_ON(buflen == 0); |
| 1772 | |
| 1773 | if (errnum >= 0) { |
| 1774 | const char *err = str_error_r(errnum, buf, buflen); |
| 1775 | |
| 1776 | if (err != buf) |
| 1777 | scnprintf(buf, buflen, "%s", err); |
| 1778 | |
| 1779 | return 0; |
| 1780 | } |
| 1781 | |
| 1782 | if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) |
| 1783 | return -1; |
| 1784 | |
| 1785 | idx = errnum - __DSO_LOAD_ERRNO__START; |
| 1786 | scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); |
| 1787 | return 0; |
| 1788 | } |
| 1789 | |
| 1790 | bool perf_pid_map_tid(const char *dso_name, int *tid) |
| 1791 | { |
| 1792 | return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1; |
| 1793 | } |
| 1794 | |
| 1795 | bool is_perf_pid_map_name(const char *dso_name) |
| 1796 | { |
| 1797 | int tid; |
| 1798 | |
| 1799 | return perf_pid_map_tid(dso_name, &tid); |
| 1800 | } |