1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/compiler.h>
12 #include <linux/list.h>
13 #include <linux/kernel.h>
14 #include <linux/bitops.h>
15 #include <linux/string.h>
16 #include <linux/stringify.h>
17 #include <linux/zalloc.h>
19 #include <sys/utsname.h>
20 #include <linux/time64.h>
22 #include <bpf/libbpf.h>
29 #include "trace-event.h"
39 #include <api/fs/fs.h>
42 #include "time-utils.h"
45 #include "bpf-event.h"
47 #include <linux/ctype.h>
51 * must be a numerical value to let the endianness
52 * determine the memory layout. That way we are able
53 * to detect endianness when reading the perf.data file back.
56 * We also check for the legacy (PERFFILE) format.
58 static const char *__perf_magic1 = "PERFFILE";
59 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
60 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
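/*
 * Stored little-endian, __perf_magic2's bytes (0x50 0x45 0x52 0x46
 * 0x49 0x4c 0x45 0x32) spell "PERFILE2"; __perf_magic2_sw is the same
 * value byte-swapped ("2ELIFREP"). A reader that finds the swapped
 * constant knows the file was written on a host of the opposite
 * endianness.
 */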
62 #define PERF_MAGIC __perf_magic2
64 const char perf_version_string[] = PERF_VERSION;
66 struct perf_file_attr {
67 struct perf_event_attr attr;
68 struct perf_file_section ids;
72 struct perf_header *ph;
74 void *buf; /* Either buf != NULL or fd >= 0 */
77 struct perf_evsel *events;
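/*
 * These fields belong to struct feat_fd, which abstracts feature I/O:
 * it targets either a file descriptor or, in pipe mode, an in-memory
 * buffer that __do_write_buf() grows on demand.
 */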
80 void perf_header__set_feat(struct perf_header *header, int feat)
82 set_bit(feat, header->adds_features);
85 void perf_header__clear_feat(struct perf_header *header, int feat)
87 clear_bit(feat, header->adds_features);
90 bool perf_header__has_feat(const struct perf_header *header, int feat)
92 return test_bit(feat, header->adds_features);
95 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
97 ssize_t ret = writen(ff->fd, buf, size);
99 if (ret != (ssize_t)size)
100 return ret < 0 ? (int)ret : -1;
104 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
106 /* struct perf_event_header::size is u16 */
107 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
108 size_t new_size = ff->size;
111 if (size + ff->offset > max_size)
114 while (size > (new_size - ff->offset))
115 new_size <<= 1;
116 new_size = min(max_size, new_size);
118 if (ff->size < new_size) {
119 addr = realloc(ff->buf, new_size);
126 memcpy(ff->buf + ff->offset, buf, size);
132 /* Return: 0 if succeeded, -ERR if failed. */
133 int do_write(struct feat_fd *ff, const void *buf, size_t size)
136 return __do_write_fd(ff, buf, size);
137 return __do_write_buf(ff, buf, size);
140 /* Return: 0 if succeeded, -ERR if failed. */
141 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
143 u64 *p = (u64 *) set;
146 ret = do_write(ff, &size, sizeof(size));
150 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
151 ret = do_write(ff, p + i, sizeof(*p));
159 /* Return: 0 if succeeded, -ERR if failed. */
160 int write_padded(struct feat_fd *ff, const void *bf,
161 size_t count, size_t count_aligned)
163 static const char zero_buf[NAME_ALIGN];
164 int err = do_write(ff, bf, count);
167 err = do_write(ff, zero_buf, count_aligned - count);
172 #define string_size(str) \
173 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
175 /* Return: 0 if succeeded, -ERR if failed. */
176 static int do_write_string(struct feat_fd *ff, const char *str)
181 olen = strlen(str) + 1;
182 len = PERF_ALIGN(olen, NAME_ALIGN);
184 /* write len, incl. \0 */
185 ret = do_write(ff, &len, sizeof(len));
189 return write_padded(ff, str, olen, len);
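/*
 * A sketch of the resulting on-disk layout, assuming NAME_ALIGN is 64
 * as defined elsewhere in this tree: do_write_string(ff, "foo") emits
 * a u32 length of 64, then "foo\0" followed by 60 bytes of zero
 * padding.
 */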
192 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
194 ssize_t ret = readn(ff->fd, addr, size);
197 return ret < 0 ? (int)ret : -1;
201 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
203 if (size > (ssize_t)ff->size - ff->offset)
206 memcpy(addr, ff->buf + ff->offset, size);
213 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
216 return __do_read_fd(ff, addr, size);
217 return __do_read_buf(ff, addr, size);
220 static int do_read_u32(struct feat_fd *ff, u32 *addr)
224 ret = __do_read(ff, addr, sizeof(*addr));
228 if (ff->ph->needs_swap)
229 *addr = bswap_32(*addr);
233 static int do_read_u64(struct feat_fd *ff, u64 *addr)
237 ret = __do_read(ff, addr, sizeof(*addr));
241 if (ff->ph->needs_swap)
242 *addr = bswap_64(*addr);
246 static char *do_read_string(struct feat_fd *ff)
251 if (do_read_u32(ff, &len))
258 if (!__do_read(ff, buf, len)) {
260 * strings are padded with zeroes,
261 * so the actual strlen of buf
262 * may be less than len
271 /* Return: 0 if succeeded, -ERR if failed. */
272 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
278 ret = do_read_u64(ff, &size);
282 set = bitmap_alloc(size);
288 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
289 ret = do_read_u64(ff, p + i);
301 static int write_tracing_data(struct feat_fd *ff,
302 struct perf_evlist *evlist)
304 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
307 return read_tracing_data(ff->fd, &evlist->entries);
310 static int write_build_id(struct feat_fd *ff,
311 struct perf_evlist *evlist __maybe_unused)
313 struct perf_session *session;
316 session = container_of(ff->ph, struct perf_session, header);
318 if (!perf_session__read_build_ids(session, true))
321 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
324 err = perf_session__write_buildid_table(session, ff);
326 pr_debug("failed to write buildid table\n");
329 perf_session__cache_build_ids(session);
334 static int write_hostname(struct feat_fd *ff,
335 struct perf_evlist *evlist __maybe_unused)
344 return do_write_string(ff, uts.nodename);
347 static int write_osrelease(struct feat_fd *ff,
348 struct perf_evlist *evlist __maybe_unused)
357 return do_write_string(ff, uts.release);
360 static int write_arch(struct feat_fd *ff,
361 struct perf_evlist *evlist __maybe_unused)
370 return do_write_string(ff, uts.machine);
373 static int write_version(struct feat_fd *ff,
374 struct perf_evlist *evlist __maybe_unused)
376 return do_write_string(ff, perf_version_string);
379 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
384 const char *search = cpuinfo_proc;
391 file = fopen("/proc/cpuinfo", "r");
395 while (getline(&buf, &len, file) > 0) {
396 ret = strncmp(buf, search, strlen(search));
408 p = strchr(buf, ':');
409 if (p && *(p+1) == ' ' && *(p+2))
415 /* squash extra space characters (branding string) */
420 char *q = skip_spaces(r);
423 while ((*r++ = *q++));
427 ret = do_write_string(ff, s);
434 static int write_cpudesc(struct feat_fd *ff,
435 struct perf_evlist *evlist __maybe_unused)
437 const char *cpuinfo_procs[] = CPUINFO_PROC;
440 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
442 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
450 static int write_nrcpus(struct feat_fd *ff,
451 struct perf_evlist *evlist __maybe_unused)
457 nrc = cpu__max_present_cpu();
459 nr = sysconf(_SC_NPROCESSORS_ONLN);
463 nra = (u32)(nr & UINT_MAX);
465 ret = do_write(ff, &nrc, sizeof(nrc));
469 return do_write(ff, &nra, sizeof(nra));
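/* on disk, NRCPUS is thus two u32s: nr_cpus_available, then nr_cpus_online */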
472 static int write_event_desc(struct feat_fd *ff,
473 struct perf_evlist *evlist)
475 struct perf_evsel *evsel;
479 nre = evlist->nr_entries;
482 * write number of events
484 ret = do_write(ff, &nre, sizeof(nre));
489 * size of perf_event_attr struct
491 sz = (u32)sizeof(evsel->attr);
492 ret = do_write(ff, &sz, sizeof(sz));
496 evlist__for_each_entry(evlist, evsel) {
497 ret = do_write(ff, &evsel->attr, sz);
501 * write the number of unique ids per event;
502 * there is one id per instance of an event.
504 * copy into an nri to be independent of the type of ids.
508 ret = do_write(ff, &nri, sizeof(nri));
513 * write event string as passed on cmdline
515 ret = do_write_string(ff, perf_evsel__name(evsel));
519 * write unique ids for this event
521 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
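/*
 * The resulting EVENT_DESC layout: u32 nre, u32 attr size, then per
 * event the perf_event_attr itself, a u32 id count, the event name
 * string and that many u64 ids.
 */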
528 static int write_cmdline(struct feat_fd *ff,
529 struct perf_evlist *evlist __maybe_unused)
531 char pbuf[MAXPATHLEN], *buf;
534 /* actual path to perf binary */
535 buf = perf_exe(pbuf, MAXPATHLEN);
537 /* account for binary path */
538 n = perf_env.nr_cmdline + 1;
540 ret = do_write(ff, &n, sizeof(n));
544 ret = do_write_string(ff, buf);
548 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
549 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
557 static int write_cpu_topology(struct feat_fd *ff,
558 struct perf_evlist *evlist __maybe_unused)
560 struct cpu_topology *tp;
564 tp = cpu_topology__new();
568 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
572 for (i = 0; i < tp->core_sib; i++) {
573 ret = do_write_string(ff, tp->core_siblings[i]);
577 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
581 for (i = 0; i < tp->thread_sib; i++) {
582 ret = do_write_string(ff, tp->thread_siblings[i]);
587 ret = perf_env__read_cpu_topology_map(&perf_env);
591 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
592 ret = do_write(ff, &perf_env.cpu[j].core_id,
593 sizeof(perf_env.cpu[j].core_id));
596 ret = do_write(ff, &perf_env.cpu[j].socket_id,
597 sizeof(perf_env.cpu[j].socket_id));
605 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
609 for (i = 0; i < tp->die_sib; i++) {
610 ret = do_write_string(ff, tp->die_siblings[i]);
615 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
616 ret = do_write(ff, &perf_env.cpu[j].die_id,
617 sizeof(perf_env.cpu[j].die_id));
623 cpu_topology__delete(tp);
629 static int write_total_mem(struct feat_fd *ff,
630 struct perf_evlist *evlist __maybe_unused)
638 fp = fopen("/proc/meminfo", "r");
642 while (getline(&buf, &len, fp) > 0) {
643 ret = strncmp(buf, "MemTotal:", 9);
648 n = sscanf(buf, "%*s %"PRIu64, &mem);
650 ret = do_write(ff, &mem, sizeof(mem));
658 static int write_numa_topology(struct feat_fd *ff,
659 struct perf_evlist *evlist __maybe_unused)
661 struct numa_topology *tp;
665 tp = numa_topology__new();
669 ret = do_write(ff, &tp->nr, sizeof(u32));
673 for (i = 0; i < tp->nr; i++) {
674 struct numa_topology_node *n = &tp->nodes[i];
676 ret = do_write(ff, &n->node, sizeof(u32));
680 ret = do_write(ff, &n->mem_total, sizeof(u64));
684 ret = do_write(ff, &n->mem_free, sizeof(u64));
688 ret = do_write_string(ff, n->cpus);
696 numa_topology__delete(tp);
703 * struct pmu_mappings {
704 * u32 pmu_num;
705 * struct pmu_map {
706 * u32 type;
707 * char name[];
708 * }[pmu_num];
709 * };
710 */
712 static int write_pmu_mappings(struct feat_fd *ff,
713 struct perf_evlist *evlist __maybe_unused)
715 struct perf_pmu *pmu = NULL;
720 * Do a first pass to count the number of PMUs, to avoid an lseek(),
721 * so this works in pipe mode as well.
723 while ((pmu = perf_pmu__scan(pmu))) {
729 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
733 while ((pmu = perf_pmu__scan(pmu))) {
737 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
741 ret = do_write_string(ff, pmu->name);
752 * struct group_descs {
753 * u32 nr_groups;
754 * struct group_desc {
755 * char name[];
756 * u32 leader_idx;
757 * u32 nr_members;
758 * }[nr_groups];
759 * };
760 */
761 static int write_group_desc(struct feat_fd *ff,
762 struct perf_evlist *evlist)
764 u32 nr_groups = evlist->nr_groups;
765 struct perf_evsel *evsel;
768 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
772 evlist__for_each_entry(evlist, evsel) {
773 if (perf_evsel__is_group_leader(evsel) &&
774 evsel->nr_members > 1) {
775 const char *name = evsel->group_name ?: "{anon_group}";
776 u32 leader_idx = evsel->idx;
777 u32 nr_members = evsel->nr_members;
779 ret = do_write_string(ff, name);
783 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
787 ret = do_write(ff, &nr_members, sizeof(nr_members));
796 * Return the CPU id as a raw string.
798 * Each architecture should provide a more precise id string that
799 * can be used to match the architecture's "mapfile".
801 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
806 /* Return zero when the cpuid from the mapfile.csv matches the
807 * cpuid string generated on this platform.
808 * Otherwise return non-zero.
810 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
813 regmatch_t pmatch[1];
816 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
817 /* Warn that the mapfile regex could not be compiled. */
818 pr_info("Invalid regular expression %s\n", mapcpuid);
822 match = !regexec(&re, cpuid, 1, pmatch, 0);
825 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
827 /* Verify the entire string matched. */
828 if (match_len == strlen(cpuid))
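/*
 * Because mapcpuid is treated as an extended regex, a single mapfile
 * entry (a hypothetical "VendorX-6-[45]E", say) can cover several
 * models; the length check above rejects partial matches.
 */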
835 * default get_cpuid(): nothing gets recorded
836 * actual implementation must be in arch/$(SRCARCH)/util/header.c
838 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
843 static int write_cpuid(struct feat_fd *ff,
844 struct perf_evlist *evlist __maybe_unused)
849 ret = get_cpuid(buffer, sizeof(buffer));
853 return do_write_string(ff, buffer);
856 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
857 struct perf_evlist *evlist __maybe_unused)
862 static int write_auxtrace(struct feat_fd *ff,
863 struct perf_evlist *evlist __maybe_unused)
865 struct perf_session *session;
868 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
871 session = container_of(ff->ph, struct perf_session, header);
873 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
875 pr_err("Failed to write auxtrace index\n");
879 static int write_clockid(struct feat_fd *ff,
880 struct perf_evlist *evlist __maybe_unused)
882 return do_write(ff, &ff->ph->env.clockid_res_ns,
883 sizeof(ff->ph->env.clockid_res_ns));
886 static int write_dir_format(struct feat_fd *ff,
887 struct perf_evlist *evlist __maybe_unused)
889 struct perf_session *session;
890 struct perf_data *data;
892 session = container_of(ff->ph, struct perf_session, header);
893 data = session->data;
895 if (WARN_ON(!perf_data__is_dir(data)))
898 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
901 #ifdef HAVE_LIBBPF_SUPPORT
902 static int write_bpf_prog_info(struct feat_fd *ff,
903 struct perf_evlist *evlist __maybe_unused)
905 struct perf_env *env = &ff->ph->env;
906 struct rb_root *root;
907 struct rb_node *next;
910 down_read(&env->bpf_progs.lock);
912 ret = do_write(ff, &env->bpf_progs.infos_cnt,
913 sizeof(env->bpf_progs.infos_cnt));
917 root = &env->bpf_progs.infos;
918 next = rb_first(root);
920 struct bpf_prog_info_node *node;
923 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
924 next = rb_next(&node->rb_node);
925 len = sizeof(struct bpf_prog_info_linear) +
926 node->info_linear->data_len;
928 /* before writing to file, translate address to offset */
929 bpf_program__bpil_addr_to_offs(node->info_linear);
930 ret = do_write(ff, node->info_linear, len);
932 * translate back to address even when do_write() fails,
933 * so that this function never changes the data.
935 bpf_program__bpil_offs_to_addr(node->info_linear);
940 up_read(&env->bpf_progs.lock);
943 #else // HAVE_LIBBPF_SUPPORT
944 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
945 struct perf_evlist *evlist __maybe_unused)
949 #endif // HAVE_LIBBPF_SUPPORT
951 static int write_bpf_btf(struct feat_fd *ff,
952 struct perf_evlist *evlist __maybe_unused)
954 struct perf_env *env = &ff->ph->env;
955 struct rb_root *root;
956 struct rb_node *next;
959 down_read(&env->bpf_progs.lock);
961 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
962 sizeof(env->bpf_progs.btfs_cnt));
967 root = &env->bpf_progs.btfs;
968 next = rb_first(root);
970 struct btf_node *node;
972 node = rb_entry(next, struct btf_node, rb_node);
973 next = rb_next(&node->rb_node);
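/*
 * id, data_size and data are contiguous in struct btf_node, so the
 * single do_write() below covers all three.
 */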
974 ret = do_write(ff, &node->id,
975 sizeof(u32) * 2 + node->data_size);
980 up_read(&env->bpf_progs.lock);
984 static int cpu_cache_level__sort(const void *a, const void *b)
986 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
987 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
989 return cache_a->level - cache_b->level;
992 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
994 if (a->level != b->level)
997 if (a->line_size != b->line_size)
1000 if (a->sets != b->sets)
1003 if (a->ways != b->ways)
1006 if (strcmp(a->type, b->type))
1009 if (strcmp(a->size, b->size))
1012 if (strcmp(a->map, b->map))
1018 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1020 char path[PATH_MAX], file[PATH_MAX];
1024 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1025 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1027 if (stat(file, &st))
1030 scnprintf(file, PATH_MAX, "%s/level", path);
1031 if (sysfs__read_int(file, (int *) &cache->level))
1034 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1035 if (sysfs__read_int(file, (int *) &cache->line_size))
1038 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1039 if (sysfs__read_int(file, (int *) &cache->sets))
1042 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1043 if (sysfs__read_int(file, (int *) &cache->ways))
1046 scnprintf(file, PATH_MAX, "%s/type", path);
1047 if (sysfs__read_str(file, &cache->type, &len))
1050 cache->type[len] = 0;
1051 cache->type = strim(cache->type);
1053 scnprintf(file, PATH_MAX, "%s/size", path);
1054 if (sysfs__read_str(file, &cache->size, &len)) {
1055 zfree(&cache->type);
1059 cache->size[len] = 0;
1060 cache->size = strim(cache->size);
1062 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1063 if (sysfs__read_str(file, &cache->map, &len)) {
1065 zfree(&cache->type);
1069 cache->map[len] = 0;
1070 cache->map = strim(cache->map);
1074 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1076 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
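/* one line per cache level, e.g. "L1 Data            32K [0-3]" */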
1079 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1086 ncpus = sysconf(_SC_NPROCESSORS_CONF);
1090 nr = (u32)(ncpus & UINT_MAX);
1092 for (cpu = 0; cpu < nr; cpu++) {
1093 for (level = 0; level < 10; level++) {
1094 struct cpu_cache_level c;
1097 err = cpu_cache_level__read(&c, cpu, level);
1104 for (i = 0; i < cnt; i++) {
1105 if (cpu_cache_level__cmp(&c, &caches[i]))
1112 cpu_cache_level__free(&c);
1114 if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1123 #define MAX_CACHES (MAX_NR_CPUS * 4)
1125 static int write_cache(struct feat_fd *ff,
1126 struct perf_evlist *evlist __maybe_unused)
1128 struct cpu_cache_level caches[MAX_CACHES];
1129 u32 cnt = 0, i, version = 1;
1132 ret = build_caches(caches, MAX_CACHES, &cnt);
1136 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1138 ret = do_write(ff, &version, sizeof(u32));
1142 ret = do_write(ff, &cnt, sizeof(u32));
1146 for (i = 0; i < cnt; i++) {
1147 struct cpu_cache_level *c = &caches[i];
1150 ret = do_write(ff, &c->v, sizeof(u32)); \
1161 ret = do_write_string(ff, (const char *) c->v); \
1172 for (i = 0; i < cnt; i++)
1173 cpu_cache_level__free(&caches[i]);
1177 static int write_stat(struct feat_fd *ff __maybe_unused,
1178 struct perf_evlist *evlist __maybe_unused)
1183 static int write_sample_time(struct feat_fd *ff,
1184 struct perf_evlist *evlist)
1188 ret = do_write(ff, &evlist->first_sample_time,
1189 sizeof(evlist->first_sample_time));
1193 return do_write(ff, &evlist->last_sample_time,
1194 sizeof(evlist->last_sample_time));
1198 static int memory_node__read(struct memory_node *n, unsigned long idx)
1200 unsigned int phys, size = 0;
1201 char path[PATH_MAX];
1205 #define for_each_memory(mem, dir) \
1206 while ((ent = readdir(dir))) \
1207 if (strcmp(ent->d_name, ".") && \
1208 strcmp(ent->d_name, "..") && \
1209 sscanf(ent->d_name, "memory%u", &mem) == 1)
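/* iterates over the memory<N> block entries under a node's sysfs directory */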
1211 scnprintf(path, PATH_MAX,
1212 "%s/devices/system/node/node%lu",
1213 sysfs__mountpoint(), idx);
1215 dir = opendir(path);
1217 pr_warning("failed: cant' open memory sysfs data\n");
1221 for_each_memory(phys, dir) {
1222 size = max(phys, size);
1227 n->set = bitmap_alloc(size);
1238 for_each_memory(phys, dir) {
1239 set_bit(phys, n->set);
1246 static int memory_node__sort(const void *a, const void *b)
1248 const struct memory_node *na = a;
1249 const struct memory_node *nb = b;
1251 return na->node - nb->node;
1254 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1256 char path[PATH_MAX];
1262 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1263 sysfs__mountpoint());
1265 dir = opendir(path);
1267 pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1272 while (!ret && (ent = readdir(dir))) {
1276 if (!strcmp(ent->d_name, ".") ||
1277 !strcmp(ent->d_name, ".."))
1280 r = sscanf(ent->d_name, "node%u", &idx);
1284 if (WARN_ONCE(cnt >= size,
1285 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1288 ret = memory_node__read(&nodes[cnt++], idx);
1295 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1300 #define MAX_MEMORY_NODES 2000
1303 * The MEM_TOPOLOGY feature holds the physical memory map for every
1304 * node in the system. The format of the data is as follows:
1306 * 0 - version | for future changes
1307 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1308 * 16 - count | number of nodes
1310 * For each node we store the map of physical indexes, as follows:
1313 * 32 - node id | node index
1314 * 40 - size | size of bitmap
1315 * 48 - bitmap           | bitmap of memory indexes that belong to node
1316 */
1317 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1318 struct perf_evlist *evlist __maybe_unused)
1320 static struct memory_node nodes[MAX_MEMORY_NODES];
1321 u64 bsize, version = 1, i, nr;
1324 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1325 (unsigned long long *) &bsize);
1329 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1333 ret = do_write(ff, &version, sizeof(version));
1337 ret = do_write(ff, &bsize, sizeof(bsize));
1341 ret = do_write(ff, &nr, sizeof(nr));
1345 for (i = 0; i < nr; i++) {
1346 struct memory_node *n = &nodes[i];
1349 ret = do_write(ff, &n->v, sizeof(n->v)); \
1358 ret = do_write_bitmap(ff, n->set, n->size);
1367 static int write_compressed(struct feat_fd *ff __maybe_unused,
1368 struct perf_evlist *evlist __maybe_unused)
1372 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1376 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1380 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1384 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1388 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
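/*
 * On disk the COMPRESSED feature is therefore five consecutive u32s:
 * version, type, level, ratio and mmap_len.
 */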
1391 static void print_hostname(struct feat_fd *ff, FILE *fp)
1393 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1396 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1398 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1401 static void print_arch(struct feat_fd *ff, FILE *fp)
1403 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1406 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1408 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1411 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1413 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1414 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1417 static void print_version(struct feat_fd *ff, FILE *fp)
1419 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1422 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1426 nr = ff->ph->env.nr_cmdline;
1428 fprintf(fp, "# cmdline : ");
1430 for (i = 0; i < nr; i++) {
1431 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1433 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1437 char *quote = strchr(argv_i, '\'');
1441 fprintf(fp, "%s\\\'", argv_i);
1444 fprintf(fp, "%s ", argv_i);
1451 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1453 struct perf_header *ph = ff->ph;
1454 int cpu_nr = ph->env.nr_cpus_avail;
1458 nr = ph->env.nr_sibling_cores;
1459 str = ph->env.sibling_cores;
1461 for (i = 0; i < nr; i++) {
1462 fprintf(fp, "# sibling sockets : %s\n", str);
1463 str += strlen(str) + 1;
1466 if (ph->env.nr_sibling_dies) {
1467 nr = ph->env.nr_sibling_dies;
1468 str = ph->env.sibling_dies;
1470 for (i = 0; i < nr; i++) {
1471 fprintf(fp, "# sibling dies : %s\n", str);
1472 str += strlen(str) + 1;
1476 nr = ph->env.nr_sibling_threads;
1477 str = ph->env.sibling_threads;
1479 for (i = 0; i < nr; i++) {
1480 fprintf(fp, "# sibling threads : %s\n", str);
1481 str += strlen(str) + 1;
1484 if (ph->env.nr_sibling_dies) {
1485 if (ph->env.cpu != NULL) {
1486 for (i = 0; i < cpu_nr; i++)
1487 fprintf(fp, "# CPU %d: Core ID %d, "
1488 "Die ID %d, Socket ID %d\n",
1489 i, ph->env.cpu[i].core_id,
1490 ph->env.cpu[i].die_id,
1491 ph->env.cpu[i].socket_id);
1493 fprintf(fp, "# Core ID, Die ID and Socket ID "
1494 "information is not available\n");
1496 if (ph->env.cpu != NULL) {
1497 for (i = 0; i < cpu_nr; i++)
1498 fprintf(fp, "# CPU %d: Core ID %d, "
1500 i, ph->env.cpu[i].core_id,
1501 ph->env.cpu[i].socket_id);
1503 fprintf(fp, "# Core ID and Socket ID "
1504 "information is not available\n");
1508 static void print_clockid(struct feat_fd *ff, FILE *fp)
1510 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1511 ff->ph->env.clockid_res_ns * 1000);
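/*
 * Note: clockid_res_ns * 1000 equals the frequency in MHz only when
 * the reported clock resolution is exactly 1 ns.
 */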
1514 static void print_dir_format(struct feat_fd *ff, FILE *fp)
1516 struct perf_session *session;
1517 struct perf_data *data;
1519 session = container_of(ff->ph, struct perf_session, header);
1520 data = session->data;
1522 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1525 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1527 struct perf_env *env = &ff->ph->env;
1528 struct rb_root *root;
1529 struct rb_node *next;
1531 down_read(&env->bpf_progs.lock);
1533 root = &env->bpf_progs.infos;
1534 next = rb_first(root);
1537 struct bpf_prog_info_node *node;
1539 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1540 next = rb_next(&node->rb_node);
1542 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1546 up_read(&env->bpf_progs.lock);
1549 static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1551 struct perf_env *env = &ff->ph->env;
1552 struct rb_root *root;
1553 struct rb_node *next;
1555 down_read(&env->bpf_progs.lock);
1557 root = &env->bpf_progs.btfs;
1558 next = rb_first(root);
1561 struct btf_node *node;
1563 node = rb_entry(next, struct btf_node, rb_node);
1564 next = rb_next(&node->rb_node);
1565 fprintf(fp, "# btf info of id %u\n", node->id);
1568 up_read(&env->bpf_progs.lock);
1571 static void free_event_desc(struct perf_evsel *events)
1573 struct perf_evsel *evsel;
1578 for (evsel = events; evsel->attr.size; evsel++) {
1579 zfree(&evsel->name);
1586 static struct perf_evsel *read_event_desc(struct feat_fd *ff)
1588 struct perf_evsel *evsel, *events = NULL;
1591 u32 nre, sz, nr, i, j;
1594 /* number of events */
1595 if (do_read_u32(ff, &nre))
1598 if (do_read_u32(ff, &sz))
1601 /* buffer to hold the on-file attr struct */
1606 /* the last event terminates with evsel->attr.size == 0: */
1607 events = calloc(nre + 1, sizeof(*events));
1611 msz = sizeof(evsel->attr);
1615 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1619 * must read entire on-file attr struct to
1620 * sync up with layout.
1622 if (__do_read(ff, buf, sz))
1625 if (ff->ph->needs_swap)
1626 perf_event__attr_swap(buf);
1628 memcpy(&evsel->attr, buf, msz);
1630 if (do_read_u32(ff, &nr))
1633 if (ff->ph->needs_swap)
1634 evsel->needs_swap = true;
1636 evsel->name = do_read_string(ff);
1643 id = calloc(nr, sizeof(*id));
1649 for (j = 0 ; j < nr; j++) {
1650 if (do_read_u64(ff, id))
1659 free_event_desc(events);
1664 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1665 void *priv __maybe_unused)
1667 return fprintf(fp, ", %s = %s", name, val);
1670 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1672 struct perf_evsel *evsel, *events;
1677 events = ff->events;
1679 events = read_event_desc(ff);
1682 fprintf(fp, "# event desc: not available or unable to read\n");
1686 for (evsel = events; evsel->attr.size; evsel++) {
1687 fprintf(fp, "# event : name = %s, ", evsel->name);
1690 fprintf(fp, ", id = {");
1691 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1694 fprintf(fp, " %"PRIu64, *id);
1699 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1704 free_event_desc(events);
1708 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1710 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1713 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1716 struct numa_node *n;
1718 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1719 n = &ff->ph->env.numa_nodes[i];
1721 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1722 " free = %"PRIu64" kB\n",
1723 n->node, n->mem_total, n->mem_free);
1725 fprintf(fp, "# node%u cpu list : ", n->node);
1726 cpu_map__fprintf(n->map, fp);
1730 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1732 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1735 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1737 fprintf(fp, "# contains samples with branch stack\n");
1740 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1742 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1745 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1747 fprintf(fp, "# contains stat data\n");
1750 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1754 fprintf(fp, "# CPU cache info:\n");
1755 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1757 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1761 static void print_compressed(struct feat_fd *ff, FILE *fp)
1763 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1764 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1765 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1768 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1770 const char *delimiter = "# pmu mappings: ";
1775 pmu_num = ff->ph->env.nr_pmu_mappings;
1777 fprintf(fp, "# pmu mappings: not available\n");
1781 str = ff->ph->env.pmu_mappings;
1784 type = strtoul(str, &tmp, 0);
1789 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1792 str += strlen(str) + 1;
1801 fprintf(fp, "# pmu mappings: unable to read\n");
1804 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1806 struct perf_session *session;
1807 struct perf_evsel *evsel;
1810 session = container_of(ff->ph, struct perf_session, header);
1812 evlist__for_each_entry(session->evlist, evsel) {
1813 if (perf_evsel__is_group_leader(evsel) &&
1814 evsel->nr_members > 1) {
1815 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1816 perf_evsel__name(evsel));
1818 nr = evsel->nr_members - 1;
1820 fprintf(fp, ",%s", perf_evsel__name(evsel));
1828 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1830 struct perf_session *session;
1834 session = container_of(ff->ph, struct perf_session, header);
1836 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1837 time_buf, sizeof(time_buf));
1838 fprintf(fp, "# time of first sample : %s\n", time_buf);
1840 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1841 time_buf, sizeof(time_buf));
1842 fprintf(fp, "# time of last sample : %s\n", time_buf);
1844 d = (double)(session->evlist->last_sample_time -
1845 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1847 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1850 static void memory_node__fprintf(struct memory_node *n,
1851 unsigned long long bsize, FILE *fp)
1853 char buf_map[100], buf_size[50];
1854 unsigned long long size;
1856 size = bsize * bitmap_weight(n->set, n->size);
1857 unit_number__scnprintf(buf_size, 50, size);
1859 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1860 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1863 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1865 struct memory_node *nodes;
1868 nodes = ff->ph->env.memory_nodes;
1869 nr = ff->ph->env.nr_memory_nodes;
1871 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1872 nr, ff->ph->env.memory_bsize);
1874 for (i = 0; i < nr; i++) {
1875 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1879 static int __event_process_build_id(struct build_id_event *bev,
1881 struct perf_session *session)
1884 struct machine *machine;
1887 enum dso_kernel_type dso_type;
1889 machine = perf_session__findnew_machine(session, bev->pid);
1893 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1896 case PERF_RECORD_MISC_KERNEL:
1897 dso_type = DSO_TYPE_KERNEL;
1899 case PERF_RECORD_MISC_GUEST_KERNEL:
1900 dso_type = DSO_TYPE_GUEST_KERNEL;
1902 case PERF_RECORD_MISC_USER:
1903 case PERF_RECORD_MISC_GUEST_USER:
1904 dso_type = DSO_TYPE_USER;
1910 dso = machine__findnew_dso(machine, filename);
1912 char sbuild_id[SBUILD_ID_SIZE];
1914 dso__set_build_id(dso, &bev->build_id);
1916 if (dso_type != DSO_TYPE_USER) {
1917 struct kmod_path m = { .name = NULL, };
1919 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1920 dso__set_module_info(dso, &m, machine);
1922 dso->kernel = dso_type;
1927 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1929 pr_debug("build id event received for %s: %s\n",
1930 dso->long_name, sbuild_id);
1939 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1940 int input, u64 offset, u64 size)
1942 struct perf_session *session = container_of(header, struct perf_session, header);
1944 struct perf_event_header header;
1945 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1948 struct build_id_event bev;
1949 char filename[PATH_MAX];
1950 u64 limit = offset + size;
1952 while (offset < limit) {
1955 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1958 if (header->needs_swap)
1959 perf_event_header__bswap(&old_bev.header);
1961 len = old_bev.header.size - sizeof(old_bev);
1962 if (readn(input, filename, len) != len)
1965 bev.header = old_bev.header;
1968 * As the pid is the missing value, we need to fill
1969 * it in properly. The header.misc value gives us a nice hint.
1971 bev.pid = HOST_KERNEL_ID;
1972 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1973 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1974 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1976 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1977 __event_process_build_id(&bev, filename, session);
1979 offset += bev.header.size;
1985 static int perf_header__read_build_ids(struct perf_header *header,
1986 int input, u64 offset, u64 size)
1988 struct perf_session *session = container_of(header, struct perf_session, header);
1989 struct build_id_event bev;
1990 char filename[PATH_MAX];
1991 u64 limit = offset + size, orig_offset = offset;
1994 while (offset < limit) {
1997 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2000 if (header->needs_swap)
2001 perf_event_header__bswap(&bev.header);
2003 len = bev.header.size - sizeof(bev);
2004 if (readn(input, filename, len) != len)
2007 * The a1645ce1 changeset:
2009 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2011 * Added a field to struct build_id_event that broke the file format.
2014 * Since the kernel build-id is the first entry, process the
2015 * table using the old format if the well known
2016 * '[kernel.kallsyms]' string for the kernel build-id has the
2017 * first 4 characters chopped off (where the pid_t sits).
2019 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2020 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2022 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2025 __event_process_build_id(&bev, filename, session);
2027 offset += bev.header.size;
2034 /* Macro for features that simply need to read and store a string. */
2035 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2036 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2038 ff->ph->env.__feat_env = do_read_string(ff); \
2039 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2042 FEAT_PROCESS_STR_FUN(hostname, hostname);
2043 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2044 FEAT_PROCESS_STR_FUN(version, version);
2045 FEAT_PROCESS_STR_FUN(arch, arch);
2046 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2047 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
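/*
 * For reference, FEAT_PROCESS_STR_FUN(hostname, hostname) expands to
 * roughly:
 *
 *	static int process_hostname(struct feat_fd *ff, void *data)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */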
2049 static int process_tracing_data(struct feat_fd *ff, void *data)
2051 ssize_t ret = trace_report(ff->fd, data, false);
2053 return ret < 0 ? -1 : 0;
2056 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2058 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2059 pr_debug("Failed to read buildids, continuing...\n");
2063 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2066 u32 nr_cpus_avail, nr_cpus_online;
2068 ret = do_read_u32(ff, &nr_cpus_avail);
2072 ret = do_read_u32(ff, &nr_cpus_online);
2075 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2076 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2080 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2085 ret = do_read_u64(ff, &total_mem);
2088 ff->ph->env.total_mem = (unsigned long long)total_mem;
2092 static struct perf_evsel *
2093 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
2095 struct perf_evsel *evsel;
2097 evlist__for_each_entry(evlist, evsel) {
2098 if (evsel->idx == idx)
2106 perf_evlist__set_event_name(struct perf_evlist *evlist,
2107 struct perf_evsel *event)
2109 struct perf_evsel *evsel;
2114 evsel = perf_evlist__find_by_index(evlist, event->idx);
2121 evsel->name = strdup(event->name);
2125 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2127 struct perf_session *session;
2128 struct perf_evsel *evsel, *events = read_event_desc(ff);
2133 session = container_of(ff->ph, struct perf_session, header);
2135 if (session->data->is_pipe) {
2136 /* Save events for reading later by print_event_desc,
2137 * since they can't be read again in pipe mode. */
2138 ff->events = events;
2141 for (evsel = events; evsel->attr.size; evsel++)
2142 perf_evlist__set_event_name(session->evlist, evsel);
2144 if (!session->data->is_pipe)
2145 free_event_desc(events);
2150 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2152 char *str, *cmdline = NULL, **argv = NULL;
2155 if (do_read_u32(ff, &nr))
2158 ff->ph->env.nr_cmdline = nr;
2160 cmdline = zalloc(ff->size + nr + 1);
2164 argv = zalloc(sizeof(char *) * (nr + 1));
2168 for (i = 0; i < nr; i++) {
2169 str = do_read_string(ff);
2173 argv[i] = cmdline + len;
2174 memcpy(argv[i], str, strlen(str) + 1);
2175 len += strlen(str) + 1;
2178 ff->ph->env.cmdline = cmdline;
2179 ff->ph->env.cmdline_argv = (const char **) argv;
2188 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2193 int cpu_nr = ff->ph->env.nr_cpus_avail;
2195 struct perf_header *ph = ff->ph;
2196 bool do_core_id_test = true;
2198 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2202 if (do_read_u32(ff, &nr))
2205 ph->env.nr_sibling_cores = nr;
2206 size += sizeof(u32);
2207 if (strbuf_init(&sb, 128) < 0)
2210 for (i = 0; i < nr; i++) {
2211 str = do_read_string(ff);
2215 /* include a NUL character at the end */
2216 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2218 size += string_size(str);
2221 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2223 if (do_read_u32(ff, &nr))
2226 ph->env.nr_sibling_threads = nr;
2227 size += sizeof(u32);
2229 for (i = 0; i < nr; i++) {
2230 str = do_read_string(ff);
2234 /* include a NUL character at the end */
2235 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2237 size += string_size(str);
2240 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2243 * The header may be from an old perf,
2244 * which doesn't include core id and socket id information.
2246 if (ff->size <= size) {
2247 zfree(&ph->env.cpu);
2251 /* On s390 the socket_id number is not related to the number of CPUs.
2252 * The socket_id number might be higher than the number of CPUs.
2253 * This depends on the configuration.
2255 if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
2256 do_core_id_test = false;
2258 for (i = 0; i < (u32)cpu_nr; i++) {
2259 if (do_read_u32(ff, &nr))
2262 ph->env.cpu[i].core_id = nr;
2263 size += sizeof(u32);
2265 if (do_read_u32(ff, &nr))
2268 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2269 pr_debug("socket_id number is too big."
2270 "You may need to upgrade the perf tool.\n");
2274 ph->env.cpu[i].socket_id = nr;
2275 size += sizeof(u32);
2279 * The header may be from an old perf,
2280 * which doesn't include die information.
2282 if (ff->size <= size)
2285 if (do_read_u32(ff, &nr))
2288 ph->env.nr_sibling_dies = nr;
2289 size += sizeof(u32);
2291 for (i = 0; i < nr; i++) {
2292 str = do_read_string(ff);
2296 /* include a NUL character at the end */
2297 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2299 size += string_size(str);
2302 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2304 for (i = 0; i < (u32)cpu_nr; i++) {
2305 if (do_read_u32(ff, &nr))
2308 ph->env.cpu[i].die_id = nr;
2314 strbuf_release(&sb);
2316 zfree(&ph->env.cpu);
2320 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2322 struct numa_node *nodes, *n;
2327 if (do_read_u32(ff, &nr))
2330 nodes = zalloc(sizeof(*nodes) * nr);
2334 for (i = 0; i < nr; i++) {
2338 if (do_read_u32(ff, &n->node))
2341 if (do_read_u64(ff, &n->mem_total))
2344 if (do_read_u64(ff, &n->mem_free))
2347 str = do_read_string(ff);
2351 n->map = cpu_map__new(str);
2357 ff->ph->env.nr_numa_nodes = nr;
2358 ff->ph->env.numa_nodes = nodes;
2366 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2373 if (do_read_u32(ff, &pmu_num))
2377 pr_debug("pmu mappings not available\n");
2381 ff->ph->env.nr_pmu_mappings = pmu_num;
2382 if (strbuf_init(&sb, 128) < 0)
2386 if (do_read_u32(ff, &type))
2389 name = do_read_string(ff);
2393 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2395 /* include a NUL character at the end */
2396 if (strbuf_add(&sb, "", 1) < 0)
2399 if (!strcmp(name, "msr"))
2400 ff->ph->env.msr_pmu_type = type;
2405 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2409 strbuf_release(&sb);
2413 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2416 u32 i, nr, nr_groups;
2417 struct perf_session *session;
2418 struct perf_evsel *evsel, *leader = NULL;
2425 if (do_read_u32(ff, &nr_groups))
2428 ff->ph->env.nr_groups = nr_groups;
2430 pr_debug("group desc not available\n");
2434 desc = calloc(nr_groups, sizeof(*desc));
2438 for (i = 0; i < nr_groups; i++) {
2439 desc[i].name = do_read_string(ff);
2443 if (do_read_u32(ff, &desc[i].leader_idx))
2446 if (do_read_u32(ff, &desc[i].nr_members))
2451 * Rebuild group relationship based on the group_desc
2453 session = container_of(ff->ph, struct perf_session, header);
2454 session->evlist->nr_groups = nr_groups;
2457 evlist__for_each_entry(session->evlist, evsel) {
2458 if (evsel->idx == (int) desc[i].leader_idx) {
2459 evsel->leader = evsel;
2460 /* {anon_group} is a dummy name */
2461 if (strcmp(desc[i].name, "{anon_group}")) {
2462 evsel->group_name = desc[i].name;
2463 desc[i].name = NULL;
2465 evsel->nr_members = desc[i].nr_members;
2467 if (i >= nr_groups || nr > 0) {
2468 pr_debug("invalid group desc\n");
2473 nr = evsel->nr_members - 1;
2476 /* This is a group member */
2477 evsel->leader = leader;
2483 if (i != nr_groups || nr != 0) {
2484 pr_debug("invalid group desc\n");
2490 for (i = 0; i < nr_groups; i++)
2491 zfree(&desc[i].name);
2497 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2499 struct perf_session *session;
2502 session = container_of(ff->ph, struct perf_session, header);
2504 err = auxtrace_index__process(ff->fd, ff->size, session,
2505 ff->ph->needs_swap);
2507 pr_err("Failed to process auxtrace index\n");
2511 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2513 struct cpu_cache_level *caches;
2514 u32 cnt, i, version;
2516 if (do_read_u32(ff, &version))
2522 if (do_read_u32(ff, &cnt))
2525 caches = zalloc(sizeof(*caches) * cnt);
2529 for (i = 0; i < cnt; i++) {
2530 struct cpu_cache_level c;
2533 if (do_read_u32(ff, &c.v))\
2534 goto out_free_caches; \
2543 c.v = do_read_string(ff); \
2545 goto out_free_caches;
2555 ff->ph->env.caches = caches;
2556 ff->ph->env.caches_cnt = cnt;
2563 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2565 struct perf_session *session;
2566 u64 first_sample_time, last_sample_time;
2569 session = container_of(ff->ph, struct perf_session, header);
2571 ret = do_read_u64(ff, &first_sample_time);
2575 ret = do_read_u64(ff, &last_sample_time);
2579 session->evlist->first_sample_time = first_sample_time;
2580 session->evlist->last_sample_time = last_sample_time;
2584 static int process_mem_topology(struct feat_fd *ff,
2585 void *data __maybe_unused)
2587 struct memory_node *nodes;
2588 u64 version, i, nr, bsize;
2591 if (do_read_u64(ff, &version))
2597 if (do_read_u64(ff, &bsize))
2600 if (do_read_u64(ff, &nr))
2603 nodes = zalloc(sizeof(*nodes) * nr);
2607 for (i = 0; i < nr; i++) {
2608 struct memory_node n;
2611 if (do_read_u64(ff, &n.v)) \
2619 if (do_read_bitmap(ff, &n.set, &n.size))
2625 ff->ph->env.memory_bsize = bsize;
2626 ff->ph->env.memory_nodes = nodes;
2627 ff->ph->env.nr_memory_nodes = nr;
2636 static int process_clockid(struct feat_fd *ff,
2637 void *data __maybe_unused)
2639 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2645 static int process_dir_format(struct feat_fd *ff,
2646 void *_data __maybe_unused)
2648 struct perf_session *session;
2649 struct perf_data *data;
2651 session = container_of(ff->ph, struct perf_session, header);
2652 data = session->data;
2654 if (WARN_ON(!perf_data__is_dir(data)))
2657 return do_read_u64(ff, &data->dir.version);
2660 #ifdef HAVE_LIBBPF_SUPPORT
2661 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2663 struct bpf_prog_info_linear *info_linear;
2664 struct bpf_prog_info_node *info_node;
2665 struct perf_env *env = &ff->ph->env;
2669 if (ff->ph->needs_swap) {
2670 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2674 if (do_read_u32(ff, &count))
2677 down_write(&env->bpf_progs.lock);
2679 for (i = 0; i < count; ++i) {
2680 u32 info_len, data_len;
2684 if (do_read_u32(ff, &info_len))
2686 if (do_read_u32(ff, &data_len))
2689 if (info_len > sizeof(struct bpf_prog_info)) {
2690 pr_warning("detected invalid bpf_prog_info\n");
2694 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2698 info_linear->info_len = sizeof(struct bpf_prog_info);
2699 info_linear->data_len = data_len;
2700 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2702 if (__do_read(ff, &info_linear->info, info_len))
2704 if (info_len < sizeof(struct bpf_prog_info))
2705 memset(((void *)(&info_linear->info)) + info_len, 0,
2706 sizeof(struct bpf_prog_info) - info_len);
2708 if (__do_read(ff, info_linear->data, data_len))
2711 info_node = malloc(sizeof(struct bpf_prog_info_node));
2715 /* after reading from file, translate offset to address */
2716 bpf_program__bpil_offs_to_addr(info_linear);
2717 info_node->info_linear = info_linear;
2718 perf_env__insert_bpf_prog_info(env, info_node);
2721 up_write(&env->bpf_progs.lock);
2726 up_write(&env->bpf_progs.lock);
2729 #else // HAVE_LIBBPF_SUPPORT
2730 static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2734 #endif // HAVE_LIBBPF_SUPPORT
2736 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2738 struct perf_env *env = &ff->ph->env;
2739 struct btf_node *node = NULL;
2743 if (ff->ph->needs_swap) {
2744 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2748 if (do_read_u32(ff, &count))
2751 down_write(&env->bpf_progs.lock);
2753 for (i = 0; i < count; ++i) {
2756 if (do_read_u32(ff, &id))
2758 if (do_read_u32(ff, &data_size))
2761 node = malloc(sizeof(struct btf_node) + data_size);
2766 node->data_size = data_size;
2768 if (__do_read(ff, node->data, data_size))
2771 perf_env__insert_btf(env, node);
2777 up_write(&env->bpf_progs.lock);
2782 static int process_compressed(struct feat_fd *ff,
2783 void *data __maybe_unused)
2785 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2788 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2791 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2794 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2797 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2803 struct feature_ops {
2804 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
2805 void (*print)(struct feat_fd *ff, FILE *fp);
2806 int (*process)(struct feat_fd *ff, void *data);
2812 #define FEAT_OPR(n, func, __full_only) \
2814 .name = __stringify(n), \
2815 .write = write_##func, \
2816 .print = print_##func, \
2817 .full_only = __full_only, \
2818 .process = process_##func, \
2819 .synthesize = true \
2822 #define FEAT_OPN(n, func, __full_only) \
2824 .name = __stringify(n), \
2825 .write = write_##func, \
2826 .print = print_##func, \
2827 .full_only = __full_only, \
2828 .process = process_##func \
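/*
 * The only difference between the two: FEAT_OPR also sets .synthesize,
 * marking the feature as one that can be synthesized as a user event,
 * e.g. in pipe mode; FEAT_OPN features cannot.
 */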
2831 /* feature_ops not implemented: */
2832 #define print_tracing_data NULL
2833 #define print_build_id NULL
2835 #define process_branch_stack NULL
2836 #define process_stat NULL
2839 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2840 FEAT_OPN(TRACING_DATA, tracing_data, false),
2841 FEAT_OPN(BUILD_ID, build_id, false),
2842 FEAT_OPR(HOSTNAME, hostname, false),
2843 FEAT_OPR(OSRELEASE, osrelease, false),
2844 FEAT_OPR(VERSION, version, false),
2845 FEAT_OPR(ARCH, arch, false),
2846 FEAT_OPR(NRCPUS, nrcpus, false),
2847 FEAT_OPR(CPUDESC, cpudesc, false),
2848 FEAT_OPR(CPUID, cpuid, false),
2849 FEAT_OPR(TOTAL_MEM, total_mem, false),
2850 FEAT_OPR(EVENT_DESC, event_desc, false),
2851 FEAT_OPR(CMDLINE, cmdline, false),
2852 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
2853 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
2854 FEAT_OPN(BRANCH_STACK, branch_stack, false),
2855 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
2856 FEAT_OPR(GROUP_DESC, group_desc, false),
2857 FEAT_OPN(AUXTRACE, auxtrace, false),
2858 FEAT_OPN(STAT, stat, false),
2859 FEAT_OPN(CACHE, cache, true),
2860 FEAT_OPR(SAMPLE_TIME, sample_time, false),
2861 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
2862 FEAT_OPR(CLOCKID, clockid, false),
2863 FEAT_OPN(DIR_FORMAT, dir_format, false),
2864 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
2865 FEAT_OPR(BPF_BTF, bpf_btf, false),
2866 FEAT_OPR(COMPRESSED, compressed, false),
2869 struct header_print_data {
2871 bool full; /* extended list of headers */
2874 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2875 struct perf_header *ph,
2876 int feat, int fd, void *data)
2878 struct header_print_data *hd = data;
2881 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2882 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2883 "%d, continuing...\n", section->offset, feat);
2886 if (feat >= HEADER_LAST_FEATURE) {
2887 pr_warning("unknown feature %d\n", feat);
2890 if (!feat_ops[feat].print)
2893 ff = (struct feat_fd) {
2898 if (!feat_ops[feat].full_only || hd->full)
2899 feat_ops[feat].print(&ff, hd->fp);
2901 fprintf(hd->fp, "# %s info available, use -I to display\n",
2902 feat_ops[feat].name);
2907 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2909 struct header_print_data hd;
2910 struct perf_header *header = &session->header;
2911 int fd = perf_data__fd(session->data);
2919 ret = fstat(fd, &st);
2923 stctime = st.st_ctime;
2924 fprintf(fp, "# captured on : %s", ctime(&stctime));
2926 fprintf(fp, "# header version : %u\n", header->version);
2927 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
2928 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
2929 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
2931 perf_header__process_sections(header, fd, &hd,
2932 perf_file_section__fprintf_info);
2934 if (session->data->is_pipe)
2937 fprintf(fp, "# missing features: ");
2938 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2940 fprintf(fp, "%s ", feat_ops[bit].name);
2947 static int do_write_feat(struct feat_fd *ff, int type,
2948 struct perf_file_section **p,
2949 struct perf_evlist *evlist)
2954 if (perf_header__has_feat(ff->ph, type)) {
2955 if (!feat_ops[type].write)
2958 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2961 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2963 err = feat_ops[type].write(ff, evlist);
2965 pr_debug("failed to write feature %s\n", feat_ops[type].name);
2967 /* undo anything written */
2968 lseek(ff->fd, (*p)->offset, SEEK_SET);
2972 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2978 static int perf_header__adds_write(struct perf_header *header,
2979 struct perf_evlist *evlist, int fd)
2983 struct perf_file_section *feat_sec, *p;
2989 ff = (struct feat_fd){
2994 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2998 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2999 if (feat_sec == NULL)
3002 sec_size = sizeof(*feat_sec) * nr_sections;
3004 sec_start = header->feat_offset;
3005 lseek(fd, sec_start + sec_size, SEEK_SET);
3007 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3008 if (do_write_feat(&ff, feat, &p, evlist))
3009 perf_header__clear_feat(header, feat);
3012 lseek(fd, sec_start, SEEK_SET);
3014 * may write more than needed due to dropped features, but
3015 * this is okay: the reader will skip the missing entries
3017 err = do_write(&ff, feat_sec, sec_size);
3019 pr_debug("failed to write feature section\n");
3024 int perf_header__write_pipe(int fd)
3026 struct perf_pipe_file_header f_header;
3030 ff = (struct feat_fd){ .fd = fd };
3032 f_header = (struct perf_pipe_file_header){
3033 .magic = PERF_MAGIC,
3034 .size = sizeof(f_header),
3037 err = do_write(&ff, &f_header, sizeof(f_header));
3039 pr_debug("failed to write perf pipe header\n");
3046 int perf_session__write_header(struct perf_session *session,
3047 struct perf_evlist *evlist,
3048 int fd, bool at_exit)
3050 struct perf_file_header f_header;
3051 struct perf_file_attr f_attr;
3052 struct perf_header *header = &session->header;
3053 struct perf_evsel *evsel;
3058 ff = (struct feat_fd){ .fd = fd };
3059 lseek(fd, sizeof(f_header), SEEK_SET);
3061 evlist__for_each_entry(session->evlist, evsel) {
3062 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
3063 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
3065 pr_debug("failed to write perf header\n");
3070 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
3072 evlist__for_each_entry(evlist, evsel) {
3073 f_attr = (struct perf_file_attr){
3074 .attr = evsel->attr,
3076 .offset = evsel->id_offset,
3077 .size = evsel->ids * sizeof(u64),
3080 err = do_write(&ff, &f_attr, sizeof(f_attr));
3082 pr_debug("failed to write perf header attribute\n");
3087 if (!header->data_offset)
3088 header->data_offset = lseek(fd, 0, SEEK_CUR);
3089 header->feat_offset = header->data_offset + header->data_size;
3092 err = perf_header__adds_write(header, evlist, fd);
3097 f_header = (struct perf_file_header){
3098 .magic = PERF_MAGIC,
3099 .size = sizeof(f_header),
3100 .attr_size = sizeof(f_attr),
3102 .offset = attr_offset,
3103 .size = evlist->nr_entries * sizeof(f_attr),
3106 .offset = header->data_offset,
3107 .size = header->data_size,
3109 /* event_types is ignored, store zeros */
3112 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3114 lseek(fd, 0, SEEK_SET);
3115 err = do_write(&ff, &f_header, sizeof(f_header));
3117 pr_debug("failed to write perf header\n");
3120 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
3125 static int perf_header__getbuffer64(struct perf_header *header,
3126 int fd, void *buf, size_t size)
3128 if (readn(fd, buf, size) <= 0)
3131 if (header->needs_swap)
3132 mem_bswap_64(buf, size);
3137 int perf_header__process_sections(struct perf_header *header, int fd,
3139 int (*process)(struct perf_file_section *section,
3140 struct perf_header *ph,
3141 int feat, int fd, void *data))
3143 struct perf_file_section *feat_sec, *sec;
3149 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3153 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
3157 sec_size = sizeof(*feat_sec) * nr_sections;
3159 lseek(fd, header->feat_offset, SEEK_SET);
3161 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3165 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3166 err = process(sec++, header, feat, fd, data);
3176 static const int attr_file_abi_sizes[] = {
3177 [0] = PERF_ATTR_SIZE_VER0,
3178 [1] = PERF_ATTR_SIZE_VER1,
3179 [2] = PERF_ATTR_SIZE_VER2,
3180 [3] = PERF_ATTR_SIZE_VER3,
3181 [4] = PERF_ATTR_SIZE_VER4,
3182 0, /* the size-scan loops below rely on this 0 terminator */
3186 * In the legacy file format, the magic number does not encode endianness;
3187 * hdr_sz was used for that instead. But given that hdr_sz can vary based
3188 * on ABI revisions, we need to try all combinations for both endiannesses
3189 * to detect the endianness.
3191 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3193 uint64_t ref_size, attr_size;
3196 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3197 ref_size = attr_file_abi_sizes[i]
3198 + sizeof(struct perf_file_section);
3199 if (hdr_sz != ref_size) {
3200 attr_size = bswap_64(hdr_sz);
3201 if (attr_size != ref_size)
3204 ph->needs_swap = true;
3206 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3211 /* could not determine endianness */
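/*
 * Worked example: for ABI0, ref_size is PERF_ATTR_SIZE_VER0 (64) plus
 * sizeof(struct perf_file_section) (two u64s, 16), i.e. 80. A file written
 * on an opposite-endian host stores attr_size as bswap_64(80), so the
 * second comparison above catches it and sets ph->needs_swap.
 */
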
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the host recording the samples and of the host parsing
 * them is the same. This is not always the case, given that the pipe
 * output can be redirected into a file and analyzed on a different
 * machine with a possibly different endianness and perf_event ABI
 * revision in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != hdr_sz)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}

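/*
 * Note: __perf_magic2 (0x32454c4946524550) is the byte sequence "PERFILE2"
 * when read on a little-endian host; __perf_magic2_sw is the same constant
 * byte-swapped. Whichever of the two matches tells us whether the file and
 * the reading host agree on endianness.
 */
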
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}

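/*
 * Example (illustrative sketch, names are assumptions): validating a
 * perf.data file by hand with the reader above.
 *
 *	struct perf_file_header f_header;
 *	struct perf_header ph = { .needs_swap = false };
 *	int fd = open("perf.data", O_RDONLY);
 *
 *	if (fd >= 0 && perf_file_header__read(&f_header, &ph, fd) == 0)
 *		pr_debug("data at %" PRIu64 ", needs_swap=%d\n",
 *			 ph.data_offset, ph.needs_swap);
 */
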
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}

static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;

		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

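/*
 * Compatibility note: because f_attr is zeroed first, an older on-file
 * attr (sz < our_sz) simply leaves the newer tail fields of
 * perf_event_attr as zero, which is the ABI-defined meaning of "absent".
 * Files newer than this tool are rejected above rather than truncated.
 */
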
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct tep_handle *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

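/*
 * Example (illustrative sketch): perf_session__new() normally drives this
 * reader; a minimal manual invocation, assuming "data" is an already-open
 * struct perf_data, might look like:
 *
 *	struct perf_session session = { .data = data };
 *
 *	if (perf_session__read_header(&session) < 0)
 *		pr_err("incompatible file format\n");
 */
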
int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);
	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	/* if the u16 size field overflowed, bail out with -E2BIG */
	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

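/*
 * Example (hypothetical callback, for illustration only): forwarding the
 * synthesized PERF_RECORD_HEADER_ATTR record to a pipe consumer.
 *
 *	static int forward_event(struct perf_tool *tool __maybe_unused,
 *				 union perf_event *ev,
 *				 struct perf_sample *sample __maybe_unused,
 *				 struct machine *machine __maybe_unused)
 *	{
 *		return writen(STDOUT_FILENO, ev, ev->header.size) < 0 ? -1 : 0;
 *	}
 *
 *	perf_event__synthesize_attr(NULL, &evsel->attr, evsel->ids,
 *				    evsel->id, forward_event);
 */
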
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header: %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}

int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(event->header);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct event_update_event *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

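/*
 * Worked example: a 7-byte payload (e.g. the unit string "Joules" plus its
 * NUL) yields size = sizeof(*ev) + 7, rounded up to the next multiple of 8
 * by PERF_ALIGN() -- with the usual 24-byte event_update_event, 31 becomes
 * 32 -- so header.size stays u64-aligned on the wire.
 */
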
int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct event_update_event_scale *) ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *) ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->name, len + 1);
	err = process(tool, (union perf_event *) ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id   = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event *) ev, NULL, NULL);
	free(ev);
	return err;
}

size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *) ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *) ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

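/*
 * Sample output (sketch) for a scale update, as printed by the function
 * above:
 *
 *	... id: 1234
 *	... scale: 0.000001
 */
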
int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_evlist *evlist,
				 perf_event__handler_t process)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
						  evsel->id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

static bool has_unit(struct perf_evsel *counter)
{
	return counter->unit && *counter->unit;
}

static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}

int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct perf_evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct perf_evsel *counter;
	int err;

	/*
	 * Synthesize other event details not carried within the attr
	 * event: unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported)
			continue;

		/*
		 * Synthesize unit and scale only if they are defined.
		 */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (counter->own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_evlist **pevlist)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	struct cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *) ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *) ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}

int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to a temp file
	 * - get/write the data size to the pipe
	 * - write the tracing data from the temp file to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in the temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

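/*
 * Worked example: tracing data of 4135 bytes is advertised as
 * aligned_size = PERF_ALIGN(4135, 8) = 4136, and write_padded() appends
 * the single zero byte of padding after tracing_data_put() has streamed
 * the payload, keeping the pipe u64-aligned for the next record.
 */
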
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}

int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}