perf tools: Include errno.h where needed
[linux-2.6-block.git] tools/perf/util/header.c
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/types.h>
6 #include <byteswap.h>
7 #include <unistd.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <linux/list.h>
11 #include <linux/kernel.h>
12 #include <linux/bitops.h>
13 #include <sys/utsname.h>
14
15 #include "evlist.h"
16 #include "evsel.h"
17 #include "header.h"
18 #include "../perf.h"
19 #include "trace-event.h"
20 #include "session.h"
21 #include "symbol.h"
22 #include "debug.h"
23 #include "cpumap.h"
24 #include "pmu.h"
25 #include "vdso.h"
26 #include "strbuf.h"
27 #include "build-id.h"
28 #include "data.h"
29 #include <api/fs/fs.h>
30 #include "asm/bug.h"
31
32 #include "sane_ctype.h"
33
34 /*
35  * magic2 = "PERFILE2"
36  * It is stored as a numerical value so that the endianness of the
37  * writer determines its memory layout. That way we are able
38  * to detect the endianness when reading the perf.data file
39  * back.
40  *
41  * We also check for the legacy (PERFFILE) format.
42  */
43 static const char *__perf_magic1 = "PERFFILE";
44 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
45 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
46
47 #define PERF_MAGIC      __perf_magic2
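
/*
 * For example, on a little-endian writer the u64 __perf_magic2
 * (0x32454c4946524550) is laid out in memory as the bytes
 * 'P' 'E' 'R' 'F' 'I' 'L' 'E' '2'.  A reader can therefore detect the
 * file's endianness with a sketch like the following, assuming 'magic'
 * holds the first 8 bytes of the header loaded as a u64:
 *
 *	if (magic == __perf_magic2)
 *		needs_swap = false;	same endianness as the writer
 *	else if (magic == __perf_magic2_sw)
 *		needs_swap = true;	opposite endianness, swap all fields
 *	else
 *		try the legacy "PERFFILE" magic
 */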
48
49 const char perf_version_string[] = PERF_VERSION;
50
51 struct perf_file_attr {
52         struct perf_event_attr  attr;
53         struct perf_file_section        ids;
54 };
55
56 void perf_header__set_feat(struct perf_header *header, int feat)
57 {
58         set_bit(feat, header->adds_features);
59 }
60
61 void perf_header__clear_feat(struct perf_header *header, int feat)
62 {
63         clear_bit(feat, header->adds_features);
64 }
65
66 bool perf_header__has_feat(const struct perf_header *header, int feat)
67 {
68         return test_bit(feat, header->adds_features);
69 }
70
71 static int do_write(int fd, const void *buf, size_t size)
72 {
73         while (size) {
74                 int ret = write(fd, buf, size);
75
76                 if (ret < 0)
77                         return -errno;
78
79                 size -= ret;
80                 buf += ret;
81         }
82
83         return 0;
84 }
85
86 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
87 {
88         static const char zero_buf[NAME_ALIGN];
89         int err = do_write(fd, bf, count);
90
91         if (!err)
92                 err = do_write(fd, zero_buf, count_aligned - count);
93
94         return err;
95 }
96
97 #define string_size(str)                                                \
98         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
99
100 static int do_write_string(int fd, const char *str)
101 {
102         u32 len, olen;
103         int ret;
104
105         olen = strlen(str) + 1;
106         len = PERF_ALIGN(olen, NAME_ALIGN);
107
108         /* write len, incl. \0 */
109         ret = do_write(fd, &len, sizeof(len));
110         if (ret < 0)
111                 return ret;
112
113         return write_padded(fd, str, olen, len);
114 }
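
/*
 * On-disk string layout produced by do_write_string() above (a sketch,
 * assuming NAME_ALIGN is 64 as elsewhere in perf):
 *
 *	u32  len;         padded length, including the trailing '\0'
 *	char str[len];    the string, zero-padded up to len bytes
 *
 * e.g. the arch string "x86_64" (7 bytes with its '\0') is stored as
 * len = 64 followed by those 7 bytes and 57 zero bytes of padding.
 * do_read_string() below relies on that padding to recover the string,
 * whose strlen() may be shorter than len.
 */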
115
116 static char *do_read_string(int fd, struct perf_header *ph)
117 {
118         ssize_t sz, ret;
119         u32 len;
120         char *buf;
121
122         sz = readn(fd, &len, sizeof(len));
123         if (sz < (ssize_t)sizeof(len))
124                 return NULL;
125
126         if (ph->needs_swap)
127                 len = bswap_32(len);
128
129         buf = malloc(len);
130         if (!buf)
131                 return NULL;
132
133         ret = readn(fd, buf, len);
134         if (ret == (ssize_t)len) {
135                 /*
136                  * strings are padded by zeroes
137                  * thus the actual strlen of buf
138                  * may be less than len
139                  */
140                 return buf;
141         }
142
143         free(buf);
144         return NULL;
145 }
146
147 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
148                             struct perf_evlist *evlist)
149 {
150         return read_tracing_data(fd, &evlist->entries);
151 }
152
153
154 static int write_build_id(int fd, struct perf_header *h,
155                           struct perf_evlist *evlist __maybe_unused)
156 {
157         struct perf_session *session;
158         int err;
159
160         session = container_of(h, struct perf_session, header);
161
162         if (!perf_session__read_build_ids(session, true))
163                 return -1;
164
165         err = perf_session__write_buildid_table(session, fd);
166         if (err < 0) {
167                 pr_debug("failed to write buildid table\n");
168                 return err;
169         }
170         perf_session__cache_build_ids(session);
171
172         return 0;
173 }
174
175 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
176                           struct perf_evlist *evlist __maybe_unused)
177 {
178         struct utsname uts;
179         int ret;
180
181         ret = uname(&uts);
182         if (ret < 0)
183                 return -1;
184
185         return do_write_string(fd, uts.nodename);
186 }
187
188 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
189                            struct perf_evlist *evlist __maybe_unused)
190 {
191         struct utsname uts;
192         int ret;
193
194         ret = uname(&uts);
195         if (ret < 0)
196                 return -1;
197
198         return do_write_string(fd, uts.release);
199 }
200
201 static int write_arch(int fd, struct perf_header *h __maybe_unused,
202                       struct perf_evlist *evlist __maybe_unused)
203 {
204         struct utsname uts;
205         int ret;
206
207         ret = uname(&uts);
208         if (ret < 0)
209                 return -1;
210
211         return do_write_string(fd, uts.machine);
212 }
213
214 static int write_version(int fd, struct perf_header *h __maybe_unused,
215                          struct perf_evlist *evlist __maybe_unused)
216 {
217         return do_write_string(fd, perf_version_string);
218 }
219
220 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
221 {
222         FILE *file;
223         char *buf = NULL;
224         char *s, *p;
225         const char *search = cpuinfo_proc;
226         size_t len = 0;
227         int ret = -1;
228
229         if (!search)
230                 return -1;
231
232         file = fopen("/proc/cpuinfo", "r");
233         if (!file)
234                 return -1;
235
236         while (getline(&buf, &len, file) > 0) {
237                 ret = strncmp(buf, search, strlen(search));
238                 if (!ret)
239                         break;
240         }
241
242         if (ret) {
243                 ret = -1;
244                 goto done;
245         }
246
247         s = buf;
248
249         p = strchr(buf, ':');
250         if (p && *(p+1) == ' ' && *(p+2))
251                 s = p + 2;
252         p = strchr(s, '\n');
253         if (p)
254                 *p = '\0';
255
256         /* squash extra space characters (branding string) */
257         p = s;
258         while (*p) {
259                 if (isspace(*p)) {
260                         char *r = p + 1;
261                         char *q = r;
262                         *p = ' ';
263                         while (*q && isspace(*q))
264                                 q++;
265                         if (q != (p+1))
266                                 while ((*r++ = *q++));
267                 }
268                 p++;
269         }
270         ret = do_write_string(fd, s);
271 done:
272         free(buf);
273         fclose(file);
274         return ret;
275 }
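
/*
 * Example: with CPUINFO_PROC "model name", the /proc/cpuinfo line
 *
 *	model name	: Intel(R) Xeon(R) CPU  E5-2630 v4 @ 2.20GHz
 *
 * is reduced to "Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz" (value after
 * the ": ", newline stripped, runs of whitespace squashed to one space)
 * before being written with do_write_string().
 */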
276
277 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
278                        struct perf_evlist *evlist __maybe_unused)
279 {
280 #ifndef CPUINFO_PROC
281 #define CPUINFO_PROC {"model name", }
282 #endif
283         const char *cpuinfo_procs[] = CPUINFO_PROC;
284         unsigned int i;
285
286         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
287                 int ret;
288                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
289                 if (ret >= 0)
290                         return ret;
291         }
292         return -1;
293 }
294
295
296 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
297                         struct perf_evlist *evlist __maybe_unused)
298 {
299         long nr;
300         u32 nrc, nra;
301         int ret;
302
303         nrc = cpu__max_present_cpu();
304
305         nr = sysconf(_SC_NPROCESSORS_ONLN);
306         if (nr < 0)
307                 return -1;
308
309         nra = (u32)(nr & UINT_MAX);
310
311         ret = do_write(fd, &nrc, sizeof(nrc));
312         if (ret < 0)
313                 return ret;
314
315         return do_write(fd, &nra, sizeof(nra));
316 }
317
318 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
319                             struct perf_evlist *evlist)
320 {
321         struct perf_evsel *evsel;
322         u32 nre, nri, sz;
323         int ret;
324
325         nre = evlist->nr_entries;
326
327         /*
328          * write number of events
329          */
330         ret = do_write(fd, &nre, sizeof(nre));
331         if (ret < 0)
332                 return ret;
333
334         /*
335          * size of perf_event_attr struct
336          */
337         sz = (u32)sizeof(evsel->attr);
338         ret = do_write(fd, &sz, sizeof(sz));
339         if (ret < 0)
340                 return ret;
341
342         evlist__for_each_entry(evlist, evsel) {
343                 ret = do_write(fd, &evsel->attr, sz);
344                 if (ret < 0)
345                         return ret;
346                 /*
347                  * write the number of unique ids for this event;
348                  * there is one id per instance of an event
349                  *
350                  * copy into nri to be independent of the
351                  * type of evsel->ids
352                  */
353                 nri = evsel->ids;
354                 ret = do_write(fd, &nri, sizeof(nri));
355                 if (ret < 0)
356                         return ret;
357
358                 /*
359                  * write event string as passed on cmdline
360                  */
361                 ret = do_write_string(fd, perf_evsel__name(evsel));
362                 if (ret < 0)
363                         return ret;
364                 /*
365                  * write unique ids for this event
366                  */
367                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
368                 if (ret < 0)
369                         return ret;
370         }
371         return 0;
372 }
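
/*
 * Resulting EVENT_DESC section layout (a sketch of what the writes
 * above produce, strings in the do_write_string() encoding):
 *
 *	u32	nre;                       number of events
 *	u32	sz;                        sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;   sz bytes
 *		u32	nr_ids;
 *		char	name[];                event name as padded string
 *		u64	ids[nr_ids];
 *	} events[nre];
 *
 * read_event_desc() further down parses this back into perf_evsels.
 */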
373
374 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
375                          struct perf_evlist *evlist __maybe_unused)
376 {
377         char buf[MAXPATHLEN];
378         u32 n;
379         int i, ret;
380
381         /* actual path to perf binary */
382         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
383         if (ret <= 0)
384                 return -1;
385
386         /* readlink() does not add null termination */
387         buf[ret] = '\0';
388
389         /* account for binary path */
390         n = perf_env.nr_cmdline + 1;
391
392         ret = do_write(fd, &n, sizeof(n));
393         if (ret < 0)
394                 return ret;
395
396         ret = do_write_string(fd, buf);
397         if (ret < 0)
398                 return ret;
399
400         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
401                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
402                 if (ret < 0)
403                         return ret;
404         }
405         return 0;
406 }
407
408 #define CORE_SIB_FMT \
409         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
410 #define THRD_SIB_FMT \
411         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
412
413 struct cpu_topo {
414         u32 cpu_nr;
415         u32 core_sib;
416         u32 thread_sib;
417         char **core_siblings;
418         char **thread_siblings;
419 };
420
421 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
422 {
423         FILE *fp;
424         char filename[MAXPATHLEN];
425         char *buf = NULL, *p;
426         size_t len = 0;
427         ssize_t sret;
428         u32 i = 0;
429         int ret = -1;
430
431         sprintf(filename, CORE_SIB_FMT, cpu);
432         fp = fopen(filename, "r");
433         if (!fp)
434                 goto try_threads;
435
436         sret = getline(&buf, &len, fp);
437         fclose(fp);
438         if (sret <= 0)
439                 goto try_threads;
440
441         p = strchr(buf, '\n');
442         if (p)
443                 *p = '\0';
444
445         for (i = 0; i < tp->core_sib; i++) {
446                 if (!strcmp(buf, tp->core_siblings[i]))
447                         break;
448         }
449         if (i == tp->core_sib) {
450                 tp->core_siblings[i] = buf;
451                 tp->core_sib++;
452                 buf = NULL;
453                 len = 0;
454         }
455         ret = 0;
456
457 try_threads:
458         sprintf(filename, THRD_SIB_FMT, cpu);
459         fp = fopen(filename, "r");
460         if (!fp)
461                 goto done;
462
463         if (getline(&buf, &len, fp) <= 0)
464                 goto done;
465
466         p = strchr(buf, '\n');
467         if (p)
468                 *p = '\0';
469
470         for (i = 0; i < tp->thread_sib; i++) {
471                 if (!strcmp(buf, tp->thread_siblings[i]))
472                         break;
473         }
474         if (i == tp->thread_sib) {
475                 tp->thread_siblings[i] = buf;
476                 tp->thread_sib++;
477                 buf = NULL;
478         }
479         ret = 0;
480 done:
481         if (fp)
482                 fclose(fp);
483         free(buf);
484         return ret;
485 }
486
487 static void free_cpu_topo(struct cpu_topo *tp)
488 {
489         u32 i;
490
491         if (!tp)
492                 return;
493
494         for (i = 0 ; i < tp->core_sib; i++)
495                 zfree(&tp->core_siblings[i]);
496
497         for (i = 0 ; i < tp->thread_sib; i++)
498                 zfree(&tp->thread_siblings[i]);
499
500         free(tp);
501 }
502
503 static struct cpu_topo *build_cpu_topology(void)
504 {
505         struct cpu_topo *tp = NULL;
506         void *addr;
507         u32 nr, i;
508         size_t sz;
509         long ncpus;
510         int ret = -1;
511         struct cpu_map *map;
512
513         ncpus = cpu__max_present_cpu();
514
515         /* build online CPU map */
516         map = cpu_map__new(NULL);
517         if (map == NULL) {
518                 pr_debug("failed to get system cpumap\n");
519                 return NULL;
520         }
521
522         nr = (u32)(ncpus & UINT_MAX);
523
524         sz = nr * sizeof(char *);
525         addr = calloc(1, sizeof(*tp) + 2 * sz);
526         if (!addr)
527                 goto out_free;
528
529         tp = addr;
530         tp->cpu_nr = nr;
531         addr += sizeof(*tp);
532         tp->core_siblings = addr;
533         addr += sz;
534         tp->thread_siblings = addr;
535
536         for (i = 0; i < nr; i++) {
537                 if (!cpu_map__has(map, i))
538                         continue;
539
540                 ret = build_cpu_topo(tp, i);
541                 if (ret < 0)
542                         break;
543         }
544
545 out_free:
546         cpu_map__put(map);
547         if (ret) {
548                 free_cpu_topo(tp);
549                 tp = NULL;
550         }
551         return tp;
552 }
553
554 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
555                           struct perf_evlist *evlist __maybe_unused)
556 {
557         struct cpu_topo *tp;
558         u32 i;
559         int ret, j;
560
561         tp = build_cpu_topology();
562         if (!tp)
563                 return -1;
564
565         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
566         if (ret < 0)
567                 goto done;
568
569         for (i = 0; i < tp->core_sib; i++) {
570                 ret = do_write_string(fd, tp->core_siblings[i]);
571                 if (ret < 0)
572                         goto done;
573         }
574         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
575         if (ret < 0)
576                 goto done;
577
578         for (i = 0; i < tp->thread_sib; i++) {
579                 ret = do_write_string(fd, tp->thread_siblings[i]);
580                 if (ret < 0)
581                         break;
582         }
583
584         ret = perf_env__read_cpu_topology_map(&perf_env);
585         if (ret < 0)
586                 goto done;
587
588         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
589                 ret = do_write(fd, &perf_env.cpu[j].core_id,
590                                sizeof(perf_env.cpu[j].core_id));
591                 if (ret < 0)
592                         return ret;
593                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
594                                sizeof(perf_env.cpu[j].socket_id));
595                 if (ret < 0)
596                         return ret;
597         }
598 done:
599         free_cpu_topo(tp);
600         return ret;
601 }
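
/*
 * Resulting CPU_TOPOLOGY section layout (sketch):
 *
 *	u32	core_sib;                       number of core sibling lists
 *	char	core_siblings[core_sib][];      padded strings
 *	u32	thread_sib;                     number of thread sibling lists
 *	char	thread_siblings[thread_sib][];  padded strings
 *	struct {
 *		u32	core_id;
 *		u32	socket_id;
 *	} cpu[nr_cpus_avail];                   appended by newer perf only
 *
 * process_cpu_topology() uses section->size to tell whether the trailing
 * per-cpu block is present.
 */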
602
603
604
605 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
606                           struct perf_evlist *evlist __maybe_unused)
607 {
608         char *buf = NULL;
609         FILE *fp;
610         size_t len = 0;
611         int ret = -1, n;
612         uint64_t mem;
613
614         fp = fopen("/proc/meminfo", "r");
615         if (!fp)
616                 return -1;
617
618         while (getline(&buf, &len, fp) > 0) {
619                 ret = strncmp(buf, "MemTotal:", 9);
620                 if (!ret)
621                         break;
622         }
623         if (!ret) {
624                 n = sscanf(buf, "%*s %"PRIu64, &mem);
625                 if (n == 1)
626                         ret = do_write(fd, &mem, sizeof(mem));
627         } else
628                 ret = -1;
629         free(buf);
630         fclose(fp);
631         return ret;
632 }
633
634 static int write_topo_node(int fd, int node)
635 {
636         char str[MAXPATHLEN];
637         char field[32];
638         char *buf = NULL, *p;
639         size_t len = 0;
640         FILE *fp;
641         u64 mem_total, mem_free, mem;
642         int ret = -1;
643
644         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
645         fp = fopen(str, "r");
646         if (!fp)
647                 return -1;
648
649         while (getline(&buf, &len, fp) > 0) {
650                 /* skip over invalid lines */
651                 if (!strchr(buf, ':'))
652                         continue;
653                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
654                         goto done;
655                 if (!strcmp(field, "MemTotal:"))
656                         mem_total = mem;
657                 if (!strcmp(field, "MemFree:"))
658                         mem_free = mem;
659         }
660
661         fclose(fp);
662         fp = NULL;
663
664         ret = do_write(fd, &mem_total, sizeof(u64));
665         if (ret)
666                 goto done;
667
668         ret = do_write(fd, &mem_free, sizeof(u64));
669         if (ret)
670                 goto done;
671
672         ret = -1;
673         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
674
675         fp = fopen(str, "r");
676         if (!fp)
677                 goto done;
678
679         if (getline(&buf, &len, fp) <= 0)
680                 goto done;
681
682         p = strchr(buf, '\n');
683         if (p)
684                 *p = '\0';
685
686         ret = do_write_string(fd, buf);
687 done:
688         free(buf);
689         if (fp)
690                 fclose(fp);
691         return ret;
692 }
693
694 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
695                           struct perf_evlist *evlist __maybe_unused)
696 {
697         char *buf = NULL;
698         size_t len = 0;
699         FILE *fp;
700         struct cpu_map *node_map = NULL;
701         char *c;
702         u32 nr, i, j;
703         int ret = -1;
704
705         fp = fopen("/sys/devices/system/node/online", "r");
706         if (!fp)
707                 return -1;
708
709         if (getline(&buf, &len, fp) <= 0)
710                 goto done;
711
712         c = strchr(buf, '\n');
713         if (c)
714                 *c = '\0';
715
716         node_map = cpu_map__new(buf);
717         if (!node_map)
718                 goto done;
719
720         nr = (u32)node_map->nr;
721
722         ret = do_write(fd, &nr, sizeof(nr));
723         if (ret < 0)
724                 goto done;
725
726         for (i = 0; i < nr; i++) {
727                 j = (u32)node_map->map[i];
728                 ret = do_write(fd, &j, sizeof(j));
729                 if (ret < 0)
730                         break;
731
732                 ret = write_topo_node(fd, i);
733                 if (ret < 0)
734                         break;
735         }
736 done:
737         free(buf);
738         fclose(fp);
739         cpu_map__put(node_map);
740         return ret;
741 }
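
/*
 * Resulting NUMA_TOPOLOGY section layout (sketch):
 *
 *	u32	nr;                number of online nodes
 *	struct {
 *		u32	node;      node number
 *		u64	mem_total; in kB, from the node's meminfo
 *		u64	mem_free;  in kB, from the node's meminfo
 *		char	cpulist[]; padded string, e.g. "0-7,16-23"
 *	} nodes[nr];
 */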
742
743 /*
744  * File format:
745  *
746  * struct pmu_mappings {
747  *      u32     pmu_num;
748  *      struct pmu_map {
749  *              u32     type;
750  *              char    name[];
751  *      }[pmu_num];
752  * };
753  */
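
/*
 * Example instance, with values as they would typically appear on an
 * x86 box where the core PMU registers as PERF_TYPE_RAW:
 *
 *	pmu_num = 2
 *	{ type = 4, name = "cpu" }
 *	{ type = 1, name = "software" }
 *
 * name[] uses the padded string encoding of do_write_string(); pmu_num
 * is patched in last, once the PMUs have actually been counted.
 */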
754
755 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
756                               struct perf_evlist *evlist __maybe_unused)
757 {
758         struct perf_pmu *pmu = NULL;
759         off_t offset = lseek(fd, 0, SEEK_CUR);
760         __u32 pmu_num = 0;
761         int ret;
762
763         /* write real pmu_num later */
764         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
765         if (ret < 0)
766                 return ret;
767
768         while ((pmu = perf_pmu__scan(pmu))) {
769                 if (!pmu->name)
770                         continue;
771                 pmu_num++;
772
773                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
774                 if (ret < 0)
775                         return ret;
776
777                 ret = do_write_string(fd, pmu->name);
778                 if (ret < 0)
779                         return ret;
780         }
781
782         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
783                 /* discard all */
784                 lseek(fd, offset, SEEK_SET);
785                 return -1;
786         }
787
788         return 0;
789 }
790
791 /*
792  * File format:
793  *
794  * struct group_descs {
795  *      u32     nr_groups;
796  *      struct group_desc {
797  *              char    name[];
798  *              u32     leader_idx;
799  *              u32     nr_members;
800  *      }[nr_groups];
801  * };
802  */
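
/*
 * Example: 'perf record -e "{cycles,instructions}"' with the group
 * leader at index 0 would be recorded as
 *
 *	nr_groups = 1
 *	{ name = "{anon_group}", leader_idx = 0, nr_members = 2 }
 *
 * where name[] again uses the do_write_string() encoding.
 */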
803 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
804                             struct perf_evlist *evlist)
805 {
806         u32 nr_groups = evlist->nr_groups;
807         struct perf_evsel *evsel;
808         int ret;
809
810         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
811         if (ret < 0)
812                 return ret;
813
814         evlist__for_each_entry(evlist, evsel) {
815                 if (perf_evsel__is_group_leader(evsel) &&
816                     evsel->nr_members > 1) {
817                         const char *name = evsel->group_name ?: "{anon_group}";
818                         u32 leader_idx = evsel->idx;
819                         u32 nr_members = evsel->nr_members;
820
821                         ret = do_write_string(fd, name);
822                         if (ret < 0)
823                                 return ret;
824
825                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
826                         if (ret < 0)
827                                 return ret;
828
829                         ret = do_write(fd, &nr_members, sizeof(nr_members));
830                         if (ret < 0)
831                                 return ret;
832                 }
833         }
834         return 0;
835 }
836
837 /*
838  * default get_cpuid(): nothing gets recorded
839  * actual implementation must be in arch/$(ARCH)/util/header.c
840  */
841 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
842 {
843         return -1;
844 }
845
846 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
847                        struct perf_evlist *evlist __maybe_unused)
848 {
849         char buffer[64];
850         int ret;
851
852         ret = get_cpuid(buffer, sizeof(buffer));
853         if (!ret)
854                 goto write_it;
855
856         return -1;
857 write_it:
858         return do_write_string(fd, buffer);
859 }
860
861 static int write_branch_stack(int fd __maybe_unused,
862                               struct perf_header *h __maybe_unused,
863                        struct perf_evlist *evlist __maybe_unused)
864 {
865         return 0;
866 }
867
868 static int write_auxtrace(int fd, struct perf_header *h,
869                           struct perf_evlist *evlist __maybe_unused)
870 {
871         struct perf_session *session;
872         int err;
873
874         session = container_of(h, struct perf_session, header);
875
876         err = auxtrace_index__write(fd, &session->auxtrace_index);
877         if (err < 0)
878                 pr_err("Failed to write auxtrace index\n");
879         return err;
880 }
881
882 static int cpu_cache_level__sort(const void *a, const void *b)
883 {
884         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
885         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
886
887         return cache_a->level - cache_b->level;
888 }
889
890 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
891 {
892         if (a->level != b->level)
893                 return false;
894
895         if (a->line_size != b->line_size)
896                 return false;
897
898         if (a->sets != b->sets)
899                 return false;
900
901         if (a->ways != b->ways)
902                 return false;
903
904         if (strcmp(a->type, b->type))
905                 return false;
906
907         if (strcmp(a->size, b->size))
908                 return false;
909
910         if (strcmp(a->map, b->map))
911                 return false;
912
913         return true;
914 }
915
916 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
917 {
918         char path[PATH_MAX], file[PATH_MAX];
919         struct stat st;
920         size_t len;
921
922         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
923         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
924
925         if (stat(file, &st))
926                 return 1;
927
928         scnprintf(file, PATH_MAX, "%s/level", path);
929         if (sysfs__read_int(file, (int *) &cache->level))
930                 return -1;
931
932         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
933         if (sysfs__read_int(file, (int *) &cache->line_size))
934                 return -1;
935
936         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
937         if (sysfs__read_int(file, (int *) &cache->sets))
938                 return -1;
939
940         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
941         if (sysfs__read_int(file, (int *) &cache->ways))
942                 return -1;
943
944         scnprintf(file, PATH_MAX, "%s/type", path);
945         if (sysfs__read_str(file, &cache->type, &len))
946                 return -1;
947
948         cache->type[len] = 0;
949         cache->type = rtrim(cache->type);
950
951         scnprintf(file, PATH_MAX, "%s/size", path);
952         if (sysfs__read_str(file, &cache->size, &len)) {
953                 free(cache->type);
954                 return -1;
955         }
956
957         cache->size[len] = 0;
958         cache->size = rtrim(cache->size);
959
960         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
961         if (sysfs__read_str(file, &cache->map, &len)) {
962                 free(cache->size);
963                 free(cache->type);
964                 return -1;
965         }
966
967         cache->map[len] = 0;
968         cache->map = rtrim(cache->map);
969         return 0;
970 }
971
972 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
973 {
974         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
975 }
976
977 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
978 {
979         u32 i, cnt = 0;
980         long ncpus;
981         u32 nr, cpu;
982         u16 level;
983
984         ncpus = sysconf(_SC_NPROCESSORS_CONF);
985         if (ncpus < 0)
986                 return -1;
987
988         nr = (u32)(ncpus & UINT_MAX);
989
990         for (cpu = 0; cpu < nr; cpu++) {
991                 for (level = 0; level < 10; level++) {
992                         struct cpu_cache_level c;
993                         int err;
994
995                         err = cpu_cache_level__read(&c, cpu, level);
996                         if (err < 0)
997                                 return err;
998
999                         if (err == 1)
1000                                 break;
1001
1002                         for (i = 0; i < cnt; i++) {
1003                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1004                                         break;
1005                         }
1006
1007                         if (i == cnt)
1008                                 caches[cnt++] = c;
1009                         else
1010                                 cpu_cache_level__free(&c);
1011
1012                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1013                                 goto out;
1014                 }
1015         }
1016  out:
1017         *cntp = cnt;
1018         return 0;
1019 }
1020
1021 #define MAX_CACHES 2000
1022
1023 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1024                           struct perf_evlist *evlist __maybe_unused)
1025 {
1026         struct cpu_cache_level caches[MAX_CACHES];
1027         u32 cnt = 0, i, version = 1;
1028         int ret;
1029
1030         ret = build_caches(caches, MAX_CACHES, &cnt);
1031         if (ret)
1032                 goto out;
1033
1034         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1035
1036         ret = do_write(fd, &version, sizeof(u32));
1037         if (ret < 0)
1038                 goto out;
1039
1040         ret = do_write(fd, &cnt, sizeof(u32));
1041         if (ret < 0)
1042                 goto out;
1043
1044         for (i = 0; i < cnt; i++) {
1045                 struct cpu_cache_level *c = &caches[i];
1046
1047                 #define _W(v)                                   \
1048                         ret = do_write(fd, &c->v, sizeof(u32)); \
1049                         if (ret < 0)                            \
1050                                 goto out;
1051
1052                 _W(level)
1053                 _W(line_size)
1054                 _W(sets)
1055                 _W(ways)
1056                 #undef _W
1057
1058                 #define _W(v)                                           \
1059                         ret = do_write_string(fd, (const char *) c->v); \
1060                         if (ret < 0)                                    \
1061                                 goto out;
1062
1063                 _W(type)
1064                 _W(size)
1065                 _W(map)
1066                 #undef _W
1067         }
1068
1069 out:
1070         for (i = 0; i < cnt; i++)
1071                 cpu_cache_level__free(&caches[i]);
1072         return ret;
1073 }
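
/*
 * Resulting CACHE section layout (sketch):
 *
 *	u32	version;           currently 1
 *	u32	cnt;               number of unique cache levels
 *	struct {
 *		u32	level, line_size, sets, ways;
 *		char	type[], size[], map[];     padded strings
 *	} caches[cnt];
 */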
1074
1075 static int write_stat(int fd __maybe_unused,
1076                       struct perf_header *h __maybe_unused,
1077                       struct perf_evlist *evlist __maybe_unused)
1078 {
1079         return 0;
1080 }
1081
1082 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1083                            FILE *fp)
1084 {
1085         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1086 }
1087
1088 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1089                             FILE *fp)
1090 {
1091         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1092 }
1093
1094 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1095 {
1096         fprintf(fp, "# arch : %s\n", ph->env.arch);
1097 }
1098
1099 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1100                           FILE *fp)
1101 {
1102         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1103 }
1104
1105 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1106                          FILE *fp)
1107 {
1108         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1109         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1110 }
1111
1112 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1113                           FILE *fp)
1114 {
1115         fprintf(fp, "# perf version : %s\n", ph->env.version);
1116 }
1117
1118 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1119                           FILE *fp)
1120 {
1121         int nr, i;
1122
1123         nr = ph->env.nr_cmdline;
1124
1125         fprintf(fp, "# cmdline : ");
1126
1127         for (i = 0; i < nr; i++)
1128                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1129         fputc('\n', fp);
1130 }
1131
1132 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1133                                FILE *fp)
1134 {
1135         int nr, i;
1136         char *str;
1137         int cpu_nr = ph->env.nr_cpus_avail;
1138
1139         nr = ph->env.nr_sibling_cores;
1140         str = ph->env.sibling_cores;
1141
1142         for (i = 0; i < nr; i++) {
1143                 fprintf(fp, "# sibling cores   : %s\n", str);
1144                 str += strlen(str) + 1;
1145         }
1146
1147         nr = ph->env.nr_sibling_threads;
1148         str = ph->env.sibling_threads;
1149
1150         for (i = 0; i < nr; i++) {
1151                 fprintf(fp, "# sibling threads : %s\n", str);
1152                 str += strlen(str) + 1;
1153         }
1154
1155         if (ph->env.cpu != NULL) {
1156                 for (i = 0; i < cpu_nr; i++)
1157                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1158                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1159         } else
1160                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1161 }
1162
1163 static void free_event_desc(struct perf_evsel *events)
1164 {
1165         struct perf_evsel *evsel;
1166
1167         if (!events)
1168                 return;
1169
1170         for (evsel = events; evsel->attr.size; evsel++) {
1171                 zfree(&evsel->name);
1172                 zfree(&evsel->id);
1173         }
1174
1175         free(events);
1176 }
1177
1178 static struct perf_evsel *
1179 read_event_desc(struct perf_header *ph, int fd)
1180 {
1181         struct perf_evsel *evsel, *events = NULL;
1182         u64 *id;
1183         void *buf = NULL;
1184         u32 nre, sz, nr, i, j;
1185         ssize_t ret;
1186         size_t msz;
1187
1188         /* number of events */
1189         ret = readn(fd, &nre, sizeof(nre));
1190         if (ret != (ssize_t)sizeof(nre))
1191                 goto error;
1192
1193         if (ph->needs_swap)
1194                 nre = bswap_32(nre);
1195
1196         ret = readn(fd, &sz, sizeof(sz));
1197         if (ret != (ssize_t)sizeof(sz))
1198                 goto error;
1199
1200         if (ph->needs_swap)
1201                 sz = bswap_32(sz);
1202
1203         /* buffer to hold on file attr struct */
1204         buf = malloc(sz);
1205         if (!buf)
1206                 goto error;
1207
1208         /* the last event terminates with evsel->attr.size == 0: */
1209         events = calloc(nre + 1, sizeof(*events));
1210         if (!events)
1211                 goto error;
1212
1213         msz = sizeof(evsel->attr);
1214         if (sz < msz)
1215                 msz = sz;
1216
1217         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1218                 evsel->idx = i;
1219
1220                 /*
1221                  * must read entire on-file attr struct to
1222                  * sync up with layout.
1223                  */
1224                 ret = readn(fd, buf, sz);
1225                 if (ret != (ssize_t)sz)
1226                         goto error;
1227
1228                 if (ph->needs_swap)
1229                         perf_event__attr_swap(buf);
1230
1231                 memcpy(&evsel->attr, buf, msz);
1232
1233                 ret = readn(fd, &nr, sizeof(nr));
1234                 if (ret != (ssize_t)sizeof(nr))
1235                         goto error;
1236
1237                 if (ph->needs_swap) {
1238                         nr = bswap_32(nr);
1239                         evsel->needs_swap = true;
1240                 }
1241
1242                 evsel->name = do_read_string(fd, ph);
1243
1244                 if (!nr)
1245                         continue;
1246
1247                 id = calloc(nr, sizeof(*id));
1248                 if (!id)
1249                         goto error;
1250                 evsel->ids = nr;
1251                 evsel->id = id;
1252
1253                 for (j = 0 ; j < nr; j++) {
1254                         ret = readn(fd, id, sizeof(*id));
1255                         if (ret != (ssize_t)sizeof(*id))
1256                                 goto error;
1257                         if (ph->needs_swap)
1258                                 *id = bswap_64(*id);
1259                         id++;
1260                 }
1261         }
1262 out:
1263         free(buf);
1264         return events;
1265 error:
1266         free_event_desc(events);
1267         events = NULL;
1268         goto out;
1269 }
1270
1271 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1272                                 void *priv __attribute__((unused)))
1273 {
1274         return fprintf(fp, ", %s = %s", name, val);
1275 }
1276
1277 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1278 {
1279         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1280         u32 j;
1281         u64 *id;
1282
1283         if (!events) {
1284                 fprintf(fp, "# event desc: not available or unable to read\n");
1285                 return;
1286         }
1287
1288         for (evsel = events; evsel->attr.size; evsel++) {
1289                 fprintf(fp, "# event : name = %s", evsel->name);
1290
1291                 if (evsel->ids) {
1292                         fprintf(fp, ", id = {");
1293                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1294                                 if (j)
1295                                         fputc(',', fp);
1296                                 fprintf(fp, " %"PRIu64, *id);
1297                         }
1298                         fprintf(fp, " }");
1299                 }
1300
1301                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1302
1303                 fputc('\n', fp);
1304         }
1305
1306         free_event_desc(events);
1307 }
1308
1309 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1310                             FILE *fp)
1311 {
1312         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1313 }
1314
1315 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1316                                 FILE *fp)
1317 {
1318         int i;
1319         struct numa_node *n;
1320
1321         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1322                 n = &ph->env.numa_nodes[i];
1323
1324                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1325                             " free = %"PRIu64" kB\n",
1326                         n->node, n->mem_total, n->mem_free);
1327
1328                 fprintf(fp, "# node%u cpu list : ", n->node);
1329                 cpu_map__fprintf(n->map, fp);
1330         }
1331 }
1332
1333 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1334 {
1335         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1336 }
1337
1338 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1339                                int fd __maybe_unused, FILE *fp)
1340 {
1341         fprintf(fp, "# contains samples with branch stack\n");
1342 }
1343
1344 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1345                            int fd __maybe_unused, FILE *fp)
1346 {
1347         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1348 }
1349
1350 static void print_stat(struct perf_header *ph __maybe_unused,
1351                        int fd __maybe_unused, FILE *fp)
1352 {
1353         fprintf(fp, "# contains stat data\n");
1354 }
1355
1356 static void print_cache(struct perf_header *ph, int fd __maybe_unused,
1357                         FILE *fp)
1358 {
1359         int i;
1360
1361         fprintf(fp, "# CPU cache info:\n");
1362         for (i = 0; i < ph->env.caches_cnt; i++) {
1363                 fprintf(fp, "#  ");
1364                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1365         }
1366 }
1367
1368 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1369                                FILE *fp)
1370 {
1371         const char *delimiter = "# pmu mappings: ";
1372         char *str, *tmp;
1373         u32 pmu_num;
1374         u32 type;
1375
1376         pmu_num = ph->env.nr_pmu_mappings;
1377         if (!pmu_num) {
1378                 fprintf(fp, "# pmu mappings: not available\n");
1379                 return;
1380         }
1381
1382         str = ph->env.pmu_mappings;
1383
1384         while (pmu_num) {
1385                 type = strtoul(str, &tmp, 0);
1386                 if (*tmp != ':')
1387                         goto error;
1388
1389                 str = tmp + 1;
1390                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1391
1392                 delimiter = ", ";
1393                 str += strlen(str) + 1;
1394                 pmu_num--;
1395         }
1396
1397         fprintf(fp, "\n");
1398
1399         if (!pmu_num)
1400                 return;
1401 error:
1402         fprintf(fp, "# pmu mappings: unable to read\n");
1403 }
1404
1405 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1406                              FILE *fp)
1407 {
1408         struct perf_session *session;
1409         struct perf_evsel *evsel;
1410         u32 nr = 0;
1411
1412         session = container_of(ph, struct perf_session, header);
1413
1414         evlist__for_each_entry(session->evlist, evsel) {
1415                 if (perf_evsel__is_group_leader(evsel) &&
1416                     evsel->nr_members > 1) {
1417                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1418                                 perf_evsel__name(evsel));
1419
1420                         nr = evsel->nr_members - 1;
1421                 } else if (nr) {
1422                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1423
1424                         if (--nr == 0)
1425                                 fprintf(fp, "}\n");
1426                 }
1427         }
1428 }
1429
1430 static int __event_process_build_id(struct build_id_event *bev,
1431                                     char *filename,
1432                                     struct perf_session *session)
1433 {
1434         int err = -1;
1435         struct machine *machine;
1436         u16 cpumode;
1437         struct dso *dso;
1438         enum dso_kernel_type dso_type;
1439
1440         machine = perf_session__findnew_machine(session, bev->pid);
1441         if (!machine)
1442                 goto out;
1443
1444         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1445
1446         switch (cpumode) {
1447         case PERF_RECORD_MISC_KERNEL:
1448                 dso_type = DSO_TYPE_KERNEL;
1449                 break;
1450         case PERF_RECORD_MISC_GUEST_KERNEL:
1451                 dso_type = DSO_TYPE_GUEST_KERNEL;
1452                 break;
1453         case PERF_RECORD_MISC_USER:
1454         case PERF_RECORD_MISC_GUEST_USER:
1455                 dso_type = DSO_TYPE_USER;
1456                 break;
1457         default:
1458                 goto out;
1459         }
1460
1461         dso = machine__findnew_dso(machine, filename);
1462         if (dso != NULL) {
1463                 char sbuild_id[SBUILD_ID_SIZE];
1464
1465                 dso__set_build_id(dso, &bev->build_id);
1466
1467                 if (!is_kernel_module(filename, cpumode))
1468                         dso->kernel = dso_type;
1469
1470                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1471                                   sbuild_id);
1472                 pr_debug("build id event received for %s: %s\n",
1473                          dso->long_name, sbuild_id);
1474                 dso__put(dso);
1475         }
1476
1477         err = 0;
1478 out:
1479         return err;
1480 }
1481
1482 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1483                                                  int input, u64 offset, u64 size)
1484 {
1485         struct perf_session *session = container_of(header, struct perf_session, header);
1486         struct {
1487                 struct perf_event_header   header;
1488                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1489                 char                       filename[0];
1490         } old_bev;
1491         struct build_id_event bev;
1492         char filename[PATH_MAX];
1493         u64 limit = offset + size;
1494
1495         while (offset < limit) {
1496                 ssize_t len;
1497
1498                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1499                         return -1;
1500
1501                 if (header->needs_swap)
1502                         perf_event_header__bswap(&old_bev.header);
1503
1504                 len = old_bev.header.size - sizeof(old_bev);
1505                 if (readn(input, filename, len) != len)
1506                         return -1;
1507
1508                 bev.header = old_bev.header;
1509
1510                 /*
1511                  * As the pid is the missing value, we need to fill
1512                  * it properly. The header.misc value gives us a nice hint.
1513                  */
1514                 bev.pid = HOST_KERNEL_ID;
1515                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1516                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1517                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1518
1519                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1520                 __event_process_build_id(&bev, filename, session);
1521
1522                 offset += bev.header.size;
1523         }
1524
1525         return 0;
1526 }
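
/*
 * For comparison, the current on-file record that
 * perf_header__read_build_ids() below expects is (a sketch of
 * struct build_id_event from header.h):
 *
 *	struct perf_event_header header;   header.size covers the whole record
 *	pid_t                    pid;      the field the a1645ce1 changeset added
 *	u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
 *	char                     filename[];
 *
 * Parsing an old-format table with this layout shifts filename[] by the
 * four pid bytes, which is what the "nel.kallsyms]" check below detects.
 */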
1527
1528 static int perf_header__read_build_ids(struct perf_header *header,
1529                                        int input, u64 offset, u64 size)
1530 {
1531         struct perf_session *session = container_of(header, struct perf_session, header);
1532         struct build_id_event bev;
1533         char filename[PATH_MAX];
1534         u64 limit = offset + size, orig_offset = offset;
1535         int err = -1;
1536
1537         while (offset < limit) {
1538                 ssize_t len;
1539
1540                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1541                         goto out;
1542
1543                 if (header->needs_swap)
1544                         perf_event_header__bswap(&bev.header);
1545
1546                 len = bev.header.size - sizeof(bev);
1547                 if (readn(input, filename, len) != len)
1548                         goto out;
1549                 /*
1550                  * The a1645ce1 changeset:
1551                  *
1552                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1553                  *
1554                  * Added a field to struct build_id_event that broke the file
1555                  * format.
1556                  *
1557                  * Since the kernel build-id is the first entry, process the
1558                  * table using the old format if the well known
1559                  * '[kernel.kallsyms]' string for the kernel build-id has the
1560                  * first 4 characters chopped off (where the pid_t sits).
1561                  */
1562                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1563                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1564                                 return -1;
1565                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1566                 }
1567
1568                 __event_process_build_id(&bev, filename, session);
1569
1570                 offset += bev.header.size;
1571         }
1572         err = 0;
1573 out:
1574         return err;
1575 }
1576
1577 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1578                                 struct perf_header *ph __maybe_unused,
1579                                 int fd, void *data)
1580 {
1581         ssize_t ret = trace_report(fd, data, false);
1582         return ret < 0 ? -1 : 0;
1583 }
1584
1585 static int process_build_id(struct perf_file_section *section,
1586                             struct perf_header *ph, int fd,
1587                             void *data __maybe_unused)
1588 {
1589         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1590                 pr_debug("Failed to read buildids, continuing...\n");
1591         return 0;
1592 }
1593
1594 static int process_hostname(struct perf_file_section *section __maybe_unused,
1595                             struct perf_header *ph, int fd,
1596                             void *data __maybe_unused)
1597 {
1598         ph->env.hostname = do_read_string(fd, ph);
1599         return ph->env.hostname ? 0 : -ENOMEM;
1600 }
1601
1602 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1603                              struct perf_header *ph, int fd,
1604                              void *data __maybe_unused)
1605 {
1606         ph->env.os_release = do_read_string(fd, ph);
1607         return ph->env.os_release ? 0 : -ENOMEM;
1608 }
1609
1610 static int process_version(struct perf_file_section *section __maybe_unused,
1611                            struct perf_header *ph, int fd,
1612                            void *data __maybe_unused)
1613 {
1614         ph->env.version = do_read_string(fd, ph);
1615         return ph->env.version ? 0 : -ENOMEM;
1616 }
1617
1618 static int process_arch(struct perf_file_section *section __maybe_unused,
1619                         struct perf_header *ph, int fd,
1620                         void *data __maybe_unused)
1621 {
1622         ph->env.arch = do_read_string(fd, ph);
1623         return ph->env.arch ? 0 : -ENOMEM;
1624 }
1625
1626 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1627                           struct perf_header *ph, int fd,
1628                           void *data __maybe_unused)
1629 {
1630         ssize_t ret;
1631         u32 nr;
1632
1633         ret = readn(fd, &nr, sizeof(nr));
1634         if (ret != sizeof(nr))
1635                 return -1;
1636
1637         if (ph->needs_swap)
1638                 nr = bswap_32(nr);
1639
1640         ph->env.nr_cpus_avail = nr;
1641
1642         ret = readn(fd, &nr, sizeof(nr));
1643         if (ret != sizeof(nr))
1644                 return -1;
1645
1646         if (ph->needs_swap)
1647                 nr = bswap_32(nr);
1648
1649         ph->env.nr_cpus_online = nr;
1650         return 0;
1651 }
1652
1653 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1654                            struct perf_header *ph, int fd,
1655                            void *data __maybe_unused)
1656 {
1657         ph->env.cpu_desc = do_read_string(fd, ph);
1658         return ph->env.cpu_desc ? 0 : -ENOMEM;
1659 }
1660
1661 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1662                          struct perf_header *ph,  int fd,
1663                          void *data __maybe_unused)
1664 {
1665         ph->env.cpuid = do_read_string(fd, ph);
1666         return ph->env.cpuid ? 0 : -ENOMEM;
1667 }
1668
1669 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1670                              struct perf_header *ph, int fd,
1671                              void *data __maybe_unused)
1672 {
1673         uint64_t mem;
1674         ssize_t ret;
1675
1676         ret = readn(fd, &mem, sizeof(mem));
1677         if (ret != sizeof(mem))
1678                 return -1;
1679
1680         if (ph->needs_swap)
1681                 mem = bswap_64(mem);
1682
1683         ph->env.total_mem = mem;
1684         return 0;
1685 }
1686
1687 static struct perf_evsel *
1688 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1689 {
1690         struct perf_evsel *evsel;
1691
1692         evlist__for_each_entry(evlist, evsel) {
1693                 if (evsel->idx == idx)
1694                         return evsel;
1695         }
1696
1697         return NULL;
1698 }
1699
1700 static void
1701 perf_evlist__set_event_name(struct perf_evlist *evlist,
1702                             struct perf_evsel *event)
1703 {
1704         struct perf_evsel *evsel;
1705
1706         if (!event->name)
1707                 return;
1708
1709         evsel = perf_evlist__find_by_index(evlist, event->idx);
1710         if (!evsel)
1711                 return;
1712
1713         if (evsel->name)
1714                 return;
1715
1716         evsel->name = strdup(event->name);
1717 }
1718
1719 static int
1720 process_event_desc(struct perf_file_section *section __maybe_unused,
1721                    struct perf_header *header, int fd,
1722                    void *data __maybe_unused)
1723 {
1724         struct perf_session *session;
1725         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1726
1727         if (!events)
1728                 return 0;
1729
1730         session = container_of(header, struct perf_session, header);
1731         for (evsel = events; evsel->attr.size; evsel++)
1732                 perf_evlist__set_event_name(session->evlist, evsel);
1733
1734         free_event_desc(events);
1735
1736         return 0;
1737 }
1738
1739 static int process_cmdline(struct perf_file_section *section,
1740                            struct perf_header *ph, int fd,
1741                            void *data __maybe_unused)
1742 {
1743         ssize_t ret;
1744         char *str, *cmdline = NULL, **argv = NULL;
1745         u32 nr, i, len = 0;
1746
1747         ret = readn(fd, &nr, sizeof(nr));
1748         if (ret != sizeof(nr))
1749                 return -1;
1750
1751         if (ph->needs_swap)
1752                 nr = bswap_32(nr);
1753
1754         ph->env.nr_cmdline = nr;
1755
1756         cmdline = zalloc(section->size + nr + 1);
1757         if (!cmdline)
1758                 return -1;
1759
1760         argv = zalloc(sizeof(char *) * (nr + 1));
1761         if (!argv)
1762                 goto error;
1763
1764         for (i = 0; i < nr; i++) {
1765                 str = do_read_string(fd, ph);
1766                 if (!str)
1767                         goto error;
1768
1769                 argv[i] = cmdline + len;
1770                 memcpy(argv[i], str, strlen(str) + 1);
1771                 len += strlen(str) + 1;
1772                 free(str);
1773         }
1774         ph->env.cmdline = cmdline;
1775         ph->env.cmdline_argv = (const char **) argv;
1776         return 0;
1777
1778 error:
1779         free(argv);
1780         free(cmdline);
1781         return -1;
1782 }
1783
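/*
 * Reader for the HEADER_CPU_TOPOLOGY section. As parsed below, the section
 * is laid out roughly as (all integers in the file's byte order):
 *
 *   u32 nr_sibling_cores, followed by that many length-prefixed strings
 *   u32 nr_sibling_threads, followed by that many length-prefixed strings
 *   one { u32 core_id; u32 socket_id; } pair per available CPU
 *
 * The per-CPU core/socket pairs are optional: older perf binaries did not
 * write them, which is detected via the section->size check further down.
 */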
1784 static int process_cpu_topology(struct perf_file_section *section,
1785                                 struct perf_header *ph, int fd,
1786                                 void *data __maybe_unused)
1787 {
1788         ssize_t ret;
1789         u32 nr, i;
1790         char *str;
1791         struct strbuf sb;
1792         int cpu_nr = ph->env.nr_cpus_avail;
1793         u64 size = 0;
1794
1795         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1796         if (!ph->env.cpu)
1797                 return -1;
1798
1799         ret = readn(fd, &nr, sizeof(nr));
1800         if (ret != sizeof(nr))
1801                 goto free_cpu;
1802
1803         if (ph->needs_swap)
1804                 nr = bswap_32(nr);
1805
1806         ph->env.nr_sibling_cores = nr;
1807         size += sizeof(u32);
1808         if (strbuf_init(&sb, 128) < 0)
1809                 goto free_cpu;
1810
1811         for (i = 0; i < nr; i++) {
1812                 str = do_read_string(fd, ph);
1813                 if (!str)
1814                         goto error;
1815
1816                 /* include a NUL terminator at the end */
1817                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1818                         goto error;
1819                 size += string_size(str);
1820                 free(str);
1821         }
1822         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1823
1824         ret = readn(fd, &nr, sizeof(nr));
1825         if (ret != sizeof(nr))
1826                 goto free_cpu;
1827
1828         if (ph->needs_swap)
1829                 nr = bswap_32(nr);
1830
1831         ph->env.nr_sibling_threads = nr;
1832         size += sizeof(u32);
1833
1834         for (i = 0; i < nr; i++) {
1835                 str = do_read_string(fd, ph);
1836                 if (!str)
1837                         goto error;
1838
1839                 /* include a NUL terminator at the end */
1840                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1841                         goto error;
1842                 size += string_size(str);
1843                 free(str);
1844         }
1845         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1846
1847         /*
1848          * The header may have been written by an old perf tool,
1849          * which doesn't include core id and socket id information.
1850          */
1851         if (section->size <= size) {
1852                 zfree(&ph->env.cpu);
1853                 return 0;
1854         }
1855
1856         for (i = 0; i < (u32)cpu_nr; i++) {
1857                 ret = readn(fd, &nr, sizeof(nr));
1858                 if (ret != sizeof(nr))
1859                         goto free_cpu;
1860
1861                 if (ph->needs_swap)
1862                         nr = bswap_32(nr);
1863
1864                 ph->env.cpu[i].core_id = nr;
1865
1866                 ret = readn(fd, &nr, sizeof(nr));
1867                 if (ret != sizeof(nr))
1868                         goto free_cpu;
1869
1870                 if (ph->needs_swap)
1871                         nr = bswap_32(nr);
1872
1873                 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1874                         pr_debug("socket_id number is too big. "
1875                                  "You may need to upgrade the perf tool.\n");
1876                         goto free_cpu;
1877                 }
1878
1879                 ph->env.cpu[i].socket_id = nr;
1880         }
1881
1882         return 0;
1883
1884 error:
1885         strbuf_release(&sb);
1886 free_cpu:
1887         zfree(&ph->env.cpu);
1888         return -1;
1889 }
1890
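/*
 * Reader for the HEADER_NUMA_TOPOLOGY section. Judging from the reads below,
 * the layout is a u32 node count followed, per node, by a u32 node number,
 * u64 mem_total, u64 mem_free and a length-prefixed string holding the
 * node's CPU list (handed to cpu_map__new()).
 */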
1891 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1892                                  struct perf_header *ph, int fd,
1893                                  void *data __maybe_unused)
1894 {
1895         struct numa_node *nodes, *n;
1896         ssize_t ret;
1897         u32 nr, i;
1898         char *str;
1899
1900         /* nr nodes */
1901         ret = readn(fd, &nr, sizeof(nr));
1902         if (ret != sizeof(nr))
1903                 return -1;
1904
1905         if (ph->needs_swap)
1906                 nr = bswap_32(nr);
1907
1908         nodes = zalloc(sizeof(*nodes) * nr);
1909         if (!nodes)
1910                 return -ENOMEM;
1911
1912         for (i = 0; i < nr; i++) {
1913                 n = &nodes[i];
1914
1915                 /* node number */
1916                 ret = readn(fd, &n->node, sizeof(u32));
1917                 if (ret != sizeof(n->node))
1918                         goto error;
1919
1920                 ret = readn(fd, &n->mem_total, sizeof(u64));
1921                 if (ret != sizeof(u64))
1922                         goto error;
1923
1924                 ret = readn(fd, &n->mem_free, sizeof(u64));
1925                 if (ret != sizeof(u64))
1926                         goto error;
1927
1928                 if (ph->needs_swap) {
1929                         n->node      = bswap_32(n->node);
1930                         n->mem_total = bswap_64(n->mem_total);
1931                         n->mem_free  = bswap_64(n->mem_free);
1932                 }
1933
1934                 str = do_read_string(fd, ph);
1935                 if (!str)
1936                         goto error;
1937
1938                 n->map = cpu_map__new(str);
1939                 if (!n->map)
1940                         goto error;
1941
1942                 free(str);
1943         }
1944         ph->env.nr_numa_nodes = nr;
1945         ph->env.numa_nodes = nodes;
1946         return 0;
1947
1948 error:
1949         free(nodes);
1950         return -1;
1951 }
1952
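/*
 * Reader for the HEADER_PMU_MAPPINGS section: a u32 count followed by
 * (u32 type, length-prefixed string name) pairs. The pairs are flattened
 * into ph->env.pmu_mappings as consecutive NUL-separated "type:name"
 * entries, and the type of the "msr" PMU is remembered separately.
 */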
1953 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1954                                 struct perf_header *ph, int fd,
1955                                 void *data __maybe_unused)
1956 {
1957         ssize_t ret;
1958         char *name;
1959         u32 pmu_num;
1960         u32 type;
1961         struct strbuf sb;
1962
1963         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1964         if (ret != sizeof(pmu_num))
1965                 return -1;
1966
1967         if (ph->needs_swap)
1968                 pmu_num = bswap_32(pmu_num);
1969
1970         if (!pmu_num) {
1971                 pr_debug("pmu mappings not available\n");
1972                 return 0;
1973         }
1974
1975         ph->env.nr_pmu_mappings = pmu_num;
1976         if (strbuf_init(&sb, 128) < 0)
1977                 return -1;
1978
1979         while (pmu_num) {
1980                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1981                         goto error;
1982                 if (ph->needs_swap)
1983                         type = bswap_32(type);
1984
1985                 name = do_read_string(fd, ph);
1986                 if (!name)
1987                         goto error;
1988
1989                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1990                         goto error;
1991                 /* include a NUL terminator at the end */
1992                 if (strbuf_add(&sb, "", 1) < 0)
1993                         goto error;
1994
1995                 if (!strcmp(name, "msr"))
1996                         ph->env.msr_pmu_type = type;
1997
1998                 free(name);
1999                 pmu_num--;
2000         }
2001         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2002         return 0;
2003
2004 error:
2005         strbuf_release(&sb);
2006         return -1;
2007 }
2008
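/*
 * Reader for the HEADER_GROUP_DESC section: a u32 group count followed by a
 * (name string, u32 leader_idx, u32 nr_members) descriptor per group. The
 * descriptors are then replayed against the evlist to restore the
 * leader/member relationships that existed at record time.
 */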
2009 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2010                               struct perf_header *ph, int fd,
2011                               void *data __maybe_unused)
2012 {
2013         int ret = -1;
2014         u32 i, nr, nr_groups;
2015         struct perf_session *session;
2016         struct perf_evsel *evsel, *leader = NULL;
2017         struct group_desc {
2018                 char *name;
2019                 u32 leader_idx;
2020                 u32 nr_members;
2021         } *desc;
2022
2023         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2024                 return -1;
2025
2026         if (ph->needs_swap)
2027                 nr_groups = bswap_32(nr_groups);
2028
2029         ph->env.nr_groups = nr_groups;
2030         if (!nr_groups) {
2031                 pr_debug("group desc not available\n");
2032                 return 0;
2033         }
2034
2035         desc = calloc(nr_groups, sizeof(*desc));
2036         if (!desc)
2037                 return -1;
2038
2039         for (i = 0; i < nr_groups; i++) {
2040                 desc[i].name = do_read_string(fd, ph);
2041                 if (!desc[i].name)
2042                         goto out_free;
2043
2044                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2045                         goto out_free;
2046
2047                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2048                         goto out_free;
2049
2050                 if (ph->needs_swap) {
2051                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2052                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2053                 }
2054         }
2055
2056         /*
2057          * Rebuild group relationship based on the group_desc
2058          */
2059         session = container_of(ph, struct perf_session, header);
2060         session->evlist->nr_groups = nr_groups;
2061
2062         i = nr = 0;
2063         evlist__for_each_entry(session->evlist, evsel) {
2064                 if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
2065                         evsel->leader = evsel;
2066                         /* {anon_group} is a dummy name */
2067                         if (strcmp(desc[i].name, "{anon_group}")) {
2068                                 evsel->group_name = desc[i].name;
2069                                 desc[i].name = NULL;
2070                         }
2071                         evsel->nr_members = desc[i].nr_members;
2072
2073                         if (i >= nr_groups || nr > 0) {
2074                                 pr_debug("invalid group desc\n");
2075                                 goto out_free;
2076                         }
2077
2078                         leader = evsel;
2079                         nr = evsel->nr_members - 1;
2080                         i++;
2081                 } else if (nr) {
2082                         /* This is a group member */
2083                         evsel->leader = leader;
2084
2085                         nr--;
2086                 }
2087         }
2088
2089         if (i != nr_groups || nr != 0) {
2090                 pr_debug("invalid group desc\n");
2091                 goto out_free;
2092         }
2093
2094         ret = 0;
2095 out_free:
2096         for (i = 0; i < nr_groups; i++)
2097                 zfree(&desc[i].name);
2098         free(desc);
2099
2100         return ret;
2101 }
2102
2103 static int process_auxtrace(struct perf_file_section *section,
2104                             struct perf_header *ph, int fd,
2105                             void *data __maybe_unused)
2106 {
2107         struct perf_session *session;
2108         int err;
2109
2110         session = container_of(ph, struct perf_session, header);
2111
2112         err = auxtrace_index__process(fd, section->size, session,
2113                                       ph->needs_swap);
2114         if (err < 0)
2115                 pr_err("Failed to process auxtrace index\n");
2116         return err;
2117 }
2118
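/*
 * Reader for the HEADER_CACHE section: a u32 version (only 1 is accepted)
 * and a u32 cache count, then per cache the u32 fields level, line_size,
 * sets and ways followed by the type, size and map strings.
 */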
2119 static int process_cache(struct perf_file_section *section __maybe_unused,
2120                          struct perf_header *ph, int fd,
2121                          void *data __maybe_unused)
2122 {
2123         struct cpu_cache_level *caches;
2124         u32 cnt, i, version;
2125
2126         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2127                 return -1;
2128
2129         if (ph->needs_swap)
2130                 version = bswap_32(version);
2131
2132         if (version != 1)
2133                 return -1;
2134
2135         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2136                 return -1;
2137
2138         if (ph->needs_swap)
2139                 cnt = bswap_32(cnt);
2140
2141         caches = zalloc(sizeof(*caches) * cnt);
2142         if (!caches)
2143                 return -1;
2144
2145         for (i = 0; i < cnt; i++) {
2146                 struct cpu_cache_level c;
2147
2148                 #define _R(v)                                           \
2149                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2150                                 goto out_free_caches;                   \
2151                         if (ph->needs_swap)                             \
2152                                 c.v = bswap_32(c.v);                    \
2153
2154                 _R(level)
2155                 _R(line_size)
2156                 _R(sets)
2157                 _R(ways)
2158                 #undef _R
2159
2160                 #define _R(v)                           \
2161                         c.v = do_read_string(fd, ph);   \
2162                         if (!c.v)                       \
2163                                 goto out_free_caches;
2164
2165                 _R(type)
2166                 _R(size)
2167                 _R(map)
2168                 #undef _R
2169
2170                 caches[i] = c;
2171         }
2172
2173         ph->env.caches = caches;
2174         ph->env.caches_cnt = cnt;
2175         return 0;
2176 out_free_caches:
2177         free(caches);
2178         return -1;
2179 }
2180
2181 struct feature_ops {
2182         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2183         void (*print)(struct perf_header *h, int fd, FILE *fp);
2184         int (*process)(struct perf_file_section *section,
2185                        struct perf_header *h, int fd, void *data);
2186         const char *name;
2187         bool full_only;
2188 };
2189
2190 #define FEAT_OPA(n, func) \
2191         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2192 #define FEAT_OPP(n, func) \
2193         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2194                 .process = process_##func }
2195 #define FEAT_OPF(n, func) \
2196         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2197                 .process = process_##func, .full_only = true }
2198
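/*
 * FEAT_OPA declares a feature with write/print handlers only, FEAT_OPP also
 * wires up a ->process handler used when the section is read back, and
 * FEAT_OPF additionally sets .full_only so perf_header__fprintf_info() only
 * prints a "use -I to display" hint unless the full listing was requested.
 *
 * As a sketch (HEADER_FOO and the foo handlers are hypothetical, not a real
 * feature), a new entry would be wired up as:
 *
 *	FEAT_OPP(HEADER_FOO,		foo),
 *
 * which expects write_foo(), print_foo() and process_foo() to exist.
 */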
2199 /* feature_ops not implemented: */
2200 #define print_tracing_data      NULL
2201 #define print_build_id          NULL
2202
2203 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2204         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2205         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2206         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2207         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2208         FEAT_OPP(HEADER_VERSION,        version),
2209         FEAT_OPP(HEADER_ARCH,           arch),
2210         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2211         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2212         FEAT_OPP(HEADER_CPUID,          cpuid),
2213         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2214         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2215         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2216         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2217         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2218         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2219         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2220         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2221         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2222         FEAT_OPA(HEADER_STAT,           stat),
2223         FEAT_OPF(HEADER_CACHE,          cache),
2224 };
2225
2226 struct header_print_data {
2227         FILE *fp;
2228         bool full; /* extended list of headers */
2229 };
2230
2231 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2232                                            struct perf_header *ph,
2233                                            int feat, int fd, void *data)
2234 {
2235         struct header_print_data *hd = data;
2236
2237         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2238                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2239                                 "%d, continuing...\n", section->offset, feat);
2240                 return 0;
2241         }
2242         if (feat >= HEADER_LAST_FEATURE) {
2243                 pr_warning("unknown feature %d\n", feat);
2244                 return 0;
2245         }
2246         if (!feat_ops[feat].print)
2247                 return 0;
2248
2249         if (!feat_ops[feat].full_only || hd->full)
2250                 feat_ops[feat].print(ph, fd, hd->fp);
2251         else
2252                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2253                         feat_ops[feat].name);
2254
2255         return 0;
2256 }
2257
2258 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2259 {
2260         struct header_print_data hd;
2261         struct perf_header *header = &session->header;
2262         int fd = perf_data_file__fd(session->file);
2263         struct stat st;
2264         int ret, bit;
2265
2266         hd.fp = fp;
2267         hd.full = full;
2268
2269         ret = fstat(fd, &st);
2270         if (ret == -1)
2271                 return -1;
2272
2273         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2274
2275         perf_header__process_sections(header, fd, &hd,
2276                                       perf_file_section__fprintf_info);
2277
2278         if (session->file->is_pipe)
2279                 return 0;
2280
2281         fprintf(fp, "# missing features: ");
2282         for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2283                 if (bit)
2284                         fprintf(fp, "%s ", feat_ops[bit].name);
2285         }
2286
2287         fprintf(fp, "\n");
2288         return 0;
2289 }
2290
2291 static int do_write_feat(int fd, struct perf_header *h, int type,
2292                          struct perf_file_section **p,
2293                          struct perf_evlist *evlist)
2294 {
2295         int err;
2296         int ret = 0;
2297
2298         if (perf_header__has_feat(h, type)) {
2299                 if (!feat_ops[type].write)
2300                         return -1;
2301
2302                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2303
2304                 err = feat_ops[type].write(fd, h, evlist);
2305                 if (err < 0) {
2306                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2307
2308                         /* undo anything written */
2309                         lseek(fd, (*p)->offset, SEEK_SET);
2310
2311                         return -1;
2312                 }
2313                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2314                 (*p)++;
2315         }
2316         return ret;
2317 }
2318
2319 static int perf_header__adds_write(struct perf_header *header,
2320                                    struct perf_evlist *evlist, int fd)
2321 {
2322         int nr_sections;
2323         struct perf_file_section *feat_sec, *p;
2324         int sec_size;
2325         u64 sec_start;
2326         int feat;
2327         int err;
2328
2329         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2330         if (!nr_sections)
2331                 return 0;
2332
2333         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2334         if (feat_sec == NULL)
2335                 return -ENOMEM;
2336
2337         sec_size = sizeof(*feat_sec) * nr_sections;
2338
2339         sec_start = header->feat_offset;
2340         lseek(fd, sec_start + sec_size, SEEK_SET);
2341
2342         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2343                 if (do_write_feat(fd, header, feat, &p, evlist))
2344                         perf_header__clear_feat(header, feat);
2345         }
2346
2347         lseek(fd, sec_start, SEEK_SET);
2348         /*
2349          * We may write more than needed due to dropped features, but
2350          * this is okay: the reader will skip the missing entries.
2351          */
2352         err = do_write(fd, feat_sec, sec_size);
2353         if (err < 0)
2354                 pr_debug("failed to write feature section\n");
2355         free(feat_sec);
2356         return err;
2357 }
2358
2359 int perf_header__write_pipe(int fd)
2360 {
2361         struct perf_pipe_file_header f_header;
2362         int err;
2363
2364         f_header = (struct perf_pipe_file_header){
2365                 .magic     = PERF_MAGIC,
2366                 .size      = sizeof(f_header),
2367         };
2368
2369         err = do_write(fd, &f_header, sizeof(f_header));
2370         if (err < 0) {
2371                 pr_debug("failed to write perf pipe header\n");
2372                 return err;
2373         }
2374
2375         return 0;
2376 }
2377
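/*
 * Writes the non-pipe perf.data layout: the sample ids of each evsel first
 * (remembering their offsets), then one perf_file_attr per event, then --
 * when called at exit -- the feature sections past the data area, and
 * finally the perf_file_header itself back at offset 0.
 */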
2378 int perf_session__write_header(struct perf_session *session,
2379                                struct perf_evlist *evlist,
2380                                int fd, bool at_exit)
2381 {
2382         struct perf_file_header f_header;
2383         struct perf_file_attr   f_attr;
2384         struct perf_header *header = &session->header;
2385         struct perf_evsel *evsel;
2386         u64 attr_offset;
2387         int err;
2388
2389         lseek(fd, sizeof(f_header), SEEK_SET);
2390
2391         evlist__for_each_entry(session->evlist, evsel) {
2392                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2393                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2394                 if (err < 0) {
2395                         pr_debug("failed to write perf header\n");
2396                         return err;
2397                 }
2398         }
2399
2400         attr_offset = lseek(fd, 0, SEEK_CUR);
2401
2402         evlist__for_each_entry(evlist, evsel) {
2403                 f_attr = (struct perf_file_attr){
2404                         .attr = evsel->attr,
2405                         .ids  = {
2406                                 .offset = evsel->id_offset,
2407                                 .size   = evsel->ids * sizeof(u64),
2408                         }
2409                 };
2410                 err = do_write(fd, &f_attr, sizeof(f_attr));
2411                 if (err < 0) {
2412                         pr_debug("failed to write perf header attribute\n");
2413                         return err;
2414                 }
2415         }
2416
2417         if (!header->data_offset)
2418                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2419         header->feat_offset = header->data_offset + header->data_size;
2420
2421         if (at_exit) {
2422                 err = perf_header__adds_write(header, evlist, fd);
2423                 if (err < 0)
2424                         return err;
2425         }
2426
2427         f_header = (struct perf_file_header){
2428                 .magic     = PERF_MAGIC,
2429                 .size      = sizeof(f_header),
2430                 .attr_size = sizeof(f_attr),
2431                 .attrs = {
2432                         .offset = attr_offset,
2433                         .size   = evlist->nr_entries * sizeof(f_attr),
2434                 },
2435                 .data = {
2436                         .offset = header->data_offset,
2437                         .size   = header->data_size,
2438                 },
2439                 /* event_types is ignored, store zeros */
2440         };
2441
2442         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2443
2444         lseek(fd, 0, SEEK_SET);
2445         err = do_write(fd, &f_header, sizeof(f_header));
2446         if (err < 0) {
2447                 pr_debug("failed to write perf header\n");
2448                 return err;
2449         }
2450         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2451
2452         return 0;
2453 }
2454
2455 static int perf_header__getbuffer64(struct perf_header *header,
2456                                     int fd, void *buf, size_t size)
2457 {
2458         if (readn(fd, buf, size) <= 0)
2459                 return -1;
2460
2461         if (header->needs_swap)
2462                 mem_bswap_64(buf, size);
2463
2464         return 0;
2465 }
2466
2467 int perf_header__process_sections(struct perf_header *header, int fd,
2468                                   void *data,
2469                                   int (*process)(struct perf_file_section *section,
2470                                                  struct perf_header *ph,
2471                                                  int feat, int fd, void *data))
2472 {
2473         struct perf_file_section *feat_sec, *sec;
2474         int nr_sections;
2475         int sec_size;
2476         int feat;
2477         int err;
2478
2479         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2480         if (!nr_sections)
2481                 return 0;
2482
2483         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2484         if (!feat_sec)
2485                 return -1;
2486
2487         sec_size = sizeof(*feat_sec) * nr_sections;
2488
2489         lseek(fd, header->feat_offset, SEEK_SET);
2490
2491         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2492         if (err < 0)
2493                 goto out_free;
2494
2495         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2496                 err = process(sec++, header, feat, fd, data);
2497                 if (err < 0)
2498                         goto out_free;
2499         }
2500         err = 0;
2501 out_free:
2502         free(feat_sec);
2503         return err;
2504 }
2505
2506 static const int attr_file_abi_sizes[] = {
2507         [0] = PERF_ATTR_SIZE_VER0,
2508         [1] = PERF_ATTR_SIZE_VER1,
2509         [2] = PERF_ATTR_SIZE_VER2,
2510         [3] = PERF_ATTR_SIZE_VER3,
2511         [4] = PERF_ATTR_SIZE_VER4,
2512         0,
2513 };
2514
2515 /*
2516  * In the legacy file format, the magic number does not encode endianness;
2517  * hdr_sz was used for that instead. But given that hdr_sz can vary based
2518  * on ABI revisions, we need to try all known header sizes, in both byte
2519  * orders, to detect the endianness.
2520  */
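/*
 * As a worked example (assuming the usual sizes: PERF_ATTR_SIZE_VER0 is 64
 * and struct perf_file_section is two u64s, i.e. 16 bytes), ABI0 gives
 * ref_size == 80. A legacy header written on a same-endian host stores
 * hdr_sz == 80, while one from an opposite-endian host stores bswap_64(80),
 * which is what the swapped comparison below catches.
 */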
2521 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2522 {
2523         uint64_t ref_size, attr_size;
2524         int i;
2525
2526         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2527                 ref_size = attr_file_abi_sizes[i]
2528                          + sizeof(struct perf_file_section);
2529                 if (hdr_sz != ref_size) {
2530                         attr_size = bswap_64(hdr_sz);
2531                         if (attr_size != ref_size)
2532                                 continue;
2533
2534                         ph->needs_swap = true;
2535                 }
2536                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2537                          i,
2538                          ph->needs_swap);
2539                 return 0;
2540         }
2541         /* could not determine endianness */
2542         return -1;
2543 }
2544
2545 #define PERF_PIPE_HDR_VER0      16
2546
2547 static const size_t attr_pipe_abi_sizes[] = {
2548         [0] = PERF_PIPE_HDR_VER0,
2549         0,
2550 };
2551
2552 /*
2553  * In the legacy pipe format, there is an implicit assumption that the
2554  * endianness of the host recording the samples matches that of the host
2555  * parsing them. This is not always the case, since the pipe output can be
2556  * redirected into a file and analyzed on a different machine, possibly with
2557  * a different endianness and perf_event ABI revision in the perf tool itself.
2558  */
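/*
 * Concretely, for the only known pipe revision PERF_PIPE_HDR_VER0 is 16, so
 * a same-endian pipe header stores hdr_sz == 16, an opposite-endian one
 * stores bswap_64(16), and anything else is rejected.
 */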
2559 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2560 {
2561         u64 attr_size;
2562         int i;
2563
2564         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2565                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2566                         attr_size = bswap_64(hdr_sz);
2567                         if (attr_size != hdr_sz)
2568                                 continue;
2569
2570                         ph->needs_swap = true;
2571                 }
2572                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2573                 return 0;
2574         }
2575         return -1;
2576 }
2577
2578 bool is_perf_magic(u64 magic)
2579 {
2580         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2581                 || magic == __perf_magic2
2582                 || magic == __perf_magic2_sw)
2583                 return true;
2584
2585         return false;
2586 }
2587
2588 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2589                               bool is_pipe, struct perf_header *ph)
2590 {
2591         int ret;
2592
2593         /* check for legacy format */
2594         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2595         if (ret == 0) {
2596                 ph->version = PERF_HEADER_VERSION_1;
2597                 pr_debug("legacy perf.data format\n");
2598                 if (is_pipe)
2599                         return try_all_pipe_abis(hdr_sz, ph);
2600
2601                 return try_all_file_abis(hdr_sz, ph);
2602         }
2603         /*
2604          * the new magic number serves two purposes:
2605          * - unique number to identify actual perf.data files
2606          * - encode endianness of file
2607          */
2608         ph->version = PERF_HEADER_VERSION_2;
2609
2610         /* check magic number with one endianness */
2611         if (magic == __perf_magic2)
2612                 return 0;
2613
2614         /* check magic number with opposite endianness */
2615         if (magic != __perf_magic2_sw)
2616                 return -1;
2617
2618         ph->needs_swap = true;
2619
2620         return 0;
2621 }
2622
2623 int perf_file_header__read(struct perf_file_header *header,
2624                            struct perf_header *ph, int fd)
2625 {
2626         ssize_t ret;
2627
2628         lseek(fd, 0, SEEK_SET);
2629
2630         ret = readn(fd, header, sizeof(*header));
2631         if (ret <= 0)
2632                 return -1;
2633
2634         if (check_magic_endian(header->magic,
2635                                header->attr_size, false, ph) < 0) {
2636                 pr_debug("magic/endian check failed\n");
2637                 return -1;
2638         }
2639
2640         if (ph->needs_swap) {
2641                 mem_bswap_64(header, offsetof(struct perf_file_header,
2642                              adds_features));
2643         }
2644
2645         if (header->size != sizeof(*header)) {
2646                 /* Support the previous format */
2647                 if (header->size == offsetof(typeof(*header), adds_features))
2648                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2649                 else
2650                         return -1;
2651         } else if (ph->needs_swap) {
2652                 /*
2653                  * feature bitmap is declared as an array of unsigned longs --
2654                  * not good since its size can differ between the host that
2655                  * generated the data file and the host analyzing the file.
2656                  *
2657                  * We need to handle endianness, but we don't know the size of
2658                  * the unsigned long where the file was generated. Take a best
2659          * guess at determining it: try 64-bit swap first (i.e., file
2660                  * created on a 64-bit host), and check if the hostname feature
2661                  * bit is set (this feature bit is forced on as of fbe96f2).
2662                  * If the bit is not, undo the 64-bit swap and try a 32-bit
2663                  * swap. If the hostname bit is still not set (e.g., older data
2664          * file), punt and fall back to the original behavior --
2665                  * clearing all feature bits and setting buildid.
2666                  */
2667                 mem_bswap_64(&header->adds_features,
2668                             BITS_TO_U64(HEADER_FEAT_BITS));
2669
2670                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2671                         /* unswap as u64 */
2672                         mem_bswap_64(&header->adds_features,
2673                                     BITS_TO_U64(HEADER_FEAT_BITS));
2674
2675                         /* unswap as u32 */
2676                         mem_bswap_32(&header->adds_features,
2677                                     BITS_TO_U32(HEADER_FEAT_BITS));
2678                 }
2679
2680                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2681                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2682                         set_bit(HEADER_BUILD_ID, header->adds_features);
2683                 }
2684         }
2685
2686         memcpy(&ph->adds_features, &header->adds_features,
2687                sizeof(ph->adds_features));
2688
2689         ph->data_offset  = header->data.offset;
2690         ph->data_size    = header->data.size;
2691         ph->feat_offset  = header->data.offset + header->data.size;
2692         return 0;
2693 }
2694
2695 static int perf_file_section__process(struct perf_file_section *section,
2696                                       struct perf_header *ph,
2697                                       int feat, int fd, void *data)
2698 {
2699         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2700                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2701                           "%d, continuing...\n", section->offset, feat);
2702                 return 0;
2703         }
2704
2705         if (feat >= HEADER_LAST_FEATURE) {
2706                 pr_debug("unknown feature %d, continuing...\n", feat);
2707                 return 0;
2708         }
2709
2710         if (!feat_ops[feat].process)
2711                 return 0;
2712
2713         return feat_ops[feat].process(section, ph, fd, data);
2714 }
2715
2716 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2717                                        struct perf_header *ph, int fd,
2718                                        bool repipe)
2719 {
2720         ssize_t ret;
2721
2722         ret = readn(fd, header, sizeof(*header));
2723         if (ret <= 0)
2724                 return -1;
2725
2726         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2727                 pr_debug("endian/magic failed\n");
2728                 return -1;
2729         }
2730
2731         if (ph->needs_swap)
2732                 header->size = bswap_64(header->size);
2733
2734         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2735                 return -1;
2736
2737         return 0;
2738 }
2739
2740 static int perf_header__read_pipe(struct perf_session *session)
2741 {
2742         struct perf_header *header = &session->header;
2743         struct perf_pipe_file_header f_header;
2744
2745         if (perf_file_header__read_pipe(&f_header, header,
2746                                         perf_data_file__fd(session->file),
2747                                         session->repipe) < 0) {
2748                 pr_debug("incompatible file format\n");
2749                 return -EINVAL;
2750         }
2751
2752         return 0;
2753 }
2754
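/*
 * Read one on-file perf_event_attr plus its ids section descriptor. Only
 * PERF_ATTR_SIZE_VER0 bytes are guaranteed to be present; the remainder, up
 * to the size recorded in the file, is read on top of that, and files
 * written by a newer ABI than this tool knows about are rejected.
 */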
2755 static int read_attr(int fd, struct perf_header *ph,
2756                      struct perf_file_attr *f_attr)
2757 {
2758         struct perf_event_attr *attr = &f_attr->attr;
2759         size_t sz, left;
2760         size_t our_sz = sizeof(f_attr->attr);
2761         ssize_t ret;
2762
2763         memset(f_attr, 0, sizeof(*f_attr));
2764
2765         /* read minimal guaranteed structure */
2766         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2767         if (ret <= 0) {
2768                 pr_debug("cannot read %d bytes of header attr\n",
2769                          PERF_ATTR_SIZE_VER0);
2770                 return -1;
2771         }
2772
2773         /* perf_event_attr size as recorded in the file */
2774         sz = attr->size;
2775
2776         if (ph->needs_swap)
2777                 sz = bswap_32(sz);
2778
2779         if (sz == 0) {
2780                 /* assume ABI0 */
2781                 sz =  PERF_ATTR_SIZE_VER0;
2782         } else if (sz > our_sz) {
2783                 pr_debug("file uses a more recent and unsupported ABI"
2784                          " (%zu bytes extra)\n", sz - our_sz);
2785                 return -1;
2786         }
2787         /* what we have not read yet but know about */
2788         left = sz - PERF_ATTR_SIZE_VER0;
2789         if (left) {
2790                 void *ptr = attr;
2791                 ptr += PERF_ATTR_SIZE_VER0;
2792
2793                 ret = readn(fd, ptr, left);
2794         }
2795         /* read perf_file_section, ids are read in caller */
2796         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2797
2798         return ret <= 0 ? -1 : 0;
2799 }
2800
2801 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2802                                                 struct pevent *pevent)
2803 {
2804         struct event_format *event;
2805         char bf[128];
2806
2807         /* already prepared */
2808         if (evsel->tp_format)
2809                 return 0;
2810
2811         if (pevent == NULL) {
2812                 pr_debug("broken or missing trace data\n");
2813                 return -1;
2814         }
2815
2816         event = pevent_find_event(pevent, evsel->attr.config);
2817         if (event == NULL) {
2818                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2819                 return -1;
2820         }
2821
2822         if (!evsel->name) {
2823                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2824                 evsel->name = strdup(bf);
2825                 if (evsel->name == NULL)
2826                         return -1;
2827         }
2828
2829         evsel->tp_format = event;
2830         return 0;
2831 }
2832
2833 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2834                                                   struct pevent *pevent)
2835 {
2836         struct perf_evsel *pos;
2837
2838         evlist__for_each_entry(evlist, pos) {
2839                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2840                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2841                         return -1;
2842         }
2843
2844         return 0;
2845 }
2846
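/*
 * Entry point for reading a perf.data header: pipe mode only validates the
 * small pipe header, while file mode reads the perf_file_header, creates one
 * evsel per on-file attribute (including its sample ids) and then walks the
 * feature sections through perf_file_section__process().
 */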
2847 int perf_session__read_header(struct perf_session *session)
2848 {
2849         struct perf_data_file *file = session->file;
2850         struct perf_header *header = &session->header;
2851         struct perf_file_header f_header;
2852         struct perf_file_attr   f_attr;
2853         u64                     f_id;
2854         int nr_attrs, nr_ids, i, j;
2855         int fd = perf_data_file__fd(file);
2856
2857         session->evlist = perf_evlist__new();
2858         if (session->evlist == NULL)
2859                 return -ENOMEM;
2860
2861         session->evlist->env = &header->env;
2862         session->machines.host.env = &header->env;
2863         if (perf_data_file__is_pipe(file))
2864                 return perf_header__read_pipe(session);
2865
2866         if (perf_file_header__read(&f_header, header, fd) < 0)
2867                 return -EINVAL;
2868
2869         /*
2870          * Sanity check that perf.data was written cleanly; data size is
2871          * initialized to 0 and updated only if the on_exit function is run.
2872          * If data size is still 0 then the file contains only partial
2873          * information.  Just warn the user and process as much of it as possible.
2874          */
2875         if (f_header.data.size == 0) {
2876                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2877                            "Was the 'perf record' command properly terminated?\n",
2878                            file->path);
2879         }
2880
2881         nr_attrs = f_header.attrs.size / f_header.attr_size;
2882         lseek(fd, f_header.attrs.offset, SEEK_SET);
2883
2884         for (i = 0; i < nr_attrs; i++) {
2885                 struct perf_evsel *evsel;
2886                 off_t tmp;
2887
2888                 if (read_attr(fd, header, &f_attr) < 0)
2889                         goto out_errno;
2890
2891                 if (header->needs_swap) {
2892                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2893                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2894                         perf_event__attr_swap(&f_attr.attr);
2895                 }
2896
2897                 tmp = lseek(fd, 0, SEEK_CUR);
2898                 evsel = perf_evsel__new(&f_attr.attr);
2899
2900                 if (evsel == NULL)
2901                         goto out_delete_evlist;
2902
2903                 evsel->needs_swap = header->needs_swap;
2904                 /*
2905                  * Add it to the evlist first, so that if perf_evsel__alloc_id()
2906                  * fails, this entry gets purged too at perf_evlist__delete().
2907                  */
2908                 perf_evlist__add(session->evlist, evsel);
2909
2910                 nr_ids = f_attr.ids.size / sizeof(u64);
2911                 /*
2912                  * We don't have the cpu and thread maps in the header, so
2913                  * for allocating the perf_sample_id table we fake 1 cpu and
2914                  * nr_ids threads.
2915                  */
2916                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2917                         goto out_delete_evlist;
2918
2919                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2920
2921                 for (j = 0; j < nr_ids; j++) {
2922                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2923                                 goto out_errno;
2924
2925                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2926                 }
2927
2928                 lseek(fd, tmp, SEEK_SET);
2929         }
2930
2931         symbol_conf.nr_events = nr_attrs;
2932
2933         perf_header__process_sections(header, fd, &session->tevent,
2934                                       perf_file_section__process);
2935
2936         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2937                                                    session->tevent.pevent))
2938                 goto out_delete_evlist;
2939
2940         return 0;
2941 out_errno:
2942         return -errno;
2943
2944 out_delete_evlist:
2945         perf_evlist__delete(session->evlist);
2946         session->evlist = NULL;
2947         return -ENOMEM;
2948 }
2949
2950 int perf_event__synthesize_attr(struct perf_tool *tool,
2951                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2952                                 perf_event__handler_t process)
2953 {
2954         union perf_event *ev;
2955         size_t size;
2956         int err;
2957
2958         size = sizeof(struct perf_event_attr);
2959         size = PERF_ALIGN(size, sizeof(u64));
2960         size += sizeof(struct perf_event_header);
2961         size += ids * sizeof(u64);
2962
2963         ev = malloc(size);
2964
2965         if (ev == NULL)
2966                 return -ENOMEM;
2967
2968         ev->attr.attr = *attr;
2969         memcpy(ev->attr.id, id, ids * sizeof(u64));
2970
2971         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2972         ev->attr.header.size = (u16)size;
2973
2974         if (ev->attr.header.size == size)
2975                 err = process(tool, ev, NULL, NULL);
2976         else
2977                 err = -E2BIG;
2978
2979         free(ev);
2980
2981         return err;
2982 }
2983
2984 static struct event_update_event *
2985 event_update_event__new(size_t size, u64 type, u64 id)
2986 {
2987         struct event_update_event *ev;
2988
2989         size += sizeof(*ev);
2990         size  = PERF_ALIGN(size, sizeof(u64));
2991
2992         ev = zalloc(size);
2993         if (ev) {
2994                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2995                 ev->header.size = (u16)size;
2996                 ev->type = type;
2997                 ev->id = id;
2998         }
2999         return ev;
3000 }
3001
3002 int
3003 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3004                                          struct perf_evsel *evsel,
3005                                          perf_event__handler_t process)
3006 {
3007         struct event_update_event *ev;
3008         size_t size = strlen(evsel->unit);
3009         int err;
3010
3011         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3012         if (ev == NULL)
3013                 return -ENOMEM;
3014
3015         strncpy(ev->data, evsel->unit, size);
3016         err = process(tool, (union perf_event *)ev, NULL, NULL);
3017         free(ev);
3018         return err;
3019 }
3020
3021 int
3022 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3023                                           struct perf_evsel *evsel,
3024                                           perf_event__handler_t process)
3025 {
3026         struct event_update_event *ev;
3027         struct event_update_event_scale *ev_data;
3028         int err;
3029
3030         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3031         if (ev == NULL)
3032                 return -ENOMEM;
3033
3034         ev_data = (struct event_update_event_scale *) ev->data;
3035         ev_data->scale = evsel->scale;
3036         err = process(tool, (union perf_event*) ev, NULL, NULL);
3037         free(ev);
3038         return err;
3039 }
3040
3041 int
3042 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3043                                          struct perf_evsel *evsel,
3044                                          perf_event__handler_t process)
3045 {
3046         struct event_update_event *ev;
3047         size_t len = strlen(evsel->name);
3048         int err;
3049
3050         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3051         if (ev == NULL)
3052                 return -ENOMEM;
3053
3054         strncpy(ev->data, evsel->name, len);
3055         err = process(tool, (union perf_event*) ev, NULL, NULL);
3056         free(ev);
3057         return err;
3058 }
3059
3060 int
3061 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3062                                         struct perf_evsel *evsel,
3063                                         perf_event__handler_t process)
3064 {
3065         size_t size = sizeof(struct event_update_event);
3066         struct event_update_event *ev;
3067         int max, err;
3068         u16 type;
3069
3070         if (!evsel->own_cpus)
3071                 return 0;
3072
3073         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3074         if (!ev)
3075                 return -ENOMEM;
3076
3077         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3078         ev->header.size = (u16)size;
3079         ev->type = PERF_EVENT_UPDATE__CPUS;
3080         ev->id   = evsel->id[0];
3081
3082         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3083                                  evsel->own_cpus,
3084                                  type, max);
3085
3086         err = process(tool, (union perf_event*) ev, NULL, NULL);
3087         free(ev);
3088         return err;
3089 }
3090
3091 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3092 {
3093         struct event_update_event *ev = &event->event_update;
3094         struct event_update_event_scale *ev_scale;
3095         struct event_update_event_cpus *ev_cpus;
3096         struct cpu_map *map;
3097         size_t ret;
3098
3099         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3100
3101         switch (ev->type) {
3102         case PERF_EVENT_UPDATE__SCALE:
3103                 ev_scale = (struct event_update_event_scale *) ev->data;
3104                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3105                 break;
3106         case PERF_EVENT_UPDATE__UNIT:
3107                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3108                 break;
3109         case PERF_EVENT_UPDATE__NAME:
3110                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3111                 break;
3112         case PERF_EVENT_UPDATE__CPUS:
3113                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3114                 ret += fprintf(fp, "... ");
3115
3116                 map = cpu_map__new_data(&ev_cpus->cpus);
3117                 if (map)
3118                         ret += cpu_map__fprintf(map, fp);
3119                 else
3120                         ret += fprintf(fp, "failed to get cpus\n");
3121                 break;
3122         default:
3123                 ret += fprintf(fp, "... unknown type\n");
3124                 break;
3125         }
3126
3127         return ret;
3128 }
3129
3130 int perf_event__synthesize_attrs(struct perf_tool *tool,
3131                                    struct perf_session *session,
3132                                    perf_event__handler_t process)
3133 {
3134         struct perf_evsel *evsel;
3135         int err = 0;
3136
3137         evlist__for_each_entry(session->evlist, evsel) {
3138                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3139                                                   evsel->id, process);
3140                 if (err) {
3141                         pr_debug("failed to create perf header attribute\n");
3142                         return err;
3143                 }
3144         }
3145
3146         return err;
3147 }
3148
3149 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3150                              union perf_event *event,
3151                              struct perf_evlist **pevlist)
3152 {
3153         u32 i, ids, n_ids;
3154         struct perf_evsel *evsel;
3155         struct perf_evlist *evlist = *pevlist;
3156
3157         if (evlist == NULL) {
3158                 *pevlist = evlist = perf_evlist__new();
3159                 if (evlist == NULL)
3160                         return -ENOMEM;
3161         }
3162
3163         evsel = perf_evsel__new(&event->attr.attr);
3164         if (evsel == NULL)
3165                 return -ENOMEM;
3166
3167         perf_evlist__add(evlist, evsel);
3168
3169         ids = event->header.size;
3170         ids -= (void *)&event->attr.id - (void *)event;
3171         n_ids = ids / sizeof(u64);
3172         /*
3173          * We don't have the cpu and thread maps in the header, so
3174          * for allocating the perf_sample_id table we fake 1 cpu and
3175          * n_ids threads.
3176          */
3177         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3178                 return -ENOMEM;
3179
3180         for (i = 0; i < n_ids; i++) {
3181                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3182         }
3183
3184         symbol_conf.nr_events = evlist->nr_entries;
3185
3186         return 0;
3187 }
3188
3189 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3190                                      union perf_event *event,
3191                                      struct perf_evlist **pevlist)
3192 {
3193         struct event_update_event *ev = &event->event_update;
3194         struct event_update_event_scale *ev_scale;
3195         struct event_update_event_cpus *ev_cpus;
3196         struct perf_evlist *evlist;
3197         struct perf_evsel *evsel;
3198         struct cpu_map *map;
3199
3200         if (!pevlist || *pevlist == NULL)
3201                 return -EINVAL;
3202
3203         evlist = *pevlist;
3204
3205         evsel = perf_evlist__id2evsel(evlist, ev->id);
3206         if (evsel == NULL)
3207                 return -EINVAL;
3208
3209         switch (ev->type) {
3210         case PERF_EVENT_UPDATE__UNIT:
3211                 evsel->unit = strdup(ev->data);
3212                 break;
3213         case PERF_EVENT_UPDATE__NAME:
3214                 evsel->name = strdup(ev->data);
3215                 break;
3216         case PERF_EVENT_UPDATE__SCALE:
3217                 ev_scale = (struct event_update_event_scale *) ev->data;
3218                 evsel->scale = ev_scale->scale;
3219                 break;
3220         case PERF_EVENT_UPDATE__CPUS:
3221                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3222
3223                 map = cpu_map__new_data(&ev_cpus->cpus);
3224                 if (map)
3225                         evsel->own_cpus = map;
3226                 else
3227                         pr_err("failed to get event_update cpus\n");
3228         default:
3229                 break;
3230         }
3231
3232         return 0;
3233 }
3234
3235 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3236                                         struct perf_evlist *evlist,
3237                                         perf_event__handler_t process)
3238 {
3239         union perf_event ev;
3240         struct tracing_data *tdata;
3241         ssize_t size = 0, aligned_size = 0, padding;
3242         int err __maybe_unused = 0;
3243
3244         /*
3245          * We are going to store the size of the data followed
3246          * by the data contents. Since the fd is a pipe,
3247          * we cannot seek back to store the size of the data once
3248          * we know it. Instead we:
3249          *
3250          * - write the tracing data to the temp file
3251          * - get/write the data size to pipe
3252          * - write the tracing data from the temp file
3253          *   to the pipe
3254          */
3255         tdata = tracing_data_get(&evlist->entries, fd, true);
3256         if (!tdata)
3257                 return -1;
3258
3259         memset(&ev, 0, sizeof(ev));
3260
3261         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3262         size = tdata->size;
3263         aligned_size = PERF_ALIGN(size, sizeof(u64));
3264         padding = aligned_size - size;
3265         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3266         ev.tracing_data.size = aligned_size;
3267
3268         process(tool, &ev, NULL, NULL);
3269
3270         /*
3271          * The put function will copy all the tracing data
3272          * stored in temp file to the pipe.
3273          */
3274         tracing_data_put(tdata);
3275
3276         write_padded(fd, NULL, 0, padding);
3277
3278         return aligned_size;
3279 }
3280
3281 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3282                                      union perf_event *event,
3283                                      struct perf_session *session)
3284 {
3285         ssize_t size_read, padding, size = event->tracing_data.size;
3286         int fd = perf_data_file__fd(session->file);
3287         off_t offset = lseek(fd, 0, SEEK_CUR);
3288         char buf[BUFSIZ];
3289
3290         /* setup for reading amidst mmap */
3291         lseek(fd, offset + sizeof(struct tracing_data_event),
3292               SEEK_SET);
3293
3294         size_read = trace_report(fd, &session->tevent,
3295                                  session->repipe);
3296         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3297
3298         if (readn(fd, buf, padding) < 0) {
3299                 pr_err("%s: reading input file\n", __func__);
3300                 return -1;
3301         }
3302         if (session->repipe) {
3303                 int retw = write(STDOUT_FILENO, buf, padding);
3304                 if (retw <= 0 || retw != padding) {
3305                         pr_err("%s: repiping tracing data padding\n", __func__);
3306                         return -1;
3307                 }
3308         }
3309
3310         if (size_read + padding != size) {
3311                 pr_err("%s: tracing data size mismatch\n", __func__);
3312                 return -1;
3313         }
3314
3315         perf_evlist__prepare_tracepoint_events(session->evlist,
3316                                                session->tevent.pevent);
3317
3318         return size_read + padding;
3319 }
3320
3321 int perf_event__synthesize_build_id(struct perf_tool *tool,
3322                                     struct dso *pos, u16 misc,
3323                                     perf_event__handler_t process,
3324                                     struct machine *machine)
3325 {
3326         union perf_event ev;
3327         size_t len;
3328         int err = 0;
3329
3330         if (!pos->hit)
3331                 return err;
3332
3333         memset(&ev, 0, sizeof(ev));
3334
3335         len = pos->long_name_len + 1;
3336         len = PERF_ALIGN(len, NAME_ALIGN);
3337         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3338         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3339         ev.build_id.header.misc = misc;
3340         ev.build_id.pid = machine->pid;
3341         ev.build_id.header.size = sizeof(ev.build_id) + len;
3342         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3343
3344         err = process(tool, &ev, NULL, machine);
3345
3346         return err;
3347 }
3348
3349 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3350                                  union perf_event *event,
3351                                  struct perf_session *session)
3352 {
3353         __event_process_build_id(&event->build_id,
3354                                  event->build_id.filename,
3355                                  session);
3356         return 0;
3357 }