perf tools: Rename perf_event::bpf_event to perf_event::bpf
[linux-2.6-block.git] / tools / perf / util / session.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

#ifdef HAVE_ZSTD_SUPPORT
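/*
 * Decompress a PERF_RECORD_COMPRESSED payload into an anonymous mmap'd
 * 'struct decomp' buffer that is chained on the session.  Bytes left
 * unconsumed at the tail of the previous buffer are copied to the front of
 * the new one, so records that straddle compressed chunks can still be
 * parsed from a single contiguous buffer.
 */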
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

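/*
 * Callback wired into the ordered_events queue: once an event's slot is
 * flushed, hand it back to the normal session delivery path.
 */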
static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

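/*
 * Default stubs installed by perf_tool__fill_defaults() for callbacks that a
 * tool does not provide, so the event dispatch code never has to check for
 * NULL handlers.
 */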
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

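/*
 * Skip 'n' bytes by reading and discarding them: needed when the input is a
 * pipe, where lseek() is not available.
 */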
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
							union perf_event *event __maybe_unused,
							u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

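/*
 * Point every callback the tool left NULL at a sensible default: real
 * handlers where perf has one (lost, aux, ksymbol, bpf_event, ...),
 * dump-only stubs otherwise.
 */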
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

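/*
 * Byte-swap helpers for reading perf.data files recorded on a machine with
 * the opposite endianness.  swap_sample_id_all() flips the trailing
 * sample_id_all block that follows most non-sample records.
 */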
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

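/*
 * Per-record-type byte-swap handlers, indexed by PERF_RECORD_* type.  A NULL
 * entry means no per-field swapping is done here.
 */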
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are paired registers. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for the call stack
		 * "A"->"B"->"C"->"D"
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

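/*
 * Raw-trace dump helpers: only active when dump_trace is set (e.g. perf
 * report -D), they print the record header, timestamp and decoded payload.
 */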
static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

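/*
 * Route guest samples to the machine object for the guest's pid (or the
 * default guest machine); everything else belongs to the host machine.
 */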
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

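/*
 * PERF_SAMPLE_READ delivery: each value in a read group is turned into its
 * own sample for the evsel it belongs to, with the period taken as the delta
 * from the previously seen counter value.
 */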
static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

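/*
 * Central dispatch for kernel-generated records: look up the evsel and
 * machine for the sample, update the evlist statistics and call the matching
 * perf_tool callback.
 */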
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

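/*
 * Parse the sample, give auxtrace decoders a chance to consume the event
 * (a positive return means it was handled there), then deliver it to the
 * machines/tool dispatch above.
 */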
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

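/*
 * User/synthetic records (attr, tracing data, auxtrace info, stat, feature,
 * compressed data, ...) are processed immediately and never go through the
 * ordered-events queue.
 */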
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

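/*
 * Read one event at an arbitrary file offset without disturbing normal
 * processing, byte-swapping it and parsing the sample if requested.
 */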
5a52f33a
AH
1585int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1586 void *buf, size_t buf_sz,
1587 union perf_event **event_ptr,
1588 struct perf_sample *sample)
1589{
1590 union perf_event *event;
1591 size_t hdr_sz, rest;
1592 int fd;
1593
1594 if (session->one_mmap && !session->header.needs_swap) {
1595 event = file_offset - session->one_mmap_offset +
1596 session->one_mmap_addr;
1597 goto out_parse_sample;
1598 }
1599
8ceb41d7 1600 if (perf_data__is_pipe(session->data))
5a52f33a
AH
1601 return -1;
1602
8ceb41d7 1603 fd = perf_data__fd(session->data);
5a52f33a
AH
1604 hdr_sz = sizeof(struct perf_event_header);
1605
1606 if (buf_sz < hdr_sz)
1607 return -1;
1608
1609 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
554e92ed 1610 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
5a52f33a
AH
1611 return -1;
1612
1613 event = (union perf_event *)buf;
1614
1615 if (session->header.needs_swap)
1616 perf_event_header__bswap(&event->header);
1617
554e92ed 1618 if (event->header.size < hdr_sz || event->header.size > buf_sz)
5a52f33a
AH
1619 return -1;
1620
	buf += hdr_sz;	/* keep the header we just read intact */
1621 	rest = event->header.size - hdr_sz;
1622
554e92ed 1623 if (readn(fd, buf, rest) != (ssize_t)rest)
5a52f33a
AH
1624 return -1;
1625
1626 if (session->header.needs_swap)
1627 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1628
1629out_parse_sample:
1630
1631 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1632 perf_evlist__parse_sample(session->evlist, event, sample))
1633 return -1;
1634
1635 *event_ptr = event;
1636
1637 return 0;
1638}
1639
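/*
 * Editorial usage sketch, not part of the original file: how a caller might
 * peek at an event at a known file offset without disturbing the main
 * processing loop.  The 64KiB buffer is an assumption that relies on
 * perf_event_header.size being a u16; perf_session__peek_event() fails if
 * header.size > buf_sz.  Passing a NULL sample skips sample parsing (see the
 * guard in the function above).
 */
static int __maybe_unused example_peek(struct perf_session *session, off_t offset)
{
	size_t buf_sz = 1 << 16;	/* covers any possible event size */
	char *buf = malloc(buf_sz);
	union perf_event *event = NULL;
	int err = -1;

	if (!buf)
		return -ENOMEM;

	if (perf_session__peek_event(session, offset, buf, buf_sz,
				     &event, NULL) == 0) {
		pr_debug("peeked event type %u at %#" PRIx64 "\n",
			 event->header.type, (u64)offset);
		err = 0;
	}

	free(buf);
	return err;
}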
d5652d86 1640static s64 perf_session__process_event(struct perf_session *session,
b7b61cbe 1641 union perf_event *event, u64 file_offset)
ba74f064 1642{
63503dba 1643 struct evlist *evlist = session->evlist;
9870d780 1644 struct perf_tool *tool = session->tool;
ba74f064
TG
1645 int ret;
1646
268fb20f 1647 if (session->header.needs_swap)
313e53b0 1648 event_swap(event, perf_evlist__sample_id_all(evlist));
ba74f064
TG
1649
1650 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1651 return -EINVAL;
1652
313e53b0 1653 events_stats__inc(&evlist->stats, event->header.type);
ba74f064
TG
1654
1655 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
b7b61cbe 1656 return perf_session__process_user_event(session, event, file_offset);
cbf41645 1657
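	/*
	 * The tool asked for ordered delivery: queue the event by timestamp.
	 * perf_session__queue_event() returns -ETIME when it cannot queue the
	 * event (typically because no usable timestamp could be parsed), in
	 * which case the event falls through to immediate delivery below.
	 */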
0a8cb85c 1658 if (tool->ordered_events) {
631e8f0a 1659 u64 timestamp = -1ULL;
93d10af2
JO
1660
1661 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
631e8f0a 1662 if (ret && ret != -1)
93d10af2
JO
1663 return ret;
1664
1665 ret = perf_session__queue_event(session, event, timestamp, file_offset);
cbf41645
TG
1666 if (ret != -ETIME)
1667 return ret;
1668 }
1669
93d10af2 1670 return perf_session__deliver_event(session, event, tool, file_offset);
06aae590
ACM
1671}
1672
316c7136 1673void perf_event_header__bswap(struct perf_event_header *hdr)
ba21594c 1674{
316c7136
ACM
1675 hdr->type = bswap_32(hdr->type);
1676 hdr->misc = bswap_16(hdr->misc);
1677 hdr->size = bswap_16(hdr->size);
ba21594c
ACM
1678}
1679
b424eba2
ACM
1680struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1681{
1fcb8768 1682 return machine__findnew_thread(&session->machines.host, -1, pid);
b424eba2
ACM
1683}
1684
b25756df
AH
1685/*
1686 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1687 * So here a single thread is created for that. In reality there is a separate
1688 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1689 * is only one. That causes problems for some tools, which need workarounds, for
1690 * example get_idle_thread() in builtin-sched.c or thread_stack__per_cpu(); a
 * minimal per-cpu sketch follows this function.
1691 */
9d8b172f 1692int perf_session__register_idle_thread(struct perf_session *session)
06aae590 1693{
1fcb8768 1694 struct thread *thread;
9d8b172f 1695 int err = 0;
06aae590 1696
1fcb8768 1697 thread = machine__findnew_thread(&session->machines.host, 0, 0);
162f0bef 1698 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
06aae590 1699 pr_err("problem inserting idle task.\n");
9d8b172f 1700 err = -1;
06aae590
ACM
1701 }
1702
f3b3614a
HB
1703 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1704 pr_err("problem inserting idle task.\n");
1705 err = -1;
1706 }
1707
9d8b172f
MH
1708 /* machine__findnew_thread() got the thread, so put it */
1709 thread__put(thread);
1710 return err;
06aae590
ACM
1711}
1712
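/*
 * Editorial sketch, not part of the original file: since every idle sample is
 * attributed to the single pid 0/tid 0 thread registered above, a tool that
 * needs per-cpu idle state typically keys its own table by sample->cpu rather
 * than by thread, in the spirit of the workarounds mentioned in the comment
 * above.  The example_* names below are hypothetical.
 */
struct example_idle_state {
	u64 last_idle_entry;
};

static struct example_idle_state example_idle_states[MAX_NR_CPUS];

static __maybe_unused struct example_idle_state *
example_idle_state__get(struct perf_sample *sample)
{
	if (sample->cpu >= MAX_NR_CPUS)
		return NULL;
	return &example_idle_states[sample->cpu];
}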
f06149c0
WN
1713static void
1714perf_session__warn_order(const struct perf_session *session)
1715{
1716 const struct ordered_events *oe = &session->ordered_events;
32dcd021 1717 struct evsel *evsel;
f06149c0
WN
1718 bool should_warn = true;
1719
1720 evlist__for_each_entry(session->evlist, evsel) {
1fc632ce 1721 if (evsel->core.attr.write_backward)
f06149c0
WN
1722 should_warn = false;
1723 }
1724
1725 if (!should_warn)
1726 return;
1727 if (oe->nr_unordered_events != 0)
1728 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1729}
1730
9870d780 1731static void perf_session__warn_about_errors(const struct perf_session *session)
11095994 1732{
9870d780 1733 const struct events_stats *stats = &session->evlist->stats;
9870d780
ACM
1734
1735 if (session->tool->lost == perf_event__process_lost &&
ccda068f 1736 stats->nr_events[PERF_RECORD_LOST] != 0) {
7b27509f
ACM
1737 ui__warning("Processed %d events and lost %d chunks!\n\n"
1738 "Check IO/CPU overload!\n\n",
ccda068f
ACM
1739 stats->nr_events[0],
1740 stats->nr_events[PERF_RECORD_LOST]);
11095994
ACM
1741 }
1742
c4937a91
KL
1743 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1744 double drop_rate;
1745
1746 drop_rate = (double)stats->total_lost_samples /
1747 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1748 if (drop_rate > 0.05) {
41a43dac 1749 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
c4937a91
KL
1750 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1751 drop_rate * 100.0);
1752 }
1753 }
1754
a38f48e3
AH
1755 if (session->tool->aux == perf_event__process_aux &&
1756 stats->total_aux_lost != 0) {
1757 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1758 stats->total_aux_lost,
1759 stats->nr_events[PERF_RECORD_AUX]);
1760 }
1761
05a1f47e
AS
1762 if (session->tool->aux == perf_event__process_aux &&
1763 stats->total_aux_partial != 0) {
1764 bool vmm_exclusive = false;
1765
1766 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1767 &vmm_exclusive);
1768
1769 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1770 "Are you running a KVM guest in the background?%s\n\n",
1771 stats->total_aux_partial,
1772 stats->nr_events[PERF_RECORD_AUX],
1773 vmm_exclusive ?
1774 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1775 "will reduce the gaps to only guest's timeslices." :
1776 "");
1777 }
1778
ccda068f 1779 if (stats->nr_unknown_events != 0) {
11095994
ACM
1780 ui__warning("Found %u unknown events!\n\n"
1781 "Is this an older tool processing a perf.data "
1782 "file generated by a more recent tool?\n\n"
1783 "If that is not the case, consider "
1784 "reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f 1785 stats->nr_unknown_events);
11095994
ACM
1786 }
1787
ccda068f 1788 if (stats->nr_unknown_id != 0) {
9e69c210 1789 ui__warning("%u samples with id not present in the header\n",
ccda068f 1790 stats->nr_unknown_id);
9e69c210
ACM
1791 }
1792
ccda068f 1793 if (stats->nr_invalid_chains != 0) {
75be989a
ACM
1794 ui__warning("Found invalid callchains!\n\n"
1795 "%u out of %u events were discarded for this reason.\n\n"
1796 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f
ACM
1797 stats->nr_invalid_chains,
1798 stats->nr_events[PERF_RECORD_SAMPLE]);
75be989a 1799 }
0c095715 1800
ccda068f 1801 if (stats->nr_unprocessable_samples != 0) {
0c095715
JR
1802 ui__warning("%u unprocessable samples recorded.\n"
1803 "Do you have a KVM guest running and not using 'perf kvm'?\n",
ccda068f 1804 stats->nr_unprocessable_samples);
0c095715 1805 }
f61ff6c0 1806
f06149c0 1807 perf_session__warn_order(session);
85ed4729
AH
1808
1809 events_stats__auxtrace_error_warn(stats);
930e6fcd
KL
1810
1811 if (stats->nr_proc_map_timeout != 0) {
1812 ui__warning("%d map information files for pre-existing threads were\n"
1813 "not processed; if there are samples for addresses they\n"
1814 "will not be resolved. You may find out which these\n"
1815 "threads are by running with -v and redirecting the output\n"
9d9cad76
KL
1816 "to a file.\n"
1817 "Is the time limit to process the proc map too short?\n"
1818 "Increase it with --proc-map-timeout.\n",
930e6fcd
KL
1819 stats->nr_proc_map_timeout);
1820 }
11095994
ACM
1821}
1822
a5499b37
AH
1823static int perf_session__flush_thread_stack(struct thread *thread,
1824 void *p __maybe_unused)
1825{
1826 return thread_stack__flush(thread);
1827}
1828
1829static int perf_session__flush_thread_stacks(struct perf_session *session)
1830{
1831 return machines__for_each_thread(&session->machines,
1832 perf_session__flush_thread_stack,
1833 NULL);
1834}
1835
8dc58101
TZ
1836volatile int session_done;
1837
cb62c6f1
AB
1838static int __perf_session__process_decomp_events(struct perf_session *session);
1839
b7b61cbe 1840static int __perf_session__process_pipe_events(struct perf_session *session)
8dc58101 1841{
fa713a4e 1842 struct ordered_events *oe = &session->ordered_events;
9870d780 1843 struct perf_tool *tool = session->tool;
8ceb41d7 1844 int fd = perf_data__fd(session->data);
444d2866
SE
1845 union perf_event *event;
1846 uint32_t size, cur_size = 0;
1847 void *buf = NULL;
d5652d86 1848 s64 skip = 0;
8dc58101 1849 u64 head;
727ebd54 1850 ssize_t err;
8dc58101
TZ
1851 void *p;
1852
45694aa7 1853 perf_tool__fill_defaults(tool);
8dc58101
TZ
1854
1855 head = 0;
444d2866
SE
1856 cur_size = sizeof(union perf_event);
1857
1858 buf = malloc(cur_size);
1859 if (!buf)
1860 return -errno;
1e0d4f02 1861 ordered_events__set_copy_on_queue(oe, true);
8dc58101 1862more:
444d2866 1863 event = buf;
cc9784bd 1864 err = readn(fd, event, sizeof(struct perf_event_header));
8dc58101
TZ
1865 if (err <= 0) {
1866 if (err == 0)
1867 goto done;
1868
1869 pr_err("failed to read event header\n");
1870 goto out_err;
1871 }
1872
316c7136 1873 if (session->header.needs_swap)
444d2866 1874 perf_event_header__bswap(&event->header);
8dc58101 1875
444d2866 1876 size = event->header.size;
27389d78
AH
1877 if (size < sizeof(struct perf_event_header)) {
1878 pr_err("bad event header size\n");
1879 goto out_err;
1880 }
8dc58101 1881
444d2866
SE
1882 if (size > cur_size) {
1883 void *new = realloc(buf, size);
1884 if (!new) {
1885 pr_err("failed to allocate memory to read event\n");
1886 goto out_err;
1887 }
1888 buf = new;
1889 cur_size = size;
1890 event = buf;
1891 }
1892 p = event;
8dc58101
TZ
1893 p += sizeof(struct perf_event_header);
1894
794e43b5 1895 if (size - sizeof(struct perf_event_header)) {
cc9784bd 1896 err = readn(fd, p, size - sizeof(struct perf_event_header));
794e43b5
TZ
1897 if (err <= 0) {
1898 if (err == 0) {
1899 pr_err("unexpected end of event stream\n");
1900 goto done;
1901 }
8dc58101 1902
794e43b5
TZ
1903 pr_err("failed to read event data\n");
1904 goto out_err;
1905 }
8dc58101
TZ
1906 }
1907
b7b61cbe 1908 if ((skip = perf_session__process_event(session, event, head)) < 0) {
9389a460 1909 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
444d2866 1910 head, event->header.size, event->header.type);
9389a460
JO
1911 err = -EINVAL;
1912 goto out_err;
8dc58101
TZ
1913 }
1914
1915 head += size;
1916
8dc58101
TZ
1917 if (skip > 0)
1918 head += skip;
1919
cb62c6f1
AB
1920 err = __perf_session__process_decomp_events(session);
1921 if (err)
1922 goto out_err;
1923
8dc58101
TZ
1924 if (!session_done())
1925 goto more;
1926done:
8c16b649 1927 /* do the final flush for ordered samples */
b7b61cbe 1928 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
1929 if (err)
1930 goto out_err;
1931 err = auxtrace__flush_events(session, tool);
a5499b37
AH
1932 if (err)
1933 goto out_err;
1934 err = perf_session__flush_thread_stacks(session);
8dc58101 1935out_err:
444d2866 1936 free(buf);
075ca1eb
JO
1937 if (!tool->no_warn)
1938 perf_session__warn_about_errors(session);
adc56ed1 1939 ordered_events__free(&session->ordered_events);
c446870d 1940 auxtrace__free_events(session);
8dc58101
TZ
1941 return err;
1942}
1943
998bedc8
FW
1944static union perf_event *
1945fetch_mmaped_event(struct perf_session *session,
1946 u64 head, size_t mmap_size, char *buf)
1947{
1948 union perf_event *event;
1949
1950 /*
1951 * Ensure we have enough space remaining to read
1952 * the event size from its header.
1953 */
1954 if (head + sizeof(event->header) > mmap_size)
1955 return NULL;
1956
1957 event = (union perf_event *)(buf + head);
1958
1959 if (session->header.needs_swap)
1960 perf_event_header__bswap(&event->header);
1961
27389d78
AH
1962 if (head + event->header.size > mmap_size) {
1963 /* We're not fetching the event so swap back again */
1964 if (session->header.needs_swap)
1965 perf_event_header__bswap(&event->header);
57fc032a
ACM
1966 pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
1967 __func__, head, event->header.size, mmap_size);
1968 return ERR_PTR(-EINVAL);
27389d78 1969 }
998bedc8
FW
1970
1971 return event;
1972}
1973
cb62c6f1
AB
1974static int __perf_session__process_decomp_events(struct perf_session *session)
1975{
1976 s64 skip;
1977 u64 size, file_pos = 0;
1978 struct decomp *decomp = session->decomp_last;
1979
1980 if (!decomp)
1981 return 0;
1982
1983 while (decomp->head < decomp->size && !session_done()) {
1984 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1985
57fc032a
ACM
1986 if (IS_ERR(event))
1987 return PTR_ERR(event);
1988
cb62c6f1
AB
1989 if (!event)
1990 break;
1991
1992 size = event->header.size;
1993
1994 if (size < sizeof(struct perf_event_header) ||
1995 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1996 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1997 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1998 return -EINVAL;
1999 }
2000
2001 if (skip)
2002 size += skip;
2003
2004 decomp->head += size;
2005 }
2006
2007 return 0;
2008}
2009
35d48ddf
DM
2010/*
2011 * On 64bit we can mmap the data file in one go. No need for tiny mmap
2012 * slices. On 32bit we use 32MB (see the worked windowing example below).
2013 */
2014#if BITS_PER_LONG == 64
2015#define MMAP_SIZE ULLONG_MAX
2016#define NUM_MMAPS 1
2017#else
2018#define MMAP_SIZE (32 * 1024 * 1024ULL)
2019#define NUM_MMAPS 128
2020#endif
2021
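/*
 * Editorial note with a worked example of the windowing arithmetic used by
 * reader__process_events() below (numbers assumed): with page_size = 4096 and
 * data_offset = 4136, the first window starts at
 * file_offset = 4096 * (4136 / 4096) = 4096 and head = 4136 - 4096 = 40.
 * When the next event no longer fits in the current window, head is rounded
 * down to a page boundary, file_offset advances by that amount, the remainder
 * stays in head and the buffer is remapped:
 *
 *	page_offset = page_size * (head / page_size);
 *	file_offset += page_offset;
 *	head -= page_offset;
 *
 * so no event is ever split across an mmap window as seen by the parser.
 */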
e51f8061
JO
2022struct reader;
2023
2024typedef s64 (*reader_cb_t)(struct perf_session *session,
2025 union perf_event *event,
2026 u64 file_offset);
2027
82715eb1 2028struct reader {
e51f8061
JO
2029 int fd;
2030 u64 data_size;
2031 u64 data_offset;
2032 reader_cb_t process;
82715eb1
JO
2033};
2034
3c7b67b2
JO
2035static int
2036reader__process_events(struct reader *rd, struct perf_session *session,
2037 struct ui_progress *prog)
06aae590 2038{
3c7b67b2 2039 u64 data_size = rd->data_size;
d5652d86 2040 u64 head, page_offset, file_offset, file_pos, size;
3c7b67b2 2041 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
0c1fe6b2 2042 size_t mmap_size;
35d48ddf 2043 char *buf, *mmaps[NUM_MMAPS];
8115d60c 2044 union perf_event *event;
d5652d86 2045 s64 skip;
0331ee0c 2046
3c7b67b2 2047 page_offset = page_size * (rd->data_offset / page_size);
0331ee0c 2048 file_offset = page_offset;
3c7b67b2 2049 head = rd->data_offset - page_offset;
06aae590 2050
3c7b67b2 2051 ui_progress__init_size(prog, data_size, "Processing events...");
381c02f6 2052
3c7b67b2 2053 data_size += rd->data_offset;
55b44629 2054
35d48ddf 2055 mmap_size = MMAP_SIZE;
4f5a473d
JO
2056 if (mmap_size > data_size) {
2057 mmap_size = data_size;
919d86d3
AH
2058 session->one_mmap = true;
2059 }
55b44629 2060
fe174207
TG
2061 memset(mmaps, 0, sizeof(mmaps));
2062
ba21594c
ACM
2063 mmap_prot = PROT_READ;
2064 mmap_flags = MAP_SHARED;
2065
0331ee0c 2066 if (session->header.needs_swap) {
ba21594c
ACM
2067 mmap_prot |= PROT_WRITE;
2068 mmap_flags = MAP_PRIVATE;
2069 }
06aae590 2070remap:
3c7b67b2 2071 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
55b44629 2072 file_offset);
06aae590
ACM
2073 if (buf == MAP_FAILED) {
2074 pr_err("failed to mmap file\n");
2075 err = -errno;
3c7b67b2 2076 goto out;
06aae590 2077 }
fe174207
TG
2078 mmaps[map_idx] = buf;
2079 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
d6513281 2080 file_pos = file_offset + head;
919d86d3
AH
2081 if (session->one_mmap) {
2082 session->one_mmap_addr = buf;
2083 session->one_mmap_offset = file_offset;
2084 }
06aae590
ACM
2085
2086more:
998bedc8 2087 event = fetch_mmaped_event(session, head, mmap_size, buf);
57fc032a
ACM
2088 if (IS_ERR(event))
2089 return PTR_ERR(event);
2090
998bedc8 2091 if (!event) {
fe174207
TG
2092 if (mmaps[map_idx]) {
2093 munmap(mmaps[map_idx], mmap_size);
2094 mmaps[map_idx] = NULL;
2095 }
06aae590 2096
0331ee0c
TG
2097 page_offset = page_size * (head / page_size);
2098 file_offset += page_offset;
2099 head -= page_offset;
06aae590
ACM
2100 goto remap;
2101 }
2102
2103 size = event->header.size;
2104
167e418f
TR
2105 skip = -EINVAL;
2106
27389d78 2107 if (size < sizeof(struct perf_event_header) ||
e51f8061 2108 (skip = rd->process(session, event, file_pos)) < 0) {
167e418f 2109 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
9389a460 2110 file_offset + head, event->header.size,
167e418f
TR
2111 event->header.type, strerror(-skip));
2112 err = skip;
3c7b67b2 2113 goto out;
06aae590
ACM
2114 }
2115
6f917c70
AH
2116 if (skip)
2117 size += skip;
2118
06aae590 2119 head += size;
d6513281 2120 file_pos += size;
06aae590 2121
cb62c6f1
AB
2122 err = __perf_session__process_decomp_events(session);
2123 if (err)
2124 goto out;
2125
3c7b67b2 2126 ui_progress__update(prog, size);
55b44629 2127
33e940a2 2128 if (session_done())
8c16b649 2129 goto out;
33e940a2 2130
4f5a473d 2131 if (file_pos < data_size)
06aae590 2132 goto more;
d6513281 2133
8c16b649 2134out:
3c7b67b2
JO
2135 return err;
2136}
2137
e51f8061
JO
2138static s64 process_simple(struct perf_session *session,
2139 union perf_event *event,
2140 u64 file_offset)
2141{
2142 return perf_session__process_event(session, event, file_offset);
2143}
2144
3c7b67b2
JO
2145static int __perf_session__process_events(struct perf_session *session)
2146{
2147 struct reader rd = {
2148 .fd = perf_data__fd(session->data),
2149 .data_size = session->header.data_size,
2150 .data_offset = session->header.data_offset,
e51f8061 2151 .process = process_simple,
3c7b67b2
JO
2152 };
2153 struct ordered_events *oe = &session->ordered_events;
2154 struct perf_tool *tool = session->tool;
2155 struct ui_progress prog;
2156 int err;
2157
2158 perf_tool__fill_defaults(tool);
2159
2160 if (rd.data_size == 0)
2161 return -1;
2162
2163 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2164
2165 err = reader__process_events(&rd, session, &prog);
2166 if (err)
2167 goto out_err;
c61e52ee 2168 /* do the final flush for ordered samples */
b7b61cbe 2169 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
2170 if (err)
2171 goto out_err;
2172 err = auxtrace__flush_events(session, tool);
a5499b37
AH
2173 if (err)
2174 goto out_err;
2175 err = perf_session__flush_thread_stacks(session);
06aae590 2176out_err:
a5580f3e 2177 ui_progress__finish();
075ca1eb
JO
2178 if (!tool->no_warn)
2179 perf_session__warn_about_errors(session);
b26dc730
WN
2180 /*
2181 * We may be switching perf.data output; make ordered_events
2182 * reusable.
2183 */
2184 ordered_events__reinit(&session->ordered_events);
c446870d 2185 auxtrace__free_events(session);
919d86d3 2186 session->one_mmap = false;
06aae590
ACM
2187 return err;
2188}
27295592 2189
b7b61cbe 2190int perf_session__process_events(struct perf_session *session)
6122e4e4 2191{
9d8b172f 2192 if (perf_session__register_idle_thread(session) < 0)
6122e4e4
ACM
2193 return -ENOMEM;
2194
7ba4da10
JO
2195 if (perf_data__is_pipe(session->data))
2196 return __perf_session__process_pipe_events(session);
88ca895d 2197
7ba4da10 2198 return __perf_session__process_events(session);
6122e4e4
ACM
2199}
2200
7f3be652 2201bool perf_session__has_traces(struct perf_session *session, const char *msg)
27295592 2202{
32dcd021 2203 struct evsel *evsel;
93ea01c2 2204
e5cadb93 2205 evlist__for_each_entry(session->evlist, evsel) {
1fc632ce 2206 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
93ea01c2 2207 return true;
27295592
ACM
2208 }
2209
93ea01c2
DA
2210 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2211 return false;
27295592 2212}
56b03f3c 2213
3183f8ca 2214int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
56b03f3c
ACM
2215{
2216 char *bracket;
a1645ce1 2217 struct ref_reloc_sym *ref;
3183f8ca 2218 struct kmap *kmap;
a1645ce1
ZY
2219
2220 ref = zalloc(sizeof(struct ref_reloc_sym));
2221 if (ref == NULL)
2222 return -ENOMEM;
56b03f3c 2223
a1645ce1
ZY
2224 ref->name = strdup(symbol_name);
2225 if (ref->name == NULL) {
2226 free(ref);
56b03f3c 2227 return -ENOMEM;
a1645ce1 2228 }
56b03f3c 2229
a1645ce1 2230 bracket = strchr(ref->name, ']');
56b03f3c
ACM
2231 if (bracket)
2232 *bracket = '\0';
2233
a1645ce1 2234 ref->addr = addr;
9de89fe7 2235
3183f8ca
ACM
2236 kmap = map__kmap(map);
2237 if (kmap)
a1645ce1 2238 kmap->ref_reloc_sym = ref;
9de89fe7 2239
56b03f3c
ACM
2240 return 0;
2241}
1f626bc3 2242
316c7136 2243size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1f626bc3 2244{
316c7136 2245 return machines__fprintf_dsos(&session->machines, fp);
1f626bc3 2246}
f869097e 2247
316c7136 2248size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
417c2ff6 2249 bool (skip)(struct dso *dso, int parm), int parm)
f869097e 2250{
316c7136 2251 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
f869097e 2252}
e248de33
ACM
2253
2254size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2255{
c446870d
AH
2256 size_t ret;
2257 const char *msg = "";
2258
2259 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2260 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2261
fe692ac8 2262 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
e248de33 2263
75be989a 2264 ret += events_stats__fprintf(&session->evlist->stats, fp);
e248de33
ACM
2265 return ret;
2266}
c0230b2b 2267
b424eba2
ACM
2268size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2269{
2270 /*
2271 * FIXME: Here we have to actually print all the machines in this
2272 * session, not just the host...
2273 */
876650e6 2274 return machine__fprintf(&session->machines.host, fp);
b424eba2
ACM
2275}
2276
32dcd021 2277struct evsel *perf_session__find_first_evtype(struct perf_session *session,
9cbdb702
DA
2278 unsigned int type)
2279{
32dcd021 2280 struct evsel *pos;
9cbdb702 2281
e5cadb93 2282 evlist__for_each_entry(session->evlist, pos) {
1fc632ce 2283 if (pos->core.attr.type == type)
9cbdb702
DA
2284 return pos;
2285 }
2286 return NULL;
2287}
2288
5d67be97
AB
2289int perf_session__cpu_bitmap(struct perf_session *session,
2290 const char *cpu_list, unsigned long *cpu_bitmap)
2291{
8bac41cb 2292 int i, err = -1;
f854839b 2293 struct perf_cpu_map *map;
5d67be97
AB
2294
2295 for (i = 0; i < PERF_TYPE_MAX; ++i) {
32dcd021 2296 struct evsel *evsel;
5d67be97
AB
2297
2298 evsel = perf_session__find_first_evtype(session, i);
2299 if (!evsel)
2300 continue;
2301
1fc632ce 2302 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
5d67be97 2303 pr_err("File does not contain CPU events. "
30795467 2304 "Remove -C option to proceed.\n");
5d67be97
AB
2305 return -1;
2306 }
2307 }
2308
9c3516d1 2309 map = perf_cpu_map__new(cpu_list);
47fbe53b
DA
2310 if (map == NULL) {
2311 pr_err("Invalid cpu_list\n");
2312 return -1;
2313 }
5d67be97
AB
2314
2315 for (i = 0; i < map->nr; i++) {
2316 int cpu = map->map[i];
2317
2318 if (cpu >= MAX_NR_CPUS) {
2319 pr_err("Requested CPU %d too large. "
2320 "Consider raising MAX_NR_CPUS\n", cpu);
8bac41cb 2321 goto out_delete_map;
5d67be97
AB
2322 }
2323
2324 set_bit(cpu, cpu_bitmap);
2325 }
2326
8bac41cb
SF
2327 err = 0;
2328
2329out_delete_map:
38f01d8d 2330 perf_cpu_map__put(map);
8bac41cb 2331 return err;
5d67be97 2332}
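/*
 * Editorial usage sketch, not part of the original file: how a command might
 * turn a -C/--cpu list into a bitmap and test it per sample.  It assumes the
 * tools copies of linux/bitmap.h provide DECLARE_BITMAP(), bitmap_zero() and
 * test_bit(); everything named example_* is hypothetical.  A real tool builds
 * the bitmap once at setup time instead of on every sample.
 */
static bool __maybe_unused example_sample_on_listed_cpu(struct perf_session *session,
							const char *cpu_list,
							struct perf_sample *sample)
{
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);

	if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
		return false;

	if (sample->cpu >= MAX_NR_CPUS)
		return false;

	return test_bit(sample->cpu, cpu_bitmap);
}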
fbe96f29
SE
2333
2334void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2335 bool full)
2336{
fbe96f29
SE
2337 if (session == NULL || fp == NULL)
2338 return;
2339
fbe96f29 2340 fprintf(fp, "# ========\n");
fbe96f29
SE
2341 perf_header__fprintf_info(session, fp, full);
2342 fprintf(fp, "# ========\n#\n");
2343}
da378962
ACM
2344
2345
2346int __perf_session__set_tracepoints_handlers(struct perf_session *session,
32dcd021 2347 const struct evsel_str_handler *assocs,
da378962
ACM
2348 size_t nr_assocs)
2349{
32dcd021 2350 struct evsel *evsel;
da378962
ACM
2351 size_t i;
2352 int err;
2353
2354 for (i = 0; i < nr_assocs; i++) {
ccf53eac
ACM
2355 /*
2356 * If we are adding a handler for an event that is not in the
2357 * session, just ignore it.
2358 */
2359 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
da378962 2360 if (evsel == NULL)
ccf53eac 2361 continue;
da378962
ACM
2362
2363 err = -EEXIST;
744a9719 2364 if (evsel->handler != NULL)
ccf53eac 2365 goto out;
744a9719 2366 evsel->handler = assocs[i].handler;
da378962
ACM
2367 }
2368
2369 err = 0;
2370out:
2371 return err;
da378962 2372}
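/*
 * Editorial usage sketch, not part of the original file: tools such as
 * 'perf sched' and 'perf kmem' associate callbacks with tracepoints roughly
 * like this.  The example_* names are hypothetical; the evsel_str_handler
 * array is the real interface, and the stored pointer is later fetched back
 * through evsel->handler by the tool itself.
 */
static int example_sched_switch(struct evsel *evsel __maybe_unused,
				struct perf_sample *sample __maybe_unused)
{
	return 0;
}

static int __maybe_unused example_set_handlers(struct perf_session *session)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch", example_sched_switch },
	};

	return __perf_session__set_tracepoints_handlers(session, handlers,
							ARRAY_SIZE(handlers));
}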
3c659eed 2373
89f1688a
JO
2374int perf_event__process_id_index(struct perf_session *session,
2375 union perf_event *event)
3c659eed 2376{
63503dba 2377 struct evlist *evlist = session->evlist;
3c659eed
AH
2378 struct id_index_event *ie = &event->id_index;
2379 size_t i, nr, max_nr;
2380
2381 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2382 sizeof(struct id_index_entry);
2383 nr = ie->nr;
2384 if (nr > max_nr)
2385 return -EINVAL;
2386
2387 if (dump_trace)
2388 fprintf(stdout, " nr: %zu\n", nr);
2389
2390 for (i = 0; i < nr; i++) {
2391 struct id_index_entry *e = &ie->entries[i];
2392 struct perf_sample_id *sid;
2393
2394 if (dump_trace) {
2395 fprintf(stdout, " ... id: %"PRIu64, e->id);
2396 fprintf(stdout, " idx: %"PRIu64, e->idx);
2397 fprintf(stdout, " cpu: %"PRId64, e->cpu);
2398 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
2399 }
2400
2401 sid = perf_evlist__id2sid(evlist, e->id);
2402 if (!sid)
2403 return -ENOENT;
2404 sid->idx = e->idx;
2405 sid->cpu = e->cpu;
2406 sid->tid = e->tid;
2407 }
2408 return 0;
2409}
2410
2411int perf_event__synthesize_id_index(struct perf_tool *tool,
2412 perf_event__handler_t process,
63503dba 2413 struct evlist *evlist,
3c659eed
AH
2414 struct machine *machine)
2415{
2416 union perf_event *ev;
32dcd021 2417 struct evsel *evsel;
3c659eed
AH
2418 size_t nr = 0, i = 0, sz, max_nr, n;
2419 int err;
2420
2421 pr_debug2("Synthesizing id index\n");
2422
2423 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2424 sizeof(struct id_index_entry);
2425
e5cadb93 2426 evlist__for_each_entry(evlist, evsel)
3c659eed
AH
2427 nr += evsel->ids;
2428
2429 n = nr > max_nr ? max_nr : nr;
2430 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2431 ev = zalloc(sz);
2432 if (!ev)
2433 return -ENOMEM;
2434
2435 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2436 ev->id_index.header.size = sz;
2437 ev->id_index.nr = n;
2438
e5cadb93 2439 evlist__for_each_entry(evlist, evsel) {
3c659eed
AH
2440 u32 j;
2441
2442 for (j = 0; j < evsel->ids; j++) {
2443 struct id_index_entry *e;
2444 struct perf_sample_id *sid;
2445
2446 if (i >= n) {
2447 err = process(tool, ev, NULL, machine);
2448 if (err)
2449 goto out_err;
2450 nr -= n;
2451 i = 0;
2452 }
2453
2454 e = &ev->id_index.entries[i++];
2455
2456 e->id = evsel->id[j];
2457
2458 sid = perf_evlist__id2sid(evlist, e->id);
2459 if (!sid) {
2460 free(ev);
2461 return -ENOENT;
2462 }
2463
2464 e->idx = sid->idx;
2465 e->cpu = sid->cpu;
2466 e->tid = sid->tid;
2467 }
2468 }
2469
2470 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2471 ev->id_index.header.size = sz;
2472 ev->id_index.nr = nr;
2473
2474 err = process(tool, ev, NULL, machine);
2475out_err:
2476 free(ev);
2477
2478 return err;
2479}