perf ordered_events: Pass timestamp arg in perf_session__queue_event
[linux-2.6-block.git] tools/perf/util/session.c
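Note on the change named in the title: perf_session__queue_event() now takes the sample timestamp as an explicit argument, so callers parse the sample first and pass sample.time together with the file offset (see the definition at source line 875 and the call site at source line 1520 below). A minimal sketch of the prototype after this change, assuming it matches the declaration in util/session.h:

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset);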
b2441318 1// SPDX-License-Identifier: GPL-2.0
a43783ae 2#include <errno.h>
fd20e811 3#include <inttypes.h>
94c744b6 4#include <linux/kernel.h>
4e319027 5#include <traceevent/event-parse.h>
05a1f47e 6#include <api/fs/fs.h>
94c744b6 7
ba21594c 8#include <byteswap.h>
9#include <unistd.h>
10#include <sys/types.h>
a41794cd 11#include <sys/mman.h>
94c744b6 12
13#include "evlist.h"
14#include "evsel.h"
98521b38 15#include "memswap.h"
94c744b6 16#include "session.h"
45694aa7 17#include "tool.h"
a328626b 18#include "sort.h"
94c744b6 19#include "util.h"
5d67be97 20#include "cpumap.h"
0f6a3015 21#include "perf_regs.h"
b0a45203 22#include "asm/bug.h"
c446870d 23#include "auxtrace.h"
e7ff8920 24#include "thread.h"
a5499b37 25#include "thread-stack.h"
2d2aea6a 26#include "stat.h"
94c744b6 27
28static int perf_session__deliver_event(struct perf_session *session,
29 union perf_event *event,
30 struct perf_sample *sample,
31 struct perf_tool *tool,
32 u64 file_offset);
d10eb1eb 33
316c7136 34static int perf_session__open(struct perf_session *session)
94c744b6 35{
8ceb41d7 36 struct perf_data *data = session->data;
8dc58101 37
316c7136 38 if (perf_session__read_header(session) < 0) {
e87b4911 39 pr_err("incompatible file format (rerun with -v to learn more)\n");
6a4d98d7 40 return -1;
41 }
42
8ceb41d7 43 if (perf_data__is_pipe(data))
44 return 0;
45
46 if (perf_header__has_feat(&session->header, HEADER_STAT))
47 return 0;
48
316c7136 49 if (!perf_evlist__valid_sample_type(session->evlist)) {
e87b4911 50 pr_err("non matching sample_type\n");
6a4d98d7 51 return -1;
52 }
53
316c7136 54 if (!perf_evlist__valid_sample_id_all(session->evlist)) {
e87b4911 55 pr_err("non matching sample_id_all\n");
6a4d98d7 56 return -1;
57 }
58
316c7136 59 if (!perf_evlist__valid_read_format(session->evlist)) {
e87b4911 60 pr_err("non matching read_format\n");
6a4d98d7 61 return -1;
62 }
63
94c744b6 64 return 0;
65}
66
7b56cce2 67void perf_session__set_id_hdr_size(struct perf_session *session)
9c90a61c 68{
69 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
70
7b56cce2 71 machines__set_id_hdr_size(&session->machines, id_hdr_size);
72}
73
316c7136 74int perf_session__create_kernel_maps(struct perf_session *session)
a1645ce1 75{
316c7136 76 int ret = machine__create_kernel_maps(&session->machines.host);
a1645ce1 77
a1645ce1 78 if (ret >= 0)
316c7136 79 ret = machines__create_guest_kernel_maps(&session->machines);
80 return ret;
81}
82
316c7136 83static void perf_session__destroy_kernel_maps(struct perf_session *session)
076c6e45 84{
316c7136 85 machines__destroy_kernel_maps(&session->machines);
86}
87
88static bool perf_session__has_comm_exec(struct perf_session *session)
89{
90 struct perf_evsel *evsel;
91
e5cadb93 92 evlist__for_each_entry(session->evlist, evsel) {
93 if (evsel->attr.comm_exec)
94 return true;
95 }
96
97 return false;
98}
99
100static void perf_session__set_comm_exec(struct perf_session *session)
101{
102 bool comm_exec = perf_session__has_comm_exec(session);
103
104 machines__set_comm_exec(&session->machines, comm_exec);
105}
106
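/*
 * Deliver callback for the ordered_events queue: re-parse the queued
 * event's sample and push it through the normal session delivery path.
 */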
d10eb1eb 107static int ordered_events__deliver_event(struct ordered_events *oe,
9870d780 108 struct ordered_event *event)
d10eb1eb 109{
110 struct perf_sample sample;
111 struct perf_session *session = container_of(oe, struct perf_session,
112 ordered_events);
113 int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
114
115 if (ret) {
116 pr_err("Can't parse sample, err = %d\n", ret);
117 return ret;
118 }
119
120 return perf_session__deliver_event(session, event->event, &sample,
121 session->tool, event->file_offset);
122}
123
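/*
 * Allocate and set up a session.  When 'data' is given it is opened here;
 * in read mode the file header is validated before the session is returned.
 */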
8ceb41d7 124struct perf_session *perf_session__new(struct perf_data *data,
f5fc1412 125 bool repipe, struct perf_tool *tool)
94c744b6 126{
316c7136 127 struct perf_session *session = zalloc(sizeof(*session));
efad1415 128
316c7136 129 if (!session)
130 goto out;
131
316c7136 132 session->repipe = repipe;
9870d780 133 session->tool = tool;
99fa2984 134 INIT_LIST_HEAD(&session->auxtrace_index);
316c7136 135 machines__init(&session->machines);
9870d780 136 ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
94c744b6 137
138 if (data) {
139 if (perf_data__open(data))
64abebf7 140 goto out_delete;
6a4d98d7 141
8ceb41d7 142 session->data = data;
6a4d98d7 143
8ceb41d7 144 if (perf_data__is_read(data)) {
316c7136 145 if (perf_session__open(session) < 0)
146 goto out_close;
147
148 /*
149 * set session attributes that are present in perf.data
150 * but not in pipe-mode.
151 */
8ceb41d7 152 if (!data->is_pipe) {
153 perf_session__set_id_hdr_size(session);
154 perf_session__set_comm_exec(session);
155 }
6a4d98d7 156 }
157 } else {
158 session->machines.host.env = &perf_env;
159 }
160
8ceb41d7 161 if (!data || perf_data__is_write(data)) {
162 /*
163 * In O_RDONLY mode this will be performed when reading the
8115d60c 164 * kernel MMAP event, in perf_event__process_mmap().
64abebf7 165 */
316c7136 166 if (perf_session__create_kernel_maps(session) < 0)
a5c2a4c9 167 pr_warning("Cannot read kernel map\n");
64abebf7 168 }
d549c769 169
170 /*
171 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
172 * processed, so perf_evlist__sample_id_all is not meaningful here.
173 */
8ceb41d7 174 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
0a8cb85c 175 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
21ef97f0 176 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
0a8cb85c 177 tool->ordered_events = false;
d10eb1eb 178 }
21ef97f0 179
316c7136 180 return session;
181
182 out_close:
8ceb41d7 183 perf_data__close(data);
6a4d98d7 184 out_delete:
316c7136 185 perf_session__delete(session);
6a4d98d7 186 out:
4aa65636 187 return NULL;
188}
189
190static void perf_session__delete_threads(struct perf_session *session)
191{
876650e6 192 machine__delete_threads(&session->machines.host);
193}
194
316c7136 195void perf_session__delete(struct perf_session *session)
94c744b6 196{
197 if (session == NULL)
198 return;
c446870d 199 auxtrace__free(session);
99fa2984 200 auxtrace_index__free(&session->auxtrace_index);
316c7136 201 perf_session__destroy_kernel_maps(session);
316c7136 202 perf_session__delete_threads(session);
f0ce888c 203 perf_env__exit(&session->header.env);
316c7136 204 machines__exit(&session->machines);
205 if (session->data)
206 perf_data__close(session->data);
316c7136 207 free(session);
94c744b6 208}
a328626b 209
47c3d109
AH
210static int process_event_synth_tracing_data_stub(struct perf_tool *tool
211 __maybe_unused,
212 union perf_event *event
1d037ca1
IT
213 __maybe_unused,
214 struct perf_session *session
215 __maybe_unused)
d20deb64
ACM
216{
217 dump_printf(": unhandled!\n");
218 return 0;
219}
220
47c3d109
AH
221static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
222 union perf_event *event __maybe_unused,
1d037ca1
IT
223 struct perf_evlist **pevlist
224 __maybe_unused)
10d0f086
ACM
225{
226 dump_printf(": unhandled!\n");
227 return 0;
228}
229
ffe77725
JO
230static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
231 union perf_event *event __maybe_unused,
232 struct perf_evlist **pevlist
233 __maybe_unused)
234{
2d2aea6a
JO
235 if (dump_trace)
236 perf_event__fprintf_event_update(event, stdout);
237
ffe77725
JO
238 dump_printf(": unhandled!\n");
239 return 0;
240}
241
1d037ca1
IT
242static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
243 union perf_event *event __maybe_unused,
244 struct perf_sample *sample __maybe_unused,
245 struct perf_evsel *evsel __maybe_unused,
246 struct machine *machine __maybe_unused)
9e69c210
ACM
247{
248 dump_printf(": unhandled!\n");
249 return 0;
250}
251
1d037ca1
IT
252static int process_event_stub(struct perf_tool *tool __maybe_unused,
253 union perf_event *event __maybe_unused,
254 struct perf_sample *sample __maybe_unused,
255 struct machine *machine __maybe_unused)
06aae590
ACM
256{
257 dump_printf(": unhandled!\n");
258 return 0;
259}
260
1d037ca1
IT
261static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
262 union perf_event *event __maybe_unused,
d704ebda 263 struct ordered_events *oe __maybe_unused)
743eb868
ACM
264{
265 dump_printf(": unhandled!\n");
266 return 0;
267}
268
45694aa7 269static int process_finished_round(struct perf_tool *tool,
d20deb64 270 union perf_event *event,
d704ebda 271 struct ordered_events *oe);
d6b17beb 272
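/*
 * Consume 'n' bytes from a file descriptor that cannot be lseek()ed
 * (pipe input); used by the auxtrace stub below.
 */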
273static int skipn(int fd, off_t n)
274{
275 char buf[4096];
276 ssize_t ret;
277
278 while (n > 0) {
279 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
280 if (ret <= 0)
281 return ret;
282 n -= ret;
283 }
284
285 return 0;
286}
287
288static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
289 union perf_event *event,
290 struct perf_session *session
291 __maybe_unused)
292{
293 dump_printf(": unhandled!\n");
8ceb41d7
JO
294 if (perf_data__is_pipe(session->data))
295 skipn(perf_data__fd(session->data), event->auxtrace.size);
a16ac023
AH
296 return event->auxtrace.size;
297}
298
5fb0ac16
AH
299static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
300 union perf_event *event __maybe_unused,
301 struct perf_session *session __maybe_unused)
e9bf54d2
AH
302{
303 dump_printf(": unhandled!\n");
304 return 0;
305}
306
5f3339d2
JO
307
308static
309int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
310 union perf_event *event __maybe_unused,
311 struct perf_session *session __maybe_unused)
312{
2d2aea6a
JO
313 if (dump_trace)
314 perf_event__fprintf_thread_map(event, stdout);
315
5f3339d2
JO
316 dump_printf(": unhandled!\n");
317 return 0;
318}
319
6640b6c2
JO
320static
321int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
322 union perf_event *event __maybe_unused,
323 struct perf_session *session __maybe_unused)
324{
2d2aea6a
JO
325 if (dump_trace)
326 perf_event__fprintf_cpu_map(event, stdout);
327
6640b6c2
JO
328 dump_printf(": unhandled!\n");
329 return 0;
330}
331
374fb9e3
JO
332static
333int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
334 union perf_event *event __maybe_unused,
335 struct perf_session *session __maybe_unused)
336{
2d2aea6a
JO
337 if (dump_trace)
338 perf_event__fprintf_stat_config(event, stdout);
339
374fb9e3
JO
340 dump_printf(": unhandled!\n");
341 return 0;
342}
343
d80518c9
JO
344static int process_stat_stub(struct perf_tool *tool __maybe_unused,
345 union perf_event *event __maybe_unused,
346 struct perf_session *perf_session
347 __maybe_unused)
348{
2d2aea6a
JO
349 if (dump_trace)
350 perf_event__fprintf_stat(event, stdout);
351
d80518c9
JO
352 dump_printf(": unhandled!\n");
353 return 0;
354}
355
2d8f0f18
JO
356static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
357 union perf_event *event __maybe_unused,
358 struct perf_session *perf_session
359 __maybe_unused)
360{
2d2aea6a
JO
361 if (dump_trace)
362 perf_event__fprintf_stat_round(event, stdout);
363
2d8f0f18
JO
364 dump_printf(": unhandled!\n");
365 return 0;
366}
367
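/*
 * Point every callback the tool did not set at a default handler so the
 * dispatch code below can call them unconditionally.
 */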
9c501402 368void perf_tool__fill_defaults(struct perf_tool *tool)
06aae590 369{
370 if (tool->sample == NULL)
371 tool->sample = process_event_sample_stub;
372 if (tool->mmap == NULL)
373 tool->mmap = process_event_stub;
6adb0b0a
DA
374 if (tool->mmap2 == NULL)
375 tool->mmap2 = process_event_stub;
45694aa7
ACM
376 if (tool->comm == NULL)
377 tool->comm = process_event_stub;
7f0cd236
NK
378 if (tool->namespaces == NULL)
379 tool->namespaces = process_event_stub;
45694aa7
ACM
380 if (tool->fork == NULL)
381 tool->fork = process_event_stub;
382 if (tool->exit == NULL)
383 tool->exit = process_event_stub;
384 if (tool->lost == NULL)
385 tool->lost = perf_event__process_lost;
c4937a91
KL
386 if (tool->lost_samples == NULL)
387 tool->lost_samples = perf_event__process_lost_samples;
4a96f7a0
AH
388 if (tool->aux == NULL)
389 tool->aux = perf_event__process_aux;
0ad21f68
AH
390 if (tool->itrace_start == NULL)
391 tool->itrace_start = perf_event__process_itrace_start;
0286039f
AH
392 if (tool->context_switch == NULL)
393 tool->context_switch = perf_event__process_switch;
45694aa7
ACM
394 if (tool->read == NULL)
395 tool->read = process_event_sample_stub;
396 if (tool->throttle == NULL)
397 tool->throttle = process_event_stub;
398 if (tool->unthrottle == NULL)
399 tool->unthrottle = process_event_stub;
400 if (tool->attr == NULL)
401 tool->attr = process_event_synth_attr_stub;
ffe77725
JO
402 if (tool->event_update == NULL)
403 tool->event_update = process_event_synth_event_update_stub;
45694aa7
ACM
404 if (tool->tracing_data == NULL)
405 tool->tracing_data = process_event_synth_tracing_data_stub;
406 if (tool->build_id == NULL)
5fb0ac16 407 tool->build_id = process_event_op2_stub;
45694aa7 408 if (tool->finished_round == NULL) {
0a8cb85c 409 if (tool->ordered_events)
45694aa7 410 tool->finished_round = process_finished_round;
d6b17beb 411 else
45694aa7 412 tool->finished_round = process_finished_round_stub;
d6b17beb 413 }
3c659eed 414 if (tool->id_index == NULL)
5fb0ac16 415 tool->id_index = process_event_op2_stub;
a16ac023 416 if (tool->auxtrace_info == NULL)
5fb0ac16 417 tool->auxtrace_info = process_event_op2_stub;
a16ac023
AH
418 if (tool->auxtrace == NULL)
419 tool->auxtrace = process_event_auxtrace_stub;
e9bf54d2 420 if (tool->auxtrace_error == NULL)
5fb0ac16 421 tool->auxtrace_error = process_event_op2_stub;
5f3339d2
JO
422 if (tool->thread_map == NULL)
423 tool->thread_map = process_event_thread_map_stub;
6640b6c2
JO
424 if (tool->cpu_map == NULL)
425 tool->cpu_map = process_event_cpu_map_stub;
374fb9e3
JO
426 if (tool->stat_config == NULL)
427 tool->stat_config = process_event_stat_config_stub;
d80518c9
JO
428 if (tool->stat == NULL)
429 tool->stat = process_stat_stub;
2d8f0f18
JO
430 if (tool->stat_round == NULL)
431 tool->stat_round = process_stat_round_stub;
46bc29b9
AH
432 if (tool->time_conv == NULL)
433 tool->time_conv = process_event_op2_stub;
e9def1b2
DCC
434 if (tool->feature == NULL)
435 tool->feature = process_event_op2_stub;
06aae590 436}
48000a1a 437
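/*
 * Byte-swap helpers for perf.data files recorded on a host of the opposite
 * endianness; swap_sample_id_all() handles the trailing sample_id_all block
 * that follows the event payload.
 */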
438static void swap_sample_id_all(union perf_event *event, void *data)
439{
440 void *end = (void *) event + event->header.size;
441 int size = end - data;
442
443 BUG_ON(size % sizeof(u64));
444 mem_bswap_64(data, size);
445}
446
447static void perf_event__all64_swap(union perf_event *event,
1d037ca1 448 bool sample_id_all __maybe_unused)
ba21594c 449{
8115d60c
ACM
450 struct perf_event_header *hdr = &event->header;
451 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
ba21594c
ACM
452}
453
268fb20f 454static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
ba21594c 455{
8115d60c
ACM
456 event->comm.pid = bswap_32(event->comm.pid);
457 event->comm.tid = bswap_32(event->comm.tid);
268fb20f
JO
458
459 if (sample_id_all) {
460 void *data = &event->comm.comm;
461
9ac3e487 462 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
268fb20f
JO
463 swap_sample_id_all(event, data);
464 }
ba21594c
ACM
465}
466
268fb20f
JO
467static void perf_event__mmap_swap(union perf_event *event,
468 bool sample_id_all)
ba21594c 469{
8115d60c
ACM
470 event->mmap.pid = bswap_32(event->mmap.pid);
471 event->mmap.tid = bswap_32(event->mmap.tid);
472 event->mmap.start = bswap_64(event->mmap.start);
473 event->mmap.len = bswap_64(event->mmap.len);
474 event->mmap.pgoff = bswap_64(event->mmap.pgoff);
268fb20f
JO
475
476 if (sample_id_all) {
477 void *data = &event->mmap.filename;
478
9ac3e487 479 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
268fb20f
JO
480 swap_sample_id_all(event, data);
481 }
ba21594c
ACM
482}
483
5c5e854b
SE
484static void perf_event__mmap2_swap(union perf_event *event,
485 bool sample_id_all)
486{
487 event->mmap2.pid = bswap_32(event->mmap2.pid);
488 event->mmap2.tid = bswap_32(event->mmap2.tid);
489 event->mmap2.start = bswap_64(event->mmap2.start);
490 event->mmap2.len = bswap_64(event->mmap2.len);
491 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
492 event->mmap2.maj = bswap_32(event->mmap2.maj);
493 event->mmap2.min = bswap_32(event->mmap2.min);
494 event->mmap2.ino = bswap_64(event->mmap2.ino);
495
496 if (sample_id_all) {
497 void *data = &event->mmap2.filename;
498
499 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
500 swap_sample_id_all(event, data);
501 }
502}
268fb20f 503static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
ba21594c 504{
8115d60c
ACM
505 event->fork.pid = bswap_32(event->fork.pid);
506 event->fork.tid = bswap_32(event->fork.tid);
507 event->fork.ppid = bswap_32(event->fork.ppid);
508 event->fork.ptid = bswap_32(event->fork.ptid);
509 event->fork.time = bswap_64(event->fork.time);
268fb20f
JO
510
511 if (sample_id_all)
512 swap_sample_id_all(event, &event->fork + 1);
ba21594c
ACM
513}
514
268fb20f 515static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
ba21594c 516{
8115d60c
ACM
517 event->read.pid = bswap_32(event->read.pid);
518 event->read.tid = bswap_32(event->read.tid);
519 event->read.value = bswap_64(event->read.value);
520 event->read.time_enabled = bswap_64(event->read.time_enabled);
521 event->read.time_running = bswap_64(event->read.time_running);
522 event->read.id = bswap_64(event->read.id);
268fb20f
JO
523
524 if (sample_id_all)
525 swap_sample_id_all(event, &event->read + 1);
ba21594c
ACM
526}
527
4a96f7a0
AH
528static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
529{
530 event->aux.aux_offset = bswap_64(event->aux.aux_offset);
531 event->aux.aux_size = bswap_64(event->aux.aux_size);
532 event->aux.flags = bswap_64(event->aux.flags);
533
534 if (sample_id_all)
535 swap_sample_id_all(event, &event->aux + 1);
536}
537
0ad21f68
AH
538static void perf_event__itrace_start_swap(union perf_event *event,
539 bool sample_id_all)
540{
541 event->itrace_start.pid = bswap_32(event->itrace_start.pid);
542 event->itrace_start.tid = bswap_32(event->itrace_start.tid);
543
544 if (sample_id_all)
545 swap_sample_id_all(event, &event->itrace_start + 1);
546}
547
0286039f
AH
548static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
549{
550 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
551 event->context_switch.next_prev_pid =
552 bswap_32(event->context_switch.next_prev_pid);
553 event->context_switch.next_prev_tid =
554 bswap_32(event->context_switch.next_prev_tid);
555 }
556
557 if (sample_id_all)
558 swap_sample_id_all(event, &event->context_switch + 1);
559}
560
dd96c46b
JO
561static void perf_event__throttle_swap(union perf_event *event,
562 bool sample_id_all)
563{
564 event->throttle.time = bswap_64(event->throttle.time);
565 event->throttle.id = bswap_64(event->throttle.id);
566 event->throttle.stream_id = bswap_64(event->throttle.stream_id);
567
568 if (sample_id_all)
569 swap_sample_id_all(event, &event->throttle + 1);
570}
571
e108c66e
JO
572static u8 revbyte(u8 b)
573{
574 int rev = (b >> 4) | ((b & 0xf) << 4);
575 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
576 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
577 return (u8) rev;
578}
579
580/*
581 * XXX this is a hack in an attempt to carry the flags bitfield
bd1a0be5 582 * through endian village. ABI says:
583 *
584 * Bit-fields are allocated from right to left (least to most significant)
585 * on little-endian implementations and from left to right (most to least
586 * significant) on big-endian implementations.
587 *
588 * The above seems to be byte specific, so we need to reverse each
589 * byte of the bitfield. 'Internet' also says this might be implementation
590 * specific and we probably need proper fix and carry perf_event_attr
591 * bitfield flags in a separate data file FEAT_ section. Though this seems
592 * to work for now.
593 */
594static void swap_bitfield(u8 *p, unsigned len)
595{
596 unsigned i;
597
598 for (i = 0; i < len; i++) {
599 *p = revbyte(*p);
600 p++;
601 }
602}
603
eda3913b
DA
604/* exported for swapping attributes in file header */
605void perf_event__attr_swap(struct perf_event_attr *attr)
606{
607 attr->type = bswap_32(attr->type);
608 attr->size = bswap_32(attr->size);
b30b6172
WN
609
610#define bswap_safe(f, n) \
611 (attr->size > (offsetof(struct perf_event_attr, f) + \
612 sizeof(attr->f) * (n)))
613#define bswap_field(f, sz) \
614do { \
615 if (bswap_safe(f, 0)) \
616 attr->f = bswap_##sz(attr->f); \
617} while(0)
792d48b4 618#define bswap_field_16(f) bswap_field(f, 16)
b30b6172
WN
619#define bswap_field_32(f) bswap_field(f, 32)
620#define bswap_field_64(f) bswap_field(f, 64)
621
622 bswap_field_64(config);
623 bswap_field_64(sample_period);
624 bswap_field_64(sample_type);
625 bswap_field_64(read_format);
626 bswap_field_32(wakeup_events);
627 bswap_field_32(bp_type);
628 bswap_field_64(bp_addr);
629 bswap_field_64(bp_len);
630 bswap_field_64(branch_sample_type);
631 bswap_field_64(sample_regs_user);
632 bswap_field_32(sample_stack_user);
633 bswap_field_32(aux_watermark);
792d48b4 634 bswap_field_16(sample_max_stack);
b30b6172
WN
635
636 /*
637 * After read_format are bitfields. Check read_format because
638 * we are unable to use offsetof on bitfield.
639 */
640 if (bswap_safe(read_format, 1))
641 swap_bitfield((u8 *) (&attr->read_format + 1),
642 sizeof(u64));
643#undef bswap_field_64
644#undef bswap_field_32
645#undef bswap_field
646#undef bswap_safe
eda3913b
DA
647}
648
268fb20f 649static void perf_event__hdr_attr_swap(union perf_event *event,
1d037ca1 650 bool sample_id_all __maybe_unused)
2c46dbb5
TZ
651{
652 size_t size;
653
eda3913b 654 perf_event__attr_swap(&event->attr.attr);
2c46dbb5 655
8115d60c
ACM
656 size = event->header.size;
657 size -= (void *)&event->attr.id - (void *)event;
658 mem_bswap_64(event->attr.id, size);
2c46dbb5
TZ
659}
660
ffe77725
JO
661static void perf_event__event_update_swap(union perf_event *event,
662 bool sample_id_all __maybe_unused)
663{
664 event->event_update.type = bswap_64(event->event_update.type);
665 event->event_update.id = bswap_64(event->event_update.id);
666}
667
268fb20f 668static void perf_event__event_type_swap(union perf_event *event,
1d037ca1 669 bool sample_id_all __maybe_unused)
cd19a035 670{
8115d60c
ACM
671 event->event_type.event_type.event_id =
672 bswap_64(event->event_type.event_type.event_id);
cd19a035
TZ
673}
674
268fb20f 675static void perf_event__tracing_data_swap(union perf_event *event,
1d037ca1 676 bool sample_id_all __maybe_unused)
9215545e 677{
8115d60c 678 event->tracing_data.size = bswap_32(event->tracing_data.size);
9215545e
TZ
679}
680
a16ac023
AH
681static void perf_event__auxtrace_info_swap(union perf_event *event,
682 bool sample_id_all __maybe_unused)
683{
684 size_t size;
685
686 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
687
688 size = event->header.size;
689 size -= (void *)&event->auxtrace_info.priv - (void *)event;
690 mem_bswap_64(event->auxtrace_info.priv, size);
691}
692
693static void perf_event__auxtrace_swap(union perf_event *event,
694 bool sample_id_all __maybe_unused)
695{
696 event->auxtrace.size = bswap_64(event->auxtrace.size);
697 event->auxtrace.offset = bswap_64(event->auxtrace.offset);
698 event->auxtrace.reference = bswap_64(event->auxtrace.reference);
699 event->auxtrace.idx = bswap_32(event->auxtrace.idx);
700 event->auxtrace.tid = bswap_32(event->auxtrace.tid);
701 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
702}
703
e9bf54d2
AH
704static void perf_event__auxtrace_error_swap(union perf_event *event,
705 bool sample_id_all __maybe_unused)
706{
707 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
708 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
709 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
710 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
711 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
712 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
713}
714
5f3339d2
JO
715static void perf_event__thread_map_swap(union perf_event *event,
716 bool sample_id_all __maybe_unused)
717{
718 unsigned i;
719
720 event->thread_map.nr = bswap_64(event->thread_map.nr);
721
722 for (i = 0; i < event->thread_map.nr; i++)
723 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
724}
725
6640b6c2
JO
726static void perf_event__cpu_map_swap(union perf_event *event,
727 bool sample_id_all __maybe_unused)
728{
729 struct cpu_map_data *data = &event->cpu_map.data;
730 struct cpu_map_entries *cpus;
731 struct cpu_map_mask *mask;
732 unsigned i;
733
734 data->type = bswap_64(data->type);
735
736 switch (data->type) {
737 case PERF_CPU_MAP__CPUS:
738 cpus = (struct cpu_map_entries *)data->data;
739
740 cpus->nr = bswap_16(cpus->nr);
741
742 for (i = 0; i < cpus->nr; i++)
743 cpus->cpu[i] = bswap_16(cpus->cpu[i]);
744 break;
745 case PERF_CPU_MAP__MASK:
746 mask = (struct cpu_map_mask *) data->data;
747
748 mask->nr = bswap_16(mask->nr);
749 mask->long_size = bswap_16(mask->long_size);
750
751 switch (mask->long_size) {
752 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
753 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
754 default:
755 pr_err("cpu_map swap: unsupported long size\n");
756 }
757 default:
758 break;
759 }
760}
761
374fb9e3
JO
762static void perf_event__stat_config_swap(union perf_event *event,
763 bool sample_id_all __maybe_unused)
764{
765 u64 size;
766
767 size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
768 size += 1; /* nr item itself */
769 mem_bswap_64(&event->stat_config.nr, size);
770}
771
d80518c9
JO
772static void perf_event__stat_swap(union perf_event *event,
773 bool sample_id_all __maybe_unused)
774{
775 event->stat.id = bswap_64(event->stat.id);
776 event->stat.thread = bswap_32(event->stat.thread);
777 event->stat.cpu = bswap_32(event->stat.cpu);
778 event->stat.val = bswap_64(event->stat.val);
779 event->stat.ena = bswap_64(event->stat.ena);
780 event->stat.run = bswap_64(event->stat.run);
781}
782
2d8f0f18
JO
783static void perf_event__stat_round_swap(union perf_event *event,
784 bool sample_id_all __maybe_unused)
785{
786 event->stat_round.type = bswap_64(event->stat_round.type);
787 event->stat_round.time = bswap_64(event->stat_round.time);
788}
789
268fb20f
JO
790typedef void (*perf_event__swap_op)(union perf_event *event,
791 bool sample_id_all);
ba21594c 792
8115d60c
ACM
793static perf_event__swap_op perf_event__swap_ops[] = {
794 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
5c5e854b 795 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
8115d60c
ACM
796 [PERF_RECORD_COMM] = perf_event__comm_swap,
797 [PERF_RECORD_FORK] = perf_event__task_swap,
798 [PERF_RECORD_EXIT] = perf_event__task_swap,
799 [PERF_RECORD_LOST] = perf_event__all64_swap,
800 [PERF_RECORD_READ] = perf_event__read_swap,
dd96c46b
JO
801 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
802 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
8115d60c 803 [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
4a96f7a0 804 [PERF_RECORD_AUX] = perf_event__aux_swap,
0ad21f68 805 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
c4937a91 806 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
0286039f
AH
807 [PERF_RECORD_SWITCH] = perf_event__switch_swap,
808 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
eda3913b 809 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
8115d60c
ACM
810 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
811 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
812 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
3c659eed 813 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
a16ac023
AH
814 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
815 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
e9bf54d2 816 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
5f3339d2 817 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
6640b6c2 818 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
374fb9e3 819 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
d80518c9 820 [PERF_RECORD_STAT] = perf_event__stat_swap,
2d8f0f18 821 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
ffe77725 822 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
46bc29b9 823 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
8115d60c 824 [PERF_RECORD_HEADER_MAX] = NULL,
ba21594c
ACM
825};
826
d6b17beb
FW
827/*
828 * When perf record finishes a pass over all the buffers, it records this pseudo
829 * event.
830 * We record the max timestamp t found in the pass n.
831 * Assuming these timestamps are monotonic across cpus, we know that if
832 * a buffer still has events with timestamps below t, they will all be
833 * available and then read in the pass n + 1.
834 * Hence when we start to read the pass n + 2, we can safely flush all
835 * events with timestamps below t.
836 *
837 * ============ PASS n =================
838 * CPU 0 | CPU 1
839 * |
840 * cnt1 timestamps | cnt2 timestamps
841 * 1 | 2
842 * 2 | 3
843 * - | 4 <--- max recorded
844 *
845 * ============ PASS n + 1 ==============
846 * CPU 0 | CPU 1
847 * |
848 * cnt1 timestamps | cnt2 timestamps
849 * 3 | 5
850 * 4 | 6
851 * 5 | 7 <---- max recorded
852 *
853 * Flush all events below timestamp 4
854 *
855 * ============ PASS n + 2 ==============
856 * CPU 0 | CPU 1
857 * |
858 * cnt1 timestamps | cnt2 timestamps
859 * 6 | 8
860 * 7 | 9
861 * - | 10
862 *
863 * Flush all events below timestamp 7
864 * etc...
865 */
b7b61cbe 866static int process_finished_round(struct perf_tool *tool __maybe_unused,
1d037ca1 867 union perf_event *event __maybe_unused,
d704ebda 868 struct ordered_events *oe)
d6b17beb 869{
5531e162
AH
870 if (dump_trace)
871 fprintf(stdout, "\n");
b7b61cbe 872 return ordered_events__flush(oe, OE_FLUSH__ROUND);
d6b17beb
FW
873}
874
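/*
 * Queue an event for time-ordered delivery.  The timestamp is taken from
 * the sample already parsed by the caller (see perf_session__process_event()).
 */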
b7b61cbe 875int perf_session__queue_event(struct perf_session *s, union perf_event *event,
dc83e139 876 u64 timestamp, u64 file_offset)
c61e52ee 877{
dc83e139 878 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
640c03ce 879}
c61e52ee 880
384b6055 881static void callchain__lbr_callstack_printf(struct perf_sample *sample)
640c03ce 882{
384b6055
KL
883 struct ip_callchain *callchain = sample->callchain;
884 struct branch_stack *lbr_stack = sample->branch_stack;
885 u64 kernel_callchain_nr = callchain->nr;
640c03ce 886 unsigned int i;
c61e52ee 887
384b6055
KL
888 for (i = 0; i < kernel_callchain_nr; i++) {
889 if (callchain->ips[i] == PERF_CONTEXT_USER)
890 break;
891 }
892
893 if ((i != kernel_callchain_nr) && lbr_stack->nr) {
894 u64 total_nr;
895 /*
896 * LBR callstack can only get user call chain,
897 * i is kernel call chain number,
898 * 1 is PERF_CONTEXT_USER.
899 *
900 * The user call chain is stored in LBR registers.
901 * LBRs are paired registers. The caller is stored
902 * in "from" register, while the callee is stored
903 * in "to" register.
904 * For example, there is a call stack
905 * "A"->"B"->"C"->"D".
906 * The LBR registers will record it like
907 * "C"->"D", "B"->"C", "A"->"B".
908 * So only the first "to" register and all "from"
909 * registers are needed to construct the whole stack.
910 */
911 total_nr = i + 1 + lbr_stack->nr + 1;
912 kernel_callchain_nr = i + 1;
913
914 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
915
916 for (i = 0; i < kernel_callchain_nr; i++)
917 printf("..... %2d: %016" PRIx64 "\n",
918 i, callchain->ips[i]);
919
920 printf("..... %2d: %016" PRIx64 "\n",
921 (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
922 for (i = 0; i < lbr_stack->nr; i++)
923 printf("..... %2d: %016" PRIx64 "\n",
924 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
925 }
926}
927
928static void callchain__printf(struct perf_evsel *evsel,
929 struct perf_sample *sample)
930{
931 unsigned int i;
932 struct ip_callchain *callchain = sample->callchain;
933
acf2abbd 934 if (perf_evsel__has_branch_callstack(evsel))
384b6055
KL
935 callchain__lbr_callstack_printf(sample);
936
937 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
640c03ce 938
384b6055 939 for (i = 0; i < callchain->nr; i++)
9486aa38 940 printf("..... %2d: %016" PRIx64 "\n",
384b6055 941 i, callchain->ips[i]);
c61e52ee
FW
942}
943
b5387528
RAV
944static void branch_stack__printf(struct perf_sample *sample)
945{
946 uint64_t i;
947
948 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
949
0e332f03
AK
950 for (i = 0; i < sample->branch_stack->nr; i++) {
951 struct branch_entry *e = &sample->branch_stack->entries[i];
952
953 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
954 i, e->from, e->to,
8074bf51 955 (unsigned short)e->flags.cycles,
0e332f03
AK
956 e->flags.mispred ? "M" : " ",
957 e->flags.predicted ? "P" : " ",
958 e->flags.abort ? "A" : " ",
959 e->flags.in_tx ? "T" : " ",
960 (unsigned)e->flags.reserved);
961 }
b5387528
RAV
962}
963
0f6a3015
JO
964static void regs_dump__printf(u64 mask, u64 *regs)
965{
966 unsigned rid, i = 0;
967
968 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
969 u64 val = regs[i++];
970
971 printf(".... %-5s 0x%" PRIx64 "\n",
972 perf_reg_name(rid), val);
973 }
974}
975
6a21c0b5
SE
976static const char *regs_abi[] = {
977 [PERF_SAMPLE_REGS_ABI_NONE] = "none",
978 [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
979 [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
980};
981
982static inline const char *regs_dump_abi(struct regs_dump *d)
983{
984 if (d->abi > PERF_SAMPLE_REGS_ABI_64)
985 return "unknown";
986
987 return regs_abi[d->abi];
988}
989
990static void regs__printf(const char *type, struct regs_dump *regs)
991{
992 u64 mask = regs->mask;
993
994 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
995 type,
996 mask,
997 regs_dump_abi(regs));
998
999 regs_dump__printf(mask, regs->regs);
1000}
1001
352ea45a 1002static void regs_user__printf(struct perf_sample *sample)
0f6a3015
JO
1003{
1004 struct regs_dump *user_regs = &sample->user_regs;
1005
6a21c0b5
SE
1006 if (user_regs->regs)
1007 regs__printf("user", user_regs);
1008}
1009
1010static void regs_intr__printf(struct perf_sample *sample)
1011{
1012 struct regs_dump *intr_regs = &sample->intr_regs;
1013
1014 if (intr_regs->regs)
1015 regs__printf("intr", intr_regs);
0f6a3015
JO
1016}
1017
1018static void stack_user__printf(struct stack_dump *dump)
1019{
1020 printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1021 dump->size, dump->offset);
1022}
1023
9fa8727a 1024static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
8115d60c 1025 union perf_event *event,
8d50e5b4 1026 struct perf_sample *sample)
9c90a61c 1027{
9fa8727a 1028 u64 sample_type = __perf_evlist__combined_sample_type(evlist);
7f3be652 1029
9c90a61c 1030 if (event->header.type != PERF_RECORD_SAMPLE &&
9fa8727a 1031 !perf_evlist__sample_id_all(evlist)) {
9c90a61c
ACM
1032 fputs("-1 -1 ", stdout);
1033 return;
1034 }
1035
7f3be652 1036 if ((sample_type & PERF_SAMPLE_CPU))
9c90a61c
ACM
1037 printf("%u ", sample->cpu);
1038
7f3be652 1039 if (sample_type & PERF_SAMPLE_TIME)
9486aa38 1040 printf("%" PRIu64 " ", sample->time);
9c90a61c
ACM
1041}
1042
9ede473c
JO
1043static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1044{
1045 printf("... sample_read:\n");
1046
1047 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1048 printf("...... time enabled %016" PRIx64 "\n",
1049 sample->read.time_enabled);
1050
1051 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1052 printf("...... time running %016" PRIx64 "\n",
1053 sample->read.time_running);
1054
1055 if (read_format & PERF_FORMAT_GROUP) {
1056 u64 i;
1057
1058 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1059
1060 for (i = 0; i < sample->read.group.nr; i++) {
1061 struct sample_read_value *value;
1062
1063 value = &sample->read.group.values[i];
1064 printf("..... id %016" PRIx64
1065 ", value %016" PRIx64 "\n",
1066 value->id, value->value);
1067 }
1068 } else
1069 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1070 sample->read.one.id, sample->read.one.value);
1071}
1072
9fa8727a 1073static void dump_event(struct perf_evlist *evlist, union perf_event *event,
8d50e5b4 1074 u64 file_offset, struct perf_sample *sample)
9aefcab0
TG
1075{
1076 if (!dump_trace)
1077 return;
1078
9486aa38
ACM
1079 printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1080 file_offset, event->header.size, event->header.type);
9aefcab0
TG
1081
1082 trace_event(event);
1083
1084 if (sample)
9fa8727a 1085 perf_evlist__print_tstamp(evlist, event, sample);
9aefcab0 1086
9486aa38 1087 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
8115d60c 1088 event->header.size, perf_event__name(event->header.type));
9aefcab0
TG
1089}
1090
0f6a3015 1091static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
8d50e5b4 1092 struct perf_sample *sample)
9aefcab0 1093{
7f3be652
ACM
1094 u64 sample_type;
1095
ddbc24b7
ACM
1096 if (!dump_trace)
1097 return;
1098
0ea590ae 1099 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
9486aa38 1100 event->header.misc, sample->pid, sample->tid, sample->ip,
7cec0922 1101 sample->period, sample->addr);
9aefcab0 1102
0f6a3015 1103 sample_type = evsel->attr.sample_type;
7f3be652
ACM
1104
1105 if (sample_type & PERF_SAMPLE_CALLCHAIN)
384b6055 1106 callchain__printf(evsel, sample);
b5387528 1107
acf2abbd 1108 if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
b5387528 1109 branch_stack__printf(sample);
0f6a3015
JO
1110
1111 if (sample_type & PERF_SAMPLE_REGS_USER)
352ea45a 1112 regs_user__printf(sample);
0f6a3015 1113
6a21c0b5
SE
1114 if (sample_type & PERF_SAMPLE_REGS_INTR)
1115 regs_intr__printf(sample);
1116
0f6a3015
JO
1117 if (sample_type & PERF_SAMPLE_STACK_USER)
1118 stack_user__printf(&sample->user_stack);
05484298
AK
1119
1120 if (sample_type & PERF_SAMPLE_WEIGHT)
1121 printf("... weight: %" PRIu64 "\n", sample->weight);
98a3b32c
SE
1122
1123 if (sample_type & PERF_SAMPLE_DATA_SRC)
1124 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
9ede473c 1125
8780fb25
KL
1126 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1127 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1128
475eeab9
AK
1129 if (sample_type & PERF_SAMPLE_TRANSACTION)
1130 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1131
9ede473c
JO
1132 if (sample_type & PERF_SAMPLE_READ)
1133 sample_read__printf(sample, evsel->attr.read_format);
9aefcab0
TG
1134}
1135
dac7f6b7
JO
1136static void dump_read(struct perf_evsel *evsel, union perf_event *event)
1137{
1138 struct read_event *read_event = &event->read;
1139 u64 read_format;
1140
1141 if (!dump_trace)
1142 return;
1143
1144 printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
1145 evsel ? perf_evsel__name(evsel) : "FAIL",
1146 event->read.value);
1147
1148 read_format = evsel->attr.read_format;
1149
1150 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1151 printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);
1152
1153 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1154 printf("... time running : %" PRIu64 "\n", read_event->time_running);
1155
1156 if (read_format & PERF_FORMAT_ID)
1157 printf("... id : %" PRIu64 "\n", read_event->id);
1158}
1159
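/*
 * Guest samples are routed to the guest machine looked up by pid;
 * everything else is attributed to the host machine.
 */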
54245fdc 1160static struct machine *machines__find_for_cpumode(struct machines *machines,
ef89325f
AH
1161 union perf_event *event,
1162 struct perf_sample *sample)
743eb868 1163{
ad85ace0 1164 struct machine *machine;
743eb868 1165
7c0f4a41 1166 if (perf_guest &&
473398a2
ACM
1167 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1168 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
7fb0a5ee
ND
1169 u32 pid;
1170
5c5e854b
SE
1171 if (event->header.type == PERF_RECORD_MMAP
1172 || event->header.type == PERF_RECORD_MMAP2)
7fb0a5ee
ND
1173 pid = event->mmap.pid;
1174 else
ef89325f 1175 pid = sample->pid;
7fb0a5ee 1176
54245fdc 1177 machine = machines__find(machines, pid);
ad85ace0 1178 if (!machine)
3caeaa56 1179 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
ad85ace0 1180 return machine;
7fb0a5ee 1181 }
743eb868 1182
54245fdc 1183 return &machines->host;
743eb868
ACM
1184}
1185
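/*
 * PERF_SAMPLE_READ delivery: each value carries its own event ID, and the
 * running counter value is converted into a per-sample period before the
 * sample is handed to the tool.
 */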
313e53b0 1186static int deliver_sample_value(struct perf_evlist *evlist,
e4caec0d
JO
1187 struct perf_tool *tool,
1188 union perf_event *event,
1189 struct perf_sample *sample,
1190 struct sample_read_value *v,
1191 struct machine *machine)
1192{
313e53b0 1193 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
e4caec0d 1194
e4caec0d
JO
1195 if (sid) {
1196 sample->id = v->id;
1197 sample->period = v->value - sid->period;
1198 sid->period = v->value;
1199 }
1200
1201 if (!sid || sid->evsel == NULL) {
313e53b0 1202 ++evlist->stats.nr_unknown_id;
e4caec0d
JO
1203 return 0;
1204 }
1205
1206 return tool->sample(tool, event, sample, sid->evsel, machine);
1207}
1208
313e53b0 1209static int deliver_sample_group(struct perf_evlist *evlist,
e4caec0d
JO
1210 struct perf_tool *tool,
1211 union perf_event *event,
1212 struct perf_sample *sample,
1213 struct machine *machine)
1214{
1215 int ret = -EINVAL;
1216 u64 i;
1217
1218 for (i = 0; i < sample->read.group.nr; i++) {
313e53b0 1219 ret = deliver_sample_value(evlist, tool, event, sample,
e4caec0d
JO
1220 &sample->read.group.values[i],
1221 machine);
1222 if (ret)
1223 break;
1224 }
1225
1226 return ret;
1227}
1228
1229static int
313e53b0 1230 perf_evlist__deliver_sample(struct perf_evlist *evlist,
e4caec0d
JO
1231 struct perf_tool *tool,
1232 union perf_event *event,
1233 struct perf_sample *sample,
1234 struct perf_evsel *evsel,
1235 struct machine *machine)
1236{
1237 /* We know evsel != NULL. */
1238 u64 sample_type = evsel->attr.sample_type;
1239 u64 read_format = evsel->attr.read_format;
1240
d94386f2 1241 /* Standard sample delivery. */
e4caec0d
JO
1242 if (!(sample_type & PERF_SAMPLE_READ))
1243 return tool->sample(tool, event, sample, evsel, machine);
1244
1245 /* For PERF_SAMPLE_READ we have either single or group mode. */
1246 if (read_format & PERF_FORMAT_GROUP)
313e53b0 1247 return deliver_sample_group(evlist, tool, event, sample,
e4caec0d
JO
1248 machine);
1249 else
313e53b0 1250 return deliver_sample_value(evlist, tool, event, sample,
e4caec0d
JO
1251 &sample->read.one, machine);
1252}
1253
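/*
 * Dispatch one kernel-side event to the matching tool callback, updating
 * the evlist statistics (lost events, unknown ids, etc.) along the way.
 */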
d10eb1eb
ACM
1254static int machines__deliver_event(struct machines *machines,
1255 struct perf_evlist *evlist,
1256 union perf_event *event,
1257 struct perf_sample *sample,
1258 struct perf_tool *tool, u64 file_offset)
cbf41645 1259{
9e69c210 1260 struct perf_evsel *evsel;
743eb868 1261 struct machine *machine;
9e69c210 1262
9fa8727a 1263 dump_event(evlist, event, file_offset, sample);
532e7269 1264
313e53b0 1265 evsel = perf_evlist__id2evsel(evlist, sample->id);
7b27509f 1266
fa713a4e 1267 machine = machines__find_for_cpumode(machines, event, sample);
743eb868 1268
cbf41645
TG
1269 switch (event->header.type) {
1270 case PERF_RECORD_SAMPLE:
9e69c210 1271 if (evsel == NULL) {
313e53b0 1272 ++evlist->stats.nr_unknown_id;
6782206b 1273 return 0;
9e69c210 1274 }
1b29ac59 1275 dump_sample(evsel, event, sample);
0c095715 1276 if (machine == NULL) {
313e53b0 1277 ++evlist->stats.nr_unprocessable_samples;
6782206b 1278 return 0;
0c095715 1279 }
313e53b0 1280 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
cbf41645 1281 case PERF_RECORD_MMAP:
45694aa7 1282 return tool->mmap(tool, event, sample, machine);
5c5e854b 1283 case PERF_RECORD_MMAP2:
930e6fcd
KL
1284 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1285 ++evlist->stats.nr_proc_map_timeout;
5c5e854b 1286 return tool->mmap2(tool, event, sample, machine);
cbf41645 1287 case PERF_RECORD_COMM:
45694aa7 1288 return tool->comm(tool, event, sample, machine);
f3b3614a
HB
1289 case PERF_RECORD_NAMESPACES:
1290 return tool->namespaces(tool, event, sample, machine);
cbf41645 1291 case PERF_RECORD_FORK:
45694aa7 1292 return tool->fork(tool, event, sample, machine);
cbf41645 1293 case PERF_RECORD_EXIT:
45694aa7 1294 return tool->exit(tool, event, sample, machine);
cbf41645 1295 case PERF_RECORD_LOST:
45694aa7 1296 if (tool->lost == perf_event__process_lost)
313e53b0 1297 evlist->stats.total_lost += event->lost.lost;
45694aa7 1298 return tool->lost(tool, event, sample, machine);
c4937a91
KL
1299 case PERF_RECORD_LOST_SAMPLES:
1300 if (tool->lost_samples == perf_event__process_lost_samples)
1301 evlist->stats.total_lost_samples += event->lost_samples.lost;
1302 return tool->lost_samples(tool, event, sample, machine);
cbf41645 1303 case PERF_RECORD_READ:
dac7f6b7 1304 dump_read(evsel, event);
45694aa7 1305 return tool->read(tool, event, sample, evsel, machine);
cbf41645 1306 case PERF_RECORD_THROTTLE:
45694aa7 1307 return tool->throttle(tool, event, sample, machine);
cbf41645 1308 case PERF_RECORD_UNTHROTTLE:
45694aa7 1309 return tool->unthrottle(tool, event, sample, machine);
4a96f7a0 1310 case PERF_RECORD_AUX:
05a1f47e
AS
1311 if (tool->aux == perf_event__process_aux) {
1312 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1313 evlist->stats.total_aux_lost += 1;
1314 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1315 evlist->stats.total_aux_partial += 1;
1316 }
4a96f7a0 1317 return tool->aux(tool, event, sample, machine);
0ad21f68
AH
1318 case PERF_RECORD_ITRACE_START:
1319 return tool->itrace_start(tool, event, sample, machine);
0286039f
AH
1320 case PERF_RECORD_SWITCH:
1321 case PERF_RECORD_SWITCH_CPU_WIDE:
1322 return tool->context_switch(tool, event, sample, machine);
cbf41645 1323 default:
313e53b0 1324 ++evlist->stats.nr_unknown_events;
cbf41645
TG
1325 return -1;
1326 }
1327}
1328
c446870d
AH
1329static int perf_session__deliver_event(struct perf_session *session,
1330 union perf_event *event,
1331 struct perf_sample *sample,
1332 struct perf_tool *tool,
1333 u64 file_offset)
1334{
1335 int ret;
1336
1337 ret = auxtrace__process_event(session, event, sample, tool);
1338 if (ret < 0)
1339 return ret;
1340 if (ret > 0)
1341 return 0;
1342
1343 return machines__deliver_event(&session->machines, session->evlist,
1344 event, sample, tool, file_offset);
1345}
1346
d5652d86
AH
1347static s64 perf_session__process_user_event(struct perf_session *session,
1348 union perf_event *event,
d5652d86 1349 u64 file_offset)
06aae590 1350{
d704ebda 1351 struct ordered_events *oe = &session->ordered_events;
9870d780 1352 struct perf_tool *tool = session->tool;
8ceb41d7 1353 int fd = perf_data__fd(session->data);
10d0f086
ACM
1354 int err;
1355
9fa8727a 1356 dump_event(session->evlist, event, file_offset, NULL);
06aae590 1357
cbf41645 1358 /* These events are processed right away */
06aae590 1359 switch (event->header.type) {
2c46dbb5 1360 case PERF_RECORD_HEADER_ATTR:
47c3d109 1361 err = tool->attr(tool, event, &session->evlist);
cfe1c414 1362 if (err == 0) {
7b56cce2 1363 perf_session__set_id_hdr_size(session);
cfe1c414
AH
1364 perf_session__set_comm_exec(session);
1365 }
10d0f086 1366 return err;
ffe77725
JO
1367 case PERF_RECORD_EVENT_UPDATE:
1368 return tool->event_update(tool, event, &session->evlist);
f67697bd
JO
1369 case PERF_RECORD_HEADER_EVENT_TYPE:
1370 /*
1371 * Deprecated, but we need to handle it for the sake
1372 * of old data files created in pipe mode.
1373 */
1374 return 0;
9215545e
TZ
1375 case PERF_RECORD_HEADER_TRACING_DATA:
1376 /* setup for reading amidst mmap */
cc9784bd 1377 lseek(fd, file_offset, SEEK_SET);
47c3d109 1378 return tool->tracing_data(tool, event, session);
c7929e47 1379 case PERF_RECORD_HEADER_BUILD_ID:
45694aa7 1380 return tool->build_id(tool, event, session);
d6b17beb 1381 case PERF_RECORD_FINISHED_ROUND:
d704ebda 1382 return tool->finished_round(tool, event, oe);
3c659eed
AH
1383 case PERF_RECORD_ID_INDEX:
1384 return tool->id_index(tool, event, session);
a16ac023
AH
1385 case PERF_RECORD_AUXTRACE_INFO:
1386 return tool->auxtrace_info(tool, event, session);
1387 case PERF_RECORD_AUXTRACE:
1388 /* setup for reading amidst mmap */
1389 lseek(fd, file_offset + event->header.size, SEEK_SET);
1390 return tool->auxtrace(tool, event, session);
e9bf54d2 1391 case PERF_RECORD_AUXTRACE_ERROR:
85ed4729 1392 perf_session__auxtrace_error_inc(session, event);
e9bf54d2 1393 return tool->auxtrace_error(tool, event, session);
5f3339d2
JO
1394 case PERF_RECORD_THREAD_MAP:
1395 return tool->thread_map(tool, event, session);
6640b6c2
JO
1396 case PERF_RECORD_CPU_MAP:
1397 return tool->cpu_map(tool, event, session);
374fb9e3
JO
1398 case PERF_RECORD_STAT_CONFIG:
1399 return tool->stat_config(tool, event, session);
d80518c9
JO
1400 case PERF_RECORD_STAT:
1401 return tool->stat(tool, event, session);
2d8f0f18
JO
1402 case PERF_RECORD_STAT_ROUND:
1403 return tool->stat_round(tool, event, session);
46bc29b9
AH
1404 case PERF_RECORD_TIME_CONV:
1405 session->time_conv = event->time_conv;
1406 return tool->time_conv(tool, event, session);
e9def1b2
DCC
1407 case PERF_RECORD_HEADER_FEATURE:
1408 return tool->feature(tool, event, session);
06aae590 1409 default:
ba74f064 1410 return -EINVAL;
06aae590 1411 }
ba74f064
TG
1412}
1413
a293829d
AH
1414int perf_session__deliver_synth_event(struct perf_session *session,
1415 union perf_event *event,
b7b61cbe 1416 struct perf_sample *sample)
a293829d 1417{
fa713a4e 1418 struct perf_evlist *evlist = session->evlist;
9870d780 1419 struct perf_tool *tool = session->tool;
fa713a4e
ACM
1420
1421 events_stats__inc(&evlist->stats, event->header.type);
a293829d
AH
1422
1423 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
b7b61cbe 1424 return perf_session__process_user_event(session, event, 0);
a293829d 1425
fa713a4e 1426 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
a293829d
AH
1427}
1428
268fb20f
JO
1429static void event_swap(union perf_event *event, bool sample_id_all)
1430{
1431 perf_event__swap_op swap;
1432
1433 swap = perf_event__swap_ops[event->header.type];
1434 if (swap)
1435 swap(event, sample_id_all);
1436}
1437
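/*
 * Read a single event at 'file_offset' into the caller's buffer (or take it
 * straight from the single mmap when available), byte-swapping it and
 * optionally parsing its sample.
 */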
5a52f33a
AH
1438int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1439 void *buf, size_t buf_sz,
1440 union perf_event **event_ptr,
1441 struct perf_sample *sample)
1442{
1443 union perf_event *event;
1444 size_t hdr_sz, rest;
1445 int fd;
1446
1447 if (session->one_mmap && !session->header.needs_swap) {
1448 event = file_offset - session->one_mmap_offset +
1449 session->one_mmap_addr;
1450 goto out_parse_sample;
1451 }
1452
8ceb41d7 1453 if (perf_data__is_pipe(session->data))
5a52f33a
AH
1454 return -1;
1455
8ceb41d7 1456 fd = perf_data__fd(session->data);
5a52f33a
AH
1457 hdr_sz = sizeof(struct perf_event_header);
1458
1459 if (buf_sz < hdr_sz)
1460 return -1;
1461
1462 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
554e92ed 1463 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
5a52f33a
AH
1464 return -1;
1465
1466 event = (union perf_event *)buf;
1467
1468 if (session->header.needs_swap)
1469 perf_event_header__bswap(&event->header);
1470
554e92ed 1471 if (event->header.size < hdr_sz || event->header.size > buf_sz)
5a52f33a
AH
1472 return -1;
1473
1474 rest = event->header.size - hdr_sz;
1475
554e92ed 1476 if (readn(fd, buf, rest) != (ssize_t)rest)
5a52f33a
AH
1477 return -1;
1478
1479 if (session->header.needs_swap)
1480 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1481
1482out_parse_sample:
1483
1484 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1485 perf_evlist__parse_sample(session->evlist, event, sample))
1486 return -1;
1487
1488 *event_ptr = event;
1489
1490 return 0;
1491}
1492
d5652d86 1493static s64 perf_session__process_event(struct perf_session *session,
b7b61cbe 1494 union perf_event *event, u64 file_offset)
ba74f064 1495{
313e53b0 1496 struct perf_evlist *evlist = session->evlist;
9870d780 1497 struct perf_tool *tool = session->tool;
8d50e5b4 1498 struct perf_sample sample;
ba74f064
TG
1499 int ret;
1500
268fb20f 1501 if (session->header.needs_swap)
313e53b0 1502 event_swap(event, perf_evlist__sample_id_all(evlist));
ba74f064
TG
1503
1504 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1505 return -EINVAL;
1506
313e53b0 1507 events_stats__inc(&evlist->stats, event->header.type);
ba74f064
TG
1508
1509 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
b7b61cbe 1510 return perf_session__process_user_event(session, event, file_offset);
cbf41645 1511
3dfc2c0a
TG
1512 /*
1513 * For all kernel events we get the sample data
1514 */
313e53b0 1515 ret = perf_evlist__parse_sample(evlist, event, &sample);
5538beca
FW
1516 if (ret)
1517 return ret;
3dfc2c0a 1518
0a8cb85c 1519 if (tool->ordered_events) {
dc83e139 1520 ret = perf_session__queue_event(session, event, sample.time, file_offset);
cbf41645
TG
1521 if (ret != -ETIME)
1522 return ret;
1523 }
1524
c446870d
AH
1525 return perf_session__deliver_event(session, event, &sample, tool,
1526 file_offset);
06aae590
ACM
1527}
1528
316c7136 1529void perf_event_header__bswap(struct perf_event_header *hdr)
ba21594c 1530{
316c7136
ACM
1531 hdr->type = bswap_32(hdr->type);
1532 hdr->misc = bswap_16(hdr->misc);
1533 hdr->size = bswap_16(hdr->size);
ba21594c
ACM
1534}
1535
b424eba2
ACM
1536struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1537{
1fcb8768 1538 return machine__findnew_thread(&session->machines.host, -1, pid);
b424eba2
ACM
1539}
1540
9d8b172f 1541int perf_session__register_idle_thread(struct perf_session *session)
06aae590 1542{
1fcb8768 1543 struct thread *thread;
9d8b172f 1544 int err = 0;
06aae590 1545
1fcb8768 1546 thread = machine__findnew_thread(&session->machines.host, 0, 0);
162f0bef 1547 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
06aae590 1548 pr_err("problem inserting idle task.\n");
9d8b172f 1549 err = -1;
06aae590
ACM
1550 }
1551
f3b3614a
HB
1552 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1553 pr_err("problem inserting idle task.\n");
1554 err = -1;
1555 }
1556
9d8b172f
MH
1557 /* machine__findnew_thread() got the thread, so put it */
1558 thread__put(thread);
1559 return err;
06aae590
ACM
1560}
1561
f06149c0
WN
1562static void
1563perf_session__warn_order(const struct perf_session *session)
1564{
1565 const struct ordered_events *oe = &session->ordered_events;
1566 struct perf_evsel *evsel;
1567 bool should_warn = true;
1568
1569 evlist__for_each_entry(session->evlist, evsel) {
1570 if (evsel->attr.write_backward)
1571 should_warn = false;
1572 }
1573
1574 if (!should_warn)
1575 return;
1576 if (oe->nr_unordered_events != 0)
1577 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1578}
1579
9870d780 1580static void perf_session__warn_about_errors(const struct perf_session *session)
11095994 1581{
9870d780 1582 const struct events_stats *stats = &session->evlist->stats;
9870d780
ACM
1583
1584 if (session->tool->lost == perf_event__process_lost &&
ccda068f 1585 stats->nr_events[PERF_RECORD_LOST] != 0) {
7b27509f
ACM
1586 ui__warning("Processed %d events and lost %d chunks!\n\n"
1587 "Check IO/CPU overload!\n\n",
ccda068f
ACM
1588 stats->nr_events[0],
1589 stats->nr_events[PERF_RECORD_LOST]);
11095994
ACM
1590 }
1591
c4937a91
KL
1592 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1593 double drop_rate;
1594
1595 drop_rate = (double)stats->total_lost_samples /
1596 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1597 if (drop_rate > 0.05) {
6ba29c2f 1598 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
c4937a91
KL
1599 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1600 drop_rate * 100.0);
1601 }
1602 }
1603
a38f48e3
AH
1604 if (session->tool->aux == perf_event__process_aux &&
1605 stats->total_aux_lost != 0) {
1606 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1607 stats->total_aux_lost,
1608 stats->nr_events[PERF_RECORD_AUX]);
1609 }
1610
05a1f47e
AS
1611 if (session->tool->aux == perf_event__process_aux &&
1612 stats->total_aux_partial != 0) {
1613 bool vmm_exclusive = false;
1614
1615 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1616 &vmm_exclusive);
1617
1618 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1619 "Are you running a KVM guest in the background?%s\n\n",
1620 stats->total_aux_partial,
1621 stats->nr_events[PERF_RECORD_AUX],
1622 vmm_exclusive ?
1623 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1624 "will reduce the gaps to only guest's timeslices." :
1625 "");
1626 }
1627
ccda068f 1628 if (stats->nr_unknown_events != 0) {
11095994
ACM
1629 ui__warning("Found %u unknown events!\n\n"
1630 "Is this an older tool processing a perf.data "
1631 "file generated by a more recent tool?\n\n"
1632 "If that is not the case, consider "
1633 "reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f 1634 stats->nr_unknown_events);
11095994
ACM
1635 }
1636
ccda068f 1637 if (stats->nr_unknown_id != 0) {
9e69c210 1638 ui__warning("%u samples with id not present in the header\n",
ccda068f 1639 stats->nr_unknown_id);
9e69c210
ACM
1640 }
1641
ccda068f 1642 if (stats->nr_invalid_chains != 0) {
75be989a
ACM
1643 ui__warning("Found invalid callchains!\n\n"
1644 "%u out of %u events were discarded for this reason.\n\n"
1645 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f
ACM
1646 stats->nr_invalid_chains,
1647 stats->nr_events[PERF_RECORD_SAMPLE]);
75be989a 1648 }
0c095715 1649
ccda068f 1650 if (stats->nr_unprocessable_samples != 0) {
0c095715
JR
1651 ui__warning("%u unprocessable samples recorded.\n"
1652 "Do you have a KVM guest running and not using 'perf kvm'?\n",
ccda068f 1653 stats->nr_unprocessable_samples);
0c095715 1654 }
f61ff6c0 1655
f06149c0 1656 perf_session__warn_order(session);
85ed4729
AH
1657
1658 events_stats__auxtrace_error_warn(stats);
930e6fcd
KL
1659
1660 if (stats->nr_proc_map_timeout != 0) {
1661 ui__warning("%d map information files for pre-existing threads were\n"
1662 "not processed, if there are samples for addresses they\n"
1663 "will not be resolved, you may find out which are these\n"
1664 "threads by running with -v and redirecting the output\n"
9d9cad76
KL
1665 "to a file.\n"
1666 "The time limit to process proc map is too short?\n"
1667 "Increase it by --proc-map-timeout\n",
930e6fcd
KL
1668 stats->nr_proc_map_timeout);
1669 }
11095994
ACM
1670}
1671
a5499b37
AH
1672static int perf_session__flush_thread_stack(struct thread *thread,
1673 void *p __maybe_unused)
1674{
1675 return thread_stack__flush(thread);
1676}
1677
1678static int perf_session__flush_thread_stacks(struct perf_session *session)
1679{
1680 return machines__for_each_thread(&session->machines,
1681 perf_session__flush_thread_stack,
1682 NULL);
1683}
1684
8dc58101
TZ
1685volatile int session_done;
1686
b7b61cbe 1687static int __perf_session__process_pipe_events(struct perf_session *session)
8dc58101 1688{
fa713a4e 1689 struct ordered_events *oe = &session->ordered_events;
9870d780 1690 struct perf_tool *tool = session->tool;
8ceb41d7 1691 int fd = perf_data__fd(session->data);
444d2866
SE
1692 union perf_event *event;
1693 uint32_t size, cur_size = 0;
1694 void *buf = NULL;
d5652d86 1695 s64 skip = 0;
8dc58101 1696 u64 head;
727ebd54 1697 ssize_t err;
8dc58101
TZ
1698 void *p;
1699
45694aa7 1700 perf_tool__fill_defaults(tool);
8dc58101
TZ
1701
1702 head = 0;
444d2866
SE
1703 cur_size = sizeof(union perf_event);
1704
1705 buf = malloc(cur_size);
1706 if (!buf)
1707 return -errno;
1e0d4f02 1708 ordered_events__set_copy_on_queue(oe, true);
8dc58101 1709more:
444d2866 1710 event = buf;
cc9784bd 1711 err = readn(fd, event, sizeof(struct perf_event_header));
8dc58101
TZ
1712 if (err <= 0) {
1713 if (err == 0)
1714 goto done;
1715
1716 pr_err("failed to read event header\n");
1717 goto out_err;
1718 }
1719
316c7136 1720 if (session->header.needs_swap)
444d2866 1721 perf_event_header__bswap(&event->header);
8dc58101 1722
444d2866 1723 size = event->header.size;
27389d78
AH
1724 if (size < sizeof(struct perf_event_header)) {
1725 pr_err("bad event header size\n");
1726 goto out_err;
1727 }
8dc58101 1728
444d2866
SE
1729 if (size > cur_size) {
1730 void *new = realloc(buf, size);
1731 if (!new) {
1732 pr_err("failed to allocate memory to read event\n");
1733 goto out_err;
1734 }
1735 buf = new;
1736 cur_size = size;
1737 event = buf;
1738 }
1739 p = event;
8dc58101
TZ
1740 p += sizeof(struct perf_event_header);
1741
794e43b5 1742 if (size - sizeof(struct perf_event_header)) {
cc9784bd 1743 err = readn(fd, p, size - sizeof(struct perf_event_header));
794e43b5
TZ
1744 if (err <= 0) {
1745 if (err == 0) {
1746 pr_err("unexpected end of event stream\n");
1747 goto done;
1748 }
8dc58101 1749
794e43b5
TZ
1750 pr_err("failed to read event data\n");
1751 goto out_err;
1752 }
8dc58101
TZ
1753 }
1754
b7b61cbe 1755 if ((skip = perf_session__process_event(session, event, head)) < 0) {
9389a460 1756 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
444d2866 1757 head, event->header.size, event->header.type);
9389a460
JO
1758 err = -EINVAL;
1759 goto out_err;
8dc58101
TZ
1760 }
1761
1762 head += size;
1763
8dc58101
TZ
1764 if (skip > 0)
1765 head += skip;
1766
1767 if (!session_done())
1768 goto more;
1769done:
8c16b649 1770 /* do the final flush for ordered samples */
b7b61cbe 1771 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
1772 if (err)
1773 goto out_err;
1774 err = auxtrace__flush_events(session, tool);
a5499b37
AH
1775 if (err)
1776 goto out_err;
1777 err = perf_session__flush_thread_stacks(session);
8dc58101 1778out_err:
444d2866 1779 free(buf);
9870d780 1780 perf_session__warn_about_errors(session);
adc56ed1 1781 ordered_events__free(&session->ordered_events);
c446870d 1782 auxtrace__free_events(session);
8dc58101
TZ
1783 return err;
1784}
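/*
 * Hedged sketch (not code from this file) of the stream framing the loop
 * above parses: every record is a struct perf_event_header followed by
 * (header.size - sizeof(header)) bytes of payload, so a minimal reader built
 * on the same readn() helper would look roughly like this, with payload_buf
 * standing in for a suitably sized buffer:
 *
 *	struct perf_event_header h;
 *
 *	while (readn(fd, &h, sizeof(h)) == (ssize_t)sizeof(h)) {
 *		readn(fd, payload_buf, h.size - sizeof(h));
 *		handle_record(&h, payload_buf);
 *	}
 *
 * handle_record() is a hypothetical callback, not a perf API.
 */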
1785
998bedc8
FW
1786static union perf_event *
1787fetch_mmaped_event(struct perf_session *session,
1788 u64 head, size_t mmap_size, char *buf)
1789{
1790 union perf_event *event;
1791
1792 /*
1793 * Ensure we have enough space remaining to read
 1794 * the size of the event from its header.
1795 */
1796 if (head + sizeof(event->header) > mmap_size)
1797 return NULL;
1798
1799 event = (union perf_event *)(buf + head);
1800
1801 if (session->header.needs_swap)
1802 perf_event_header__bswap(&event->header);
1803
27389d78
AH
1804 if (head + event->header.size > mmap_size) {
1805 /* We're not fetching the event so swap back again */
1806 if (session->header.needs_swap)
1807 perf_event_header__bswap(&event->header);
998bedc8 1808 return NULL;
27389d78 1809 }
998bedc8
FW
1810
1811 return event;
1812}
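/*
 * Hedged context note: both bounds checks above guard against an event that
 * straddles the end of the current mmap window.  In that case NULL is
 * returned and the header is swapped back to its on-file byte order, so the
 * caller can remap further into the file and the header can be swapped again
 * cleanly on the next attempt.
 */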
1813
35d48ddf
DM
1814/*
1815 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1816 * slices. On 32bit we use 32MB.
1817 */
1818#if BITS_PER_LONG == 64
1819#define MMAP_SIZE ULLONG_MAX
1820#define NUM_MMAPS 1
1821#else
1822#define MMAP_SIZE (32 * 1024 * 1024ULL)
1823#define NUM_MMAPS 128
1824#endif
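/*
 * Hedged context note: on 32-bit the data file is mapped in 32MB windows and
 * up to NUM_MMAPS of them may be live at once; the map_idx cycling below uses
 * "& (ARRAY_SIZE(mmaps) - 1)", which relies on NUM_MMAPS being a power of
 * two.  On 64-bit a single mapping covers the whole file, since MMAP_SIZE is
 * clamped to the file size before mmap().
 */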
1825
4ac30cf7
NK
1826static int __perf_session__process_events(struct perf_session *session,
1827 u64 data_offset, u64 data_size,
b7b61cbe 1828 u64 file_size)
06aae590 1829{
fa713a4e 1830 struct ordered_events *oe = &session->ordered_events;
9870d780 1831 struct perf_tool *tool = session->tool;
8ceb41d7 1832 int fd = perf_data__fd(session->data);
d5652d86 1833 u64 head, page_offset, file_offset, file_pos, size;
fe174207 1834 int err, mmap_prot, mmap_flags, map_idx = 0;
0c1fe6b2 1835 size_t mmap_size;
35d48ddf 1836 char *buf, *mmaps[NUM_MMAPS];
8115d60c 1837 union perf_event *event;
4d3001fd 1838 struct ui_progress prog;
d5652d86 1839 s64 skip;
0331ee0c 1840
45694aa7 1841 perf_tool__fill_defaults(tool);
06aae590 1842
0331ee0c
TG
1843 page_offset = page_size * (data_offset / page_size);
1844 file_offset = page_offset;
1845 head = data_offset - page_offset;
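/*
 * Hedged context note: mmap() offsets must be page aligned, so the
 * assignments above round the start of the data section down to a page
 * boundary and keep the remainder in 'head', which then indexes into the
 * mapped buffer.
 */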
06aae590 1846
381c02f6
MR
1847 if (data_size == 0)
1848 goto out;
1849
1850 if (data_offset + data_size < file_size)
d6513281
TG
1851 file_size = data_offset + data_size;
1852
8233822f 1853 ui_progress__init_size(&prog, file_size, "Processing events...");
55b44629 1854
35d48ddf 1855 mmap_size = MMAP_SIZE;
919d86d3 1856 if (mmap_size > file_size) {
55b44629 1857 mmap_size = file_size;
919d86d3
AH
1858 session->one_mmap = true;
1859 }
55b44629 1860
fe174207
TG
1861 memset(mmaps, 0, sizeof(mmaps));
1862
ba21594c
ACM
1863 mmap_prot = PROT_READ;
1864 mmap_flags = MAP_SHARED;
1865
0331ee0c 1866 if (session->header.needs_swap) {
ba21594c
ACM
1867 mmap_prot |= PROT_WRITE;
1868 mmap_flags = MAP_PRIVATE;
1869 }
06aae590 1870remap:
cc9784bd 1871 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
55b44629 1872 file_offset);
06aae590
ACM
1873 if (buf == MAP_FAILED) {
1874 pr_err("failed to mmap file\n");
1875 err = -errno;
1876 goto out_err;
1877 }
fe174207
TG
1878 mmaps[map_idx] = buf;
1879 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
d6513281 1880 file_pos = file_offset + head;
919d86d3
AH
1881 if (session->one_mmap) {
1882 session->one_mmap_addr = buf;
1883 session->one_mmap_offset = file_offset;
1884 }
06aae590
ACM
1885
1886more:
998bedc8
FW
1887 event = fetch_mmaped_event(session, head, mmap_size, buf);
1888 if (!event) {
fe174207
TG
1889 if (mmaps[map_idx]) {
1890 munmap(mmaps[map_idx], mmap_size);
1891 mmaps[map_idx] = NULL;
1892 }
06aae590 1893
0331ee0c
TG
1894 page_offset = page_size * (head / page_size);
1895 file_offset += page_offset;
1896 head -= page_offset;
06aae590
ACM
1897 goto remap;
1898 }
1899
1900 size = event->header.size;
1901
27389d78 1902 if (size < sizeof(struct perf_event_header) ||
b7b61cbe 1903 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
9389a460
JO
1904 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1905 file_offset + head, event->header.size,
1906 event->header.type);
1907 err = -EINVAL;
1908 goto out_err;
06aae590
ACM
1909 }
1910
6f917c70
AH
1911 if (skip)
1912 size += skip;
1913
06aae590 1914 head += size;
d6513281 1915 file_pos += size;
06aae590 1916
4d3001fd 1917 ui_progress__update(&prog, size);
55b44629 1918
33e940a2 1919 if (session_done())
8c16b649 1920 goto out;
33e940a2 1921
d6513281 1922 if (file_pos < file_size)
06aae590 1923 goto more;
d6513281 1924
8c16b649 1925out:
c61e52ee 1926 /* do the final flush for ordered samples */
b7b61cbe 1927 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
1928 if (err)
1929 goto out_err;
1930 err = auxtrace__flush_events(session, tool);
a5499b37
AH
1931 if (err)
1932 goto out_err;
1933 err = perf_session__flush_thread_stacks(session);
06aae590 1934out_err:
a5580f3e 1935 ui_progress__finish();
9870d780 1936 perf_session__warn_about_errors(session);
b26dc730
WN
1937 /*
 1938	 * We may be switching perf.data output; make ordered_events
 1939	 * reusable.
1940 */
1941 ordered_events__reinit(&session->ordered_events);
c446870d 1942 auxtrace__free_events(session);
919d86d3 1943 session->one_mmap = false;
06aae590
ACM
1944 return err;
1945}
27295592 1946
b7b61cbe 1947int perf_session__process_events(struct perf_session *session)
6122e4e4 1948{
8ceb41d7 1949 u64 size = perf_data__size(session->data);
6122e4e4
ACM
1950 int err;
1951
9d8b172f 1952 if (perf_session__register_idle_thread(session) < 0)
6122e4e4
ACM
1953 return -ENOMEM;
1954
8ceb41d7 1955 if (!perf_data__is_pipe(session->data))
316c7136
ACM
1956 err = __perf_session__process_events(session,
1957 session->header.data_offset,
b7b61cbe 1958 session->header.data_size, size);
8dc58101 1959 else
b7b61cbe 1960 err = __perf_session__process_pipe_events(session);
88ca895d 1961
6122e4e4
ACM
1962 return err;
1963}
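/*
 * Hedged usage sketch (not code from this file): a report-style tool
 * typically opens the data file and then drives this entry point, roughly:
 *
 *	struct perf_data data = {
 *		.file.path = "perf.data",
 *		.mode	   = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (session != NULL)
 *		err = perf_session__process_events(session);
 *
 * The struct layout and perf_session__new() signature reflect this tree's
 * perf_data API and may differ in other versions; 'tool' is the caller's
 * struct perf_tool with its callbacks filled in.
 */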
1964
7f3be652 1965bool perf_session__has_traces(struct perf_session *session, const char *msg)
27295592 1966{
93ea01c2
DA
1967 struct perf_evsel *evsel;
1968
e5cadb93 1969 evlist__for_each_entry(session->evlist, evsel) {
93ea01c2
DA
1970 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1971 return true;
27295592
ACM
1972 }
1973
93ea01c2
DA
1974 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1975 return false;
27295592 1976}
56b03f3c 1977
743eb868
ACM
1978int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1979 const char *symbol_name, u64 addr)
56b03f3c
ACM
1980{
1981 char *bracket;
a0b2f5af 1982 int i;
a1645ce1
ZY
1983 struct ref_reloc_sym *ref;
1984
1985 ref = zalloc(sizeof(struct ref_reloc_sym));
1986 if (ref == NULL)
1987 return -ENOMEM;
56b03f3c 1988
a1645ce1
ZY
1989 ref->name = strdup(symbol_name);
1990 if (ref->name == NULL) {
1991 free(ref);
56b03f3c 1992 return -ENOMEM;
a1645ce1 1993 }
56b03f3c 1994
a1645ce1 1995 bracket = strchr(ref->name, ']');
56b03f3c
ACM
1996 if (bracket)
1997 *bracket = '\0';
1998
a1645ce1 1999 ref->addr = addr;
9de89fe7
ACM
2000
2001 for (i = 0; i < MAP__NR_TYPES; ++i) {
a1645ce1 2002 struct kmap *kmap = map__kmap(maps[i]);
ba92732e
WN
2003
2004 if (!kmap)
2005 continue;
a1645ce1 2006 kmap->ref_reloc_sym = ref;
9de89fe7
ACM
2007 }
2008
56b03f3c
ACM
2009 return 0;
2010}
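/*
 * Hedged context note: the same ref_reloc_sym (symbol name plus the address
 * recorded for it) is attached to the kmap of every map type, so the kernel
 * maps can later be relocated against where that symbol actually ended up;
 * anything from the first ']' onwards is stripped from the symbol name first.
 */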
1f626bc3 2011
316c7136 2012size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1f626bc3 2013{
316c7136 2014 return machines__fprintf_dsos(&session->machines, fp);
1f626bc3 2015}
f869097e 2016
316c7136 2017size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
417c2ff6 2018 bool (skip)(struct dso *dso, int parm), int parm)
f869097e 2019{
316c7136 2020 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
f869097e 2021}
e248de33
ACM
2022
2023size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2024{
c446870d
AH
2025 size_t ret;
2026 const char *msg = "";
2027
2028 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2029 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2030
fe692ac8 2031 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
e248de33 2032
75be989a 2033 ret += events_stats__fprintf(&session->evlist->stats, fp);
e248de33
ACM
2034 return ret;
2035}
c0230b2b 2036
b424eba2
ACM
2037size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2038{
2039 /*
2040 * FIXME: Here we have to actually print all the machines in this
2041 * session, not just the host...
2042 */
876650e6 2043 return machine__fprintf(&session->machines.host, fp);
b424eba2
ACM
2044}
2045
9cbdb702
DA
2046struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2047 unsigned int type)
2048{
2049 struct perf_evsel *pos;
2050
e5cadb93 2051 evlist__for_each_entry(session->evlist, pos) {
9cbdb702
DA
2052 if (pos->attr.type == type)
2053 return pos;
2054 }
2055 return NULL;
2056}
2057
5d67be97
AB
2058int perf_session__cpu_bitmap(struct perf_session *session,
2059 const char *cpu_list, unsigned long *cpu_bitmap)
2060{
8bac41cb 2061 int i, err = -1;
5d67be97
AB
2062 struct cpu_map *map;
2063
2064 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2065 struct perf_evsel *evsel;
2066
2067 evsel = perf_session__find_first_evtype(session, i);
2068 if (!evsel)
2069 continue;
2070
2071 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2072 pr_err("File does not contain CPU events. "
30795467 2073 "Remove -C option to proceed.\n");
5d67be97
AB
2074 return -1;
2075 }
2076 }
2077
2078 map = cpu_map__new(cpu_list);
47fbe53b
DA
2079 if (map == NULL) {
2080 pr_err("Invalid cpu_list\n");
2081 return -1;
2082 }
5d67be97
AB
2083
2084 for (i = 0; i < map->nr; i++) {
2085 int cpu = map->map[i];
2086
2087 if (cpu >= MAX_NR_CPUS) {
2088 pr_err("Requested CPU %d too large. "
2089 "Consider raising MAX_NR_CPUS\n", cpu);
8bac41cb 2090 goto out_delete_map;
5d67be97
AB
2091 }
2092
2093 set_bit(cpu, cpu_bitmap);
2094 }
2095
8bac41cb
SF
2096 err = 0;
2097
2098out_delete_map:
f30a79b0 2099 cpu_map__put(map);
8bac41cb 2100 return err;
5d67be97 2101}
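/*
 * Hedged usage sketch (not code from this file): callers implementing a
 * -C/--cpu filter pass the user's list and a bitmap sized for MAX_NR_CPUS:
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3,8", cpu_bitmap) < 0)
 *		return -1;
 *
 * and later test sample->cpu against the bitmap (e.g. with test_bit()) while
 * processing samples.
 */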
fbe96f29
SE
2102
2103void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2104 bool full)
2105{
fbe96f29
SE
2106 if (session == NULL || fp == NULL)
2107 return;
2108
fbe96f29 2109 fprintf(fp, "# ========\n");
fbe96f29
SE
2110 perf_header__fprintf_info(session, fp, full);
2111 fprintf(fp, "# ========\n#\n");
2112}
da378962
ACM
2113
2114
2115int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2116 const struct perf_evsel_str_handler *assocs,
2117 size_t nr_assocs)
2118{
da378962 2119 struct perf_evsel *evsel;
da378962
ACM
2120 size_t i;
2121 int err;
2122
2123 for (i = 0; i < nr_assocs; i++) {
ccf53eac
ACM
2124 /*
 2125	 * If a handler is being added for an event not in this
 2126	 * session, just ignore it.
2127 */
2128 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
da378962 2129 if (evsel == NULL)
ccf53eac 2130 continue;
da378962
ACM
2131
2132 err = -EEXIST;
744a9719 2133 if (evsel->handler != NULL)
ccf53eac 2134 goto out;
744a9719 2135 evsel->handler = assocs[i].handler;
da378962
ACM
2136 }
2137
2138 err = 0;
2139out:
2140 return err;
da378962 2141}
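/*
 * Hedged usage sketch (not code from this file): script-style tools associate
 * handlers by tracepoint name, e.g.:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *	};
 *
 *	perf_session__set_tracepoints_handlers(session, handlers);
 *
 * where perf_session__set_tracepoints_handlers() is assumed to be the
 * ARRAY_SIZE() convenience wrapper around the __ variant above, and
 * process_sched_switch() is a hypothetical handler.
 */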
3c659eed
AH
2142
2143int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2144 union perf_event *event,
2145 struct perf_session *session)
2146{
2147 struct perf_evlist *evlist = session->evlist;
2148 struct id_index_event *ie = &event->id_index;
2149 size_t i, nr, max_nr;
2150
2151 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2152 sizeof(struct id_index_entry);
2153 nr = ie->nr;
2154 if (nr > max_nr)
2155 return -EINVAL;
2156
2157 if (dump_trace)
2158 fprintf(stdout, " nr: %zu\n", nr);
2159
2160 for (i = 0; i < nr; i++) {
2161 struct id_index_entry *e = &ie->entries[i];
2162 struct perf_sample_id *sid;
2163
2164 if (dump_trace) {
2165 fprintf(stdout, " ... id: %"PRIu64, e->id);
2166 fprintf(stdout, " idx: %"PRIu64, e->idx);
2167 fprintf(stdout, " cpu: %"PRId64, e->cpu);
2168 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
2169 }
2170
2171 sid = perf_evlist__id2sid(evlist, e->id);
2172 if (!sid)
2173 return -ENOENT;
2174 sid->idx = e->idx;
2175 sid->cpu = e->cpu;
2176 sid->tid = e->tid;
2177 }
2178 return 0;
2179}
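/*
 * Hedged context note: the id index maps each sample id back to its
 * (idx, cpu, tid), which lets consumers such as AUX area trace decoders
 * attribute decoded data to a cpu/thread without needing a full sample to
 * parse.
 */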
2180
2181int perf_event__synthesize_id_index(struct perf_tool *tool,
2182 perf_event__handler_t process,
2183 struct perf_evlist *evlist,
2184 struct machine *machine)
2185{
2186 union perf_event *ev;
2187 struct perf_evsel *evsel;
2188 size_t nr = 0, i = 0, sz, max_nr, n;
2189 int err;
2190
2191 pr_debug2("Synthesizing id index\n");
2192
2193 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2194 sizeof(struct id_index_entry);
2195
e5cadb93 2196 evlist__for_each_entry(evlist, evsel)
3c659eed
AH
2197 nr += evsel->ids;
2198
2199 n = nr > max_nr ? max_nr : nr;
2200 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2201 ev = zalloc(sz);
2202 if (!ev)
2203 return -ENOMEM;
2204
2205 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2206 ev->id_index.header.size = sz;
2207 ev->id_index.nr = n;
2208
e5cadb93 2209 evlist__for_each_entry(evlist, evsel) {
3c659eed
AH
2210 u32 j;
2211
2212 for (j = 0; j < evsel->ids; j++) {
2213 struct id_index_entry *e;
2214 struct perf_sample_id *sid;
2215
2216 if (i >= n) {
2217 err = process(tool, ev, NULL, machine);
2218 if (err)
2219 goto out_err;
2220 nr -= n;
2221 i = 0;
2222 }
2223
2224 e = &ev->id_index.entries[i++];
2225
2226 e->id = evsel->id[j];
2227
2228 sid = perf_evlist__id2sid(evlist, e->id);
2229 if (!sid) {
2230 free(ev);
2231 return -ENOENT;
2232 }
2233
2234 e->idx = sid->idx;
2235 e->cpu = sid->cpu;
2236 e->tid = sid->tid;
2237 }
2238 }
2239
2240 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2241 ev->id_index.header.size = sz;
2242 ev->id_index.nr = nr;
2243
2244 err = process(tool, ev, NULL, machine);
2245out_err:
2246 free(ev);
2247
2248 return err;
2249}
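/*
 * Hedged context note: a single id_index event is capped by the u16 size
 * field in its header (UINT16_MAX bytes), so when more entries exist than
 * fit, the loop above emits full events of 'n' entries and the remaining
 * tail is flushed by the final process() call.
 */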