perf kvm: Add dimensions for KVM event statistics
b2441318 1// SPDX-License-Identifier: GPL-2.0
2#include "builtin.h"
3#include "perf.h"
4
4a3cec84 5#include "util/build-id.h"
bcf6edcd 6#include "util/evsel.h"
1afe1d14 7#include "util/evlist.h"
e0fcfb08 8#include "util/mmap.h"
b0742e90 9#include "util/term.h"
10#include "util/symbol.h"
11#include "util/thread.h"
12#include "util/header.h"
13#include "util/session.h"
2e73f00f 14#include "util/intlist.h"
fa0d9846 15#include <subcmd/pager.h>
4b6ab94e 16#include <subcmd/parse-options.h>
a1645ce1 17#include "util/trace-event.h"
a1645ce1 18#include "util/debug.h"
19#include "util/tool.h"
20#include "util/stat.h"
ea49e01c 21#include "util/synthetic-events.h"
1afe1d14 22#include "util/top.h"
f5fc1412 23#include "util/data.h"
d704ebda 24#include "util/ordered-events.h"
8067b3da 25#include "util/kvm-stat.h"
fa0d9846 26#include "ui/ui.h"
ae0f4eb3 27#include "util/string2.h"
28
29#include <sys/prctl.h>
87419c9a 30#ifdef HAVE_TIMERFD_SUPPORT
1afe1d14 31#include <sys/timerfd.h>
87419c9a 32#endif
c5e4027e 33#include <sys/time.h>
34#include <sys/types.h>
35#include <sys/stat.h>
36#include <fcntl.h>
a1645ce1 37
6ef81c55 38#include <linux/err.h>
877a7a11 39#include <linux/kernel.h>
fa0d9846 40#include <linux/string.h>
bd48c63e 41#include <linux/time64.h>
7f7c536f 42#include <linux/zalloc.h>
a43783ae 43#include <errno.h>
fd20e811 44#include <inttypes.h>
4208735d 45#include <poll.h>
1afe1d14 46#include <termios.h>
a1645ce1 47#include <semaphore.h>
9607ad3a 48#include <signal.h>
a1645ce1 49#include <math.h>
7728fa0c 50#include <perf/mmap.h>
a1645ce1 51
52#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
53#define GET_EVENT_KEY(func, field) \
54static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
55{ \
56 if (vcpu == -1) \
57 return event->total.field; \
58 \
59 if (vcpu >= event->max_vcpu) \
60 return 0; \
61 \
62 return event->vcpu[vcpu].field; \
63}
64
65#define COMPARE_EVENT_KEY(func, field) \
66GET_EVENT_KEY(func, field) \
dd787ae4 67static int64_t cmp_event_ ## func(struct kvm_event *one, \
68 struct kvm_event *two, int vcpu) \
69{ \
dd787ae4 70 return get_event_ ##func(one, vcpu) - \
71 get_event_ ##func(two, vcpu); \
72}
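/*
 * For illustration (these expansions are not spelled out in the file):
 * COMPARE_EVENT_KEY(time, time) above generates roughly
 *
 *	static u64 get_event_time(struct kvm_event *event, int vcpu);
 *	static int64_t cmp_event_time(struct kvm_event *one,
 *				      struct kvm_event *two, int vcpu);
 *
 * where vcpu == -1 reads the aggregated event->total.time and any other
 * value reads the per-vcpu bucket event->vcpu[vcpu].time.
 */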
73
74COMPARE_EVENT_KEY(time, time);
75COMPARE_EVENT_KEY(max, stats.max);
76COMPARE_EVENT_KEY(min, stats.min);
77COMPARE_EVENT_KEY(count, stats.n);
78COMPARE_EVENT_KEY(mean, stats.mean);
79
80#define DEF_SORT_NAME_KEY(name, compare_key) \
81 { #name, cmp_event_ ## compare_key }
82
83static struct kvm_event_key keys[] = {
84 DEF_SORT_NAME_KEY(sample, count),
85 DEF_SORT_NAME_KEY(time, mean),
86 { NULL, NULL }
87};
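/*
 * keys[] maps the -k/--key option of 'perf kvm stat report' to a
 * comparison callback: "sample" orders events by how often they were hit,
 * "time" by their mean handling time.  select_key() below performs the
 * lookup against kvm->sort_key.
 */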
88
89struct kvm_hists {
90 struct hists hists;
91 struct perf_hpp_list list;
92};
93
94struct kvm_dimension {
95 const char *name;
96 int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left,
97 struct hist_entry *right);
98};
99
100struct kvm_fmt {
101 struct perf_hpp_fmt fmt;
102 struct kvm_dimension *dim;
103};
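/*
 * A kvm_fmt embeds the generic perf_hpp_fmt so the shared histogram code
 * can drive sorting and output, while container_of() recovers the
 * enclosing kvm_fmt (and hence its kvm_dimension) in callbacks such as
 * fmt_equal() below.
 */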
104
105static struct kvm_hists kvm_hists;
106
107static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
108 struct hist_entry *left,
109 struct hist_entry *right)
110{
111 /* Return the negated strcmp() so entries sort in alphabetical order */
112 return -strcmp(left->kvm_info->name, right->kvm_info->name);
113}
114
115static struct kvm_dimension dim_event = {
116 .name = "ev_name",
117 .cmp = ev_name_cmp,
118};
119
120#define EV_METRIC_CMP(metric) \
121static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused, \
122 struct hist_entry *left, \
123 struct hist_entry *right) \
124{ \
125 struct kvm_event *event_left; \
126 struct kvm_event *event_right; \
127 struct perf_kvm_stat *perf_kvm; \
128 \
129 event_left = container_of(left, struct kvm_event, he); \
130 event_right = container_of(right, struct kvm_event, he); \
131 \
132 perf_kvm = event_left->perf_kvm; \
133 return cmp_event_##metric(event_left, event_right, \
134 perf_kvm->trace_vcpu); \
135}
136
137EV_METRIC_CMP(time)
138EV_METRIC_CMP(count)
139EV_METRIC_CMP(max)
140EV_METRIC_CMP(min)
141EV_METRIC_CMP(mean)
142
143static struct kvm_dimension dim_time = {
144 .name = "time",
145 .cmp = ev_cmp_time,
146};
147
148static struct kvm_dimension dim_count = {
149 .name = "sample",
150 .cmp = ev_cmp_count,
151};
152
153static struct kvm_dimension dim_max_time = {
154 .name = "max_t",
155 .cmp = ev_cmp_max,
156};
157
158static struct kvm_dimension dim_min_time = {
159 .name = "min_t",
160 .cmp = ev_cmp_min,
161};
162
163static struct kvm_dimension dim_mean_time = {
164 .name = "mean_t",
165 .cmp = ev_cmp_mean,
166};
167
168static struct kvm_dimension *dimensions[] = {
169 &dim_event,
170 &dim_time,
171 &dim_count,
172 &dim_max_time,
173 &dim_min_time,
174 &dim_mean_time,
175 NULL,
176};
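/*
 * The NULL entry terminates the table; get_dimension() scans it linearly
 * and matches the names ("ev_name", "time", "sample", "max_t", "min_t",
 * "mean_t") that callers may pass as sort or output keys.
 */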
177
178static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
179{
180 struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt);
181 struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt);
182
183 return kvm_fmt_a->dim == kvm_fmt_b->dim;
184}
185
186static void fmt_free(struct perf_hpp_fmt *fmt)
187{
188 struct kvm_fmt *kvm_fmt;
189
190 kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
191 free(kvm_fmt);
192}
193
194static struct kvm_dimension *get_dimension(const char *name)
195{
196 unsigned int i;
197
198 for (i = 0; dimensions[i] != NULL; i++) {
199 if (!strcmp(dimensions[i]->name, name))
200 return dimensions[i];
201 }
202
203 return NULL;
204}
205
206static struct kvm_fmt *get_format(const char *name)
207{
208 struct kvm_dimension *dim = get_dimension(name);
209 struct kvm_fmt *kvm_fmt;
210 struct perf_hpp_fmt *fmt;
211
212 if (!dim)
213 return NULL;
214
215 kvm_fmt = zalloc(sizeof(*kvm_fmt));
216 if (!kvm_fmt)
217 return NULL;
218
219 kvm_fmt->dim = dim;
220
221 fmt = &kvm_fmt->fmt;
222 INIT_LIST_HEAD(&fmt->list);
223 INIT_LIST_HEAD(&fmt->sort_list);
224 fmt->cmp = dim->cmp;
225 fmt->sort = dim->cmp;
226 fmt->color = NULL;
227 fmt->entry = NULL;
228 fmt->header = NULL;
229 fmt->width = NULL;
230 fmt->collapse = dim->cmp;
231 fmt->equal = fmt_equal;
232 fmt->free = fmt_free;
233
234 return kvm_fmt;
235}
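/*
 * get_format() allocates a fresh kvm_fmt for every request, so the same
 * dimension can back both an output column and a sort key; fmt_equal()
 * still treats the copies as equal because it compares the dim pointers
 * rather than the wrapper objects.
 */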
236
237static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
238{
239 struct kvm_fmt *kvm_fmt = get_format(name);
240
241 if (!kvm_fmt) {
242 pr_warning("Failed to find format for output field %s.\n", name);
243 return -EINVAL;
244 }
245
246 perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt);
247 return 0;
248}
249
250static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
251{
252 struct kvm_fmt *kvm_fmt = get_format(name);
253
254 if (!kvm_fmt) {
255 pr_warning("Failed to find format for sorting %s.\n", name);
256 return -EINVAL;
257 }
258
259 perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt);
260 return 0;
261}
262
263static int kvm_hpp_list__init(char *list,
264 struct perf_hpp_list *hpp_list,
265 int (*fn)(struct perf_hpp_list *hpp_list,
266 char *name))
267{
268 char *tmp, *tok;
269 int ret;
270
271 if (!list || !fn)
272 return 0;
273
274 for (tok = strtok_r(list, ", ", &tmp); tok;
275 tok = strtok_r(NULL, ", ", &tmp)) {
276 ret = fn(hpp_list, tok);
277 if (!ret)
278 continue;
279
280 /* Handle errors */
281 if (ret == -EINVAL)
282 pr_err("Invalid field key: '%s'", tok);
283 else if (ret == -ESRCH)
284 pr_err("Unknown field key: '%s'", tok);
285 else
286 pr_err("Failed to initialize field key: '%s'", tok);
287
288 break;
289 }
290
291 return ret;
292}
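/*
 * The list argument is a comma- or space-separated set of dimension
 * names; e.g. a (hypothetical) string "sample,time" would register the
 * sample and time fields in that order.
 */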
293
294static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list,
295 const char *output_, const char *sort_)
296{
297 char *output = output_ ? strdup(output_) : NULL;
298 char *sort = sort_ ? strdup(sort_) : NULL;
299 int ret;
300
301 ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output);
302 if (ret)
303 goto out;
304
305 ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort);
306 if (ret)
307 goto out;
308
309 /* Copy sort keys to output fields */
310 perf_hpp__setup_output_field(hpp_list);
311
312 /* and then copy output fields to sort keys */
313 perf_hpp__append_sort_keys(hpp_list);
314out:
315 free(output);
316 free(sort);
317 return ret;
318}
319
320static int kvm_hists__init(void)
321{
322 __hists__init(&kvm_hists.hists, &kvm_hists.list);
323 perf_hpp_list__init(&kvm_hists.list);
41f1138e 324 return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
2d08124b 325}
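/*
 * With a NULL output list and "ev_name" as the only sort key, entries are
 * initially ordered alphabetically by event name via ev_name_cmp();
 * perf_hpp__setup_output_field() then mirrors the sort keys into the
 * output fields.
 */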
326#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
327
328static const char *get_filename_for_perf_kvm(void)
329{
330 const char *filename;
331
332 if (perf_host && !perf_guest)
333 filename = strdup("perf.data.host");
334 else if (!perf_host && perf_guest)
335 filename = strdup("perf.data.guest");
336 else
337 filename = strdup("perf.data.kvm");
338
339 return filename;
340}
341
378ef0f5 342#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
bcf6edcd 343
32dcd021 344void exit_event_get_key(struct evsel *evsel,
345 struct perf_sample *sample,
346 struct event_key *key)
347{
348 key->info = 0;
efc0cdc9 349 key->key = evsel__intval(evsel, sample, kvm_exit_reason);
350}
351
32dcd021 352bool kvm_exit_event(struct evsel *evsel)
bcf6edcd 353{
162607ea 354 return !strcmp(evsel->name, kvm_exit_trace);
355}
356
32dcd021 357bool exit_event_begin(struct evsel *evsel,
9daa8123 358 struct perf_sample *sample, struct event_key *key)
bcf6edcd 359{
360 if (kvm_exit_event(evsel)) {
361 exit_event_get_key(evsel, sample, key);
362 return true;
363 }
364
365 return false;
366}
367
32dcd021 368bool kvm_entry_event(struct evsel *evsel)
bcf6edcd 369{
162607ea 370 return !strcmp(evsel->name, kvm_entry_trace);
371}
372
32dcd021 373bool exit_event_end(struct evsel *evsel,
374 struct perf_sample *sample __maybe_unused,
375 struct event_key *key __maybe_unused)
bcf6edcd 376{
14907e73 377 return kvm_entry_event(evsel);
378}
379
380static const char *get_exit_reason(struct perf_kvm_stat *kvm,
381 struct exit_reasons_table *tbl,
382 u64 exit_code)
bcf6edcd 383{
df74c13b 384 while (tbl->reason != NULL) {
385 if (tbl->exit_code == exit_code)
386 return tbl->reason;
387 tbl++;
388 }
389
390 pr_err("unknown kvm exit code:%lld on %s\n",
de332ac4 391 (unsigned long long)exit_code, kvm->exit_reasons_isa);
392 return "UNKNOWN";
393}
394
395void exit_event_decode_key(struct perf_kvm_stat *kvm,
396 struct event_key *key,
397 char *decode)
bcf6edcd 398{
3be8e2a0 399 const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
df74c13b 400 key->key);
bcf6edcd 401
2d31e0bf 402 scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
403}
404
9daa8123 405static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
bcf6edcd 406{
9daa8123 407 struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
bcf6edcd 408
409 for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
410 if (!strcmp(events_ops->name, kvm->report_event)) {
411 kvm->events_ops = events_ops->ops;
412 return true;
413 }
414 }
415
416 return false;
417}
418
419struct vcpu_event_record {
420 int vcpu_id;
421 u64 start_time;
422 struct kvm_event *last_event;
423};
424
bcf6edcd 425
3786063a 426static void init_kvm_event_record(struct perf_kvm_stat *kvm)
bcf6edcd 427{
b880deea 428 unsigned int i;
bcf6edcd 429
b880deea 430 for (i = 0; i < EVENTS_CACHE_SIZE; i++)
de332ac4 431 INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
432}
433
87419c9a 434#ifdef HAVE_TIMERFD_SUPPORT
435static void clear_events_cache_stats(struct list_head *kvm_events_cache)
436{
437 struct list_head *head;
438 struct kvm_event *event;
439 unsigned int i;
62d04dbf 440 int j;
441
442 for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
443 head = &kvm_events_cache[i];
444 list_for_each_entry(event, head, hash_entry) {
445 /* reset stats for event */
446 event->total.time = 0;
447 init_stats(&event->total.stats);
448
449 for (j = 0; j < event->max_vcpu; ++j) {
450 event->vcpu[j].time = 0;
451 init_stats(&event->vcpu[j].stats);
452 }
453 }
454 }
455}
87419c9a 456#endif
1afe1d14 457
458static int kvm_events_hash_fn(u64 key)
459{
460 return key & (EVENTS_CACHE_SIZE - 1);
461}
462
463static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
464{
465 int old_max_vcpu = event->max_vcpu;
6ca5f308 466 void *prev;
467
468 if (vcpu_id < event->max_vcpu)
469 return true;
470
471 while (event->max_vcpu <= vcpu_id)
472 event->max_vcpu += DEFAULT_VCPU_NUM;
473
6ca5f308 474 prev = event->vcpu;
475 event->vcpu = realloc(event->vcpu,
476 event->max_vcpu * sizeof(*event->vcpu));
477 if (!event->vcpu) {
6ca5f308 478 free(prev);
479 pr_err("Not enough memory\n");
480 return false;
481 }
482
483 memset(event->vcpu + old_max_vcpu, 0,
484 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
485 return true;
486}
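/*
 * The per-vcpu array grows in DEFAULT_VCPU_NUM-sized steps and the newly
 * appended slots are zeroed, so statistics for a previously unseen vcpu id
 * start from a clean state.
 */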
487
a7d451a8 488static struct kvm_event *kvm_alloc_init_event(struct perf_kvm_stat *kvm,
489 struct event_key *key,
490 struct perf_sample *sample __maybe_unused)
491{
492 struct kvm_event *event;
493
494 event = zalloc(sizeof(*event));
495 if (!event) {
496 pr_err("Not enough memory\n");
497 return NULL;
498 }
499
a7d451a8 500 event->perf_kvm = kvm;
bcf6edcd 501 event->key = *key;
acb61fc8 502 init_stats(&event->total.stats);
503 return event;
504}
505
3786063a 506static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
507 struct event_key *key,
508 struct perf_sample *sample)
509{
510 struct kvm_event *event;
511 struct list_head *head;
512
513 BUG_ON(key->key == INVALID_KEY);
514
de332ac4 515 head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
355afe81 516 list_for_each_entry(event, head, hash_entry) {
517 if (event->key.key == key->key && event->key.info == key->info)
518 return event;
355afe81 519 }
bcf6edcd 520
730651f7 521 event = kvm_alloc_init_event(kvm, key, sample);
522 if (!event)
523 return NULL;
524
525 list_add(&event->hash_entry, head);
526 return event;
527}
528
3786063a 529static bool handle_begin_event(struct perf_kvm_stat *kvm,
de332ac4 530 struct vcpu_event_record *vcpu_record,
531 struct event_key *key,
532 struct perf_sample *sample)
533{
534 struct kvm_event *event = NULL;
535
536 if (key->key != INVALID_KEY)
730651f7 537 event = find_create_kvm_event(kvm, key, sample);
538
539 vcpu_record->last_event = event;
730651f7 540 vcpu_record->start_time = sample->time;
541 return true;
542}
543
544static void
545kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
546{
547 kvm_stats->time += time_diff;
548 update_stats(&kvm_stats->stats, time_diff);
549}
550
551static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
552{
553 struct kvm_event_stats *kvm_stats = &event->total;
554
555 if (vcpu_id != -1)
556 kvm_stats = &event->vcpu[vcpu_id];
557
558 return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
559 avg_stats(&kvm_stats->stats));
560}
561
562static bool update_kvm_event(struct perf_kvm_stat *kvm,
563 struct kvm_event *event, int vcpu_id,
564 u64 time_diff)
565{
566 /* Update overall statistics */
567 kvm->total_count++;
568 kvm->total_time += time_diff;
569
570 if (vcpu_id == -1) {
571 kvm_update_event_stats(&event->total, time_diff);
572 return true;
573 }
574
575 if (!kvm_event_expand(event, vcpu_id))
576 return false;
577
578 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
579 return true;
580}
581
3be8e2a0 582static bool is_child_event(struct perf_kvm_stat *kvm,
32dcd021 583 struct evsel *evsel,
584 struct perf_sample *sample,
585 struct event_key *key)
586{
587 struct child_event_ops *child_ops;
588
589 child_ops = kvm->events_ops->child_ops;
590
591 if (!child_ops)
592 return false;
593
594 for (; child_ops->name; child_ops++) {
595 if (!strcmp(evsel->name, child_ops->name)) {
596 child_ops->get_key(evsel, sample, key);
597 return true;
598 }
599 }
600
601 return false;
602}
603
604static bool handle_child_event(struct perf_kvm_stat *kvm,
605 struct vcpu_event_record *vcpu_record,
606 struct event_key *key,
730651f7 607 struct perf_sample *sample)
608{
609 struct kvm_event *event = NULL;
610
611 if (key->key != INVALID_KEY)
730651f7 612 event = find_create_kvm_event(kvm, key, sample);
613
614 vcpu_record->last_event = event;
615
616 return true;
617}
618
619static bool skip_event(const char *event)
620{
621 const char * const *skip_events;
622
623 for (skip_events = kvm_skip_events; *skip_events; skip_events++)
624 if (!strcmp(event, *skip_events))
625 return true;
626
627 return false;
628}
629
3786063a 630static bool handle_end_event(struct perf_kvm_stat *kvm,
631 struct vcpu_event_record *vcpu_record,
632 struct event_key *key,
70f7b4a7 633 struct perf_sample *sample)
634{
635 struct kvm_event *event;
636 u64 time_begin, time_diff;
637 int vcpu;
638
639 if (kvm->trace_vcpu == -1)
640 vcpu = -1;
641 else
642 vcpu = vcpu_record->vcpu_id;
643
644 event = vcpu_record->last_event;
645 time_begin = vcpu_record->start_time;
646
647 /* The begin event is not caught. */
648 if (!time_begin)
649 return true;
650
651 /*
652 * In some cases, the 'begin event' only records the start timestamp;
653 * the actual event is recognized in the 'end event' (e.g. mmio-event).
654 */
655
656 /* Both begin and end events did not get the key. */
657 if (!event && key->key == INVALID_KEY)
658 return true;
659
660 if (!event)
730651f7 661 event = find_create_kvm_event(kvm, key, sample);
662
663 if (!event)
664 return false;
665
666 vcpu_record->last_event = NULL;
667 vcpu_record->start_time = 0;
668
1afe1d14 669 /* seems to happen once in a while during live mode */
70f7b4a7 670 if (sample->time < time_begin) {
671 pr_debug("End time before begin time; skipping event.\n");
672 return true;
673 }
bcf6edcd 674
675 time_diff = sample->time - time_begin;
676
677 if (kvm->duration && time_diff > kvm->duration) {
2d31e0bf 678 char decode[KVM_EVENT_NAME_LEN];
679
680 kvm->events_ops->decode_key(kvm, &event->key, decode);
54c801ff 681 if (!skip_event(decode)) {
682 pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
683 sample->time, sample->pid, vcpu_record->vcpu_id,
c05a6e14 684 decode, time_diff / NSEC_PER_USEC);
685 }
686 }
687
9c3aa1f4 688 return update_kvm_event(kvm, event, vcpu, time_diff);
689}
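/*
 * The time accounted to an event is simply the end sample's timestamp
 * minus the start_time recorded by handle_begin_event(); samples exceeding
 * the optional --duration threshold are printed immediately, everything
 * else only updates the aggregated statistics.
 */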
690
691static
692struct vcpu_event_record *per_vcpu_record(struct thread *thread,
32dcd021 693 struct evsel *evsel,
14907e73 694 struct perf_sample *sample)
695{
696 /* Only kvm_entry records vcpu id. */
69e865c3 697 if (!thread__priv(thread) && kvm_entry_event(evsel)) {
698 struct vcpu_event_record *vcpu_record;
699
14907e73 700 vcpu_record = zalloc(sizeof(*vcpu_record));
bcf6edcd 701 if (!vcpu_record) {
14907e73 702 pr_err("%s: Not enough memory\n", __func__);
703 return NULL;
704 }
705
efc0cdc9 706 vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
69e865c3 707 thread__set_priv(thread, vcpu_record);
708 }
709
69e865c3 710 return thread__priv(thread);
711}
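/*
 * The vcpu_event_record is cached in the thread's private data the first
 * time a kvm_entry sample is seen for that thread, since kvm_entry is the
 * only tracepoint handled here that carries the vcpu id.
 */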
712
3786063a 713static bool handle_kvm_event(struct perf_kvm_stat *kvm,
de332ac4 714 struct thread *thread,
32dcd021 715 struct evsel *evsel,
14907e73 716 struct perf_sample *sample)
717{
718 struct vcpu_event_record *vcpu_record;
719 struct event_key key = { .key = INVALID_KEY,
720 .exit_reasons = kvm->exit_reasons };
bcf6edcd 721
14907e73 722 vcpu_record = per_vcpu_record(thread, evsel, sample);
723 if (!vcpu_record)
724 return true;
725
726 /* only process events for the vcpu(s) the user cares about */
727 if ((kvm->trace_vcpu != -1) &&
728 (kvm->trace_vcpu != vcpu_record->vcpu_id))
729 return true;
730
de332ac4 731 if (kvm->events_ops->is_begin_event(evsel, sample, &key))
730651f7 732 return handle_begin_event(kvm, vcpu_record, &key, sample);
bcf6edcd 733
734 if (is_child_event(kvm, evsel, sample, &key))
735 return handle_child_event(kvm, vcpu_record, &key, sample);
736
de332ac4 737 if (kvm->events_ops->is_end_event(evsel, sample, &key))
70f7b4a7 738 return handle_end_event(kvm, vcpu_record, &key, sample);
739
740 return true;
741}
742
3786063a 743static bool select_key(struct perf_kvm_stat *kvm)
744{
745 int i;
746
747 for (i = 0; keys[i].name; i++) {
748 if (!strcmp(keys[i].name, kvm->sort_key)) {
749 kvm->compare = keys[i].key;
750 return true;
751 }
752 }
753
de332ac4 754 pr_err("Unknown compare key:%s\n", kvm->sort_key);
755 return false;
756}
757
758static void insert_to_result(struct rb_root *result, struct kvm_event *event,
759 key_cmp_fun bigger, int vcpu)
bcf6edcd 760{
de332ac4 761 struct rb_node **rb = &result->rb_node;
762 struct rb_node *parent = NULL;
763 struct kvm_event *p;
764
765 while (*rb) {
766 p = container_of(*rb, struct kvm_event, rb);
767 parent = *rb;
768
dd787ae4 769 if (bigger(event, p, vcpu) > 0)
770 rb = &(*rb)->rb_left;
771 else
772 rb = &(*rb)->rb_right;
773 }
774
775 rb_link_node(&event->rb, parent, rb);
de332ac4 776 rb_insert_color(&event->rb, result);
777}
778
779static bool event_is_valid(struct kvm_event *event, int vcpu)
780{
781 return !!get_event_count(event, vcpu);
782}
783
3786063a 784static void sort_result(struct perf_kvm_stat *kvm)
bcf6edcd
XG
785{
786 unsigned int i;
de332ac4 787 int vcpu = kvm->trace_vcpu;
bcf6edcd
XG
788 struct kvm_event *event;
789
790 for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
791 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
bcf6edcd 792 if (event_is_valid(event, vcpu)) {
793 insert_to_result(&kvm->result, event,
794 kvm->compare, vcpu);
bcf6edcd 795 }
796 }
797 }
798}
799
800/* returns left most element of result, and erase it */
de332ac4 801static struct kvm_event *pop_from_result(struct rb_root *result)
bcf6edcd 802{
de332ac4 803 struct rb_node *node = rb_first(result);
804
805 if (!node)
806 return NULL;
807
de332ac4 808 rb_erase(node, result);
809 return container_of(node, struct kvm_event, rb);
810}
811
1afe1d14 812static void print_vcpu_info(struct perf_kvm_stat *kvm)
bcf6edcd 813{
814 int vcpu = kvm->trace_vcpu;
815
816 pr_info("Analyze events for ");
817
818 if (kvm->opts.target.system_wide)
819 pr_info("all VMs, ");
820 else if (kvm->opts.target.pid)
821 pr_info("pid(s) %s, ", kvm->opts.target.pid);
822 else
823 pr_info("dazed and confused on what is monitored, ");
1afe1d14 824
825 if (vcpu == -1)
826 pr_info("all VCPUs:\n\n");
827 else
828 pr_info("VCPU %d:\n\n", vcpu);
829}
830
831static void show_timeofday(void)
832{
833 char date[64];
834 struct timeval tv;
835 struct tm ltime;
836
837 gettimeofday(&tv, NULL);
838 if (localtime_r(&tv.tv_sec, &ltime)) {
839 strftime(date, sizeof(date), "%H:%M:%S", &ltime);
840 pr_info("%s.%06ld", date, tv.tv_usec);
841 } else
842 pr_info("00:00:00.000000");
843
844 return;
845}
846
3786063a 847static void print_result(struct perf_kvm_stat *kvm)
bcf6edcd 848{
2d31e0bf 849 char decode[KVM_EVENT_NAME_LEN];
bcf6edcd 850 struct kvm_event *event;
de332ac4 851 int vcpu = kvm->trace_vcpu;
bcf6edcd 852
1afe1d14
DA
853 if (kvm->live) {
854 puts(CONSOLE_CLEAR);
855 show_timeofday();
856 }
857
bcf6edcd 858 pr_info("\n\n");
1afe1d14 859 print_vcpu_info(kvm);
2d31e0bf 860 pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name);
861 pr_info("%10s ", "Samples");
862 pr_info("%9s ", "Samples%");
863
864 pr_info("%9s ", "Time%");
865 pr_info("%11s ", "Min Time");
866 pr_info("%11s ", "Max Time");
867 pr_info("%16s ", "Avg time");
868 pr_info("\n\n");
869
de332ac4 870 while ((event = pop_from_result(&kvm->result))) {
62d04dbf 871 u64 ecount, etime, max, min;
872
873 ecount = get_event_count(event, vcpu);
874 etime = get_event_time(event, vcpu);
875 max = get_event_max(event, vcpu);
876 min = get_event_min(event, vcpu);
bcf6edcd 877
de332ac4 878 kvm->events_ops->decode_key(kvm, &event->key, decode);
2d31e0bf 879 pr_info("%*s ", KVM_EVENT_NAME_LEN, decode);
bcf6edcd 880 pr_info("%10llu ", (unsigned long long)ecount);
881 pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
882 pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
883 pr_info("%9.2fus ", (double)min / NSEC_PER_USEC);
884 pr_info("%9.2fus ", (double)max / NSEC_PER_USEC);
885 pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC,
886 kvm_event_rel_stddev(vcpu, event));
887 pr_info("\n");
888 }
889
e4f7637f 890 pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
c05a6e14 891 kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);
892
893 if (kvm->lost_events)
894 pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
895}
896
378ef0f5 897#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
898static int process_lost_event(struct perf_tool *tool,
899 union perf_event *event __maybe_unused,
900 struct perf_sample *sample __maybe_unused,
901 struct machine *machine __maybe_unused)
902{
903 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
904
905 kvm->lost_events++;
906 return 0;
bcf6edcd 907}
87419c9a 908#endif
bcf6edcd 909
910static bool skip_sample(struct perf_kvm_stat *kvm,
911 struct perf_sample *sample)
912{
913 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
914 return true;
915
916 return false;
917}
918
de332ac4 919static int process_sample_event(struct perf_tool *tool,
920 union perf_event *event,
921 struct perf_sample *sample,
32dcd021 922 struct evsel *evsel,
923 struct machine *machine)
924{
b91fc39f 925 int err = 0;
2e73f00f 926 struct thread *thread;
927 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
928 tool);
bcf6edcd 929
930 if (skip_sample(kvm, sample))
931 return 0;
932
933 if (machine__resolve(machine, &kvm->al, sample) < 0) {
934 pr_warning("Failed to resolve address location, skipping sample.\n");
935 return 0;
936 }
937
314add6b 938 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
939 if (thread == NULL) {
940 pr_debug("problem processing %d event, skipping it.\n",
941 event->header.type);
942 return -1;
943 }
944
de332ac4 945 if (!handle_kvm_event(kvm, thread, evsel, sample))
b91fc39f 946 err = -1;
bcf6edcd 947
948 thread__put(thread);
949 return err;
950}
951
1afe1d14 952static int cpu_isa_config(struct perf_kvm_stat *kvm)
bcf6edcd 953{
f67001a4 954 char buf[128], *cpuid;
65c647a6 955 int err;
956
957 if (kvm->live) {
958 err = get_cpuid(buf, sizeof(buf));
959 if (err != 0) {
960 pr_err("Failed to look up CPU type: %s\n",
961 str_error_r(err, buf, sizeof(buf)));
962 return -err;
963 }
964 cpuid = buf;
965 } else
966 cpuid = kvm->session->header.env.cpuid;
bcf6edcd 967
968 if (!cpuid) {
969 pr_err("Failed to look up CPU type\n");
970 return -EINVAL;
971 }
972
973 err = cpu_isa_init(kvm, cpuid);
974 if (err == -ENOTSUP)
975 pr_err("CPU %s is not supported.\n", cpuid);
1afe1d14 976
65c647a6 977 return err;
978}
979
980static bool verify_vcpu(int vcpu)
981{
982 if (vcpu != -1 && vcpu < 0) {
983 pr_err("Invalid vcpu:%d.\n", vcpu);
984 return false;
985 }
986
987 return true;
988}
989
378ef0f5 990#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
991/* Keep the number of events handled per mmap pass modest so that
992 * sample processing stays smooth.
993 */
994#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
995
996static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
997 u64 *mmap_time)
998{
63503dba 999 struct evlist *evlist = kvm->evlist;
1afe1d14 1000 union perf_event *event;
a5830532 1001 struct mmap *md;
93d10af2 1002 u64 timestamp;
1003 s64 n = 0;
1004 int err;
1005
1006 *mmap_time = ULLONG_MAX;
53172f90 1007 md = &evlist->mmap[idx];
7c4d4182 1008 err = perf_mmap__read_init(&md->core);
1009 if (err < 0)
1010 return (err == -EAGAIN) ? 0 : -1;
1011
151ed5d7 1012 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
2a6599cd 1013 err = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1afe1d14 1014 if (err) {
7728fa0c 1015 perf_mmap__consume(&md->core);
1016 pr_err("Failed to parse sample\n");
1017 return -1;
1018 }
1019
2292083f 1020 err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL);
8e50d384 1021 /*
b7b61cbe 1022 * FIXME: Here we can't consume the event, as perf_session__queue_event will
1023 * point to it, and it'll get possibly overwritten by the kernel.
1024 */
7728fa0c 1025 perf_mmap__consume(&md->core);
8e50d384 1026
1027 if (err) {
1028 pr_err("Failed to enqueue sample: %d\n", err);
1029 return -1;
1030 }
1031
1032 /* save time stamp of our first sample for this mmap */
1033 if (n == 0)
93d10af2 1034 *mmap_time = timestamp;
1035
1036 /* limit events per mmap handled all at once */
1037 n++;
1038 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
1039 break;
1040 }
1041
32fdc2ca 1042 perf_mmap__read_done(&md->core);
1043 return n;
1044}
1045
1046static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
1047{
1048 int i, err, throttled = 0;
1049 s64 n, ntotal = 0;
1050 u64 flush_time = ULLONG_MAX, mmap_time;
1051
c976ee11 1052 for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
1053 n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
1054 if (n < 0)
1055 return -1;
1056
1057 /* flush time is going to be the minimum of all the individual
1058 * mmap times. Essentially, we flush all the samples queued up
1059 * from the last pass under our minimal start time -- that leaves
1060 * a very small race for samples to come in with a lower timestamp.
1061 * The ioctl to return the perf_clock timestamp should close the
1062 * race entirely.
1063 */
1064 if (mmap_time < flush_time)
1065 flush_time = mmap_time;
1066
1067 ntotal += n;
1068 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
1069 throttled = 1;
1070 }
1071
1072 /* flush queue after each round in which we processed events */
1073 if (ntotal) {
1074 struct ordered_events *oe = &kvm->session->ordered_events;
1075
1076 oe->next_flush = flush_time;
1077 err = ordered_events__flush(oe, OE_FLUSH__ROUND);
1078 if (err) {
1079 if (kvm->lost_events)
1080 pr_info("\nLost events: %" PRIu64 "\n\n",
1081 kvm->lost_events);
1082 return err;
1083 }
1084 }
1085
1086 return throttled;
1087}
1088
1089static volatile int done;
1090
1091static void sig_handler(int sig __maybe_unused)
1092{
1093 done = 1;
1094}
1095
1096static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
1097{
1098 struct itimerspec new_value;
1099 int rc = -1;
1100
1101 kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
1102 if (kvm->timerfd < 0) {
1103 pr_err("timerfd_create failed\n");
1104 goto out;
1105 }
1106
1107 new_value.it_value.tv_sec = kvm->display_time;
1108 new_value.it_value.tv_nsec = 0;
1109 new_value.it_interval.tv_sec = kvm->display_time;
1110 new_value.it_interval.tv_nsec = 0;
1111
1112 if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
1113 pr_err("timerfd_settime failed: %d\n", errno);
1114 close(kvm->timerfd);
1115 goto out;
1116 }
1117
1118 rc = 0;
1119out:
1120 return rc;
1121}
1122
1123static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
1124{
1125 uint64_t c;
1126 int rc;
1127
1128 rc = read(kvm->timerfd, &c, sizeof(uint64_t));
1129 if (rc < 0) {
1130 if (errno == EAGAIN)
1131 return 0;
1132
1133 pr_err("Failed to read timer fd: %d\n", errno);
1134 return -1;
1135 }
1136
1137 if (rc != sizeof(uint64_t)) {
1138 pr_err("Error reading timer fd - invalid size returned\n");
1139 return -1;
1140 }
1141
1142 if (c != 1)
1143 pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
1144
1145 /* update display */
1146 sort_result(kvm);
1147 print_result(kvm);
1148
1149 /* reset counts */
1150 clear_events_cache_stats(kvm->kvm_events_cache);
1151 kvm->total_count = 0;
1152 kvm->total_time = 0;
1153 kvm->lost_events = 0;
1154
1155 return 0;
1156}
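/*
 * In live mode the timerfd fires every kvm->display_time seconds (1s by
 * default); each expiry re-sorts and reprints the table and then clears
 * the cached statistics, so every refresh shows only the most recent
 * interval.
 */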
1157
1158static int fd_set_nonblock(int fd)
1159{
1160 long arg = 0;
1161
1162 arg = fcntl(fd, F_GETFL);
1163 if (arg < 0) {
1164 pr_err("Failed to get current flags for fd %d\n", fd);
1165 return -1;
1166 }
1167
1168 if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
1169 pr_err("Failed to set non-block option on fd %d\n", fd);
1170 return -1;
1171 }
1172
1173 return 0;
1174}
1175
d5b4130a 1176static int perf_kvm__handle_stdin(void)
1177{
1178 int c;
1179
1afe1d14 1180 c = getc(stdin);
1181 if (c == 'q')
1182 return 1;
1183
1184 return 0;
1185}
1186
1187static int kvm_events_live_report(struct perf_kvm_stat *kvm)
1188{
1ca72260 1189 int nr_stdin, ret, err = -EINVAL;
d5b4130a 1190 struct termios save;
1191
1192 /* live flag must be set first */
1193 kvm->live = true;
1194
1195 ret = cpu_isa_config(kvm);
1196 if (ret < 0)
1197 return ret;
1198
1199 if (!verify_vcpu(kvm->trace_vcpu) ||
1200 !select_key(kvm) ||
1201 !register_kvm_events_ops(kvm)) {
1202 goto out;
1203 }
1204
d5b4130a 1205 set_term_quiet_input(&save);
1206 init_kvm_event_record(kvm);
1207
1208 kvm_hists__init();
1209
1210 signal(SIGINT, sig_handler);
1211 signal(SIGTERM, sig_handler);
1212
1213 /* add timer fd */
1214 if (perf_kvm__timerfd_create(kvm) < 0) {
1215 err = -1;
1216 goto out;
1217 }
1218
f4009e7b 1219 if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
1220 goto out;
1221
f4009e7b 1222 nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
0cae013c 1223 if (nr_stdin < 0)
1224 goto out;
1225
1226 if (fd_set_nonblock(fileno(stdin)) != 0)
1227 goto out;
1228
1229 /* everything is good - enable the events and process */
1c87f165 1230 evlist__enable(kvm->evlist);
1231
1232 while (!done) {
40cb2d51 1233 struct fdarray *fda = &kvm->evlist->core.pollfd;
1234 int rc;
1235
1236 rc = perf_kvm__mmap_read(kvm);
1237 if (rc < 0)
1238 break;
1239
1240 err = perf_kvm__handle_timerfd(kvm);
1241 if (err)
1242 goto out;
1243
1ca72260 1244 if (fda->entries[nr_stdin].revents & POLLIN)
d5b4130a 1245 done = perf_kvm__handle_stdin();
1246
1247 if (!rc && !done)
4bfbcf3e 1248 err = evlist__poll(kvm->evlist, 100);
1249 }
1250
e74676de 1251 evlist__disable(kvm->evlist);
1252
1253 if (err == 0) {
1254 sort_result(kvm);
1255 print_result(kvm);
1256 }
1257
1258out:
1259 if (kvm->timerfd >= 0)
1260 close(kvm->timerfd);
1261
d5b4130a 1262 tcsetattr(0, TCSAFLUSH, &save);
1263 return err;
1264}
1265
1266static int kvm_live_open_events(struct perf_kvm_stat *kvm)
1267{
1268 int err, rc = -1;
32dcd021 1269 struct evsel *pos;
63503dba 1270 struct evlist *evlist = kvm->evlist;
f9f33fdb 1271 char sbuf[STRERR_BUFSIZE];
1afe1d14 1272
78e1bc25 1273 evlist__config(evlist, &kvm->opts, NULL);
1274
1275 /*
1276 * Note: exclude_{guest,host} do not apply here.
1277 * This command processes KVM tracepoints from host only
1278 */
e5cadb93 1279 evlist__for_each_entry(evlist, pos) {
1fc632ce 1280 struct perf_event_attr *attr = &pos->core.attr;
1281
1282 /* make sure these *are* set */
1283 evsel__set_sample_bit(pos, TID);
1284 evsel__set_sample_bit(pos, TIME);
1285 evsel__set_sample_bit(pos, CPU);
1286 evsel__set_sample_bit(pos, RAW);
1afe1d14 1287 /* make sure these are *not*; want as small a sample as possible */
1288 evsel__reset_sample_bit(pos, PERIOD);
1289 evsel__reset_sample_bit(pos, IP);
1290 evsel__reset_sample_bit(pos, CALLCHAIN);
1291 evsel__reset_sample_bit(pos, ADDR);
1292 evsel__reset_sample_bit(pos, READ);
1293 attr->mmap = 0;
1294 attr->comm = 0;
1295 attr->task = 0;
1296
1297 attr->sample_period = 1;
1298
1299 attr->watermark = 0;
1300 attr->wakeup_events = 1000;
1301
1302 /* will enable all once we are ready */
1303 attr->disabled = 1;
1304 }
1305
474ddc4c 1306 err = evlist__open(evlist);
1afe1d14 1307 if (err < 0) {
f9f33fdb 1308 printf("Couldn't create the events: %s\n",
c8b5f2c9 1309 str_error_r(errno, sbuf, sizeof(sbuf)));
1afe1d14 1310 goto out;
1311 }
1312
9521b5f2 1313 if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
f9f33fdb 1314 ui__error("Failed to mmap the events: %s\n",
c8b5f2c9 1315 str_error_r(errno, sbuf, sizeof(sbuf)));
750b4ede 1316 evlist__close(evlist);
1317 goto out;
1318 }
1319
1320 rc = 0;
1321
1322out:
1323 return rc;
bcf6edcd 1324}
87419c9a 1325#endif
bcf6edcd 1326
3786063a 1327static int read_events(struct perf_kvm_stat *kvm)
bcf6edcd 1328{
1329 int ret;
1330
1331 struct perf_tool eops = {
1332 .sample = process_sample_event,
1333 .comm = perf_event__process_comm,
f3b3614a 1334 .namespaces = perf_event__process_namespaces,
0a8cb85c 1335 .ordered_events = true,
de332ac4 1336 };
8ceb41d7 1337 struct perf_data file = {
1338 .path = kvm->file_name,
1339 .mode = PERF_DATA_MODE_READ,
1340 .force = kvm->force,
f5fc1412 1341 };
1342
1343 kvm->tool = eops;
2681bd85 1344 kvm->session = perf_session__new(&file, &kvm->tool);
6ef81c55 1345 if (IS_ERR(kvm->session)) {
bcf6edcd 1346 pr_err("Initializing perf session failed\n");
6ef81c55 1347 return PTR_ERR(kvm->session);
1348 }
1349
0a7e6d1b 1350 symbol__init(&kvm->session->header.env);
14d37f38 1351
1352 if (!perf_session__has_traces(kvm->session, "kvm record")) {
1353 ret = -EINVAL;
1354 goto out_delete;
1355 }
1356
1357 /*
1358 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
1359 * traced in the old kernel.
1360 */
1afe1d14 1361 ret = cpu_isa_config(kvm);
bcf6edcd 1362 if (ret < 0)
1363 goto out_delete;
1364
1365 ret = perf_session__process_events(kvm->session);
bcf6edcd 1366
1367out_delete:
1368 perf_session__delete(kvm->session);
1369 return ret;
1370}
1371
1372static int parse_target_str(struct perf_kvm_stat *kvm)
1373{
1374 if (kvm->opts.target.pid) {
1375 kvm->pid_list = intlist__new(kvm->opts.target.pid);
1376 if (kvm->pid_list == NULL) {
1377 pr_err("Error parsing process id string\n");
1378 return -EINVAL;
1379 }
1380 }
1381
1382 return 0;
1383}
1384
3786063a 1385static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
1386{
1387 int ret = -EINVAL;
de332ac4 1388 int vcpu = kvm->trace_vcpu;
bcf6edcd 1389
1390 if (parse_target_str(kvm) != 0)
1391 goto exit;
1392
1393 if (!verify_vcpu(vcpu))
1394 goto exit;
1395
de332ac4 1396 if (!select_key(kvm))
1397 goto exit;
1398
de332ac4 1399 if (!register_kvm_events_ops(kvm))
1400 goto exit;
1401
de332ac4 1402 init_kvm_event_record(kvm);
1403 setup_pager();
1404
1405 kvm_hists__init();
1406
de332ac4 1407 ret = read_events(kvm);
1408 if (ret)
1409 goto exit;
1410
1411 sort_result(kvm);
1412 print_result(kvm);
1413
1414exit:
1415 return ret;
1416}
1417
1418#define STRDUP_FAIL_EXIT(s) \
1419 ({ char *_p; \
1420 _p = strdup(s); \
1421 if (!_p) \
1422 return -ENOMEM; \
1423 _p; \
1424 })
1425
1426int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
1427{
1428 return 0;
1429}
1430
1431static int
1432kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
bcf6edcd 1433{
9daa8123 1434 unsigned int rec_argc, i, j, events_tp_size;
bcf6edcd 1435 const char **rec_argv;
1436 const char * const record_args[] = {
1437 "record",
1438 "-R",
1439 "-m", "1024",
1440 "-c", "1",
1441 };
1442 const char * const kvm_stat_record_usage[] = {
1443 "perf kvm stat record [<options>]",
1444 NULL
1445 };
9daa8123 1446 const char * const *events_tp;
1447 int ret;
1448
9daa8123 1449 events_tp_size = 0;
1450 ret = setup_kvm_events_tp(kvm);
1451 if (ret < 0) {
1452 pr_err("Unable to setup the kvm tracepoints\n");
1453 return ret;
1454 }
1455
1456 for (events_tp = kvm_events_tp; *events_tp; events_tp++)
1457 events_tp_size++;
bcf6edcd 1458
8fdd84c4 1459 rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
9daa8123 1460 2 * events_tp_size;
1461 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1462
1463 if (rec_argv == NULL)
1464 return -ENOMEM;
1465
1466 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1467 rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
1468
9daa8123 1469 for (j = 0; j < events_tp_size; j++) {
1470 rec_argv[i++] = "-e";
1471 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
1472 }
1473
bcf6edcd 1474 rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
de332ac4 1475 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
1476
1477 for (j = 1; j < (unsigned int)argc; j++, i++)
1478 rec_argv[i] = argv[j];
1479
1480 set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
1481 set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
1482 set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);
1483
1484 set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
1485 set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
1486 set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
1487 set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
1488 set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
1489 set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
1490 set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
1491 set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
1492 set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
1493 set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
1494 set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
1495 set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
1496 set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
1497 set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
1498 set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);
1499
1500 record_usage = kvm_stat_record_usage;
b0ad8ea6 1501 return cmd_record(i, rec_argv);
1502}
1503
1504static int
1505kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
1506{
1507 const struct option kvm_events_report_options[] = {
1508 OPT_STRING(0, "event", &kvm->report_event, "report event",
1509 "event for reporting: vmexit, "
1510 "mmio (x86 only), ioport (x86 only)"),
1511 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1512 "vcpu id to report"),
1513 OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1514 "key for sorting: sample(sort by samples number)"
1515 " time (sort by avg time)"),
3ae4a76a 1516 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
2e73f00f 1517 "analyze events only for given process id(s)"),
8cc5ec1f 1518 OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
1519 OPT_END()
1520 };
bcf6edcd 1521
1522 const char * const kvm_events_report_usage[] = {
1523 "perf kvm stat report [<options>]",
1524 NULL
1525 };
bcf6edcd 1526
1527 if (argc) {
1528 argc = parse_options(argc, argv,
1529 kvm_events_report_options,
1530 kvm_events_report_usage, 0);
1531 if (argc)
1532 usage_with_options(kvm_events_report_usage,
1533 kvm_events_report_options);
1534 }
1535
1536 if (!kvm->opts.target.pid)
1537 kvm->opts.target.system_wide = true;
1538
de332ac4 1539 return kvm_events_report_vcpu(kvm);
1540}
1541
378ef0f5 1542#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
63503dba 1543static struct evlist *kvm_live_event_list(void)
1afe1d14 1544{
63503dba 1545 struct evlist *evlist;
1afe1d14 1546 char *tp, *name, *sys;
1afe1d14 1547 int err = -1;
9daa8123 1548 const char * const *events_tp;
1afe1d14 1549
0f98b11c 1550 evlist = evlist__new();
1551 if (evlist == NULL)
1552 return NULL;
1553
9daa8123 1554 for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
1afe1d14 1555
9daa8123 1556 tp = strdup(*events_tp);
1557 if (tp == NULL)
1558 goto out;
1559
1560 /* split tracepoint into subsystem and name */
1561 sys = tp;
1562 name = strchr(tp, ':');
1563 if (name == NULL) {
1564 pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
9daa8123 1565 *events_tp);
1566 free(tp);
1567 goto out;
1568 }
1569 *name = '\0';
1570 name++;
1571
e251abee 1572 if (evlist__add_newtp(evlist, sys, name, NULL)) {
9daa8123 1573 pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
1574 free(tp);
1575 goto out;
1576 }
1577
1578 free(tp);
1579 }
1580
1581 err = 0;
1582
1583out:
1584 if (err) {
c12995a5 1585 evlist__delete(evlist);
1586 evlist = NULL;
1587 }
1588
1589 return evlist;
1590}
1591
1592static int kvm_events_live(struct perf_kvm_stat *kvm,
1593 int argc, const char **argv)
1594{
1595 char errbuf[BUFSIZ];
1596 int err;
1597
1598 const struct option live_options[] = {
1599 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
1600 "record events on existing process id"),
994a1f78 1601 OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
25f84702 1602 "number of mmap data pages", evlist__parse_mmap_pages),
1afe1d14
DA
1603 OPT_INCR('v', "verbose", &verbose,
1604 "be more verbose (show counter open errors, etc)"),
1605 OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
1606 "system-wide collection from all CPUs"),
1607 OPT_UINTEGER('d', "display", &kvm->display_time,
1608 "time in seconds between display updates"),
1609 OPT_STRING(0, "event", &kvm->report_event, "report event",
1610 "event for reporting: "
1611 "vmexit, mmio (x86 only), ioport (x86 only)"),
1612 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1613 "vcpu id to report"),
1614 OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1615 "key for sorting: sample(sort by samples number)"
1616 " time (sort by avg time)"),
70f7b4a7 1617 OPT_U64(0, "duration", &kvm->duration,
1618 "show events other than"
1619 " HLT (x86 only) or Wait state (s390 only)"
1620 " that take longer than duration usecs"),
3fcb10e4 1621 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
9d9cad76 1622 "per thread proc mmap processing timeout in ms"),
1623 OPT_END()
1624 };
1625 const char * const live_usage[] = {
1626 "perf kvm stat live [<options>]",
1627 NULL
1628 };
8ceb41d7 1629 struct perf_data data = {
1630 .mode = PERF_DATA_MODE_WRITE,
1631 };
1632
1633
1634 /* event handling */
1635 kvm->tool.sample = process_sample_event;
1636 kvm->tool.comm = perf_event__process_comm;
1637 kvm->tool.exit = perf_event__process_exit;
1638 kvm->tool.fork = perf_event__process_fork;
1639 kvm->tool.lost = process_lost_event;
f3b3614a 1640 kvm->tool.namespaces = perf_event__process_namespaces;
0a8cb85c 1641 kvm->tool.ordered_events = true;
1642 perf_tool__fill_defaults(&kvm->tool);
1643
1644 /* set defaults */
1645 kvm->display_time = 1;
1646 kvm->opts.user_interval = 1;
1647 kvm->opts.mmap_pages = 512;
1648 kvm->opts.target.uses_mmap = false;
1649 kvm->opts.target.uid_str = NULL;
1650 kvm->opts.target.uid = UINT_MAX;
1651
0a7e6d1b 1652 symbol__init(NULL);
1653 disable_buildid_cache();
1654
1655 use_browser = 0;
1656
1657 if (argc) {
1658 argc = parse_options(argc, argv, live_options,
1659 live_usage, 0);
1660 if (argc)
1661 usage_with_options(live_usage, live_options);
1662 }
1663
1664 kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
1665
1666 /*
1667 * target related setups
1668 */
602ad878 1669 err = target__validate(&kvm->opts.target);
1afe1d14 1670 if (err) {
602ad878 1671 target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
1672 ui__warning("%s", errbuf);
1673 }
1674
602ad878 1675 if (target__none(&kvm->opts.target))
1676 kvm->opts.target.system_wide = true;
1677
1678
1679 /*
1680 * generate the event list
1681 */
1682 err = setup_kvm_events_tp(kvm);
1683 if (err < 0) {
1684 pr_err("Unable to setup the kvm tracepoints\n");
1685 return err;
1686 }
1687
1688 kvm->evlist = kvm_live_event_list();
1689 if (kvm->evlist == NULL) {
1690 err = -1;
1691 goto out;
1692 }
1693
7748bb71 1694 if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
1695 usage_with_options(live_usage, live_options);
1696
1697 /*
1698 * perf session
1699 */
2681bd85 1700 kvm->session = perf_session__new(&data, &kvm->tool);
1701 if (IS_ERR(kvm->session)) {
1702 err = PTR_ERR(kvm->session);
1703 goto out;
1704 }
1705 kvm->session->evlist = kvm->evlist;
1706 perf_session__set_id_hdr_size(kvm->session);
673d659f 1707 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
a33fbd56 1708 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
84111b9c 1709 kvm->evlist->core.threads, true, false, 1);
1710 err = kvm_live_open_events(kvm);
1711 if (err)
1712 goto out;
1713
1714 err = kvm_events_live_report(kvm);
1715
1716out:
e1446551 1717 perf_session__delete(kvm->session);
1afe1d14 1718 kvm->session = NULL;
c12995a5 1719 evlist__delete(kvm->evlist);
1720
1721 return err;
1722}
87419c9a 1723#endif
1afe1d14 1724
1725static void print_kvm_stat_usage(void)
1726{
1727 printf("Usage: perf kvm stat <command>\n\n");
1728
1729 printf("# Available commands:\n");
1730 printf("\trecord: record kvm events\n");
1731 printf("\treport: report statistical data of kvm events\n");
1afe1d14 1732 printf("\tlive: live reporting of statistical data of kvm events\n");
1733
1734 printf("\nOtherwise, it is the alias of 'perf stat':\n");
1735}
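/*
 * A typical (illustrative) session using these subcommands:
 *
 *	perf kvm stat record -p <qemu-pid>
 *	perf kvm stat report --event vmexit --key time
 *	perf kvm stat live --vcpu 0
 */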
1736
3786063a 1737static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
bcf6edcd 1738{
1739 struct perf_kvm_stat kvm = {
1740 .file_name = file_name,
1741
1742 .trace_vcpu = -1,
1743 .report_event = "vmexit",
1744 .sort_key = "sample",
1745
1746 };
1747
1748 if (argc == 1) {
1749 print_kvm_stat_usage();
1750 goto perf_stat;
1751 }
1752
ae0f4eb3 1753 if (strlen(argv[1]) > 2 && strstarts("record", argv[1]))
3786063a 1754 return kvm_events_record(&kvm, argc - 1, argv + 1);
bcf6edcd 1755
ae0f4eb3 1756 if (strlen(argv[1]) > 2 && strstarts("report", argv[1]))
3786063a 1757 return kvm_events_report(&kvm, argc - 1 , argv + 1);
bcf6edcd 1758
378ef0f5 1759#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
1760 if (!strncmp(argv[1], "live", 4))
1761 return kvm_events_live(&kvm, argc - 1 , argv + 1);
87419c9a 1762#endif
1afe1d14 1763
bcf6edcd 1764perf_stat:
b0ad8ea6 1765 return cmd_stat(argc, argv);
bcf6edcd 1766}
da50ad69 1767#endif /* HAVE_KVM_STAT_SUPPORT */
bcf6edcd 1768
1769int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
1770 const char **argv __maybe_unused)
1771{
1772 return 0;
1773}
1774
3786063a 1775static int __cmd_record(const char *file_name, int argc, const char **argv)
a1645ce1 1776{
124eb5f8 1777 int rec_argc, i = 0, j, ret;
1778 const char **rec_argv;
1779
1780 ret = kvm_add_default_arch_event(&argc, argv);
1781 if (ret)
1782 return -EINVAL;
1783
1784 rec_argc = argc + 2;
1785 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1786 rec_argv[i++] = strdup("record");
1787 rec_argv[i++] = strdup("-o");
3786063a 1788 rec_argv[i++] = strdup(file_name);
1789 for (j = 1; j < argc; j++, i++)
1790 rec_argv[i] = argv[j];
1791
1792 BUG_ON(i != rec_argc);
1793
b0ad8ea6 1794 return cmd_record(i, rec_argv);
1795}
1796
3786063a 1797static int __cmd_report(const char *file_name, int argc, const char **argv)
1798{
1799 int rec_argc, i = 0, j;
1800 const char **rec_argv;
1801
1802 rec_argc = argc + 2;
1803 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1804 rec_argv[i++] = strdup("report");
1805 rec_argv[i++] = strdup("-i");
3786063a 1806 rec_argv[i++] = strdup(file_name);
1807 for (j = 1; j < argc; j++, i++)
1808 rec_argv[i] = argv[j];
1809
1810 BUG_ON(i != rec_argc);
1811
b0ad8ea6 1812 return cmd_report(i, rec_argv);
1813}
1814
1815static int
1816__cmd_buildid_list(const char *file_name, int argc, const char **argv)
1817{
1818 int rec_argc, i = 0, j;
1819 const char **rec_argv;
1820
1821 rec_argc = argc + 2;
1822 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1823 rec_argv[i++] = strdup("buildid-list");
1824 rec_argv[i++] = strdup("-i");
3786063a 1825 rec_argv[i++] = strdup(file_name);
1826 for (j = 1; j < argc; j++, i++)
1827 rec_argv[i] = argv[j];
1828
1829 BUG_ON(i != rec_argc);
1830
b0ad8ea6 1831 return cmd_buildid_list(i, rec_argv);
1832}
1833
b0ad8ea6 1834int cmd_kvm(int argc, const char **argv)
a1645ce1 1835{
20914ce5 1836 const char *file_name = NULL;
de332ac4 1837 const struct option kvm_options[] = {
3786063a 1838 OPT_STRING('i', "input", &file_name, "file",
de332ac4 1839 "Input file name"),
3786063a 1840 OPT_STRING('o', "output", &file_name, "file",
1841 "Output file name"),
1842 OPT_BOOLEAN(0, "guest", &perf_guest,
1843 "Collect guest os data"),
1844 OPT_BOOLEAN(0, "host", &perf_host,
1845 "Collect host os data"),
1846 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
1847 "guest mount directory under which every guest os"
1848 " instance has a subdir"),
1849 OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
1850 "file", "file saving guest os vmlinux"),
1851 OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
1852 "file", "file saving guest os /proc/kallsyms"),
1853 OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
1854 "file", "file saving guest os /proc/modules"),
1855 OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
1856 "Guest code can be found in hypervisor process"),
1857 OPT_INCR('v', "verbose", &verbose,
1858 "be more verbose (show counter open errors, etc)"),
1859 OPT_END()
1860 };
1861
1862 const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
1863 "buildid-list", "stat", NULL };
1864 const char *kvm_usage[] = { NULL, NULL };
de332ac4 1865
1866 perf_host = 0;
1867 perf_guest = 1;
a1645ce1 1868
1869 argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
1870 PARSE_OPT_STOP_AT_NON_OPTION);
1871 if (!argc)
1872 usage_with_options(kvm_usage, kvm_options);
1873
1874 if (!perf_host)
1875 perf_guest = 1;
1876
3786063a 1877 if (!file_name) {
e1a2b174 1878 file_name = get_filename_for_perf_kvm();
de332ac4 1879
3786063a 1880 if (!file_name) {
1881 pr_err("Failed to allocate memory for filename\n");
1882 return -ENOMEM;
1883 }
1884 }
1885
ae0f4eb3 1886 if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
3786063a 1887 return __cmd_record(file_name, argc, argv);
ae0f4eb3 1888 else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
3786063a 1889 return __cmd_report(file_name, argc, argv);
d2f30b79 1890 else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
b0ad8ea6 1891 return cmd_diff(argc, argv);
d2f30b79 1892 else if (!strcmp(argv[0], "top"))
b0ad8ea6 1893 return cmd_top(argc, argv);
d2f30b79 1894 else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
3786063a 1895 return __cmd_buildid_list(file_name, argc, argv);
378ef0f5 1896#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
d2f30b79 1897 else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
3786063a 1898 return kvm_cmd_stat(file_name, argc, argv);
7321090f 1899#endif
1900 else
1901 usage_with_options(kvm_usage, kvm_options);
1902
1903 return 0;
1904}