perf_counter: some simple userspace profiling
[linux-2.6-block.git] / Documentation / perf_counter / perf-record.c
CommitLineData
de9ac07b
PZ
1
2
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <getopt.h>
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

#include "../../include/linux/perf_counter.h"
34
35
/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_COUNTERS_DISABLE		31
#define PR_TASK_PERF_COUNTERS_ENABLE		32

/* Number of elements in a statically-declared array. */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/*
 * Current CLOCK_MONOTONIC time in nanoseconds.
 * (GCC statement expression: evaluates to the last expression.)
 */
#define rdclock()					\
({							\
	struct timespec ts;				\
							\
	clock_gettime(CLOCK_MONOTONIC, &ts);		\
	ts.tv_sec * 1000000000ULL + ts.tv_nsec;		\
})
52
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/*
 * Per-architecture perf_counter_open syscall number plus read
 * memory barrier and busy-wait relax primitives.
 */
#ifdef __x86_64__
#define __NR_perf_counter_open 295
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __i386__
#define __NR_perf_counter_open 333
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#define __NR_perf_counter_open 319
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif

/* Branch-prediction hint, kernel style. */
#define unlikely(x)	__builtin_expect(!!(x), 0)
/* Type-safe min(); the (void) comparison warns on mismatched types. */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
83
84asmlinkage int sys_perf_counter_open(
85 struct perf_counter_hw_event *hw_event_uptr __user,
86 pid_t pid,
87 int cpu,
88 int group_fd,
89 unsigned long flags)
90{
91 return syscall(
92 __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags);
93}
94
#define MAX_COUNTERS			64
#define MAX_NR_CPUS			256

/* Pack a counter type and event id into a single config word. */
#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))

static int			nr_counters		= 0;	/* events requested on the command line */
static __u64			event_id[MAX_COUNTERS]	= { };	/* packed config word per event */
static int			default_interval	= 100000; /* sample period when none given (-c) */
static int			event_count[MAX_COUNTERS];	/* per-event sample period */
static int			fd[MAX_NR_CPUS][MAX_COUNTERS];	/* counter fd per (cpu, event) */
static int			nr_cpus			= 0;
static unsigned int		page_size;
static unsigned int		mmap_pages		= 16;	/* data pages per counter mmap (-m) */
static int			output;				/* output file descriptor */
static char			*output_name		= "output.perf";
static int			group			= 0;	/* group counters under one leader */
static unsigned int		realtime_prio		= 0;	/* SCHED_FIFO priority, 0 = off (-r) */

/* Default sample periods, indexed like the generic hardware events. */
const unsigned int default_count[] = {
	1000000,
	1000000,
	  10000,
	  10000,
	1000000,
	  10000,
};
121
/* Human-readable names for the generic hardware events, in enum order. */
static char *hw_event_names[] = {
	"CPU cycles",
	"instructions",
	"cache references",
	"cache misses",
	"branches",
	"branch misses",
	"bus cycles",
};

/* Human-readable names for the software events, in enum order. */
static char *sw_event_names[] = {
	"cpu clock ticks",
	"task clock ticks",
	"pagefaults",
	"context switches",
	"CPU migrations",
	"minor faults",
	"major faults",
};
141
/* Maps one symbolic name (or alias) to a packed event config word. */
struct event_symbol {
	__u64 event;	/* EID(type, id) config word */
	char *symbol;	/* name accepted on the command line */
};

/*
 * Symbol table for -e parsing; aliases appear as separate entries
 * with the same event value (e.g. "cycles" for "cpu-cycles").
 */
static struct event_symbol event_symbols[] = {
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),		"cpu-cycles",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),		"cycles",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),		"instructions",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),		"cache-references",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),		"cache-misses",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),	"branch-instructions",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS),	"branches",		},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES),		"branch-misses",	},
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES),		"bus-cycles",		},

	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK),			"cpu-clock",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),		"task-clock",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),		"page-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),		"faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN),		"minor-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ),		"major-faults",		},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),		"context-switches",	},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),		"cs",			},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),		"cpu-migrations",	},
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),		"migrations",		},
};
169
170/*
171 * Each event can have multiple symbolic names.
172 * Symbolic names are (almost) exactly matched.
173 */
174static __u64 match_event_symbols(char *str)
175{
176 __u64 config, id;
177 int type;
178 unsigned int i;
179
180 if (sscanf(str, "r%llx", &config) == 1)
181 return config | PERF_COUNTER_RAW_MASK;
182
183 if (sscanf(str, "%d:%llu", &type, &id) == 2)
184 return EID(type, id);
185
186 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
187 if (!strncmp(str, event_symbols[i].symbol,
188 strlen(event_symbols[i].symbol)))
189 return event_symbols[i].event;
190 }
191
192 return ~0ULL;
193}
194
195static int parse_events(char *str)
196{
197 __u64 config;
198
199again:
200 if (nr_counters == MAX_COUNTERS)
201 return -1;
202
203 config = match_event_symbols(str);
204 if (config == ~0ULL)
205 return -1;
206
207 event_id[nr_counters] = config;
208 nr_counters++;
209
210 str = strstr(str, ",");
211 if (str) {
212 str++;
213 goto again;
214 }
215
216 return 0;
217}
218
/* Extract one bit-field (by its MASK/SHIFT pair) from a config word. */
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
226
227static void display_events_help(void)
228{
229 unsigned int i;
230 __u64 e;
231
232 printf(
233 " -e EVENT --event=EVENT # symbolic-name abbreviations");
234
235 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
236 int type, id;
237
238 e = event_symbols[i].event;
239 type = PERF_COUNTER_TYPE(e);
240 id = PERF_COUNTER_ID(e);
241
242 printf("\n %d:%d: %-20s",
243 type, id, event_symbols[i].symbol);
244 }
245
246 printf("\n"
247 " rNNN: raw PMU events (eventsel+umask)\n\n");
248}
249
250static void display_help(void)
251{
252 printf(
253 "Usage: perf-record [<options>]\n"
254 "perf-record Options (up to %d event types can be specified at once):\n\n",
255 MAX_COUNTERS);
256
257 display_events_help();
258
259 printf(
260 " -c CNT --count=CNT # event period to sample\n"
261 " -m pages --mmap_pages=<pages> # number of mmap data pages\n"
262 " -o file --output=<file> # output file\n"
263 " -r prio --realtime=<prio> # use RT prio\n"
264 );
265
266 exit(0);
267}
268
269static void process_options(int argc, char *argv[])
270{
271 int error = 0, counter;
272
273 for (;;) {
274 int option_index = 0;
275 /** Options for getopt */
276 static struct option long_options[] = {
277 {"count", required_argument, NULL, 'c'},
278 {"event", required_argument, NULL, 'e'},
279 {"mmap_pages", required_argument, NULL, 'm'},
280 {"output", required_argument, NULL, 'o'},
281 {"realtime", required_argument, NULL, 'r'},
282 {NULL, 0, NULL, 0 }
283 };
284 int c = getopt_long(argc, argv, "+:c:e:m:o:r:",
285 long_options, &option_index);
286 if (c == -1)
287 break;
288
289 switch (c) {
290 case 'c': default_interval = atoi(optarg); break;
291 case 'e': error = parse_events(optarg); break;
292 case 'm': mmap_pages = atoi(optarg); break;
293 case 'o': output_name = strdup(optarg); break;
294 case 'r': realtime_prio = atoi(optarg); break;
295 default: error = 1; break;
296 }
297 }
298 if (error)
299 display_help();
300
301 if (!nr_counters) {
302 nr_counters = 1;
303 event_id[0] = 0;
304 }
305
306 for (counter = 0; counter < nr_counters; counter++) {
307 if (event_count[counter])
308 continue;
309
310 event_count[counter] = default_interval;
311 }
312}
313
/* Per-counter view of the kernel's mmap'ed event ring buffer. */
struct mmap_data {
	int counter;		/* which event this buffer belongs to */
	void *base;		/* mmap base: control page + data pages */
	unsigned int mask;	/* data-area size - 1 (power of two) */
	unsigned int prev;	/* read position from the previous drain */
};
320
321static unsigned int mmap_read_head(struct mmap_data *md)
322{
323 struct perf_counter_mmap_page *pc = md->base;
324 int head;
325
326 head = pc->data_head;
327 rmb();
328
329 return head;
330}
331
static long events;				/* drains that found new data */
static struct timeval last_read, this_read;	/* timestamps of the last two drains */
334
/*
 * Drain new events from one counter's ring buffer into the output
 * file.  Copies [prev, head) to 'output', splitting into two write
 * passes when the range wraps around the end of the data area, and
 * truncates/restarts at head when we have fallen too far behind.
 */
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* data area follows the control page */
	unsigned long size;
	void *buf;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and screw up the events under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	if (old != head)
		events++;

	size = head - old;

	/*
	 * Wrap-around: the range [old, head) crosses the end of the
	 * data area, so write the tail chunk first, then fall through
	 * to write the rest from the start of the buffer.
	 */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;
		while (size) {
			int ret = write(output, buf, size);
			if (ret < 0) {
				perror("failed to write");
				exit(-1);
			}
			/* write() may be partial: advance and retry */
			size -= ret;
			buf += ret;
		}
	}

	/* linear (or remaining) chunk */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;
	while (size) {
		int ret = write(output, buf, size);
		if (ret < 0) {
			perror("failed to write");
			exit(-1);
		}
		size -= ret;
		buf += ret;
	}

	md->prev = old;
}
408
/* Set from the SIGCHLD handler when the profiled child exits. */
static volatile int done = 0;

static void sigchld_handler(int sig)
{
	if (sig == SIGCHLD)
		done = 1;
}
416
417int main(int argc, char *argv[])
418{
419 struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
420 struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
421 struct perf_counter_hw_event hw_event;
422 int i, counter, group_fd, nr_poll = 0;
423 pid_t pid;
424 int ret;
425
426 page_size = sysconf(_SC_PAGE_SIZE);
427
428 process_options(argc, argv);
429
430 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
431 assert(nr_cpus <= MAX_NR_CPUS);
432 assert(nr_cpus >= 0);
433
434 output = open(output_name, O_CREAT|O_RDWR, S_IRWXU);
435 if (output < 0) {
436 perror("failed to create output file");
437 exit(-1);
438 }
439
440 argc -= optind;
441 argv += optind;
442
443 for (i = 0; i < nr_cpus; i++) {
444 group_fd = -1;
445 for (counter = 0; counter < nr_counters; counter++) {
446
447 memset(&hw_event, 0, sizeof(hw_event));
448 hw_event.config = event_id[counter];
449 hw_event.irq_period = event_count[counter];
450 hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
451 hw_event.nmi = 1;
452 hw_event.mmap = 1;
453 hw_event.comm = 1;
454
455 fd[i][counter] = sys_perf_counter_open(&hw_event, -1, i, group_fd, 0);
456 if (fd[i][counter] < 0) {
457 int err = errno;
458 printf("kerneltop error: syscall returned with %d (%s)\n",
459 fd[i][counter], strerror(err));
460 if (err == EPERM)
461 printf("Are you root?\n");
462 exit(-1);
463 }
464 assert(fd[i][counter] >= 0);
465 fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);
466
467 /*
468 * First counter acts as the group leader:
469 */
470 if (group && group_fd == -1)
471 group_fd = fd[i][counter];
472
473 event_array[nr_poll].fd = fd[i][counter];
474 event_array[nr_poll].events = POLLIN;
475 nr_poll++;
476
477 mmap_array[i][counter].counter = counter;
478 mmap_array[i][counter].prev = 0;
479 mmap_array[i][counter].mask = mmap_pages*page_size - 1;
480 mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
481 PROT_READ, MAP_SHARED, fd[i][counter], 0);
482 if (mmap_array[i][counter].base == MAP_FAILED) {
483 printf("kerneltop error: failed to mmap with %d (%s)\n",
484 errno, strerror(errno));
485 exit(-1);
486 }
487 }
488 }
489
490 signal(SIGCHLD, sigchld_handler);
491
492 pid = fork();
493 if (pid < 0)
494 perror("failed to fork");
495
496 if (!pid) {
497 if (execvp(argv[0], argv)) {
498 perror(argv[0]);
499 exit(-1);
500 }
501 }
502
503 if (realtime_prio) {
504 struct sched_param param;
505
506 param.sched_priority = realtime_prio;
507 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
508 printf("Could not set realtime priority.\n");
509 exit(-1);
510 }
511 }
512
513 /*
514 * TODO: store the current /proc/$/maps information somewhere
515 */
516
517 while (!done) {
518 int hits = events;
519
520 for (i = 0; i < nr_cpus; i++) {
521 for (counter = 0; counter < nr_counters; counter++)
522 mmap_read(&mmap_array[i][counter]);
523 }
524
525 if (hits == events)
526 ret = poll(event_array, nr_poll, 100);
527 }
528
529 return 0;
530}