include/uapi/linux/perf_event.h
1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14 #ifndef _UAPI_LINUX_PERF_EVENT_H
15 #define _UAPI_LINUX_PERF_EVENT_H
16
17 #include <linux/types.h>
18 #include <linux/ioctl.h>
19 #include <asm/byteorder.h>
20
21 /*
22  * User-space ABI bits:
23  */
24
25 /*
26  * attr.type
27  */
28 enum perf_type_id {
29         PERF_TYPE_HARDWARE                      = 0,
30         PERF_TYPE_SOFTWARE                      = 1,
31         PERF_TYPE_TRACEPOINT                    = 2,
32         PERF_TYPE_HW_CACHE                      = 3,
33         PERF_TYPE_RAW                           = 4,
34         PERF_TYPE_BREAKPOINT                    = 5,
35
36         PERF_TYPE_MAX,                          /* non-ABI */
37 };
38
39 /*
40  * Generalized performance event event_id types, used by the
41  * attr.config parameter of the sys_perf_event_open()
42  * syscall:
43  */
44 enum perf_hw_id {
45         /*
46          * Common hardware events, generalized by the kernel:
47          */
48         PERF_COUNT_HW_CPU_CYCLES                = 0,
49         PERF_COUNT_HW_INSTRUCTIONS              = 1,
50         PERF_COUNT_HW_CACHE_REFERENCES          = 2,
51         PERF_COUNT_HW_CACHE_MISSES              = 3,
52         PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
53         PERF_COUNT_HW_BRANCH_MISSES             = 5,
54         PERF_COUNT_HW_BUS_CYCLES                = 6,
55         PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
56         PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
57         PERF_COUNT_HW_REF_CPU_CYCLES            = 9,
58
59         PERF_COUNT_HW_MAX,                      /* non-ABI */
60 };
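
/*
 * Illustrative usage (not part of the ABI described by this header):
 * counting CPU cycles for the calling thread (pid = 0, cpu = -1) via the
 * raw sys_perf_event_open() syscall, assuming <unistd.h>, <sys/ioctl.h>
 * and <sys/syscall.h> are included and using the ioctls defined further
 * down. Error handling is omitted.
 *
 *   struct perf_event_attr attr = {
 *           .type           = PERF_TYPE_HARDWARE,
 *           .size           = sizeof(attr),
 *           .config         = PERF_COUNT_HW_CPU_CYCLES,
 *           .disabled       = 1,
 *           .exclude_kernel = 1,
 *   };
 *   __u64 count;
 *   int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   ... run the code being measured ...
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *   read(fd, &count, sizeof(count));
 */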
61
62 /*
63  * Generalized hardware cache events:
64  *
65  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
66  *       { read, write, prefetch } x
67  *       { accesses, misses }
68  */
69 enum perf_hw_cache_id {
70         PERF_COUNT_HW_CACHE_L1D                 = 0,
71         PERF_COUNT_HW_CACHE_L1I                 = 1,
72         PERF_COUNT_HW_CACHE_LL                  = 2,
73         PERF_COUNT_HW_CACHE_DTLB                = 3,
74         PERF_COUNT_HW_CACHE_ITLB                = 4,
75         PERF_COUNT_HW_CACHE_BPU                 = 5,
76         PERF_COUNT_HW_CACHE_NODE                = 6,
77
78         PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
79 };
80
81 enum perf_hw_cache_op_id {
82         PERF_COUNT_HW_CACHE_OP_READ             = 0,
83         PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
84         PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,
85
86         PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
87 };
88
89 enum perf_hw_cache_op_result_id {
90         PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
91         PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,
92
93         PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
94 };
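
/*
 * For PERF_TYPE_HW_CACHE events the three enums above are packed into
 * attr.config as documented for sys_perf_event_open(): cache id in bits
 * 0-7, operation in bits 8-15, result in bits 16-23. For example
 * (illustrative), L1 data-cache read misses:
 *
 *   attr.type   = PERF_TYPE_HW_CACHE;
 *   attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */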
95
96 /*
97  * Special "software" events provided by the kernel, even if the hardware
98  * does not support performance events. These events measure various
99  * physical and software events of the kernel (and allow them to be
100  * profiled as well):
101  */
102 enum perf_sw_ids {
103         PERF_COUNT_SW_CPU_CLOCK                 = 0,
104         PERF_COUNT_SW_TASK_CLOCK                = 1,
105         PERF_COUNT_SW_PAGE_FAULTS               = 2,
106         PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
107         PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
108         PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
109         PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
110         PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
111         PERF_COUNT_SW_EMULATION_FAULTS          = 8,
112         PERF_COUNT_SW_DUMMY                     = 9,
113
114         PERF_COUNT_SW_MAX,                      /* non-ABI */
115 };
116
117 /*
118  * Bits that can be set in attr.sample_type to request information
119  * in the overflow packets.
120  */
121 enum perf_event_sample_format {
122         PERF_SAMPLE_IP                          = 1U << 0,
123         PERF_SAMPLE_TID                         = 1U << 1,
124         PERF_SAMPLE_TIME                        = 1U << 2,
125         PERF_SAMPLE_ADDR                        = 1U << 3,
126         PERF_SAMPLE_READ                        = 1U << 4,
127         PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
128         PERF_SAMPLE_ID                          = 1U << 6,
129         PERF_SAMPLE_CPU                         = 1U << 7,
130         PERF_SAMPLE_PERIOD                      = 1U << 8,
131         PERF_SAMPLE_STREAM_ID                   = 1U << 9,
132         PERF_SAMPLE_RAW                         = 1U << 10,
133         PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
134         PERF_SAMPLE_REGS_USER                   = 1U << 12,
135         PERF_SAMPLE_STACK_USER                  = 1U << 13,
136         PERF_SAMPLE_WEIGHT                      = 1U << 14,
137         PERF_SAMPLE_DATA_SRC                    = 1U << 15,
138         PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
139         PERF_SAMPLE_TRANSACTION                 = 1U << 17,
140         PERF_SAMPLE_REGS_INTR                   = 1U << 18,
141
142         PERF_SAMPLE_MAX = 1U << 19,             /* non-ABI */
143 };
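
/*
 * Illustrative sampling setup (not part of the ABI): record the
 * instruction pointer, pid/tid and a timestamp in every overflow record,
 * taking one sample every 100000 occurrences of the configured event:
 *
 *   attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 *   attr.sample_period = 100000;
 */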
144
145 /*
146  * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set
147  *
148  * If the user does not pass priv level information via branch_sample_type,
149  * the kernel uses the event's priv level. Branch and event priv levels do
150  * not have to match. Branch priv level is checked for permissions.
151  *
152  * The branch types can be combined, however BRANCH_ANY covers all types
153  * of branches and therefore it supersedes all the other types.
154  */
155 enum perf_branch_sample_type_shift {
156         PERF_SAMPLE_BRANCH_USER_SHIFT           = 0, /* user branches */
157         PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1, /* kernel branches */
158         PERF_SAMPLE_BRANCH_HV_SHIFT             = 2, /* hypervisor branches */
159
160         PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3, /* any branch types */
161         PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4, /* any call branch */
162         PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5, /* any return branch */
163         PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6, /* indirect calls */
164         PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7, /* transaction aborts */
165         PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8, /* in transaction */
166         PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9, /* not in transaction */
167         PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */
168
169         PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
170
171         PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
172 };
173
174 enum perf_branch_sample_type {
175         PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
176         PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
177         PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
178
179         PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
180         PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
181         PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
182         PERF_SAMPLE_BRANCH_IND_CALL     = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
183         PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
184         PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
185         PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
186         PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
187
188         PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
189
190         PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
191 };
192
193 #define PERF_SAMPLE_BRANCH_PLM_ALL \
194         (PERF_SAMPLE_BRANCH_USER|\
195          PERF_SAMPLE_BRANCH_KERNEL|\
196          PERF_SAMPLE_BRANCH_HV)
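
/*
 * Illustrative branch-sampling setup: capture any taken user-space
 * branches in PERF_RECORD_SAMPLE records:
 *
 *   attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *   attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                              PERF_SAMPLE_BRANCH_ANY;
 */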
197
198 /*
199  * Values to determine ABI of the registers dump.
200  */
201 enum perf_sample_regs_abi {
202         PERF_SAMPLE_REGS_ABI_NONE       = 0,
203         PERF_SAMPLE_REGS_ABI_32         = 1,
204         PERF_SAMPLE_REGS_ABI_64         = 2,
205 };
206
207 /*
208  * Values for the memory transaction event qualifier, mostly for
209  * abort events. Multiple bits can be set.
210  */
211 enum {
212         PERF_TXN_ELISION        = (1 << 0), /* From elision */
213         PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
214         PERF_TXN_SYNC           = (1 << 2), /* Instruction is related to the abort */
215         PERF_TXN_ASYNC          = (1 << 3), /* Instruction is not related to the abort */
216         PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
217         PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
218         PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
219         PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */
220
221         PERF_TXN_MAX            = (1 << 8), /* non-ABI */
222
223         /* bits 32..63 are reserved for the abort code */
224
225         PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
226         PERF_TXN_ABORT_SHIFT = 32,
227 };
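
/*
 * Given the PERF_SAMPLE_TRANSACTION value of a sample in "txn"
 * (illustrative), the abort code is recovered as:
 *
 *   __u32 abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */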
228
229 /*
230  * The format of the data returned by read() on a perf event fd,
231  * as specified by attr.read_format:
232  *
233  * struct read_format {
234  *      { u64           value;
235  *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
236  *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
237  *        { u64         id;           } && PERF_FORMAT_ID
238  *      } && !PERF_FORMAT_GROUP
239  *
240  *      { u64           nr;
241  *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
242  *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
243  *        { u64         value;
244  *          { u64       id;           } && PERF_FORMAT_ID
245  *        }             cntr[nr];
246  *      } && PERF_FORMAT_GROUP
247  * };
248  */
249 enum perf_event_read_format {
250         PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
251         PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
252         PERF_FORMAT_ID                          = 1U << 2,
253         PERF_FORMAT_GROUP                       = 1U << 3,
254
255         PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
256 };
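
/*
 * Illustrative read-out (not part of the ABI) of a single event opened
 * with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING,
 * scaling the count when the event was multiplexed with others:
 *
 *   struct {
 *           __u64 value;
 *           __u64 time_enabled;
 *           __u64 time_running;
 *   } rf;
 *
 *   read(fd, &rf, sizeof(rf));
 *   if (rf.time_running && rf.time_running < rf.time_enabled)
 *           rf.value = rf.value * rf.time_enabled / rf.time_running;
 */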
257
258 #define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
259 #define PERF_ATTR_SIZE_VER1     72      /* add: config2 */
260 #define PERF_ATTR_SIZE_VER2     80      /* add: branch_sample_type */
261 #define PERF_ATTR_SIZE_VER3     96      /* add: sample_regs_user */
262                                         /* add: sample_stack_user */
263 #define PERF_ATTR_SIZE_VER4     104     /* add: sample_regs_intr */
264
265 /*
266  * Hardware event_id to monitor via a performance monitoring event:
267  */
268 struct perf_event_attr {
269
270         /*
271          * Major type: hardware/software/tracepoint/etc.
272          */
273         __u32                   type;
274
275         /*
276          * Size of the attr structure, for fwd/bwd compat.
277          */
278         __u32                   size;
279
280         /*
281          * Type specific configuration information.
282          */
283         __u64                   config;
284
285         union {
286                 __u64           sample_period;
287                 __u64           sample_freq;
288         };
289
290         __u64                   sample_type;
291         __u64                   read_format;
292
293         __u64                   disabled       :  1, /* off by default        */
294                                 inherit        :  1, /* children inherit it   */
295                                 pinned         :  1, /* must always be on PMU */
296                                 exclusive      :  1, /* only group on PMU     */
297                                 exclude_user   :  1, /* don't count user      */
298                                 exclude_kernel :  1, /* ditto kernel          */
299                                 exclude_hv     :  1, /* ditto hypervisor      */
300                                 exclude_idle   :  1, /* don't count when idle */
301                                 mmap           :  1, /* include mmap data     */
302                                 comm           :  1, /* include comm data     */
303                                 freq           :  1, /* use freq, not period  */
304                                 inherit_stat   :  1, /* per task counts       */
305                                 enable_on_exec :  1, /* next exec enables     */
306                                 task           :  1, /* trace fork/exit       */
307                                 watermark      :  1, /* wakeup_watermark      */
308                                 /*
309                                  * precise_ip:
310                                  *
311                                  *  0 - SAMPLE_IP can have arbitrary skid
312                                  *  1 - SAMPLE_IP must have constant skid
313                                  *  2 - SAMPLE_IP requested to have 0 skid
314                                  *  3 - SAMPLE_IP must have 0 skid
315                                  *
316                                  *  See also PERF_RECORD_MISC_EXACT_IP
317                                  */
318                                 precise_ip     :  2, /* skid constraint       */
319                                 mmap_data      :  1, /* non-exec mmap data    */
320                                 sample_id_all  :  1, /* sample_type all events */
321
322                                 exclude_host   :  1, /* don't count in host   */
323                                 exclude_guest  :  1, /* don't count in guest  */
324
325                                 exclude_callchain_kernel : 1, /* exclude kernel callchains */
326                                 exclude_callchain_user   : 1, /* exclude user callchains */
327                                 mmap2          :  1, /* include mmap with inode data     */
328                                 comm_exec      :  1, /* flag comm events that are due to an exec */
329                                 use_clockid    :  1, /* use @clockid for time fields */
330                                 __reserved_1   : 38;
331
332         union {
333                 __u32           wakeup_events;    /* wakeup every n events */
334                 __u32           wakeup_watermark; /* bytes before wakeup   */
335         };
336
337         __u32                   bp_type;
338         union {
339                 __u64           bp_addr;
340                 __u64           config1; /* extension of config */
341         };
342         union {
343                 __u64           bp_len;
344                 __u64           config2; /* extension of config1 */
345         };
346         __u64   branch_sample_type; /* enum perf_branch_sample_type */
347
348         /*
349          * Defines set of user regs to dump on samples.
350          * See asm/perf_regs.h for details.
351          */
352         __u64   sample_regs_user;
353
354         /*
355          * Defines size of the user stack to dump on samples.
356          */
357         __u32   sample_stack_user;
358
359         __s32   clockid;
360         /*
361          * Defines set of regs to dump for each sample
362          * state captured on:
363          *  - precise = 0: PMU interrupt
364          *  - precise > 0: sampled instruction
365          *
366          * See asm/perf_regs.h for details.
367          */
368         __u64   sample_regs_intr;
369 };
370
371 #define perf_flags(attr)        (*(&(attr)->read_format + 1))
372
373 /*
374  * Ioctls that can be done on a perf event fd:
375  */
376 #define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
377 #define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
378 #define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
379 #define PERF_EVENT_IOC_RESET            _IO ('$', 3)
380 #define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
381 #define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
382 #define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)
383 #define PERF_EVENT_IOC_ID               _IOR('$', 7, __u64 *)
384
385 enum perf_event_ioc_flags {
386         PERF_IOC_FLAG_GROUP             = 1U << 0,
387 };
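
/*
 * Example (illustrative): with PERF_IOC_FLAG_GROUP an ioctl issued on the
 * group leader's file descriptor applies to every member of the group:
 *
 *   ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */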
388
389 /*
390  * Structure of the page that can be mapped via mmap
391  */
392 struct perf_event_mmap_page {
393         __u32   version;                /* version number of this structure */
394         __u32   compat_version;         /* lowest version this is compat with */
395
396         /*
397          * Bits needed to read the hw events in user-space.
398          *
399          *   u32 seq, time_mult, time_shift, index, width;
400          *   u64 count, enabled, running;
401          *   u64 cyc, time_offset;
402          *   s64 pmc = 0;
403          *
404          *   do {
405          *     seq = pc->lock;
406          *     barrier()
407          *
408          *     enabled = pc->time_enabled;
409          *     running = pc->time_running;
410          *
411          *     if (pc->cap_user_time && enabled != running) {
412          *       cyc = rdtsc();
413          *       time_offset = pc->time_offset;
414          *       time_mult   = pc->time_mult;
415          *       time_shift  = pc->time_shift;
416          *     }
417          *
418          *     index = pc->index;
419          *     count = pc->offset;
420          *     if (pc->cap_user_rdpmc && index) {
421          *       width = pc->pmc_width;
422          *       pmc = rdpmc(index - 1);
423          *     }
424          *
425          *     barrier();
426          *   } while (pc->lock != seq);
427          *
428          * NOTE: for obvious reasons this only works on self-monitoring
429          *       processes.
430          */
431         __u32   lock;                   /* seqlock for synchronization */
432         __u32   index;                  /* hardware event identifier */
433         __s64   offset;                 /* add to hardware event value */
434         __u64   time_enabled;           /* time event active */
435         __u64   time_running;           /* time event on cpu */
436         union {
437                 __u64   capabilities;
438                 struct {
439                         __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
440                                 cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
441
442                                 cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
443                                 cap_user_time           : 1, /* The time_* fields are used */
444                                 cap_user_time_zero      : 1, /* The time_zero field is used */
445                                 cap_____res             : 59;
446                 };
447         };
448
449         /*
450          * If cap_user_rdpmc is set, this field provides the bit-width of the value
451          * read using the rdpmc() or equivalent instruction. This can be used
452          * to sign extend the result like:
453          *
454          *   pmc <<= 64 - width;
455          *   pmc >>= 64 - width; // signed shift right
456          *   count += pmc;
457          */
458         __u16   pmc_width;
459
460         /*
461          * If cap_user_time is set, the fields below can be used to compute the time
462          * delta since time_enabled (in ns) using rdtsc or similar.
463          *
464          *   u64 quot, rem;
465          *   u64 delta;
466          *
467          *   quot = (cyc >> time_shift);
468          *   rem = cyc & ((1 << time_shift) - 1);
469          *   delta = time_offset + quot * time_mult +
470          *              ((rem * time_mult) >> time_shift);
471          *
472          * Where time_offset, time_mult, time_shift and cyc are read in the
473          * seqcount loop described above. This delta can then be added to
474          * enabled and possibly running (if index), improving the scaling:
475          *
476          *   enabled += delta;
477          *   if (index)
478          *     running += delta;
479          *
480          *   quot = count / running;
481          *   rem  = count % running;
482          *   count = quot * enabled + (rem * enabled) / running;
483          */
484         __u16   time_shift;
485         __u32   time_mult;
486         __u64   time_offset;
487         /*
488          * If cap_user_time_zero, the hardware clock (e.g. TSC) can be calculated
489          * from sample timestamps.
490          *
491          *   time = timestamp - time_zero;
492          *   quot = time / time_mult;
493          *   rem  = time % time_mult;
494          *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
495          *
496          * And vice versa:
497          *
498          *   quot = cyc >> time_shift;
499          *   rem  = cyc & ((1 << time_shift) - 1);
500          *   timestamp = time_zero + quot * time_mult +
501          *               ((rem * time_mult) >> time_shift);
502          */
503         __u64   time_zero;
504         __u32   size;                   /* Header size up to __reserved[] fields. */
505
506                 /*
507                  * Hole for extension of the self monitor capabilities
508                  */
509
510         __u8    __reserved[118*8+4];    /* align to 1k. */
511
512         /*
513          * Control data for the mmap() data buffer.
514          *
515          * User-space reading the @data_head value should issue an smp_rmb()
516          * after reading this value.
517          *
518          * When the mapping is PROT_WRITE the @data_tail value should be
519          * written by userspace to reflect the last read data, after issuing
520          * an smp_mb() to separate the data read from the ->data_tail store.
521          * In this case the kernel will not overwrite unread data.
522          *
523          * See perf_output_put_handle() for the data ordering.
524          */
525         __u64   data_head;              /* head in the data section */
526         __u64   data_tail;              /* user-space written tail */
527 };
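
/*
 * Illustrative mapping (not part of the ABI): the control page above is
 * mapped together with a 2^n page data buffer in one mmap() call, "n"
 * being chosen by the caller:
 *
 *   size_t page = sysconf(_SC_PAGESIZE);
 *   size_t len  = (1 + (1 << n)) * page;
 *   struct perf_event_mmap_page *pc =
 *           mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * After consuming records up to a previously read data_head (with the
 * barrier described above), user-space publishes the new position:
 *
 *   pc->data_tail = consumed;
 */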
528
529 #define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
530 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
531 #define PERF_RECORD_MISC_KERNEL                 (1 << 0)
532 #define PERF_RECORD_MISC_USER                   (2 << 0)
533 #define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
534 #define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
535 #define PERF_RECORD_MISC_GUEST_USER             (5 << 0)
536
537 /*
538  * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
539  * different events so can reuse the same bit position.
540  */
541 #define PERF_RECORD_MISC_MMAP_DATA              (1 << 13)
542 #define PERF_RECORD_MISC_COMM_EXEC              (1 << 13)
543 /*
544  * Indicates that the content of PERF_SAMPLE_IP points to
545  * the actual instruction that triggered the event. See also
546  * perf_event_attr::precise_ip.
547  */
548 #define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
549 /*
550  * Reserve the last bit to indicate some extended misc field
551  */
552 #define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)
553
554 struct perf_event_header {
555         __u32   type;
556         __u16   misc;
557         __u16   size;
558 };
559
560 enum perf_event_type {
561
562         /*
563          * If perf_event_attr.sample_id_all is set then all event types will
564          * carry the sample_type selected fields related to where/when
565          * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
566          * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are stashed
567          * just after the perf_event_header and the fields already present for
568          * the record type, i.e. at the end of the payload. That way a newer
569          * perf.data file will be supported by older perf tools, with these new
570          * optional fields being ignored.
571          *
572          * struct sample_id {
573          *      { u32                   pid, tid; } && PERF_SAMPLE_TID
574          *      { u64                   time;     } && PERF_SAMPLE_TIME
575          *      { u64                   id;       } && PERF_SAMPLE_ID
576          *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
577          *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
578          *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
579          * } && perf_event_attr::sample_id_all
580          *
581          * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
582          * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
583          * relative to header.size.
584          */
585
586         /*
587          * The MMAP events record the PROT_EXEC mappings so that we can
588          * correlate userspace IPs to code. They have the following structure:
589          *
590          * struct {
591          *      struct perf_event_header        header;
592          *
593          *      u32                             pid, tid;
594          *      u64                             addr;
595          *      u64                             len;
596          *      u64                             pgoff;
597          *      char                            filename[];
598          *      struct sample_id                sample_id;
599          * };
600          */
601         PERF_RECORD_MMAP                        = 1,
602
603         /*
604          * struct {
605          *      struct perf_event_header        header;
606          *      u64                             id;
607          *      u64                             lost;
608          *      struct sample_id                sample_id;
609          * };
610          */
611         PERF_RECORD_LOST                        = 2,
612
613         /*
614          * struct {
615          *      struct perf_event_header        header;
616          *
617          *      u32                             pid, tid;
618          *      char                            comm[];
619          *      struct sample_id                sample_id;
620          * };
621          */
622         PERF_RECORD_COMM                        = 3,
623
624         /*
625          * struct {
626          *      struct perf_event_header        header;
627          *      u32                             pid, ppid;
628          *      u32                             tid, ptid;
629          *      u64                             time;
630          *      struct sample_id                sample_id;
631          * };
632          */
633         PERF_RECORD_EXIT                        = 4,
634
635         /*
636          * struct {
637          *      struct perf_event_header        header;
638          *      u64                             time;
639          *      u64                             id;
640          *      u64                             stream_id;
641          *      struct sample_id                sample_id;
642          * };
643          */
644         PERF_RECORD_THROTTLE                    = 5,
645         PERF_RECORD_UNTHROTTLE                  = 6,
646
647         /*
648          * struct {
649          *      struct perf_event_header        header;
650          *      u32                             pid, ppid;
651          *      u32                             tid, ptid;
652          *      u64                             time;
653          *      struct sample_id                sample_id;
654          * };
655          */
656         PERF_RECORD_FORK                        = 7,
657
658         /*
659          * struct {
660          *      struct perf_event_header        header;
661          *      u32                             pid, tid;
662          *
663          *      struct read_format              values;
664          *      struct sample_id                sample_id;
665          * };
666          */
667         PERF_RECORD_READ                        = 8,
668
669         /*
670          * struct {
671          *      struct perf_event_header        header;
672          *
673          *      #
674          *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
675          *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
676          *      # is fixed relative to header.
677          *      #
678          *
679          *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
680          *      { u64                   ip;       } && PERF_SAMPLE_IP
681          *      { u32                   pid, tid; } && PERF_SAMPLE_TID
682          *      { u64                   time;     } && PERF_SAMPLE_TIME
683          *      { u64                   addr;     } && PERF_SAMPLE_ADDR
684          *      { u64                   id;       } && PERF_SAMPLE_ID
685          *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
686          *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
687          *      { u64                   period;   } && PERF_SAMPLE_PERIOD
688          *
689          *      { struct read_format    values;   } && PERF_SAMPLE_READ
690          *
691          *      { u64                   nr,
692          *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
693          *
694          *      #
695          *      # The RAW record below is opaque data wrt the ABI
696          *      #
697          *      # That is, the ABI doesn't make any promises with regard to
698          *      # the stability of its content, which may vary depending
699          *      # on event, hardware, kernel version and phase of
700          *      # the moon.
701          *      #
702          *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
703          *      #
704          *
705          *      { u32                   size;
706          *        char                  data[size];}&& PERF_SAMPLE_RAW
707          *
708          *      { u64                   nr;
709          *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
710          *
711          *      { u64                   abi; # enum perf_sample_regs_abi
712          *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
713          *
714          *      { u64                   size;
715          *        char                  data[size];
716          *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
717          *
718          *      { u64                   weight;   } && PERF_SAMPLE_WEIGHT
719          *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
720          *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
721          *      { u64                   abi; # enum perf_sample_regs_abi
722          *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
723          * };
724          */
725         PERF_RECORD_SAMPLE                      = 9,
726
727         /*
728          * The MMAP2 records are an augmented version of MMAP; they add
729          * maj, min and ino numbers used to uniquely identify each mapping.
730          *
731          * struct {
732          *      struct perf_event_header        header;
733          *
734          *      u32                             pid, tid;
735          *      u64                             addr;
736          *      u64                             len;
737          *      u64                             pgoff;
738          *      u32                             maj;
739          *      u32                             min;
740          *      u64                             ino;
741          *      u64                             ino_generation;
742          *      u32                             prot, flags;
743          *      char                            filename[];
744          *      struct sample_id                sample_id;
745          * };
746          */
747         PERF_RECORD_MMAP2                       = 10,
748
749         PERF_RECORD_MAX,                        /* non-ABI */
750 };
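
/*
 * Illustrative record walk (not part of the ABI). Records are packed back
 * to back in the data area and each one starts with a perf_event_header,
 * so header.size advances from one record to the next. Here "pc" is the
 * mapped perf_event_mmap_page, "base" the start of the data area (one page
 * after "pc" when mapped together), "buf_size" its power-of-two size and
 * "head" a value read from pc->data_head with the barrier described above:
 *
 *   __u64 tail = pc->data_tail;
 *
 *   while (tail != head) {
 *           struct perf_event_header *eh = (struct perf_event_header *)
 *                   (base + (tail & (buf_size - 1)));
 *
 *           switch (eh->type) {
 *           case PERF_RECORD_SAMPLE:
 *                   ... decode according to attr.sample_type ...
 *                   break;
 *           }
 *           tail += eh->size;
 *   }
 *   pc->data_tail = tail;
 *
 * A record may wrap around the end of the buffer; the sketch above does
 * not handle that case.
 */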
751
752 #define PERF_MAX_STACK_DEPTH            127
753
754 enum perf_callchain_context {
755         PERF_CONTEXT_HV                 = (__u64)-32,
756         PERF_CONTEXT_KERNEL             = (__u64)-128,
757         PERF_CONTEXT_USER               = (__u64)-512,
758
759         PERF_CONTEXT_GUEST              = (__u64)-2048,
760         PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
761         PERF_CONTEXT_GUEST_USER         = (__u64)-2560,
762
763         PERF_CONTEXT_MAX                = (__u64)-4095,
764 };
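
/*
 * The ips[] array of PERF_SAMPLE_CALLCHAIN interleaves real instruction
 * pointers with the context markers above; one way to tell them apart
 * (illustrative) is to compare against PERF_CONTEXT_MAX:
 *
 *   for (i = 0; i < nr; i++) {
 *           if (ips[i] >= PERF_CONTEXT_MAX)
 *                   ... ips[i] is a marker, switch resolution context ...
 *           else
 *                   ... ips[i] is an address in the current context ...
 *   }
 */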
765
766 #define PERF_FLAG_FD_NO_GROUP           (1UL << 0)
767 #define PERF_FLAG_FD_OUTPUT             (1UL << 1)
768 #define PERF_FLAG_PID_CGROUP            (1UL << 2) /* pid=cgroup id, per-cpu mode only */
769 #define PERF_FLAG_FD_CLOEXEC            (1UL << 3) /* O_CLOEXEC */
770
771 union perf_mem_data_src {
772         __u64 val;
773         struct {
774                 __u64   mem_op:5,       /* type of opcode */
775                         mem_lvl:14,     /* memory hierarchy level */
776                         mem_snoop:5,    /* snoop mode */
777                         mem_lock:2,     /* lock instr */
778                         mem_dtlb:7,     /* tlb access */
779                         mem_rsvd:31;
780         };
781 };
782
783 /* type of opcode (load/store/prefetch, code) */
784 #define PERF_MEM_OP_NA          0x01 /* not available */
785 #define PERF_MEM_OP_LOAD        0x02 /* load instruction */
786 #define PERF_MEM_OP_STORE       0x04 /* store instruction */
787 #define PERF_MEM_OP_PFETCH      0x08 /* prefetch */
788 #define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
789 #define PERF_MEM_OP_SHIFT       0
790
791 /* memory hierarchy (memory level, hit or miss) */
792 #define PERF_MEM_LVL_NA         0x01  /* not available */
793 #define PERF_MEM_LVL_HIT        0x02  /* hit level */
794 #define PERF_MEM_LVL_MISS       0x04  /* miss level  */
795 #define PERF_MEM_LVL_L1         0x08  /* L1 */
796 #define PERF_MEM_LVL_LFB        0x10  /* Line Fill Buffer */
797 #define PERF_MEM_LVL_L2         0x20  /* L2 */
798 #define PERF_MEM_LVL_L3         0x40  /* L3 */
799 #define PERF_MEM_LVL_LOC_RAM    0x80  /* Local DRAM */
800 #define PERF_MEM_LVL_REM_RAM1   0x100 /* Remote DRAM (1 hop) */
801 #define PERF_MEM_LVL_REM_RAM2   0x200 /* Remote DRAM (2 hops) */
802 #define PERF_MEM_LVL_REM_CCE1   0x400 /* Remote Cache (1 hop) */
803 #define PERF_MEM_LVL_REM_CCE2   0x800 /* Remote Cache (2 hops) */
804 #define PERF_MEM_LVL_IO         0x1000 /* I/O memory */
805 #define PERF_MEM_LVL_UNC        0x2000 /* Uncached memory */
806 #define PERF_MEM_LVL_SHIFT      5
807
808 /* snoop mode */
809 #define PERF_MEM_SNOOP_NA       0x01 /* not available */
810 #define PERF_MEM_SNOOP_NONE     0x02 /* no snoop */
811 #define PERF_MEM_SNOOP_HIT      0x04 /* snoop hit */
812 #define PERF_MEM_SNOOP_MISS     0x08 /* snoop miss */
813 #define PERF_MEM_SNOOP_HITM     0x10 /* snoop hit modified */
814 #define PERF_MEM_SNOOP_SHIFT    19
815
816 /* locked instruction */
817 #define PERF_MEM_LOCK_NA        0x01 /* not available */
818 #define PERF_MEM_LOCK_LOCKED    0x02 /* locked transaction */
819 #define PERF_MEM_LOCK_SHIFT     24
820
821 /* TLB access */
822 #define PERF_MEM_TLB_NA         0x01 /* not available */
823 #define PERF_MEM_TLB_HIT        0x02 /* hit level */
824 #define PERF_MEM_TLB_MISS       0x04 /* miss level */
825 #define PERF_MEM_TLB_L1         0x08 /* L1 */
826 #define PERF_MEM_TLB_L2         0x10 /* L2 */
827 #define PERF_MEM_TLB_WK         0x20 /* Hardware Walker */
828 #define PERF_MEM_TLB_OS         0x40 /* OS fault handler */
829 #define PERF_MEM_TLB_SHIFT      26
830
831 #define PERF_MEM_S(a, s) \
832         (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
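
/*
 * Example (illustrative): an L1 load hit that needed no snoop, composed
 * with the helper above and stored in union perf_mem_data_src:
 *
 *   data_src.val = PERF_MEM_S(OP, LOAD) |
 *                  PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *                  PERF_MEM_S(SNOOP, NONE) |
 *                  PERF_MEM_S(LOCK, NA) |
 *                  PERF_MEM_S(TLB, HIT) | PERF_MEM_S(TLB, L1);
 */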
833
834 /*
835  * single taken branch record layout:
836  *
837  *      from: source instruction (may not always be a branch insn)
838  *        to: branch target
839  *   mispred: branch target was mispredicted
840  * predicted: branch target was predicted
841  *
842  * Support for mispred, predicted is optional. If it is not
843  * supported, mispred = predicted = 0.
844  *
845  *     in_tx: running in a hardware transaction
846  *     abort: aborting a hardware transaction
847  */
848 struct perf_branch_entry {
849         __u64   from;
850         __u64   to;
851         __u64   mispred:1,  /* target mispredicted */
852                 predicted:1,/* target predicted */
853                 in_tx:1,    /* in transaction */
854                 abort:1,    /* transaction abort */
855                 reserved:60;
856 };
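
/*
 * With PERF_SAMPLE_BRANCH_STACK a sample carries "u64 nr" followed by nr
 * entries of this layout; a consumer might walk them like this
 * (illustrative, "lbr" pointing at the first entry):
 *
 *   for (i = 0; i < nr; i++)
 *           printf("%#llx -> %#llx mispred=%llu\n",
 *                  (unsigned long long)lbr[i].from,
 *                  (unsigned long long)lbr[i].to,
 *                  (unsigned long long)lbr[i].mispred);
 */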
857
858 #endif /* _UAPI_LINUX_PERF_EVENT_H */