/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

#define PERF_GUEST_ACTIVE	0x01
#define PERF_GUEST_USER		0x02

struct perf_guest_info_callbacks {
	unsigned int			(*state)(void);
	unsigned long			(*get_ip)(void);
	unsigned int			(*handle_intel_pt_intr)(void);
};

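/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypervisor such as KVM fills in these callbacks and hands them to
 * perf_register_guest_info_callbacks(), declared further below under
 * CONFIG_GUEST_PERF_EVENTS. All "hyp_*" names are hypothetical:
 *
 *	static unsigned int hyp_guest_state(void)
 *	{
 *		unsigned int state = 0;
 *
 *		if (hyp_vcpu_running())		// hypothetical helper
 *			state |= PERF_GUEST_ACTIVE;
 *		if (hyp_vcpu_in_user_mode())	// hypothetical helper
 *			state |= PERF_GUEST_USER;
 *		return state;
 *	}
 *
 *	static struct perf_guest_info_callbacks hyp_guest_cbs = {
 *		.state	= hyp_guest_state,
 *		.get_ip	= hyp_guest_get_ip,	// hypothetical, returns guest PC
 *	};
 *
 *	perf_register_guest_info_callbacks(&hyp_guest_cbs);
 */
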
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <linux/static_call.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

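/*
 * Illustrative sketch: a single-fragment raw record over a local buffer,
 * in the style of the BPF output path. With ->copy left NULL the
 * fragment's ->data is copied verbatim; chained fragments link via
 * ->next and supply a ->copy callback instead. "buf" and "size" are
 * hypothetical:
 *
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.size	= size,
 *			.data	= buf,
 *		},
 *	};
 */
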
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *  hw_idx: The low level index of raw branch records
 *	    for the most recent branch.
 *	    -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 * The entries[] is an abstraction of raw branch records,
 * which may not be stored in age order in HW, e.g. Intel LBR.
 * The hw_idx is to expose the low level index of raw
 * branch record for the most recent branch, aka entries[0].
 * The hw_idx index is between -1 (unknown) and max depth,
 * which can be retrieved from /sys/devices/cpu/caps/branches.
 * For architectures whose raw branch records are
 * already stored in age order, the hw_idx should be 0.
 */
struct perf_branch_stack {
	__u64				nr;
	__u64				hw_idx;
	struct perf_branch_entry	entries[];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * hw_perf_event::flag values
 *
 * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
 * usage.
 */
#define PERF_EVENT_FLAG_ARCH			0x0000ffff
#define PERF_EVENT_FLAG_USER_READ_CNT		0x80000000

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64				last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t			period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64				saved_metric;
			u64				saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x0001
#define PERF_PMU_CAP_NO_NMI			0x0002
#define PERF_PMU_CAP_AUX_NO_SG			0x0004
#define PERF_PMU_CAP_EXTENDED_REGS		0x0008
#define PERF_PMU_CAP_EXCLUSIVE			0x0010
#define PERF_PMU_CAP_ITRACE			0x0020
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x0040
#define PERF_PMU_CAP_NO_EXCLUDE			0x0080
#define PERF_PMU_CAP_AUX_OUTPUT			0x0100
#define PERF_PMU_CAP_EXTENDED_HW_TYPE		0x0200

struct perf_output_handle;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped. Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);

	/*
	 * Kmem cache of PMU specific data
	 */
	struct kmem_cache		*task_ctx_cache;

	/*
	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
	 * can be synchronized using this function. See Intel LBR callstack support
	 * implementation and Perf core context switch handling callbacks for usage
	 * examples.
	 */
	void (*swap_task_ctx)		(struct perf_event_context *prev,
					 struct perf_event_context *next);
					/* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Take a snapshot of the AUX buffer without touching the event
	 * state, so that preempting ->start()/->stop() callbacks does
	 * not interfere with their logic. Called in PMI context.
	 *
	 * Returns the size of AUX data copied to the output handle.
	 *
	 * Optional.
	 */
	long (*snapshot_aux)		(struct perf_event *event,
					 struct perf_output_handle *handle,
					 unsigned long size);

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};

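/*
 * Illustrative sketch of a minimal counting PMU built on the callbacks
 * above. Everything here is hypothetical and heavily simplified (the
 * "toy_hw_*" helpers stand in for real hardware access; there is no
 * constraint or transaction handling):
 *
 *	static void toy_pmu_read(struct perf_event *event)
 *	{
 *		struct hw_perf_event *hwc = &event->hw;
 *		u64 prev, now;
 *
 *		do {
 *			prev = local64_read(&hwc->prev_count);
 *			now  = toy_hw_read_counter(hwc->idx);
 *		} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 *		local64_add(now - prev, &event->count);
 *	}
 *
 *	static void toy_pmu_start(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = 0;
 *		toy_hw_enable_counter(event->hw.idx);
 *	}
 *
 *	static void toy_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		toy_hw_disable_counter(event->hw.idx);
 *		event->hw.state = PERF_HES_STOPPED;
 *		if (flags & PERF_EF_UPDATE) {
 *			toy_pmu_read(event);
 *			event->hw.state |= PERF_HES_UPTODATE;
 *		}
 *	}
 *
 *	static int toy_pmu_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *		if (flags & PERF_EF_START)
 *			toy_pmu_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void toy_pmu_del(struct perf_event *event, int flags)
 *	{
 *		toy_pmu_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static int toy_pmu_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		if (is_sampling_event(event))
 *			return -EINVAL;	// counting only, no interrupt
 *		return 0;
 *	}
 *
 *	static struct pmu toy_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= toy_pmu_event_init,
 *		.add		= toy_pmu_add,
 *		.del		= toy_pmu_del,
 *		.start		= toy_pmu_start,
 *		.stop		= toy_pmu_stop,
 *		.read		= toy_pmu_read,
 *	};
 *
 *	// registered with a dynamically allocated type id:
 *	perf_pmu_register(&toy_pmu, "toy", -1);
 */
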
enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10
#define PERF_ATTACH_SCHED_CB	0x20
#define PERF_ATTACH_CHILD	0x40

struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)

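/*
 * Illustrative sketch: summing a whole event group with the iterator
 * above (per the sibling_list rules below, the caller must hold either
 * ctx->mutex or ctx->lock). Heavily simplified; real code would ->read()
 * the hardware first. "toy_sum_group" is hypothetical:
 *
 *	static u64 toy_sum_group(struct perf_event *leader)
 *	{
 *		struct perf_event *sibling;
 *		u64 sum = local64_read(&leader->count);
 *
 *		for_each_sibling_event(sibling, leader)
 *			sum += local64_read(&sibling->count);
 *		return sum;
 *	}
 */
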
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in,
	 * or when ctx_sched_in failed to schedule the event because we
	 * run out of PMC.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct perf_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	unsigned long			pending_addr;	/* SIGTRAP */
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
	u64				bpf_cookie;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
#endif

#ifdef CONFIG_SECURITY
	void				*security;
#endif
	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				nr_user;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, except tolerant to events not
	 * necessary to be active due to scheduling constraints, such as cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	/* cgroup evts */
#endif
	void				*task_ctx_data;	/* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
	/*
	 * Per-CPU storage for iterators used in visit_groups_merge. The default
	 * storage is of size 2 to hold the CPU and any CPU event iterators.
	 */
	int				heap_size;
	struct perf_event		**heap;
	struct perf_event		*heap_default[2];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t	*regs;
	struct perf_sample_data *data;
	struct perf_event	*event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

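/*
 * Illustrative sketch: an in-kernel user of the counter API above (the
 * hardlockup watchdog follows this shape). Only the perf calls are real;
 * attr values and the "toy_*" names are hypothetical:
 *
 *	static void toy_overflow(struct perf_event *event,
 *				 struct perf_sample_data *data,
 *				 struct pt_regs *regs)
 *	{
 *		pr_debug("toy counter overflowed\n");
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 toy_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */
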
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	union perf_sample_weight	weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;
	u64				aux_size;

	struct perf_regs		regs_user;
	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
	u64				cgroup;
	u64				data_page_size;
	u64				code_page_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight.full = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

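/*
 * Illustrative sketch: the common PMI path of a sampling PMU driver pairs
 * perf_sample_data_init() with perf_event_overflow(), declared below
 * (cf. the x86 handlers). "toy_pmu_stop" is hypothetical:
 *
 *	struct perf_sample_data data;
 *	struct hw_perf_event *hwc = &event->hw;
 *
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		toy_pmu_stop(event, 0);
 */
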
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv ||
	       attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

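/*
 * Illustrative example: this is how the page fault path counts software
 * events (cf. mm/memory.c):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */
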
DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

	perf_fetch_caller_regs(regs);
	___perf_sw_event(event_id, nr, regs, addr);
}

extern struct static_key_false perf_sched_events;

static __always_inline bool __perf_sw_enabled(int swevt)
{
	return static_key_false(&perf_swevent_enabled[swevt]);
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
	    task->sched_migrated) {
		__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
		__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

#ifdef CONFIG_CGROUP_PERF
	if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
	    perf_cgroup_from_task(prev, NULL) !=
	    perf_cgroup_from_task(next, NULL))
		__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
#endif

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

#ifdef CONFIG_GUEST_PERF_EVENTS
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;

DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);

static inline unsigned int perf_guest_state(void)
{
	return static_call(__perf_guest_state)();
}
static inline unsigned long perf_guest_get_ip(void)
{
	return static_call(__perf_guest_get_ip)();
}
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
	return static_call(__perf_guest_handle_intel_pt_intr)();
}
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
#else
static inline unsigned int perf_guest_state(void)		  { return 0; }
static inline unsigned long perf_guest_get_ip(void)		  { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
#endif /* CONFIG_GUEST_PERF_EVENTS */

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
				 const void *old_bytes, size_t old_len,
				 const void *new_bytes, size_t new_len);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}

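/*
 * Illustrative sketch: an architecture's perf_callchain_kernel() feeds
 * perf_callchain_store() until it reports no more room or the unwinder
 * runs out of frames. "toy_next_frame" is a hypothetical unwinder step:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long pc = instruction_pointer(regs);
 *
 *		if (perf_callchain_store(entry, pc))
 *			return;
 *		while (toy_next_frame(&pc))
 *			if (perf_callchain_store(entry, pc))
 *				break;
 *	}
 */
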
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos);

da97e184 JFG |
1347 | /* Access to perf_event_open(2) syscall. */ |
1348 | #define PERF_SECURITY_OPEN 0 | |
1349 | ||
1350 | /* Finer grained perf_event_open(2) access control. */ | |
1351 | #define PERF_SECURITY_CPU 1 | |
1352 | #define PERF_SECURITY_KERNEL 2 | |
1353 | #define PERF_SECURITY_TRACEPOINT 3 | |
1354 | ||
1355 | static inline int perf_is_paranoid(void) | |
320ebf09 PZ |
1356 | { |
1357 | return sysctl_perf_event_paranoid > -1; | |
1358 | } | |
1359 | ||
da97e184 | 1360 | static inline int perf_allow_kernel(struct perf_event_attr *attr) |
320ebf09 | 1361 | { |
18aa1856 | 1362 | if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) |
da97e184 JFG |
1363 | return -EACCES; |
1364 | ||
1365 | return security_perf_event_open(attr, PERF_SECURITY_KERNEL); | |
320ebf09 PZ |
1366 | } |
1367 | ||
da97e184 | 1368 | static inline int perf_allow_cpu(struct perf_event_attr *attr) |
320ebf09 | 1369 | { |
18aa1856 | 1370 | if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) |
da97e184 JFG |
1371 | return -EACCES; |
1372 | ||
1373 | return security_perf_event_open(attr, PERF_SECURITY_CPU); | |
1374 | } | |
1375 | ||
1376 | static inline int perf_allow_tracepoint(struct perf_event_attr *attr) | |
1377 | { | |
18aa1856 | 1378 | if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) |
da97e184 JFG |
1379 | return -EPERM; |
1380 | ||
1381 | return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); | |
320ebf09 PZ |
1382 | } |
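/*
 * A usage sketch (hypothetical event-init path): privileged setups are
 * gated on these helpers, which also give LSMs a veto via
 * security_perf_event_open().
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		int ret;
 *
 *		if (event->attr.exclude_user) {		// kernel-side data wanted
 *			ret = perf_allow_kernel(&event->attr);
 *			if (ret)			// -EACCES or LSM error
 *				return ret;
 *		}
 *		...
 *	}
 */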
1383 | ||
cdd6c482 | 1384 | extern void perf_event_init(void); |
1e1dcd93 | 1385 | extern void perf_tp_event(u16 event_type, u64 count, void *record, |
1c024eca | 1386 | int entry_size, struct pt_regs *regs, |
e6dab5ff | 1387 | struct hlist_head *head, int rctx, |
8fd0fbbe | 1388 | struct task_struct *task); |
24f1e32c | 1389 | extern void perf_bp_event(struct perf_event *event, void *data); |
0d905bca | 1390 | |
9d23a90a | 1391 | #ifndef perf_misc_flags |
e7e7ee2e IM |
1392 | # define perf_misc_flags(regs) \ |
1393 | (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) | |
1394 | # define perf_instruction_pointer(regs) instruction_pointer(regs) | |
9d23a90a | 1395 | #endif |
c895f6f7 HB |
1396 | #ifndef perf_arch_bpf_user_pt_regs |
1397 | # define perf_arch_bpf_user_pt_regs(regs) regs | |
1398 | #endif | |
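/*
 * The sampling core uses these hooks roughly as follows when preparing a
 * record (a sketch of the generic code path, not a verbatim quote):
 *
 *	header->misc |= perf_misc_flags(regs);
 *	data->ip      = perf_instruction_pointer(regs);
 *
 * An architecture that can distinguish more states (e.g. guest mode)
 * overrides perf_misc_flags in its asm/perf_event.h.
 */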
9d23a90a | 1399 | |
bce38cd5 SE |
1400 | static inline bool has_branch_stack(struct perf_event *event) |
1401 | { | |
1402 | return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; | |
a46a2300 YZ |
1403 | } |
1404 | ||
1405 | static inline bool needs_branch_stack(struct perf_event *event) | |
1406 | { | |
1407 | return event->attr.branch_sample_type != 0; | |
bce38cd5 SE |
1408 | } |
1409 | ||
45bfb2e5 PZ |
1410 | static inline bool has_aux(struct perf_event *event) |
1411 | { | |
1412 | return event->pmu->setup_aux; | |
1413 | } | |
1414 | ||
9ecda41a WN |
1415 | static inline bool is_write_backward(struct perf_event *event) |
1416 | { | |
1417 | return !!event->attr.write_backward; | |
1418 | } | |
1419 | ||
375637bc AS |
1420 | static inline bool has_addr_filter(struct perf_event *event) |
1421 | { | |
1422 | return event->pmu->nr_addr_filters; | |
1423 | } | |
1424 | ||
1425 | /* | |
1426 | * An inherited event uses its parent's filters. |
1427 | */ | |
1428 | static inline struct perf_addr_filters_head * | |
1429 | perf_event_addr_filters(struct perf_event *event) | |
1430 | { | |
1431 | struct perf_addr_filters_head *ifh = &event->addr_filters; | |
1432 | ||
1433 | if (event->parent) | |
1434 | ifh = &event->parent->addr_filters; | |
1435 | ||
1436 | return ifh; | |
1437 | } | |
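/*
 * A usage sketch (hypothetical PMU driver): drivers walk the active
 * filter list under the head lock when (re)programming hardware address
 * ranges.
 *
 *	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 *	struct perf_addr_filter *filter;
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&ifh->lock, flags);
 *	list_for_each_entry(filter, &ifh->list, entry) {
 *		// program one address range into the hardware
 *	}
 *	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 */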
1438 | ||
1439 | extern void perf_event_addr_filters_sync(struct perf_event *event); | |
8b8ff8cc | 1440 | extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id); |
375637bc | 1441 | |
5622f295 | 1442 | extern int perf_output_begin(struct perf_output_handle *handle, |
267fb273 | 1443 | struct perf_sample_data *data, |
a7ac67ea | 1444 | struct perf_event *event, unsigned int size); |
9ecda41a | 1445 | extern int perf_output_begin_forward(struct perf_output_handle *handle, |
267fb273 PZ |
1446 | struct perf_sample_data *data, |
1447 | struct perf_event *event, | |
1448 | unsigned int size); | |
9ecda41a | 1449 | extern int perf_output_begin_backward(struct perf_output_handle *handle, |
267fb273 | 1450 | struct perf_sample_data *data, |
9ecda41a WN |
1451 | struct perf_event *event, |
1452 | unsigned int size); | |
1453 | ||
5622f295 | 1454 | extern void perf_output_end(struct perf_output_handle *handle); |
91d7753a | 1455 | extern unsigned int perf_output_copy(struct perf_output_handle *handle, |
5622f295 | 1456 | const void *buf, unsigned int len); |
5685e0ff JO |
1457 | extern unsigned int perf_output_skip(struct perf_output_handle *handle, |
1458 | unsigned int len); | |
a4faf00d AS |
1459 | extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, |
1460 | struct perf_output_handle *handle, | |
1461 | unsigned long from, unsigned long to); | |
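/*
 * A minimal output sketch (error handling elided; the record layout is
 * hypothetical): reserve space, copy the payload, then close the handle.
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *	struct {
 *		struct perf_event_header header;
 *		u64 payload;
 *	} rec = { .header = { .size = sizeof(rec), }, };
 *
 *	perf_event_header__init_id(&rec.header, &sample, event);
 *	if (perf_output_begin(&handle, &sample, event, rec.header.size))
 *		return;
 *	perf_output_put(&handle, rec);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */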
4ed7c92d PZ |
1462 | extern int perf_swevent_get_recursion_context(void); |
1463 | extern void perf_swevent_put_recursion_context(int rctx); | |
ab573844 | 1464 | extern u64 perf_swevent_set_period(struct perf_event *event); |
44234adc FW |
1465 | extern void perf_event_enable(struct perf_event *event); |
1466 | extern void perf_event_disable(struct perf_event *event); | |
fae3fde6 | 1467 | extern void perf_event_disable_local(struct perf_event *event); |
5aab90ce | 1468 | extern void perf_event_disable_inatomic(struct perf_event *event); |
e9d2b064 | 1469 | extern void perf_event_task_tick(void); |
475113d9 | 1470 | extern int perf_event_account_interrupt(struct perf_event *event); |
3ca270fc | 1471 | extern int perf_event_period(struct perf_event *event, u64 value); |
52ba4b0b | 1472 | extern u64 perf_event_pause(struct perf_event *event, bool reset); |
e041e328 | 1473 | #else /* !CONFIG_PERF_EVENTS: */ |
fdc26706 AS |
1474 | static inline void * |
1475 | perf_aux_output_begin(struct perf_output_handle *handle, | |
1476 | struct perf_event *event) { return NULL; } | |
1477 | static inline void | |
f4c0b0aa WD |
1478 | perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) |
1479 | { } | |
fdc26706 AS |
1480 | static inline int |
1481 | perf_aux_output_skip(struct perf_output_handle *handle, | |
1482 | unsigned long size) { return -EINVAL; } | |
1483 | static inline void * | |
1484 | perf_get_aux(struct perf_output_handle *handle) { return NULL; } | |
0793a61d | 1485 | static inline void |
ff303e66 PZ |
1486 | perf_event_task_migrate(struct task_struct *task) { } |
1487 | static inline void | |
ab0cce56 JO |
1488 | perf_event_task_sched_in(struct task_struct *prev, |
1489 | struct task_struct *task) { } | |
1490 | static inline void | |
1491 | perf_event_task_sched_out(struct task_struct *prev, | |
1492 | struct task_struct *next) { } | |
2b26f0aa ME |
1493 | static inline int perf_event_init_task(struct task_struct *child, |
1494 | u64 clone_flags) { return 0; } | |
cdd6c482 IM |
1495 | static inline void perf_event_exit_task(struct task_struct *child) { } |
1496 | static inline void perf_event_free_task(struct task_struct *task) { } | |
4e231c79 | 1497 | static inline void perf_event_delayed_put(struct task_struct *task) { } |
e03e7ee3 | 1498 | static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } |
f8d959a5 YS |
1499 | static inline const struct perf_event *perf_get_event(struct file *file) |
1500 | { | |
1501 | return ERR_PTR(-EINVAL); | |
1502 | } | |
ffe8690c KX |
1503 | static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) |
1504 | { | |
1505 | return ERR_PTR(-EINVAL); | |
1506 | } | |
7d9285e8 YS |
1507 | static inline int perf_event_read_local(struct perf_event *event, u64 *value, |
1508 | u64 *enabled, u64 *running) | |
f91840a3 AS |
1509 | { |
1510 | return -EINVAL; | |
1511 | } | |
57c0c15b | 1512 | static inline void perf_event_print_debug(void) { } |
57c0c15b IM |
1513 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
1514 | static inline int perf_event_task_enable(void) { return -EINVAL; } | |
26ca5c11 AK |
1515 | static inline int perf_event_refresh(struct perf_event *event, int refresh) |
1516 | { | |
1517 | return -EINVAL; | |
1518 | } | |
15dbf27c | 1519 | |
925d519a | 1520 | static inline void |
a8b0ca17 | 1521 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } |
24f1e32c | 1522 | static inline void |
184f412c | 1523 | perf_bp_event(struct perf_event *event, void *data) { } |
0a4a9391 | 1524 | |
57c0c15b | 1525 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
76193a94 SL |
1526 | |
1527 | typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); | |
1528 | static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, | |
1529 | bool unregister, const char *sym) { } | |
6ee52e2a SL |
1530 | static inline void perf_event_bpf_event(struct bpf_prog *prog, |
1531 | enum perf_bpf_event_type type, | |
1532 | u16 flags) { } | |
e041e328 | 1533 | static inline void perf_event_exec(void) { } |
82b89778 | 1534 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } |
e4222673 | 1535 | static inline void perf_event_namespaces(struct task_struct *tsk) { } |
cdd6c482 | 1536 | static inline void perf_event_fork(struct task_struct *tsk) { } |
e17d43b9 AH |
1537 | static inline void perf_event_text_poke(const void *addr, |
1538 | const void *old_bytes, | |
1539 | size_t old_len, | |
1540 | const void *new_bytes, | |
1541 | size_t new_len) { } | |
cdd6c482 | 1542 | static inline void perf_event_init(void) { } |
184f412c | 1543 | static inline int perf_swevent_get_recursion_context(void) { return -1; } |
4ed7c92d | 1544 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
ab573844 | 1545 | static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } |
44234adc FW |
1546 | static inline void perf_event_enable(struct perf_event *event) { } |
1547 | static inline void perf_event_disable(struct perf_event *event) { } | |
500ad2d8 | 1548 | static inline int __perf_event_disable(void *info) { return -1; } |
e9d2b064 | 1549 | static inline void perf_event_task_tick(void) { } |
ffe8690c | 1550 | static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } |
3ca270fc LX |
1551 | static inline int perf_event_period(struct perf_event *event, u64 value) |
1552 | { | |
1553 | return -EINVAL; | |
1554 | } | |
52ba4b0b LX |
1555 | static inline u64 perf_event_pause(struct perf_event *event, bool reset) |
1556 | { | |
1557 | return 0; | |
1558 | } | |
0793a61d TG |
1559 | #endif |
1560 | ||
6c4d3bc9 DR |
1561 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) |
1562 | extern void perf_restore_debug_store(void); | |
1563 | #else | |
1d9d8639 | 1564 | static inline void perf_restore_debug_store(void) { } |
0793a61d TG |
1565 | #endif |
1566 | ||
7e3f977e DB |
1567 | static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) |
1568 | { | |
1569 | return frag->pad < sizeof(u64); | |
1570 | } | |
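/*
 * The next/pad union makes the test above work: a real next pointer is
 * never smaller than sizeof(u64), so a NULL (zero) field marks the final
 * fragment. A two-fragment record is chained like this sketch of the
 * tracing path (names abbreviated):
 *
 *	struct perf_raw_frag frag = {
 *		.copy	= ctx_copy,	// perf_copy_f callback
 *		.data	= ctx,
 *		.size	= ctx_size,
 *	};
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			{ .next	= ctx_size ? &frag : NULL, },
 *			.data	= meta,
 *			.size	= meta_size,
 *		},
 *	};
 */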
1571 | ||
e7e7ee2e | 1572 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) |
5622f295 | 1573 | |
2663960c SB |
1574 | struct perf_pmu_events_attr { |
1575 | struct device_attribute attr; | |
1576 | u64 id; | |
3a54aaa0 | 1577 | const char *event_str; |
2663960c SB |
1578 | }; |
1579 | ||
fc07e9f9 AK |
1580 | struct perf_pmu_events_ht_attr { |
1581 | struct device_attribute attr; | |
1582 | u64 id; | |
1583 | const char *event_str_ht; | |
1584 | const char *event_str_noht; | |
1585 | }; | |
1586 | ||
a9c81ccd KL |
1587 | struct perf_pmu_events_hybrid_attr { |
1588 | struct device_attribute attr; | |
1589 | u64 id; | |
1590 | const char *event_str; | |
1591 | u64 pmu_type; | |
1592 | }; | |
1593 | ||
1594 | struct perf_pmu_format_hybrid_attr { | |
1595 | struct device_attribute attr; | |
1596 | u64 pmu_type; | |
1597 | }; | |
1598 | ||
fd979c01 CS |
1599 | ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, |
1600 | char *page); | |
1601 | ||
2663960c SB |
1602 | #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ |
1603 | static struct perf_pmu_events_attr _var = { \ | |
1604 | .attr = __ATTR(_name, 0444, _show, NULL), \ | |
1605 | .id = _id, \ | |
1606 | }; | |
1607 | ||
f0405b81 CS |
1608 | #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ |
1609 | static struct perf_pmu_events_attr _var = { \ | |
1610 | .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ | |
1611 | .id = 0, \ | |
1612 | .event_str = _str, \ | |
1613 | }; | |
1614 | ||
f8e6d241 QL |
1615 | #define PMU_EVENT_ATTR_ID(_name, _show, _id) \ |
1616 | (&((struct perf_pmu_events_attr[]) { \ | |
1617 | { .attr = __ATTR(_name, 0444, _show, NULL), \ | |
1618 | .id = _id, } \ | |
1619 | })[0].attr.attr) | |
1620 | ||
641cc938 JO |
1621 | #define PMU_FORMAT_ATTR(_name, _format) \ |
1622 | static ssize_t \ | |
1623 | _name##_show(struct device *dev, \ | |
1624 | struct device_attribute *attr, \ | |
1625 | char *page) \ | |
1626 | { \ | |
1627 | BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ | |
1628 | return sprintf(page, _format "\n"); \ | |
1629 | } \ | |
1630 | \ | |
1631 | static struct device_attribute format_attr_##_name = __ATTR_RO(_name) | |
1632 | ||
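/*
 * Typical driver usage of the attribute helpers above (all names below
 * are hypothetical):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cpu-cycles, evattr_cpu_cycles, "event=0x3c");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */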
00e16c3d TG |
1633 | /* Performance counter hotplug functions */ |
1634 | #ifdef CONFIG_PERF_EVENTS | |
1635 | int perf_event_init_cpu(unsigned int cpu); | |
1636 | int perf_event_exit_cpu(unsigned int cpu); | |
1637 | #else | |
1638 | #define perf_event_init_cpu NULL | |
1639 | #define perf_event_exit_cpu NULL | |
1640 | #endif | |
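/*
 * These are wired into the CPU hotplug state machine by the core;
 * conceptually the registration looks like this sketch:
 *
 *	cpuhp_setup_state_nocalls(CPUHP_PERF_PREPARE, "perf:prepare",
 *				  perf_event_init_cpu, perf_event_exit_cpu);
 */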
1641 | ||
f1ec3a51 BT |
1642 | extern void __weak arch_perf_update_userpage(struct perf_event *event, |
1643 | struct perf_event_mmap_page *userpg, | |
1644 | u64 now); | |
1645 | ||
51b646b2 PZ |
1646 | #ifdef CONFIG_MMU |
1647 | extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); | |
1648 | #endif | |
1649 | ||
c22ac2a3 SL |
1650 | /* |
1651 | * Snapshot branch stack on software events. | |
1652 | * | |
1653 | * A branch stack can be very useful in understanding software events. For |
1654 | * example, when a long function, e.g. sys_perf_event_open, returns an |
1655 | * errno, it is not obvious why the function failed. The branch stack can |
1656 | * provide very helpful information in such scenarios. |
1657 | * |
1658 | * On a software event, the hardware branch recorder must be stopped |
1659 | * quickly; otherwise its registers/buffer will be flushed with entries |
1660 | * from the triggering event itself. A static call is therefore used to |
1661 | * stop the hardware recorder. |
1662 | */ | |
1663 | ||
1664 | /* | |
1665 | * @cnt is the number of entries allocated in @entries. |
1666 | * Returns the number of entries copied into @entries. |
1667 | */ | |
1668 | typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, | |
1669 | unsigned int cnt); | |
1670 | DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); | |
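/*
 * A usage sketch: a PMU that can snapshot its branch recorder installs
 * its handler at init time, and callers then go through the static call
 * (my_pmu_snapshot_branch_stack is hypothetical):
 *
 *	static_call_update(perf_snapshot_branch_stack,
 *			   my_pmu_snapshot_branch_stack);
 *	...
 *	nr = static_call(perf_snapshot_branch_stack)(entries, cnt);
 */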
1671 | ||
cdd6c482 | 1672 | #endif /* _LINUX_PERF_EVENT_H */ |