Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
361c99a6 ACM |
2 | #ifndef __PERF_EVLIST_H |
3 | #define __PERF_EVLIST_H 1 | |
4 | ||
5c97cac6 | 5 | #include <linux/compiler.h> |
877a7a11 | 6 | #include <linux/kernel.h> |
25a3720c | 7 | #include <linux/refcount.h> |
361c99a6 | 8 | #include <linux/list.h> |
1b85337d | 9 | #include <api/fd/array.h> |
50d08e47 | 10 | #include <stdio.h> |
70db7533 | 11 | #include "../perf.h" |
04391deb | 12 | #include "event.h" |
0c21f736 | 13 | #include "evsel.h" |
50d08e47 | 14 | #include "util.h" |
718c602d | 15 | #include "auxtrace.h" |
9607ad3a | 16 | #include <signal.h> |
35b9d88e | 17 | #include <unistd.h> |
361c99a6 | 18 | |
5c581041 | 19 | struct pollfd; |
f8a95309 ACM |
20 | struct thread_map; |
21 | struct cpu_map; | |
b4006796 | 22 | struct record_opts; |
5c581041 | 23 | |
70db7533 ACM |
24 | #define PERF_EVLIST__HLIST_BITS 8 |
25 | #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) | |
26 | ||
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @base: mmap'ed area; begins with the perf_event_mmap_page control page
 *        (dereferenced as such in perf_mmap__read_head()/__write_tail())
 * @mask: size of the data area minus one; perf_mmap__mmap_len() computes
 *        mask + 1 + page_size, so presumably a power-of-two minus one used
 *        for index wrapping — TODO confirm against evlist.c
 * @fd: NOTE(review): looks like the event fd backing this mmap — not used
 *      in this header, confirm in evlist.c
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 * @prev: NOTE(review): appears to cache the last head position seen by the
 *        reader (type matches data_head) — confirm in the read/consume paths
 * @auxtrace_mmap: state for the associated AUX area mmap (see auxtrace.h)
 * @event_copy: 8-byte-aligned scratch buffer; presumably used to linearize
 *              an event that wraps the ring edge — TODO confirm
 */
struct perf_mmap {
	void		 *base;
	int		 mask;
	int		 fd;
	refcount_t	 refcnt;
	u64		 prev;
	struct auxtrace_mmap auxtrace_mmap;
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
41 | ||
8db6d6b1 WN |
42 | static inline size_t |
43 | perf_mmap__mmap_len(struct perf_mmap *map) | |
44 | { | |
45 | return map->mask + 1 + page_size; | |
46 | } | |
47 | ||
/*
 * State machine of bkw_mmap_state:
 *
 *  .________________(forbid)_____________.
 *  |                                     V
 *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *  ^       ^              |     ^               |
 *  |        |__(forbid)____/     |___(forbid)___/|
 *  |                                             |
 *   \_________________(3)_______________________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 *
 * Transitions are driven via perf_evlist__toggle_bkw_mmap(), declared
 * below; the current state lives in perf_evlist::bkw_mmap_state.
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
75 | ||
/*
 * An evlist: the set of events (evsels) perf is working with, plus the
 * ring buffers, poll fds and cpu/thread maps shared across them.
 */
struct perf_evlist {
	struct list_head entries;	/* struct perf_evsel list, linked via evsel 'node' (see evlist__for_each_entry()) */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];	/* hash buckets; presumably sample-id -> evsel lookup for perf_evlist__id2evsel() — TODO confirm */
	int		 nr_entries;	/* number of evsels on ->entries */
	int		 nr_groups;	/* NOTE(review): number of evsel groups, it seems — confirm vs perf_evlist__set_leader() */
	int		 nr_mmaps;	/* presumably the element count of ->mmap/->backward_mmap — TODO confirm */
	bool		 overwrite;	/* NOTE(review): mirrors the 'overwrite' arg of perf_evlist__mmap() — confirm */
	bool		 enabled;	/* toggled by perf_evlist__toggle_enable() */
	bool		 has_user_cpus;	/* NOTE(review): presumably set when the user supplied an explicit cpu list — confirm */
	size_t		 mmap_len;	/* NOTE(review): per-ring mmap length, it seems — confirm vs perf_evlist__mmap_size() */
	int		 id_pos;	/* NOTE(review): with is_pos, looks like offsets for locating the sample id in records — confirm vs perf_evlist__set_id_pos() */
	int		 is_pos;
	u64		 combined_sample_type;	/* NOTE(review): presumably a cache for perf_evlist__combined_sample_type() — confirm */
	enum bkw_mmap_state bkw_mmap_state;	/* backward ring buffer state machine, see enum bkw_mmap_state above */
	struct {
		int	cork_fd;	/* NOTE(review): fd holding the forked workload until start — confirm in prepare_workload() */
		pid_t	pid;	/* pid of the workload set up by perf_evlist__prepare_workload() */
	} workload;
	struct fdarray	 pollfd;	/* fds waited on by perf_evlist__poll() */
	struct perf_mmap *mmap;	/* forward ring buffers */
	struct perf_mmap *backward_mmap;	/* backward (overwrite) ring buffers */
	struct thread_map *threads;	/* threads being monitored */
	struct cpu_map	 *cpus;	/* cpus being monitored */
	struct perf_evsel *selected;	/* evsel chosen via perf_evlist__set_selected() */
	struct events_stats stats;
	struct perf_env	 *env;
};
103 | ||
/*
 * Pairs an event name with an opaque handler pointer; presumably used to
 * attach per-tracepoint callbacks by name — TODO confirm against callers.
 */
struct perf_evsel_str_handler {
	const char *name;	/* event name to match */
	void	   *handler;	/* opaque callback, cast by the consumer */
};
108 | ||
334fe7a3 | 109 | struct perf_evlist *perf_evlist__new(void); |
b22d54b0 | 110 | struct perf_evlist *perf_evlist__new_default(void); |
5bae0250 | 111 | struct perf_evlist *perf_evlist__new_dummy(void); |
7e2ed097 ACM |
112 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, |
113 | struct thread_map *threads); | |
ef1d1af2 | 114 | void perf_evlist__exit(struct perf_evlist *evlist); |
361c99a6 ACM |
115 | void perf_evlist__delete(struct perf_evlist *evlist); |
116 | ||
117 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | |
4768230a | 118 | void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel); |
db918acb ACM |
119 | |
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise);

/*
 * Add perf's default event to @evlist, requesting the precise variant.
 * Thin wrapper: __perf_evlist__add_default() with precise = true.
 * Returns whatever __perf_evlist__add_default() returns.
 */
static inline int perf_evlist__add_default(struct perf_evlist *evlist)
{
	return __perf_evlist__add_default(evlist, true);
}
126 | ||
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs);

/*
 * Add every attr in @array to @evlist. @array must be a true array (not a
 * pointer): ARRAY_SIZE() computes the element count at compile time.
 */
#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
361c99a6 | 132 | |
5bae0250 ACM |
133 | int perf_evlist__add_dummy(struct perf_evlist *evlist); |
134 | ||
39876e7d ACM |
135 | int perf_evlist__add_newtp(struct perf_evlist *evlist, |
136 | const char *sys, const char *name, void *handler); | |
137 | ||
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

/*
 * Convenience wrappers: callers pass the short bit name (e.g. TIME) and
 * the macro token-pastes the PERF_SAMPLE_ prefix onto it.
 */
#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
148 | ||
745cefc5 | 149 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); |
cfd70a26 | 150 | int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid); |
be199ada | 151 | int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); |
745cefc5 | 152 | |
da378962 ACM |
153 | struct perf_evsel * |
154 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); | |
155 | ||
a2f2804a DA |
156 | struct perf_evsel * |
157 | perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, | |
158 | const char *name); | |
159 | ||
a91e5431 ACM |
160 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, |
161 | int cpu, int thread, u64 id); | |
1c59612d JO |
162 | int perf_evlist__id_add_fd(struct perf_evlist *evlist, |
163 | struct perf_evsel *evsel, | |
164 | int cpu, int thread, int fd); | |
3d3b5e95 | 165 | |
ad6765dd ACM |
166 | int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); |
167 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); | |
1ddec7f0 ACM |
168 | int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask); |
169 | ||
f66a889d ACM |
170 | int perf_evlist__poll(struct perf_evlist *evlist, int timeout); |
171 | ||
70db7533 | 172 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); |
dddcf6ab AH |
173 | struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, |
174 | u64 id); | |
70db7533 | 175 | |
932a3594 JO |
176 | struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); |
177 | ||
54cc54de WN |
178 | void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state); |
179 | ||
8db6d6b1 WN |
180 | union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup); |
181 | union perf_event *perf_mmap__read_backward(struct perf_mmap *map); | |
182 | ||
183 | void perf_mmap__read_catchup(struct perf_mmap *md); | |
184 | void perf_mmap__consume(struct perf_mmap *md, bool overwrite); | |
185 | ||
316c7136 | 186 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); |
04391deb | 187 | |
5a5ddeb6 WN |
188 | union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, |
189 | int idx); | |
e24c7520 WN |
190 | union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, |
191 | int idx); | |
192 | void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx); | |
193 | ||
8e50d384 ZZ |
194 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); |
195 | ||
6a4bb04c | 196 | int perf_evlist__open(struct perf_evlist *evlist); |
a74b4b66 | 197 | void perf_evlist__close(struct perf_evlist *evlist); |
727ab04e | 198 | |
e68ae9cf ACM |
199 | struct callchain_param; |
200 | ||
75562573 AH |
201 | void perf_evlist__set_id_pos(struct perf_evlist *evlist); |
202 | bool perf_can_sample_identifier(void); | |
b757bb09 | 203 | bool perf_can_record_switch_events(void); |
83509565 | 204 | bool perf_can_record_cpu_wide(void); |
e68ae9cf ACM |
205 | void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, |
206 | struct callchain_param *callchain); | |
b4006796 | 207 | int record_opts__config(struct record_opts *opts); |
0f82ebc4 | 208 | |
35b9d88e | 209 | int perf_evlist__prepare_workload(struct perf_evlist *evlist, |
602ad878 | 210 | struct target *target, |
55e162ea | 211 | const char *argv[], bool pipe_output, |
735f7e0b ACM |
212 | void (*exec_error)(int signo, siginfo_t *info, |
213 | void *ucontext)); | |
35b9d88e ACM |
214 | int perf_evlist__start_workload(struct perf_evlist *evlist); |
215 | ||
724ce97e ACM |
216 | struct option; |
217 | ||
e9db1310 | 218 | int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str); |
994a1f78 JO |
219 | int perf_evlist__parse_mmap_pages(const struct option *opt, |
220 | const char *str, | |
221 | int unset); | |
222 | ||
f5e7150c ACM |
223 | unsigned long perf_event_mlock_kb_in_pages(void); |
224 | ||
718c602d AH |
225 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, |
226 | bool overwrite, unsigned int auxtrace_pages, | |
227 | bool auxtrace_overwrite); | |
50a682ce ACM |
228 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, |
229 | bool overwrite); | |
7e2ed097 ACM |
230 | void perf_evlist__munmap(struct perf_evlist *evlist); |
231 | ||
0c582449 JO |
232 | size_t perf_evlist__mmap_size(unsigned long pages); |
233 | ||
4152ab37 | 234 | void perf_evlist__disable(struct perf_evlist *evlist); |
764e16a3 | 235 | void perf_evlist__enable(struct perf_evlist *evlist); |
2b56bcfb | 236 | void perf_evlist__toggle_enable(struct perf_evlist *evlist); |
4152ab37 | 237 | |
1c65056c AH |
238 | int perf_evlist__enable_event_idx(struct perf_evlist *evlist, |
239 | struct perf_evsel *evsel, int idx); | |
395c3070 | 240 | |
81cce8de ACM |
241 | void perf_evlist__set_selected(struct perf_evlist *evlist, |
242 | struct perf_evsel *evsel); | |
243 | ||
d5bc056e AH |
244 | void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
245 | struct thread_map *threads); | |
602ad878 | 246 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); |
23d4aad4 | 247 | int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); |
f8a95309 | 248 | |
63dab225 ACM |
249 | void __perf_evlist__set_leader(struct list_head *list); |
250 | void perf_evlist__set_leader(struct perf_evlist *evlist); | |
251 | ||
9ede473c | 252 | u64 perf_evlist__read_format(struct perf_evlist *evlist); |
75562573 AH |
253 | u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist); |
254 | u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist); | |
98df858e | 255 | u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist); |
0c21f736 ACM |
256 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist); |
257 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); | |
74429964 | 258 | |
a3f698fe | 259 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
0807d2d8 | 260 | struct perf_sample *sample); |
cb0b29e0 | 261 | |
0c21f736 ACM |
262 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); |
263 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); | |
9ede473c | 264 | bool perf_evlist__valid_read_format(struct perf_evlist *evlist); |
0529bc1f JO |
265 | |
266 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | |
f114d6ef | 267 | struct list_head *list); |
0c21f736 | 268 | |
64831a21 DCC |
269 | static inline bool perf_evlist__empty(struct perf_evlist *evlist) |
270 | { | |
271 | return list_empty(&evlist->entries); | |
272 | } | |
273 | ||
0c21f736 ACM |
274 | static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) |
275 | { | |
276 | return list_entry(evlist->entries.next, struct perf_evsel, node); | |
277 | } | |
278 | ||
279 | static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) | |
280 | { | |
281 | return list_entry(evlist->entries.prev, struct perf_evsel, node); | |
282 | } | |
78f067b3 ACM |
283 | |
284 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); | |
0479b8b9 | 285 | |
a8f23d8f | 286 | int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); |
956fa571 | 287 | int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); |
6ef068cb | 288 | |
/*
 * Read the kernel-maintained data_head of @mm's ring buffer.
 *
 * ACCESS_ONCE forces a single, non-torn load of data_head; the read
 * barrier then orders that load before any subsequent reads of the ring
 * data, so we never read event bytes past the head value we observed.
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);
	rmb();
	return head;
}
296 | ||
/*
 * Publish @tail as the new data_tail of @md's ring buffer, telling the
 * kernel that everything up to @tail has been consumed and may be
 * overwritten.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out:
	 * once data_tail is visible the kernel is free to reuse that
	 * part of the ring, so no read of it may be reordered past here.
	 */
	mb();
	pc->data_tail = tail;
}
307 | ||
c09ec622 | 308 | bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); |
a025e4f0 AH |
309 | void perf_evlist__to_front(struct perf_evlist *evlist, |
310 | struct perf_evsel *move_evsel); | |
311 | ||
/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator, resumes after its current position
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator, resumes after its current position
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator (allows removing @evsel mid-walk)
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @tmp: struct evsel temp iterator (allows removing @evsel mid-walk)
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
c09ec622 | 377 | |
60b0896c AH |
378 | void perf_evlist__set_tracking_event(struct perf_evlist *evlist, |
379 | struct perf_evsel *tracking_evsel); | |
45cf6c33 JO |
380 | |
381 | void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr); | |
7630b3e2 WN |
382 | |
383 | struct perf_evsel * | |
384 | perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); | |
7cb5c5ac JO |
385 | |
386 | struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, | |
387 | union perf_event *event); | |
361c99a6 | 388 | #endif /* __PERF_EVLIST_H */ |