/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "debug.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

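/*
 * FD() and SID() index an evsel's per-(cpu, thread) xyarrays: FD()
 * yields the perf_event fd opened for that cpu/thread pair, SID() the
 * struct perf_sample_id slot used for the id -> evsel hash below.
 */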
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

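/*
 * The event tools fall back to when the user specified none: the
 * hardware cycles counter.
 */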
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
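
/*
 * The fds registered above are meant to be consumed with plain
 * poll(2), e.g. (sketch, the timeout is the caller's choice):
 *
 *	poll(evlist->pollfd, evlist->nr_fds, timeout);
 */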

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

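/*
 * perf_evlist__id_add_fd() recovers the kernel-assigned event ID by
 * read()ing the counter fd.  With PERF_FORMAT_ID the read layout is a
 * sequence of u64s, the optional ones present only when the matching
 * read_format bit is set (see linux/perf_event.h):
 *
 *	value
 *	time_enabled	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	time_running	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	id		(PERF_FORMAT_ID)
 *
 * hence id_idx starts at 1 and is bumped once per TIME_* flag below.
 */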
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
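
/*
 * Sketch of a consumer loop over one mmap (process_event() is
 * illustrative only, the real handling is the caller's):
 *
 *	while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
 *		process_event(event);
 */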

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int cpu;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
			       int cpu, int prot, int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED) {
		if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
			ui__warning("Inherit is not allowed on per-task "
				    "events using mmap.\n");
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

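	/*
	 * Note on the size below: one extra page on top of the data
	 * area for the control/metadata page (struct
	 * perf_event_mmap_page); pages itself must be a power of two
	 * for the mask arithmetic above to work.
	 */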
	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

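				/*
				 * One ring buffer per CPU: only the
				 * first counter mmapped on a CPU gets
				 * its own buffer, every other fd is
				 * redirected to it with
				 * PERF_EVENT_IOC_SET_OUTPUT.
				 */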
				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, evsel, cpu,
							       prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
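
/*
 * Putting it together, a typical caller looks roughly like this
 * (sketch only: error handling omitted and the counter-opening step,
 * done through the evsel API, elided):
 *
 *	evlist = perf_evlist__new(cpus, threads);
 *	perf_evlist__add_default(evlist);
 *	... open each evsel's counters ...
 *	perf_evlist__mmap(evlist, pages, false);
 *	while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
 *		... handle event ...
 */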

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
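/*
 * Applies each evsel's filter string, if any, to every fd opened for
 * it, using the PERF_EVENT_IOC_SET_FILTER ioctl.
 */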
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}