/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
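
/*
 * Illustrative sketch, not part of the original file: adding an event
 * other than the default follows the same pattern as
 * perf_evlist__add_default(), just with a caller-chosen attr. The
 * function name below is hypothetical.
 */
static int perf_evlist__add_instructions_sketch(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_INSTRUCTIONS,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, evlist->nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}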

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
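
/*
 * Illustrative sketch, not part of the original file: every fd added
 * above is made non-blocking and registered for POLLIN, so a caller
 * can wait for data on all the ring buffers with a single poll().
 * The helper name is hypothetical.
 */
static int perf_evlist__poll_sketch(struct perf_evlist *evlist, int timeout_ms)
{
	/* > 0: number of ready fds, 0: timeout, -1: error (see poll(2)) */
	return poll(evlist->pollfd, evlist->nr_fds, timeout_ms);
}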

static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
				int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}
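
/*
 * Illustrative sketch, not part of the original file: with
 * PERF_FORMAT_ID and both PERF_FORMAT_TOTAL_TIME_* bits set in
 * attr.read_format, read() on a counter fd fills a buffer with the
 * layout below (see perf_event_open(2)), which is why id_idx starts
 * at 1 and is bumped once per TOTAL_TIME_* bit before read_data[] is
 * indexed. The struct name is hypothetical.
 */
struct read_format_sketch {
	u64 value;		/* counter value, always first */
	u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
	u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
	u64 id;			/* if PERF_FORMAT_ID */
};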

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
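
/*
 * Illustrative sketch, not part of the original file: a typical
 * consumer drains each per-cpu ring until read_on_cpu() returns NULL,
 * then goes back to waiting on evlist->pollfd. The function name is
 * hypothetical.
 */
static void perf_evlist__consume_sketch(struct perf_evlist *evlist)
{
	int cpu;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		union perf_event *event;

		while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL) {
			/* dispatch on event->header.type here */
		}
	}
}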

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int cpu;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

/** perf_evlist__mmap - Create per-cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
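
/*
 * Illustrative note, not part of the original file: the mask above
 * assumes pages is a power of two, so that pages * page_size - 1 is
 * an all-ones bitmask and ring offsets wrap with a cheap AND, as
 * read_on_cpu() does with "old & md->mask". The helper below is a
 * hypothetical restatement of that arithmetic.
 */
static unsigned int ring_offset_sketch(unsigned int offset, int pages,
				       unsigned int page_size)
{
	int mask = pages * page_size - 1;	/* pages must be 2^n */

	return offset & mask;	/* offset modulo the data area size */
}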

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
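
/*
 * Illustrative end-to-end sketch, not part of the original file: a
 * minimal monitoring setup using the helpers above, assuming the
 * counters are opened in between (that step lives in evsel.c). The
 * function name is hypothetical.
 */
static int perf_evlist__monitor_sketch(void)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	int err = -ENOMEM;

	if (evlist == NULL)
		return -ENOMEM;

	if (perf_evlist__add_default(evlist))	/* cycles counter */
		goto out_delete;

	if (perf_evlist__create_maps(evlist, -1, -1, NULL))	/* all cpus */
		goto out_delete;

	/* ... open each evsel on evlist->cpus/evlist->threads here ... */

	if (perf_evlist__mmap(evlist, 128, false) < 0)	/* 128 data pages */
		goto out_delete_maps;

	/* ... poll evlist->pollfd and drain with read_on_cpu() ... */

	perf_evlist__munmap(evlist);
	err = 0;
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete:
	perf_evlist__delete(evlist);
	return err;
}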

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}