/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLLIN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load
         * of ->data_tail from the stores of $data: if ->data_tail indicates
         * there is no room in the buffer to store $data, we do not store
         * it at all.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for
         * C an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin() and the userspace sketch after this
         * function.
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
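
/*
 * Illustrative only: a minimal sketch of the matching userspace consumer
 * loop, under the assumption that the buffer was mmap()ed with the user
 * page first and that rmb()/mb() provide the (C)/(D) barriers above;
 * read_record(), record_size and mask are made-up names, not ABI.
 *
 *      struct perf_event_mmap_page *up = mmap_base;
 *      void *data = mmap_base + page_size;
 *      __u64 tail = up->data_tail;
 *      __u64 head = up->data_head;     // LOAD ->data_head
 *
 *      rmb();                          // (C), pairs with (B) in the kernel
 *      while (tail != head) {
 *              read_record(data + (tail & mask));      // LOAD $data
 *              tail += record_size;
 *      }
 *      mb();                           // (D), pairs with (A) in the kernel
 *      up->data_tail = tail;           // STORE ->data_tail
 */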

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64 id;
                u64 lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(rb->paused)) {
                if (rb->nr_pages)
                        local_inc(&rb->lost);
                goto out;
        }

        handle->rb    = rb;
        handle->event = event;

        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }

        perf_output_get_handle(handle);

        do {
                tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
                        goto fail;

                /*
                 * The above forms a control dependency barrier separating
                 * the @tail load above from the data stores below, since
                 * the @tail load is required to compute the branch to
                 * fail below.
                 *
                 * A, matches D; the full memory barrier userspace SHOULD
                 * issue after reading the data and before storing the new
                 * tail position.
                 *
                 * See perf_output_put_handle().
                 */

                head += size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */

        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);

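        /*
         * Worked example (illustrative values, not taken from this code):
         * with PAGE_SHIFT == 12, page_order(rb) == 0 and nr_pages == 8,
         * an offset of 0x5804 gives page_shift == 12,
         * handle->page == (0x5804 >> 12) & 7 == 5 and an in-page offset of
         * 0x5804 & 0xfff == 0x804, so handle->addr points 0x804 bytes into
         * data page 5 and handle->size == 0x1000 - 0x804 == 0x7fc bytes
         * remain in that page.
         */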
        page_shift = PAGE_SHIFT + page_order(rb);

        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;

        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}

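/*
 * Illustrative only: a minimal sketch of the producer-side pattern built
 * on the functions above, assuming @event is a hypothetical already-set-up
 * event and @raw/@raw_size describe its payload.
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header hdr = {
 *              .type = PERF_RECORD_SAMPLE,
 *              .misc = 0,
 *              .size = sizeof(hdr) + raw_size,
 *      };
 *
 *      if (perf_output_begin(&handle, event, hdr.size))
 *              return;                 // no space; rb->lost was bumped
 *      perf_output_put(&handle, hdr);  // __output_copy() of hdr
 *      perf_output_copy(&handle, raw, raw_size);
 *      perf_output_end(&handle);       // publish head, maybe wake pollers
 */
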
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);

        /*
         * perf_output_begin() only checks rb->paused, therefore
         * rb->paused must be true if we have no pages for output.
         */
        if (!rb->nr_pages)
                rb->paused = 1;
}

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
{
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
        struct ring_buffer *rb;

        if (output_event->parent)
                output_event = output_event->parent;

        /*
         * Since this will typically be open across pmu::add/pmu::del, we
         * grab ring_buffer's refcount instead of holding rcu read lock
         * to make sure it doesn't disappear under us.
         */
        rb = ring_buffer_get(output_event);
        if (!rb)
                return NULL;

        if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
                goto err;

        /*
         * If rb::aux_mmap_count is zero (and rb_has_aux() above went
         * through), the aux buffer is in perf_mmap_close(), about to get
         * freed.
         */
        if (!atomic_read(&rb->aux_mmap_count))
                goto err_put;

        /*
         * Nesting is not supported for the AUX area; make sure nested
         * writers are caught early.
         */
        if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
                goto err_put;

        aux_head = local_read(&rb->aux_head);

        handle->rb    = rb;
        handle->event = event;
        handle->head  = aux_head;
        handle->size  = 0;

        /*
         * In overwrite mode, AUX data stores do not depend on aux_tail,
         * therefore (A) control dependency barrier does not exist. The
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
                aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
                handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

                /*
                 * handle->size computation depends on the aux_tail load;
                 * this forms a control dependency barrier separating the
                 * aux_tail load from the aux data stores that will be
                 * enabled on successful return.
                 */
                if (!handle->size) { /* A, matches D */
                        event->pending_disable = 1;
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
                }
        }

        return handle->rb->aux_priv;

err_put:
        /* can't be last */
        rb_free_aux(rb);

err:
        ring_buffer_put(rb);
        handle->event = NULL;

        return NULL;
}

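/*
 * Illustrative only: a rough sketch of how a hypothetical AUX-capable pmu
 * driver would pair these calls in its pmu::start()/pmu::stop() callbacks.
 * struct my_pmu, to_my_pmu() and my_pmu_hw_*() are made-up names, not
 * kernel API.
 *
 *      static void my_pmu_start(struct perf_event *event, int flags)
 *      {
 *              struct my_pmu *pmu = to_my_pmu(event->pmu);
 *              void *priv = perf_aux_output_begin(&pmu->handle, event);
 *
 *              if (!priv)
 *                      return;         // no AUX buffer, or no space
 *              // program hardware to write up to pmu->handle.size bytes
 *              my_pmu_hw_start(pmu, priv, pmu->handle.size);
 *      }
 *
 *      static void my_pmu_stop(struct perf_event *event, int flags)
 *      {
 *              struct my_pmu *pmu = to_my_pmu(event->pmu);
 *              unsigned long written = my_pmu_hw_stop(pmu); // implies (B)
 *
 *              perf_aux_output_end(&pmu->handle, written, false);
 *      }
 */
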
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                         bool truncated)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;
        u64 flags = 0;

        if (truncated)
                flags |= PERF_AUX_FLAG_TRUNCATED;

        /* in overwrite mode, driver provides aux_head via handle */
        if (rb->aux_overwrite) {
                flags |= PERF_AUX_FLAG_OVERWRITE;

                aux_head = handle->head;
                local_set(&rb->aux_head, aux_head);
        } else {
                aux_head = local_read(&rb->aux_head);
                local_add(size, &rb->aux_head);
        }

        if (size || flags) {
                /*
                 * Only send RECORD_AUX if we have something useful to
                 * communicate.
                 */
                perf_event_aux_event(handle->event, aux_head, size, flags);
        }

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                perf_output_wakeup(handle);
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }
        handle->event = NULL;

        local_set(&rb->aux_nest, 0);
        /* can't be last */
        rb_free_aux(rb);
        ring_buffer_put(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for
 * example, hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        if (size > handle->size)
                return -ENOSPC;

        local_add(size, &rb->aux_head);

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                perf_output_wakeup(handle);
                local_add(rb->aux_watermark, &rb->aux_wakeup);
                handle->wakeup = local_read(&rb->aux_wakeup) +
                                 rb->aux_watermark;
        }

        handle->head = aux_head;
        handle->size -= size;

        return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
        /* this is only valid between perf_aux_output_begin and *_end */
        if (!handle->event)
                return NULL;

        return handle->rb->aux_priv;
}

#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
        struct page *page;

        if (order > MAX_ORDER)
                order = MAX_ORDER;

        do {
                page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
                /*
                 * Communicate the allocation size to the driver:
                 * if we managed to secure a high-order allocation,
                 * set its first page's private to this order;
                 * !PagePrivate(page) means it's just a normal page.
                 */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }

        return page;
}

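/*
 * Illustrative only: a sketch of how a driver consuming aux_pages[] could
 * walk the array and recover the high-order chunks recorded above;
 * configure_sg_entry() is a made-up placeholder, not kernel API.
 *
 *      for (i = 0; i < nr_pages;) {
 *              struct page *page = virt_to_page(pages[i]);
 *              int order = PagePrivate(page) ? page_private(page) : 0;
 *
 *              configure_sg_entry(pages[i], PAGE_SIZE << order);
 *              i += 1 << order;        // skip the pages of this chunk
 *      }
 */
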
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
        struct page *page = virt_to_page(rb->aux_pages[idx]);

        ClearPagePrivate(page);
        page->mapping = NULL;
        __free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
        int pg;

        /*
         * Should never happen, the last reference should be dropped from
         * perf_mmap_close() path, which first stops aux transactions (which
         * in turn are the atomic holders of aux_refcount) and then does the
         * last rb_free_aux().
         */
        WARN_ON_ONCE(in_atomic());

        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
                rb->aux_priv = NULL;
        }

        if (rb->aux_nr_pages) {
                for (pg = 0; pg < rb->aux_nr_pages; pg++)
                        rb_free_aux_page(rb, pg);

                kfree(rb->aux_pages);
                rb->aux_nr_pages = 0;
        }
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
        int ret = -ENOMEM, max_order = 0;

        if (!has_aux(event))
                return -ENOTSUPP;

        if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
                /*
                 * We need to start with the max_order that fits in
                 * nr_pages, not the other way around, hence ilog2() and
                 * not get_order().
                 */
                max_order = ilog2(nr_pages);

                /*
                 * The PMU requests more than one contiguous chunk of
                 * memory for SW double buffering.
                 */
                if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
                    !overwrite) {
                        if (!max_order)
                                return -EINVAL;

                        max_order--;
                }
        }

        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;

        rb->free_aux = event->pmu->free_aux;
        for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
                int last, order;

                order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
                page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;

                for (last = rb->aux_nr_pages + (1 << page_private(page));
                     last > rb->aux_nr_pages; rb->aux_nr_pages++)
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }

        /*
         * In overwrite mode, PMUs that don't support SG may not handle more
         * than one contiguous allocation, since they rely on PMI to do
         * double buffering. In this case, the entire buffer has to be one
         * contiguous chunk.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
            overwrite) {
                struct page *page = virt_to_page(rb->aux_pages[0]);

                if (page_private(page) != max_order)
                        goto out;
        }

        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
                goto out;

        ret = 0;

        /*
         * aux_pages (and pmu driver's private data, aux_priv) will be
         * referenced in both producer's and consumer's contexts, thus
         * we keep a refcount here to make sure either of the two can
         * reference them safely.
         */
        atomic_set(&rb->aux_refcount, 1);

        rb->aux_overwrite = overwrite;
        rb->aux_watermark = watermark;

        /* by default, wake up at every half of the AUX buffer */
        if (!rb->aux_watermark && !rb->aux_overwrite)
                rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
                __rb_free_aux(rb);

        return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
        if (atomic_dec_and_test(&rb->aux_refcount))
                __rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* The '>' counts in the user page. */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;
        /* The '<=' counts in the user page. */
        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
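        /*
         * Note (added for clarity): in the vmalloc case the whole buffer
         * is one virtually-contiguous allocation, so it is presented as a
         * single "data page" of order ilog2(nr_pages); page_order(rb) then
         * makes perf_data_size() and the page_shift math in
         * perf_output_begin() come out the same as in the page-by-page
         * case. E.g. nr_pages == 8 yields rb->nr_pages == 1 and
         * rb->page_order == 3.
         */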
        if (nr_pages) {
                rb->nr_pages = 1;
                rb->page_order = ilog2(nr_pages);
        }

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (rb->aux_nr_pages) {
                /* above AUX space */
                if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
                        return NULL;

                /* AUX space */
                if (pgoff >= rb->aux_pgoff)
                        return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
        }

        return __perf_mmap_to_page(rb, pgoff);
}