perf/core: Free AUX pages in unmap path
kernel/events/ring_buffer.c
/*
 * Performance events ring-buffer code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
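
/*
 * For reference, a minimal user-space consumer of the above protocol could
 * look roughly like the sketch below (illustrative only, not part of the
 * kernel sources; @up is the mmap()ed struct perf_event_mmap_page and
 * @base/@mask describe the data area):
 *
 *	u64 tail = up->data_tail;
 *	u64 head = READ_ONCE(up->data_head);
 *	smp_rmb();				// (C), pairs with (B)
 *	while (tail != head) {
 *		struct perf_event_header *hdr = base + (tail & mask);
 *		// ... consume the record at hdr ...
 *		tail += hdr->size;
 *	}
 *	smp_mb();				// (D), pairs with (A)
 *	WRITE_ONCE(up->data_tail, tail);
 */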

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;
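	/*
	 * Worked example (illustrative): with PAGE_SHIFT == 12,
	 * page_order(rb) == 0 and an 8-page data area, an offset of 0x3050
	 * yields handle->page == 3, an in-page offset of 0x50 and
	 * handle->size == 0xfb0.
	 */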

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
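
/*
 * Typical usage of the three functions above, from the sampling side
 * (illustrative sketch; see perf_event_output() and friends in
 * kernel/events/core.c for the real callers):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	// ... perf_output_copy()/perf_output_skip() for the payload ...
 *	perf_output_end(&handle);
 */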

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
	 * the aux buffer is in perf_mmap_close(), about to get freed.
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err_put;

	/*
	 * Nesting is not supported for the AUX area; make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}
	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
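
/*
 * A pmu driver's AUX transaction around a hardware capture therefore
 * follows roughly this pattern (illustrative sketch; hw_start_trace()
 * and hw_stop_trace() are made-up driver-side helpers):
 *
 *	buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;			// no space, or the buffer went away
 *	hw_start_trace(buf, handle.head, handle.size);
 *	// ... hardware writes AUX data ...
 *	size = hw_stop_trace(buf);	// data must be globally visible here
 *	perf_aux_output_end(&handle, size, truncated);
 */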

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
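
/*
 * Consumers of rb->aux_pages (e.g. a pmu::setup_aux() callback) can
 * recover the order of such a high-order chunk roughly like this
 * (illustrative sketch):
 *
 *	struct page *page = virt_to_page(rb->aux_pages[idx]);
 *	int order = PagePrivate(page) ? page_private(page) : 0;
 */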

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * The PMU requests more than one contiguous chunk of memory
		 * for SW double buffering.
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}
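
/*
 * Resulting mmap() page layout served by perf_mmap_to_page()
 * (illustrative summary):
 *
 *	pgoff 0			user_page (struct perf_event_mmap_page)
 *	pgoff 1 .. nr_pages	data_pages[]
 *	pgoff aux_pgoff ..	aux_pages[], if rb_alloc_aux() succeeded
 */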