perf: Support high-order allocations for AUX space
Author:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
AuthorDate: Wed, 14 Jan 2015 12:18:12 +0000 (14:18 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 2 Apr 2015 15:14:08 +0000 (17:14 +0200)

Some PMUs (such as Intel BTS, or Intel PT without the multiple-entry ToPA
capability) don't support scatter-gather and prefer larger physically
contiguous areas for their output regions.

This patch adds a new PMU capability flag, PERF_PMU_CAP_AUX_NO_SG, which
makes the AUX buffer allocator try high-order allocations first and fall
back to smaller orders only when those fail.
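
For illustration only, and not part of this patch: a driver whose hardware
cannot scatter-gather would advertise the new capability when registering
its struct pmu. The my_pt_* names below are hypothetical; setup_aux() and
free_aux() are the existing AUX callbacks in struct pmu.

  /* hypothetical driver sketch, only the AUX-related fields are shown */
  static void *my_pt_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite);
  static void my_pt_free_aux(void *aux);

  static struct pmu my_pt_pmu = {
          /* ask rb_alloc_aux() for high-order, physically contiguous chunks */
          .capabilities   = PERF_PMU_CAP_AUX_NO_SG,
          .setup_aux      = my_pt_setup_aux,
          .free_aux       = my_pt_free_aux,
          /* ... event_init/add/del/start/stop/read callbacks as usual ... */
  };

  static int __init my_pt_init(void)
  {
          return perf_pmu_register(&my_pt_pmu, "my_pt", -1);
  }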

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1421237903-181015-4-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/perf_event.h
kernel/events/ring_buffer.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5a94f6d6fa91fc5be33616070f0428b243bfd889..d5a4a8e958084266b47bc9230c0daea960f71875 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -174,6 +174,7 @@ struct perf_event;
  */
 #define PERF_PMU_CAP_NO_INTERRUPT              0x01
 #define PERF_PMU_CAP_NO_NMI                    0x02
+#define PERF_PMU_CAP_AUX_NO_SG                 0x04
 
 /**
  * struct pmu - generic performance monitoring unit
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 3de9c4e9ea9fe836b0c274bb4e84cfc99c0315d1..ed0859e33b2f45166d18aec74e49604e8d385d8d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
        spin_lock_init(&rb->event_lock);
 }
 
+#define PERF_AUX_GFP   (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
+
+static struct page *rb_alloc_aux_page(int node, int order)
+{
+       struct page *page;
+
+       if (order > MAX_ORDER)
+               order = MAX_ORDER;
+
+       do {
+               page = alloc_pages_node(node, PERF_AUX_GFP, order);
+       } while (!page && order--);
+
+       if (page && order) {
+               /*
+                * Communicate the allocation size to the driver
+                */
+               split_page(page, order);
+               SetPagePrivate(page);
+               set_page_private(page, order);
+       }
+
+       return page;
+}
+
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+{
+       struct page *page = virt_to_page(rb->aux_pages[idx]);
+
+       ClearPagePrivate(page);
+       page->mapping = NULL;
+       __free_page(page);
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, int flags)
 {
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
-       int ret = -ENOMEM;
+       int ret = -ENOMEM, max_order = 0;
 
        if (!has_aux(event))
                return -ENOTSUPP;
 
+       if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
+               /*
+                * We need to start with the max_order that fits in nr_pages,
+                * not the other way around, hence ilog2() and not get_order.
+                */
+               max_order = ilog2(nr_pages);
+
        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;
 
        rb->free_aux = event->pmu->free_aux;
-       for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;
-            rb->aux_nr_pages++) {
+       for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
+               int last, order;
 
-               page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+               order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
+               page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;
 
-               rb->aux_pages[rb->aux_nr_pages] = page_address(page);
+               for (last = rb->aux_nr_pages + (1 << page_private(page));
+                    last > rb->aux_nr_pages; rb->aux_nr_pages++)
+                       rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }
 
        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
        }
 
        for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               free_page((unsigned long)rb->aux_pages[pg]);
+               rb_free_aux_page(rb, pg);
 
        kfree(rb->aux_pages);
        rb->aux_nr_pages = 0;
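
For illustration only, and not part of this patch: because rb_alloc_aux_page()
marks the head page of each high-order chunk with PagePrivate() and stores the
allocation order in page_private(), a PERF_PMU_CAP_AUX_NO_SG driver's
->setup_aux() callback can walk the AUX page array region by region instead of
page by page. A minimal sketch, reusing the hypothetical my_pt_* names from
above:

  struct my_pt_buf {
          int     nr_regions;     /* number of contiguous regions found */
  };

  static void *my_pt_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
  {
          struct my_pt_buf *buf;
          int pg;

          buf = kzalloc(sizeof(*buf), GFP_KERNEL);
          if (!buf)
                  return NULL;    /* NULL tells rb_alloc_aux() that setup failed */

          for (pg = 0; pg < nr_pages; ) {
                  struct page *page = virt_to_page(pages[pg]);
                  /* head pages of high-order chunks carry the order in page_private() */
                  int order = PagePrivate(page) ? page_private(page) : 0;

                  /* pages[pg] starts a run of (1 << order) physically contiguous pages */
                  buf->nr_regions++;
                  pg += 1 << order;
          }

          return buf;     /* becomes rb->aux_priv and is later passed to ->free_aux() */
  }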