perf evsel: Allow specifying if the inherit bit should be set
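
The open routines now take two extra booleans, so the callers updated in the
hunks below pass them explicitly. A minimal sketch of the new per-thread call,
assuming the added parameters are (group, inherit) as the subject suggests:

	/* not grouped, inherit bit left clear */
	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0)
		pr_debug("failed to open counter: %s\n", strerror(errno));
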
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c984342a5795090d2e863d0b2da1a995e0201d2..7287158c483042ddcf559100468ceeb2d57e38d1 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -146,7 +146,7 @@ next_pair:
                                if (llabs(skew) < page_size)
                                        continue;
 
-                               pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+                               pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
@@ -168,11 +168,11 @@ detour:
                                        goto detour;
                                }
 
-                               pr_debug("%#Lx: diff name v: %s k: %s\n",
+                               pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
-                       pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+                       pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
                err = -1;
        }
@@ -211,10 +211,10 @@ detour:
 
                if (pair->start == pos->start) {
                        pair->priv = 1;
-                       pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+                       pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-                               pr_info(": \n*%Lx-%Lx %Lx",
+                               pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                        pair->priv = 1;
@@ -234,6 +234,7 @@ out:
        return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -264,6 +265,7 @@ static int test__open_syscall_event(void)
        int err = -1, fd;
        struct thread_map *threads;
        struct perf_evsel *evsel;
+       struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");
 
@@ -278,13 +280,16 @@ static int test__open_syscall_event(void)
                return -1;
        }
 
-       evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+       memset(&attr, 0, sizeof(attr));
+       attr.type = PERF_TYPE_TRACEPOINT;
+       attr.config = id;
+       evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }
 
-       if (perf_evsel__open_per_thread(evsel, threads) < 0) {
+       if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
@@ -302,7 +307,7 @@ static int test__open_syscall_event(void)
        }
 
        if (evsel->counts->cpu[0].val != nr_open_calls) {
-               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }
@@ -317,6 +322,121 @@ out_thread_map_delete:
        return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+       int err = -1, fd, cpu;
+       struct thread_map *threads;
+       struct cpu_map *cpus;
+       struct perf_evsel *evsel;
+       struct perf_event_attr attr;
+       unsigned int nr_open_calls = 111, i;
+       cpu_set_t cpu_set;
+       int id = trace_event__id("sys_enter_open");
+
+       if (id < 0) {
+               pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+               return -1;
+       }
+
+       threads = thread_map__new(-1, getpid());
+       if (threads == NULL) {
+               pr_debug("thread_map__new\n");
+               return -1;
+       }
+
+       cpus = cpu_map__new(NULL);
+       if (cpus == NULL) {
+               pr_debug("cpu_map__new\n");
+               goto out_thread_map_delete;
+       }
+
+       CPU_ZERO(&cpu_set);
+
+       memset(&attr, 0, sizeof(attr));
+       attr.type = PERF_TYPE_TRACEPOINT;
+       attr.config = id;
+       evsel = perf_evsel__new(&attr, 0);
+       if (evsel == NULL) {
+               pr_debug("perf_evsel__new\n");
+               goto out_thread_map_delete;
+       }
+
+       if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+               pr_debug("failed to open counter: %s, "
+                        "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+                        strerror(errno));
+               goto out_evsel_delete;
+       }
+
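+       /*
+        * Pin the thread to each CPU in turn and issue a distinct number of
+        * open() calls on it (nr_open_calls + cpu), so that the per cpu
+        * counts read back further down can be checked individually.
+        */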
+       for (cpu = 0; cpu < cpus->nr; ++cpu) {
+               unsigned int ncalls = nr_open_calls + cpu;
+               /*
+                * XXX eventually lift this restriction in a way that
+                * keeps perf building on older glibc installations
+                * without CPU_ALLOC. 1024 cpus in 2010 still seems
+                * a reasonable upper limit tho :-)
+                */
+               if (cpus->map[cpu] >= CPU_SETSIZE) {
+                       pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+                       continue;
+               }
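+               /*
+                * With CPU_ALLOC() the dynamic variant hinted at above would
+                * look roughly like this (nr_cpus standing for the number of
+                * possible cpus):
+                *
+                *      cpu_set_t *set = CPU_ALLOC(nr_cpus);
+                *      size_t setsize = CPU_ALLOC_SIZE(nr_cpus);
+                *
+                *      CPU_ZERO_S(setsize, set);
+                *      CPU_SET_S(cpus->map[cpu], setsize, set);
+                *      sched_setaffinity(0, setsize, set);
+                *      CPU_FREE(set);
+                */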
+
+               CPU_SET(cpus->map[cpu], &cpu_set);
+               if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+                       pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
+                                cpus->map[cpu],
+                                strerror(errno));
+                       goto out_close_fd;
+               }
+               for (i = 0; i < ncalls; ++i) {
+                       fd = open("/etc/passwd", O_RDONLY);
+                       close(fd);
+               }
+               CPU_CLR(cpus->map[cpu], &cpu_set);
+       }
+
+       /*
+        * Here we need to explicitly preallocate the counts, as if we
+        * relied on the auto allocation it would allocate room for just
+        * one cpu, since we start reading at cpu 0.
+        */
+       if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+               pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+               goto out_close_fd;
+       }
+
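+       /*
+        * Now read each per cpu counter and check that it matches the number
+        * of open() calls issued while running on that cpu.
+        */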
+       for (cpu = 0; cpu < cpus->nr; ++cpu) {
+               unsigned int expected;
+
+               if (cpus->map[cpu] >= CPU_SETSIZE)
+                       continue;
+
+               if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+                       pr_debug("perf_evsel__read_on_cpu\n");
+                       goto out_close_fd;
+               }
+
+               expected = nr_open_calls + cpu;
+               if (evsel->counts->cpu[cpu].val != expected) {
+                       pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+                                expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
+                       goto out_close_fd;
+               }
+       }
+
+       err = 0;
+out_close_fd:
+       perf_evsel__close_fd(evsel, cpus->nr, threads->nr);
+out_evsel_delete:
+       perf_evsel__delete(evsel);
+out_thread_map_delete:
+       thread_map__delete(threads);
+       return err;
+}
+
 static struct test {
        const char *desc;
        int (*func)(void);
@@ -329,6 +449,10 @@ static struct test {
                .desc = "detect open syscall event",
                .func = test__open_syscall_event,
        },
+       {
+               .desc = "detect open syscall event on all cpus",
+               .func = test__open_syscall_event_on_all_cpus,
+       },
        {
                .func = NULL,
        },