perf stat: Create events as disabled
authorJiri Olsa <jolsa@kernel.org>
Thu, 3 Dec 2015 09:06:44 +0000 (10:06 +0100)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Mon, 7 Dec 2015 21:12:59 +0000 (18:12 -0300)
Currently we have 2 kinds of stat counters based on when the event is
enabled:

  1) tracee command events, which are enabled once the
     tracee executes the exec syscall (enable_on_exec bit)
  2) all other events, which become alive within the
     perf_event_open syscall

The 2) case could raise a problem when we want an additional filter
to be attached to an event. In that case we want the event to be
enabled only after it's configured with the filter.

Changing the behaviour of 2) events, so they are all created as disabled
(disabled bit). Adding an extra enable call to make them alive once they
finish setup.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1449133606-14429-6-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-stat.c

index 8ca40deaa72824da5d5ac9f224ca12af0c36d878..2e70610649a1d2e0b72ebe854eaeef4e1a449eba 100644 (file)
@@ -168,11 +168,18 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
        attr->sample_period = 0;
        attr->sample_type   = 0;
 
+       /*
+        * Disabling all counters initially, they will be enabled
+        * either manually by us or by kernel via enable_on_exec
+        * set later.
+        */
+       if (perf_evsel__is_group_leader(evsel))
+               attr->disabled = 1;
+
        if (target__has_cpu(&target))
                return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
 
        if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
-               attr->disabled = 1;
                if (!initial_delay)
                        attr->enable_on_exec = 1;
        }
@@ -251,12 +258,18 @@ static void process_interval(void)
        print_counters(&rs, 0, NULL);
 }
 
-static void handle_initial_delay(void)
+static void enable_counters(void)
 {
-       if (initial_delay) {
+       if (initial_delay)
                usleep(initial_delay * 1000);
+
+       /*
+        * We need to enable counters only if:
+        * - we don't have tracee (attaching to task or cpu)
+        * - we have initial delay configured
+        */
+       if (!target__none(&target) || initial_delay)
                perf_evlist__enable(evsel_list);
-       }
 }
 
 static volatile int workload_exec_errno;
@@ -353,7 +366,7 @@ static int __run_perf_stat(int argc, const char **argv)
 
        if (forks) {
                perf_evlist__start_workload(evsel_list);
-               handle_initial_delay();
+               enable_counters();
 
                if (interval) {
                        while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -372,7 +385,7 @@ static int __run_perf_stat(int argc, const char **argv)
                if (WIFSIGNALED(status))
                        psignal(WTERMSIG(status), argv[0]);
        } else {
-               handle_initial_delay();
+               enable_counters();
                while (!done) {
                        nanosleep(&ts, NULL);
                        if (interval)