perf_counter: Introduce struct for sample data
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 10 Jun 2009 19:02:22 +0000 (21:02 +0200)
committer Ingo Molnar <mingo@elte.hu>
Thu, 11 Jun 2009 00:39:02 +0000 (02:39 +0200)
For easy extension of the sample data, put it in a structure.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/powerpc/kernel/perf_counter.c
arch/x86/kernel/cpu/perf_counter.c
include/linux/perf_counter.h
kernel/perf_counter.c

index 4786ad9a28877afc0461ae24f053f76ca09605f1..5e0bf399c4335165c06d2bc6ffbd62ba801623e6 100644 (file)
@@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
         * Finally record data if requested.
         */
        if (record) {
-               addr = 0;
+               struct perf_sample_data data = {
+                       .regs = regs,
+                       .addr = 0,
+               };
+
                if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
                        /*
                         * The user wants a data address recorded.
@@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
                        sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
                                POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
                        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-                               addr = mfspr(SPRN_SDAR);
+                               data.addr = mfspr(SPRN_SDAR);
                }
-               if (perf_counter_overflow(counter, nmi, regs, addr)) {
+               if (perf_counter_overflow(counter, nmi, &data)) {
                        /*
                         * Interrupts are coming too fast - throttle them
                         * by setting the counter to 0, so it will be
index 240ca5630632bc33f1d232c8588fe6709f74f58a..82a23d487f928a4f76eabc38f1a7be4ff3cd3ed0 100644 (file)
@@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
+       struct perf_sample_data data;
        struct cpu_hw_counters *cpuc;
-       struct cpu_hw_counters;
        int bit, cpu, loops;
        u64 ack, status;
 
+       data.regs = regs;
+       data.addr = 0;
+
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1210,7 +1213,7 @@ again:
                if (!intel_pmu_save_and_restart(counter))
                        continue;
 
-               if (perf_counter_overflow(counter, 1, regs, 0))
+               if (perf_counter_overflow(counter, 1, &data))
                        intel_pmu_disable_counter(&counter->hw, bit);
        }
 
@@ -1230,12 +1233,16 @@ again:
 
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-       int cpu, idx, handled = 0;
+       struct perf_sample_data data;
        struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
+       int cpu, idx, handled = 0;
        u64 val;
 
+       data.regs = regs;
+       data.addr = 0;
+
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
                if (!x86_perf_counter_set_period(counter, hwc, idx))
                        continue;
 
-               if (perf_counter_overflow(counter, 1, regs, 0))
+               if (perf_counter_overflow(counter, 1, &data))
                        amd_pmu_disable_counter(hwc, idx);
        }
 
index 282d8cc4898045695864e4f8775eec3bab53ed1a..d8c0eb480f9a642ca6decf0c27099a0f3ecd673f 100644 (file)
@@ -605,8 +605,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern int perf_counter_overflow(struct perf_counter *counter,
-                                int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+       struct pt_regs  *regs;
+       u64             addr;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+                                struct perf_sample_data *data);
+
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
index ae591a1275a663e0cd2b245a280ee30cb2e3d69c..4fe85e804f43c5e3c50aad54be5cc2dfa8da5464 100644 (file)
@@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
        return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter,
-                               int nmi, struct pt_regs *regs, u64 addr)
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+                               struct perf_sample_data *data)
 {
        int ret;
        u64 sample_type = counter->attr.sample_type;
@@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter,
        header.size = sizeof(header);
 
        header.misc = PERF_EVENT_MISC_OVERFLOW;
-       header.misc |= perf_misc_flags(regs);
+       header.misc |= perf_misc_flags(data->regs);
 
        if (sample_type & PERF_SAMPLE_IP) {
-               ip = perf_instruction_pointer(regs);
+               ip = perf_instruction_pointer(data->regs);
                header.type |= PERF_SAMPLE_IP;
                header.size += sizeof(ip);
        }
@@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter,
        }
 
        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-               callchain = perf_callchain(regs);
+               callchain = perf_callchain(data->regs);
 
                if (callchain) {
                        callchain_size = (1 + callchain->nr) * sizeof(u64);
@@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter,
                perf_output_put(&handle, time);
 
        if (sample_type & PERF_SAMPLE_ADDR)
-               perf_output_put(&handle, addr);
+               perf_output_put(&handle, data->addr);
 
        if (sample_type & PERF_SAMPLE_ID)
                perf_output_put(&handle, counter->id);
@@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter,
-                         int nmi, struct pt_regs *regs, u64 addr)
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+                         struct perf_sample_data *data)
 {
        int events = atomic_read(&counter->event_limit);
        int throttle = counter->pmu->unthrottle != NULL;
@@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter,
                        perf_counter_disable(counter);
        }
 
-       perf_counter_output(counter, nmi, regs, addr);
+       perf_counter_output(counter, nmi, data);
        return ret;
 }
 
@@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
        enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
        struct perf_counter *counter;
-       struct pt_regs *regs;
        u64 period;
 
        counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
        counter->pmu->read(counter);
 
-       regs = get_irq_regs();
+       data.addr = 0;
+       data.regs = get_irq_regs();
        /*
         * In case we exclude kernel IPs or are somehow not in interrupt
         * context, provide the next best thing, the user IP.
         */
-       if ((counter->attr.exclude_kernel || !regs) &&
+       if ((counter->attr.exclude_kernel || !data.regs) &&
                        !counter->attr.exclude_user)
-               regs = task_pt_regs(current);
+               data.regs = task_pt_regs(current);
 
-       if (regs) {
-               if (perf_counter_overflow(counter, 0, regs, 0))
+       if (data.regs) {
+               if (perf_counter_overflow(counter, 0, &data))
                        ret = HRTIMER_NORESTART;
        }
 
@@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 static void perf_swcounter_overflow(struct perf_counter *counter,
                                    int nmi, struct pt_regs *regs, u64 addr)
 {
+       struct perf_sample_data data = {
+               .regs = regs,
+               .addr = addr,
+       };
+
        perf_swcounter_update(counter);
        perf_swcounter_set_period(counter);
-       if (perf_counter_overflow(counter, nmi, regs, addr))
+       if (perf_counter_overflow(counter, nmi, &data))
                /* soft-disable the counter */
                ;