ftrace: timestamp syncing, prepare
author     Ingo Molnar <mingo@elte.hu>
Mon, 12 May 2008 19:20:46 +0000 (21:20 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 18:42:31 +0000 (20:42 +0200)
Rename and uninline now() to ftrace_now().

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
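
Summary of the net effect (a sketch distilled from the hunks below, not part of the patch itself): the static inline now() helper in trace.h is replaced by an extern declaration, and its body moves out of line into trace.c as ftrace_now(), so the timestamp source can later be changed in one place without touching any caller.

    /* kernel/trace/trace.h */
    extern notrace cycle_t ftrace_now(int cpu);

    /* kernel/trace/trace.c */
    notrace cycle_t ftrace_now(int cpu)
    {
            return cpu_clock(cpu);
    }

    /* callers switch mechanically from now() to ftrace_now(), e.g.: */
    start = ftrace_now(raw_smp_processor_id());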
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c

index 97c40865a93e17e0ddcce929d16e6c2fd48b51f0..a15e068535f819a3ab62ecda77e93d942ccb2b9d 100644 (file)
@@ -531,7 +531,7 @@ static int notrace __ftrace_update_code(void *ignore)
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
 
-       start = now(raw_smp_processor_id());
+       start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;
 
        /* No locks needed, the machine is stopped! */
@@ -550,7 +550,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
        }
 
-       stop = now(raw_smp_processor_id());
+       stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
 
index 4550afda9607eb372e2c1c4e5923eba989cbf471..e3778ab0d3f714eed26e75e6d8e1023eb5fbaeb8 100644 (file)
@@ -42,6 +42,11 @@ ns2usecs(cycle_t nsec)
        return nsec;
 }
 
+notrace cycle_t ftrace_now(int cpu)
+{
+       return cpu_clock(cpu);
+}
+
 static atomic_t                        tracer_counter;
 static struct trace_array      global_trace;
 
@@ -607,7 +612,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
        entry->idx              = atomic_inc_return(&tracer_counter);
        entry->preempt_count    = pc & 0xff;
        entry->pid              = tsk->pid;
-       entry->t                = now(raw_smp_processor_id());
+       entry->t                = ftrace_now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
index b0408356f0e07e2e8e609f8802e7defb85a32995..30cad677e9d0bae2f25eddbcd2e489785cd70698 100644 (file)
@@ -171,10 +171,7 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
 
-static inline notrace cycle_t now(int cpu)
-{
-       return cpu_clock(cpu);
-}
+extern notrace cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_SCHED_TRACER
 extern void notrace
index 5d8ad7a096052f7bafc573606c826320ce659f4d..e5d34b78fc99ac1b8a5943880a1ceb5d601f31d6 100644 (file)
@@ -20,7 +20,7 @@ static notrace void function_reset(struct trace_array *tr)
 {
        int cpu;
 
-       tr->time_start = now(tr->cpu);
+       tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
index 2dfebb67fdfbd6f790c22603782c03b87d441f7c..d2a6e6f1ad2d95cccd1fd09e9dbb615ebd8f7ad9 100644 (file)
@@ -136,7 +136,7 @@ check_critical_timing(struct trace_array *tr,
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
-       T1 = now(cpu);
+       T1 = ftrace_now(cpu);
        delta = T1-T0;
 
        local_save_flags(flags);
@@ -186,7 +186,7 @@ out_unlock:
 
 out:
        data->critical_sequence = max_sequence;
-       data->preempt_timestamp = now(cpu);
+       data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(data);
        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
@@ -215,7 +215,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
        atomic_inc(&data->disabled);
 
        data->critical_sequence = max_sequence;
-       data->preempt_timestamp = now(cpu);
+       data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(data);
 
index 6c9284103a62df8e8447dc1fa9576f7b3ab09d94..8d656672da9360275355705b50ad0b1a85b414e9 100644 (file)
@@ -61,7 +61,7 @@ static notrace void sched_switch_reset(struct trace_array *tr)
 {
        int cpu;
 
-       tr->time_start = now(tr->cpu);
+       tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
index 688df965f3f239c581205267f33c1dd19618f9c1..b7df825c3af9e3739536810a3ca49abaa735ffbd 100644 (file)
@@ -92,7 +92,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
-       T1 = now(cpu);
+       T1 = ftrace_now(cpu);
        delta = T1-T0;
 
        if (!report_latency(delta))
@@ -191,7 +191,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 
        local_save_flags(flags);
 
-       tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
+       tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
        ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked: