Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 21 Sep 2009 16:15:07 +0000 (09:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 21 Sep 2009 16:15:07 +0000 (09:15 -0700)
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.

1  2 
arch/x86/kernel/apic/apic.c
include/linux/sched.h
include/linux/syscalls.h
include/trace/ftrace.h
init/Kconfig
kernel/Makefile
kernel/sched.c
kernel/trace/trace_syscalls.c

Simple merge
Simple merge
Simple merge
index a0361cb697693c8a77ed55597620e7c134ee2ad4,ec91e78244f00d69b2f10bc7dc627f2d4dd787f7..cc0d9667e182d14d9d84ffc47d9e21078d96d067
@@@ -664,34 -675,25 +664,34 @@@ __attribute__((section("_ftrace_events"
   *                         sizeof(u64));
   *    __entry_size -= sizeof(u32);
   *
 - *    do {
 - *            char raw_data[__entry_size]; <- allocate our sample in the stack
 - *            struct trace_entry *ent;
 + *    // Protect the non nmi buffer
 + *    // This also protects the rcu read side
 + *    local_irq_save(irq_flags);
 + *    __cpu = smp_processor_id();
 + *
 + *    if (in_nmi())
 + *            raw_data = rcu_dereference(trace_profile_buf_nmi);
 + *    else
 + *            raw_data = rcu_dereference(trace_profile_buf);
 + *
 + *    if (!raw_data)
 + *            goto end;
   *
 - *            zero dead bytes from alignment to avoid stack leak to userspace:
 + *    raw_data = per_cpu_ptr(raw_data, __cpu);
   *
 - *            *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 - *            entry = (struct ftrace_raw_<call> *)raw_data;
 - *            ent = &entry->ent;
 - *            tracing_generic_entry_update(ent, irq_flags, pc);
 - *            ent->type = event_call->id;
 + *    //zero dead bytes from alignment to avoid stack leak to userspace:
 + *    *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 + *    entry = (struct ftrace_raw_<call> *)raw_data;
 + *    ent = &entry->ent;
 + *    tracing_generic_entry_update(ent, irq_flags, pc);
 + *    ent->type = event_call->id;
   *
 - *            <tstruct> <- do some jobs with dynamic arrays
 + *    <tstruct> <- do some jobs with dynamic arrays
   *
 - *            <assign>  <- affect our values
 + *    <assign>  <- affect our values
   *
-  *    perf_tpcounter_event(event_call->id, __addr, __count, entry,
 - *            perf_tp_event(event_call->id, __addr, __count, entry,
 - *                         __entry_size);  <- submit them to perf counter
 - *    } while (0);
++ *    perf_tp_event(event_call->id, __addr, __count, entry,
 + *                 __entry_size);  <- submit them to perf counter
   *
   * }
   */
@@@ -728,38 -728,23 +728,38 @@@ static void ftrace_profile_##call(proto
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
 -      do {                                                            \
 -              char raw_data[__entry_size];                            \
 -              struct trace_entry *ent;                                \
 +      if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,           \
 +                    "profile buffer not large enough"))               \
 +              return;                                                 \
 +                                                                      \
 +      local_irq_save(irq_flags);                                      \
 +      __cpu = smp_processor_id();                                     \
                                                                        \
 -              *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
 -              entry = (struct ftrace_raw_##call *)raw_data;           \
 -              ent = &entry->ent;                                      \
 -              tracing_generic_entry_update(ent, irq_flags, pc);       \
 -              ent->type = event_call->id;                             \
 +      if (in_nmi())                                                   \
 +              raw_data = rcu_dereference(trace_profile_buf_nmi);              \
 +      else                                                            \
 +              raw_data = rcu_dereference(trace_profile_buf);          \
                                                                        \
 -              tstruct                                                 \
 +      if (!raw_data)                                                  \
 +              goto end;                                               \
                                                                        \
 -              { assign; }                                             \
 +      raw_data = per_cpu_ptr(raw_data, __cpu);                        \
                                                                        \
 -              perf_tp_event(event_call->id, __addr, __count, entry,\
 +      *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;         \
 +      entry = (struct ftrace_raw_##call *)raw_data;                   \
 +      ent = &entry->ent;                                              \
 +      tracing_generic_entry_update(ent, irq_flags, pc);               \
 +      ent->type = event_call->id;                                     \
 +                                                                      \
 +      tstruct                                                         \
 +                                                                      \
 +      { assign; }                                                     \
 +                                                                      \
-       perf_tpcounter_event(event_call->id, __addr, __count, entry,    \
++      perf_tp_event(event_call->id, __addr, __count, entry,           \
                             __entry_size);                             \
 -      } while (0);                                                    \
 +                                                                      \
 +end:                                                                  \
 +      local_irq_restore(irq_flags);                                   \
                                                                        \
  }
  
diff --cc init/Kconfig
Simple merge
diff --cc kernel/Makefile
Simple merge
diff --cc kernel/sched.c
Simple merge
index 7a3550cf2597d52cc591d582cd0306823905528c,233f3483ac833e0cc6bce135737db62778d337c7..9fbce6c9d2e14610762039f0e484ca1509790c6d
@@@ -405,38 -402,20 +405,38 @@@ static void prof_syscall_enter(struct p
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
  
 -      do {
 -              char raw_data[size];
 +      if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 +                    "profile buffer not large enough"))
 +              return;
 +
 +      /* Protect the per cpu buffer, begin the rcu read side */
 +      local_irq_save(flags);
  
 -              /* zero the dead bytes from align to not leak stack to user */
 -              *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
 +      cpu = smp_processor_id();
 +
 +      if (in_nmi())
 +              raw_data = rcu_dereference(trace_profile_buf_nmi);
 +      else
 +              raw_data = rcu_dereference(trace_profile_buf);
 +
 +      if (!raw_data)
 +              goto end;
  
 -              rec = (struct syscall_trace_enter *) raw_data;
 -              tracing_generic_entry_update(&rec->ent, 0, 0);
 -              rec->ent.type = sys_data->enter_id;
 -              rec->nr = syscall_nr;
 -              syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 -                                     (unsigned long *)&rec->args);
 -              perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 -      } while(0);
 +      raw_data = per_cpu_ptr(raw_data, cpu);
 +
 +      /* zero the dead bytes from align to not leak stack to user */
 +      *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
 +
 +      rec = (struct syscall_trace_enter *) raw_data;
 +      tracing_generic_entry_update(&rec->ent, 0, 0);
 +      rec->ent.type = sys_data->enter_id;
 +      rec->nr = syscall_nr;
 +      syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 +                             (unsigned long *)&rec->args);
-       perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
++      perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 +
 +end:
 +      local_irq_restore(flags);
  }
  
  int reg_prof_syscall_enter(char *name)
@@@ -496,46 -471,12 +496,46 @@@ static void prof_syscall_exit(struct pt
        if (!sys_data)
                return;
  
 -      tracing_generic_entry_update(&rec.ent, 0, 0);
 -      rec.ent.type = sys_data->exit_id;
 -      rec.nr = syscall_nr;
 -      rec.ret = syscall_get_return_value(current, regs);
 +      /* We can probably do that at build time */
 +      size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
 +      size -= sizeof(u32);
  
 -      perf_tp_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
 +      /*
 +       * Impossible, but be paranoid with the future
 +       * How to put this check outside runtime?
 +       */
 +      if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 +              "exit event has grown above profile buffer size"))
 +              return;
 +
 +      /* Protect the per cpu buffer, begin the rcu read side */
 +      local_irq_save(flags);
 +      cpu = smp_processor_id();
 +
 +      if (in_nmi())
 +              raw_data = rcu_dereference(trace_profile_buf_nmi);
 +      else
 +              raw_data = rcu_dereference(trace_profile_buf);
 +
 +      if (!raw_data)
 +              goto end;
 +
 +      raw_data = per_cpu_ptr(raw_data, cpu);
 +
 +      /* zero the dead bytes from align to not leak stack to user */
 +      *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
 +
 +      rec = (struct syscall_trace_exit *)raw_data;
 +
 +      tracing_generic_entry_update(&rec->ent, 0, 0);
 +      rec->ent.type = sys_data->exit_id;
 +      rec->nr = syscall_nr;
 +      rec->ret = syscall_get_return_value(current, regs);
 +
-       perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size);
++      perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
 +
 +end:
 +      local_irq_restore(flags);
  }
  
  int reg_prof_syscall_exit(char *name)