/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */
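
/*
 * op_x86_get_ctrl() below assembles a performance event select (EVNTSEL)
 * MSR value from a counter configuration.  The architectural layout it
 * relies on: bits 0-7 event select, bits 8-15 unit mask, bit 16 count in
 * user mode (USR), bit 17 count in kernel mode (OS), bit 20 enable the
 * overflow interrupt (INT).  A rough worked example (illustrative values,
 * not taken from this file): event 0x3c (unhalted core cycles), unit
 * mask 0, user and kernel counting enabled, yields
 *
 *	val = ARCH_PERFMON_EVENTSEL_INT | ARCH_PERFMON_EVENTSEL_USR |
 *	      ARCH_PERFMON_EVENTSEL_OS  | 0x3c;
 *
 * The (event & 0x0F00) << 24 term covers AMD's wider event codes, whose
 * bits 8-11 live in bits 32-35 of the MSR.
 */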
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}
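
/*
 * NMI entry point, registered on the NMI_LOCAL chain as "oprofile" in
 * nmi_setup().  Returning NMI_HANDLED claims the NMI (a counter
 * overflowed for us); NMI_DONE passes it on to the next handler.
 */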
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(this_cpu_ptr(&cpu_msrs));
	return NMI_HANDLED;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);

	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}
static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}
static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);

	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}
static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
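
/*
 * Event multiplexing lets userspace configure more events than the CPU
 * has hardware counters by rotating sets of counters.  Sketch of the
 * mapping (assuming, for illustration, 2 physical and 4 virtual
 * counters): with switch_index == 0, physical counters 0-1 back virtual
 * counters 0-1; after one switch, switch_index == 2 and they back
 * virtual counters 2-3; the next switch wraps back to set 0.
 */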
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}
inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}
static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}
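
/*
 * The negation above is the usual perfctr idiom: a counter seeded with
 * -count overflows (and raises the NMI) after exactly 'count' events.
 * For illustration, count = 100000 on a 48-bit counter arms it at
 * 2^48 - 100000.
 */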
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
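
/*
 * Note the wrap test above: rotation restarts at set 0 either when the
 * virtual counters are exhausted or at the first set whose lead counter
 * is unconfigured (count == 0), since counters are assigned in order.
 * The counters are stopped for the duration of the switch so the NMI
 * handler never sees a half-programmed set.
 */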
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}
static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}
static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}
#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
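
/*
 * The apic_write() above is what routes counter overflows to the NMI
 * handler: the local APIC's performance counter LVT entry (LVTPC) is
 * pointed at NMI delivery, and the original value is saved per CPU so
 * nmi_cpu_shutdown() can restore it later.
 */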
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on, the apic lvt entries contain a zero vector nr, which is
	 * legal only for NMI delivery mode. So inhibit apic err before
	 * restoring lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}
static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
static int nmi_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use.  This should protect userspace apps.
		 * NOTE:  assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
	}

	return 0;
}
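
/*
 * The resulting oprofilefs layout, one directory per virtual counter,
 * as seen from userspace with the filesystem conventionally mounted at
 * /dev/oprofile:
 *
 *	/dev/oprofile/0/enabled
 *	/dev/oprofile/0/event
 *	/dev/oprofile/0/count
 *	...
 *
 * The oprofile userspace tools program the counters by writing these
 * files.
 */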
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
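
/*
 * CPU hotplug flow: when a CPU comes online (or a planned offline
 * fails), nmi_cpu_up() reprograms its counters and LVTPC; before a CPU
 * goes down, nmi_cpu_down() stops it and restores its registers.  Note
 * the differing wait flags above: the DOWN_PREPARE call must complete
 * (wait = 1) before the CPU disappears, while the ONLINE call may run
 * asynchronously (wait = 0).
 */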
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
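
/*
 * Ordering note: the NMI handler is registered while nmi_enabled and
 * ctr_running are still 0, so a performance counter NMI arriving in the
 * window before on_each_cpu(nmi_cpu_setup, ...) is simply passed on
 * (NMI_DONE) by profile_exceptions_notify() rather than touching
 * unprogrammed MSRs.
 */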
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
	get_online_cpus();
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	__unregister_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};
static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
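
/*
 * Background for the smp_num_siblings check above: a Pentium 4 with
 * HyperThreading shares its performance counter MSRs between the two
 * sibling threads, so a separate model (op_p4_ht2_spec) partitions them;
 * with more than two siblings there is no such partitioning scheme, and
 * the driver falls back to timer-based profiling as the printks report.
 */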
enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;
static int set_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
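
/*
 * Usage sketch (assuming oprofile is built as a module): the parameter
 * can be given at load time, e.g.
 *
 *	modprobe oprofile cpu_type=arch_perfmon
 *
 * or, with oprofile built in, as oprofile.cpu_type=timer on the kernel
 * command line.
 */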
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	exit_suspend_resume();
}