/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS 8

struct cpu_hw_counters {
        struct perf_counter *counters[MAX_HW_COUNTERS];
        unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CYCLES]              = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]        = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]    = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]        = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]       = 0x00c5,
};

const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
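
/*
 * (Encoding note, not in the original source: each map entry packs
 * (unit mask << 8) | event select, matching the low 16 bits of the
 * EVENTSEL MSR layout. E.g. 0x412e is the architectural "LLC Misses"
 * event: event select 0x2e with unit mask 0x41.)
 */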

/*
 * Setup the hardware configuration for a given hw_event_type
 */
int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
{
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event_type & PERF_COUNT_NMI)
                        hwc->nmi = 1;
        }

        hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
        hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;

        hwc->irq_period = counter->__irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial maximum period of 2^31-1
         * regardless of the generic counter period:
         */
        if (!hwc->irq_period)
                hwc->irq_period = 0x7FFFFFFF;

        hwc->next_count = -((s32) hwc->irq_period);
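        /*
         * (Explanatory note, not in the original source: the counter is
         * armed with a negative 32-bit start value. Writes to the
         * general-purpose PERFCTR MSRs are sign-extended from bit 31 by
         * the hardware, so after 'irq_period' increments the counter
         * overflows and raises the PMI.)
         */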

        /*
         * Negative event types mean raw encoded event+umask values:
         */
        if (hw_event_type < 0) {
                counter->hw_event_type = -hw_event_type;
                counter->hw_event_type &= ~PERF_COUNT_NMI;
        } else {
                hw_event_type &= ~PERF_COUNT_NMI;
                if (hw_event_type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
        }
        hwc->config |= counter->hw_event_type;
        counter->wakeup_pending = 0;

        return 0;
}

void hw_perf_enable_all(void)
{
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}

void hw_perf_restore_ctrl(u64 ctrl)
{
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);

u64 hw_perf_disable_all(void)
{
        u64 ctrl;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_disable_all);

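/*
 * (Usage note, not in the original source: callers bracket updates to
 * counter state with the save/restore pair, roughly:
 *
 *        u64 ctrl = hw_perf_disable_all();
 *        ... reprogram counters ...
 *        hw_perf_restore_ctrl(ctrl);
 *
 * Only the low 32 bits of the global control MSR are written back,
 * which is sufficient while at most MAX_HW_COUNTERS (8) generic
 * counters are managed and fixed-function counters are unsupported.)
 */
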
static inline void
__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
{
        wrmsr(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);

static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
{
        per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;

        wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
}

static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
{
        wrmsr(hwc->config_base + idx,
              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

void hw_perf_counter_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /* Try to get the previous counter again */
        if (test_and_set_bit(idx, cpuc->used)) {
                idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
                set_bit(idx, cpuc->used);
                hwc->idx = idx;
        }

        perf_counters_lapic_init(hwc->nmi);

        __hw_perf_counter_disable(hwc, idx);

        cpuc->counters[idx] = counter;

        __hw_perf_counter_set_period(hwc, idx);
        __hw_perf_counter_enable(hwc, idx);
}

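/*
 * (Note, not in the original source: if every counter is already in
 * use, find_first_zero_bit() above returns nr_hw_counters and the
 * function proceeds with an out-of-range index; over-committing the
 * hardware counters is not handled at this stage.)
 */
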
#ifdef CONFIG_X86_64
static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
{
        atomic64_set(&counter->count, val);
}

static inline u64 atomic64_counter_read(struct perf_counter *counter)
{
        return atomic64_read(&counter->count);
}
#else
/*
 * Todo: add proper atomic64_t support to 32-bit x86:
 */
static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
{
        u32 *val32 = (void *)&val64;

        atomic_set(counter->count32 + 0, *(val32 + 0));
        atomic_set(counter->count32 + 1, *(val32 + 1));
}

static inline u64 atomic64_counter_read(struct perf_counter *counter)
{
        /* Cast the low half to u32 so it is not sign-extended: */
        return (u32) atomic_read(counter->count32 + 0) |
                (u64) atomic_read(counter->count32 + 1) << 32;
}
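/*
 * (Note, not in the original source: the two halves are read with
 * separate atomic_read()s, so an update from NMI context between the
 * two reads can be observed torn; the "Todo" above tracks replacing
 * this with real atomic64_t support.)
 */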
#endif

static void __hw_perf_save_counter(struct perf_counter *counter,
                                   struct hw_perf_counter *hwc, int idx)
{
        s64 raw = -1;
        s64 delta;

        /*
         * Get the raw hw counter value:
         */
        rdmsrl(hwc->counter_base + idx, raw);

        /*
         * Rebase it to zero (it started counting at -irq_period),
         * to see the delta since ->prev_count:
         */
        delta = (s64)hwc->irq_period + (s64)(s32)raw;

        atomic64_counter_set(counter, hwc->prev_count + delta);

        /*
         * Adjust the ->prev_count offset - if we went beyond
         * irq_period units, then we got an IRQ and the counter
         * was set back to -irq_period:
         */
        while (delta >= (s64)hwc->irq_period) {
                hwc->prev_count += hwc->irq_period;
                delta -= (s64)hwc->irq_period;
        }

        /*
         * Calculate the next raw counter value we'll write into
         * the counter at the next sched-in time:
         */
        delta -= (s64)hwc->irq_period;

        hwc->next_count = (s32)delta;
}

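/*
 * (Worked example, not in the original source: with irq_period == 1000
 * the counter is armed at -1000. After 250 events the raw value reads
 * back as -750, so delta = 1000 + (-750) = 250; no overflow has
 * happened (delta < irq_period), prev_count is left alone, and
 * next_count becomes 250 - 1000 = -750, so the remaining 750 events
 * still fit before the next PMI.)
 */
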
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
        int cpu, idx;

        if (!nr_hw_counters)
                return;

        local_irq_disable();

        cpu = smp_processor_id();

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:   %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);

        for (idx = 0; idx < nr_hw_counters; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                next_count = per_cpu(prev_next_count[idx], cpu);

                printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: PMC%d next:  %016llx\n",
                        cpu, idx, next_count);
        }
        local_irq_enable();
}

void hw_perf_counter_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __hw_perf_counter_disable(hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        __hw_perf_save_counter(counter, hwc, idx);
}

void hw_perf_counter_read(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned long addr = hwc->counter_base + hwc->idx;
        s64 offs, val = -1LL;
        s32 val32;

        /* Careful: NMI might modify the counter offset */
        do {
                offs = hwc->prev_count;
                rdmsrl(addr, val);
        } while (offs != hwc->prev_count);

        val32 = (s32) val;
        val = (s64)hwc->irq_period + (s64)val32;
        atomic64_counter_set(counter, hwc->prev_count + val);
}

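/*
 * (Explanatory note, not in the original source: the do/while above is
 * a seqlock-style retry. An NMI arriving between the ->prev_count read
 * and the MSR read re-arms the counter and moves the offset, so the
 * pair is re-read until both values belong to the same period window.)
 */
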
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * NMI-safe enable method:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;
        u64 pmc_ctrl;

        rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

        __hw_perf_save_counter(counter, hwc, idx);
        __hw_perf_counter_set_period(hwc, idx);

        if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
                __hw_perf_counter_enable(hwc, idx);
}

static void
perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
{
        struct perf_counter_context *ctx = leader->ctx;
        struct perf_counter *counter;
        int bit;

        list_for_each_entry(counter, &ctx->counters, list) {
                if (counter->record_type != PERF_RECORD_SIMPLE ||
                    counter == leader)
                        continue;

                if (counter->active) {
                        /*
                         * If the counter was not in the overflow mask
                         * we have to read it from hardware. We also
                         * read it when it has not been read yet, and
                         * clear its bit in the status mask.
                         */
                        bit = counter->hw.idx;
                        if (!test_bit(bit, (unsigned long *) overflown) ||
                            test_bit(bit, (unsigned long *) status)) {
                                clear_bit(bit, (unsigned long *) status);
                                perf_save_and_restart(counter);
                        }
                }
                perf_store_irq_data(leader, counter->hw_event_type);
                perf_store_irq_data(leader, atomic64_counter_read(counter));
        }
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status, saved_global;
        struct cpu_hw_counters *cpuc;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

        /* Disable counters globally */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        ack_APIC_irq();

        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        ack = status;
        for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_store_irq_data(counter, counter->hw_event_type);
                        perf_store_irq_data(counter,
                                            atomic64_counter_read(counter));
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

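        /*
         * (Explanatory note, not in the original source: writing a bit
         * to MSR_CORE_PERF_GLOBAL_OVF_CTRL clears the corresponding
         * overflow bit in GLOBAL_STATUS, so 'ack' acknowledges exactly
         * the overflows handled in this pass.)
         */
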
        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not reenable when global enable is off:
         */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
#ifdef CONFIG_X86_64
        add_pda(apic_perf_irqs, 1);
#else
        per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++;
#endif
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);

        irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

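/*
 * (Explanatory note, not in the original source: LVTERR is masked while
 * LVTPC is reprogrammed so that a spurious APIC error interrupt cannot
 * fire mid-update; the saved LVTERR value is restored afterwards.)
 */
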
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call = perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * the Branch Misses Retired Event or not:
         */
        cpuid(10, &eax.full, &ebx, &unused, &unused);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;
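
        /*
         * (Explanatory note, not in the original source: CPUID leaf 0xA
         * packs the architectural perfmon parameters into EAX - bits 7:0
         * version, 15:8 number of generic counters, 23:16 counter bit
         * width, 31:24 length of the EBX availability mask - which is
         * what the eax.split fields above and below decode.)
         */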

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
        printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
        nr_hw_counters = eax.split.num_counters;
        if (nr_hw_counters > MAX_HW_COUNTERS) {
                nr_hw_counters = MAX_HW_COUNTERS;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     nr_hw_counters, MAX_HW_COUNTERS);
        }
        perf_counter_mask = (1 << nr_hw_counters) - 1;
        perf_max_counters = nr_hw_counters;

        printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
        printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);

        perf_counters_initialized = true;
}