/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>

struct nmi_watchdog_ctlblk {
	unsigned int cccr_msr;
	unsigned int perfctr_msr;	/* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;	/* the MSR to select the events to handle */
};

/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
	int (*reserve)(void);
	void (*unreserve)(void);
	int (*setup)(unsigned nmi_hz);
	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
	void (*stop)(void);
	unsigned perfctr;
	unsigned evntsel;
	u64 checkbit;
};
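
/*
 * One wd_ops instance exists per supported CPU family (see the k7, p6,
 * p4 and intel_arch variants below). probe_nmi_watchdog() picks one,
 * reserve()/unreserve() coordinate counter ownership, setup() programs
 * the counter on the current CPU, rearm() restarts the count from the
 * NMI path and stop() disables it again.
 */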

static const struct wd_ops *wd_ops;

/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

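/*
 * Sanity check of that figure, assuming the MSR layout from asm/msr.h
 * at the time of writing (MSR_P4_BSU_ESCR0 == 0x3a0 and
 * MSR_P4_CRU_ESCR5 == 0x3e1): 0x3e1 - 0x3a0 == 65, so bits 0..65 of the
 * ownership bitmaps below can be in use, hence 66 bit positions.
 */
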
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters and event selectors may be reserved by
 * different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
	}
	return 0;
}

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
	}
	return 0;
}

/* checks for bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);

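/*
 * Usage sketch (not taken from this file): an external user such as
 * oprofile is expected to pair these calls around its own programming
 * of a counter, e.g.:
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program and use the counter ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 *
 * Reservation only coordinates ownership; it does not touch the MSRs.
 */
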
void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);

	if (wd_ops)
		wd_ops->unreserve();

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (!wd_ops)
		return;
	if (!wd_ops->reserve()) {
		printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
		return;
	}

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
	touch_nmi_watchdog();
}

/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	u64 counter_val;
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values; the
	 * 32nd bit must be 1 so that bits 33 and up sign-extend to 1.
	 * Find the appropriate nmi_hz.
	 */
	counter_val = (u64)cpu_khz * 1000;
	do_div(counter_val, retval);
	if (counter_val > 0x7fffffffULL) {
		u64 count = (u64)cpu_khz * 1000;
		do_div(count, 0x7fffffffUL);
		retval = count + 1;
	}
	return retval;
}
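
/*
 * Worked example (assuming a hypothetical 3 GHz part, cpu_khz == 3000000):
 * at nmi_hz == 1 the per-tick count would be 3,000,000,000, which exceeds
 * 0x7fffffff (2,147,483,647) and so cannot be programmed as a 31 bit
 * value. adjust_for_32bit_ctr() then returns
 * 3000000000 / 0x7fffffff + 1 == 2, and the watchdog simply ticks twice
 * per second instead.
 */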
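/*
 * The counters count up and raise their PMI/NMI on overflow, so the
 * watchdog programs them with the negated tick period: starting at
 * -(cpu_khz * 1000 / nmi_hz), the counter overflows after exactly that
 * many event counts, i.e. nmi_hz times per second at full load.
 */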
static void write_watchdog_counter(unsigned int perfctr_msr,
				   const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
				     const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsr(perfctr_msr, (u32)(-count), 0);
}

/*
 * AMD K7/K8/Family10h/Family11h support.
 * AMD keeps this interface nicely stable so there is not much variety.
 */
#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	return 1;
}

static void single_msr_stop_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);
}

static int single_msr_reserve(void)
{
	if (!reserve_perfctr_nmi(wd_ops->perfctr))
		return 0;

	if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
		release_perfctr_nmi(wd_ops->perfctr);
		return 0;
	}
	return 1;
}

static void single_msr_unreserve(void)
{
	release_evntsel_nmi(wd_ops->evntsel);
	release_perfctr_nmi(wd_ops->perfctr);
}

static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops k7_wd_ops = {
	.reserve	= single_msr_reserve,
	.unreserve	= single_msr_unreserve,
	.setup		= setup_k7_watchdog,
	.rearm		= single_msr_rearm,
	.stop		= single_msr_stop_watchdog,
	.perfctr	= MSR_K7_PERFCTR0,
	.evntsel	= MSR_K7_EVNTSEL0,
	.checkbit	= 1ULL << 47,
};

/*
 * Intel Model 6 (PPro+,P2,P3,P-M,Core1)
 */
#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED

static int setup_p6_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	/* KVM doesn't implement this MSR */
	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
		return 0;

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	return 1;
}

static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/*
	 * P6 based Pentium M needs to re-unmask
	 * the apic vector but it doesn't hurt
	 * other P6 variants.
	 * ArchPerfmon/Core Duo also needs this.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	/* P6/ARCH_PERFMON has 32 bit counter writes */
	write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops p6_wd_ops = {
	.reserve	= single_msr_reserve,
	.unreserve	= single_msr_unreserve,
	.setup		= setup_p6_watchdog,
	.rearm		= p6_rearm,
	.stop		= single_msr_stop_watchdog,
	.perfctr	= MSR_P6_PERFCTR0,
	.evntsel	= MSR_P6_EVNTSEL0,
	.checkbit	= 1ULL << 39,
};

/*
 * Intel P4 performance counters.
 * By far the most complicated of all.
 */
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1 << 7)
#define P4_ESCR_EVENT_SELECT(N)	((N) << 25)
#define P4_ESCR_OS		(1 << 3)
#define P4_ESCR_USR		(1 << 2)
#define P4_CCCR_OVF_PMI0	(1 << 26)
#define P4_CCCR_OVF_PMI1	(1 << 27)
#define P4_CCCR_THRESHOLD(N)	((N) << 20)
#define P4_CCCR_COMPLEMENT	(1 << 19)
#define P4_CCCR_COMPARE		(1 << 18)
#define P4_CCCR_REQUIRED	(3 << 16)
#define P4_CCCR_ESCR_SELECT(N)	((N) << 13)
#define P4_CCCR_ENABLE		(1 << 12)
#define P4_CCCR_OVF		(1 << 31)

/*
 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
 * CRU_ESCR0 (with any non-null event selector) through a complemented
 * max threshold. [IA32-Vol3, Section 14.9.9]
 */
static int setup_p4_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/*
	 * performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		| P4_CCCR_COMPLEMENT
		| P4_CCCR_COMPARE
		| P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	return 1;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);
}

static int p4_reserve(void)
{
	if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
		return 0;
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
		goto fail1;
#endif
	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
		goto fail2;
	/* RED-PEN why is ESCR1 not reserved here? */
	return 1;
fail2:
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
fail1:
#endif
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
	return 0;
}

static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
	release_evntsel_nmi(MSR_P4_CRU_ESCR0);
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}

static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	unsigned dummy;
	/*
	 * P4 quirks:
	 * - An overflown perfctr will assert its interrupt
	 *   until the OVF flag in its CCCR is cleared.
	 * - LVTPC is masked on interrupt and must be
	 *   unmasked by the LVTPC handler.
	 */
	rdmsrl(wd->cccr_msr, dummy);
	dummy &= ~P4_CCCR_OVF;
	wrmsrl(wd->cccr_msr, dummy);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops p4_wd_ops = {
	.reserve	= p4_reserve,
	.unreserve	= p4_unreserve,
	.setup		= setup_p4_watchdog,
	.rearm		= p4_rearm,
	.stop		= stop_p4_watchdog,
	/* RED-PEN this is wrong for the other sibling */
	.perfctr	= MSR_P4_BPU_PERFCTR0,
	.evntsel	= MSR_P4_BSU_ESCR0,
	.checkbit	= 1ULL << 39,
};

/*
 * Watchdog using the Intel architected PerfMon.
 * Used for Core2 and hopefully all future Intel CPUs.
 */
#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static struct wd_ops intel_arch_wd_ops;

static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Unhalted Core Cycles event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
	return 1;
}

static struct wd_ops intel_arch_wd_ops __read_mostly = {
	.reserve	= single_msr_reserve,
	.unreserve	= single_msr_unreserve,
	.setup		= setup_intel_arch_watchdog,
	.rearm		= p6_rearm,
	.stop		= single_msr_stop_watchdog,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR1,
	.evntsel	= MSR_ARCH_PERFMON_EVENTSEL1,
};

static void probe_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
		    boot_cpu_data.x86 != 16)
			return;
		wd_ops = &k7_wd_ops;
		break;
	case X86_VENDOR_INTEL:
		/*
		 * Work around Core Duo (Yonah) errata AE49 where perfctr1
		 * doesn't have a working enable bit.
		 */
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
			intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
			intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
		}
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			wd_ops = &intel_arch_wd_ops;
			break;
		}
		switch (boot_cpu_data.x86) {
		case 6:
			if (boot_cpu_data.x86_model > 13)
				return;

			wd_ops = &p6_wd_ops;
			break;
		case 15:
			wd_ops = &p4_wd_ops;
			break;
		default:
			return;
		}
		break;
	}
}

/* Interface to nmi.c */

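/*
 * Rough sketch of how nmi.c is expected to drive this interface (the
 * actual call sites live in nmi.c, not in this file):
 *
 *	if (lapic_watchdog_init(nmi_hz) < 0)
 *		return;			(no usable perfctr watchdog)
 *	...from the NMI handler...
 *	if (lapic_wd_event(nmi_hz))
 *		...the NMI was ours and the counter was re-armed...
 *	...on shutdown...
 *	lapic_watchdog_stop();
 */
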
int lapic_watchdog_init(unsigned nmi_hz)
{
	if (!wd_ops) {
		probe_nmi_watchdog();
		if (!wd_ops) {
			printk(KERN_INFO "NMI watchdog: CPU not supported\n");
			return -1;
		}

		if (!wd_ops->reserve()) {
			printk(KERN_ERR
				"NMI watchdog: cannot reserve perfctrs\n");
			return -1;
		}
	}

	if (!(wd_ops->setup(nmi_hz))) {
		printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
			raw_smp_processor_id());
		return -1;
	}

	return 0;
}

void lapic_watchdog_stop(void)
{
	if (wd_ops)
		wd_ops->stop();
}

unsigned lapic_adjust_nmi_hz(unsigned hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
	    wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
		hz = adjust_for_32bit_ctr(hz);
	return hz;
}

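/*
 * checkbit is the counter's highest implemented bit. Because the counter
 * is programmed with a negative value, that bit stays set until the
 * counter wraps through zero; once it reads as clear, the programmed
 * period has expired and the NMI belongs to the watchdog.
 */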
int lapic_wd_event(unsigned nmi_hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 ctr;

	rdmsrl(wd->perfctr_msr, ctr);
	if (ctr & wd_ops->checkbit)	/* perfctr still running? */
		return 0;

	wd_ops->rearm(wd, nmi_hz);
	return 1;
}

int lapic_watchdog_ok(void)
{
	return wd_ops != NULL;
}