// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>
#include <linux/static_call.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

#define KHZ	1000

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;

static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);

int tsc_clocksource_reliable;

static int __read_mostly tsc_force_recalibrate;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
static bool have_art;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_latch_t   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

static int __init tsc_early_khz_setup(char *buf)
{
	return kstrtouint(buf, 0, &tsc_early_khz);
}
early_param("tsc_early_khz", tsc_early_khz_setup);

__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
{
	int seq, idx;

	do {
		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
}

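/*
 * Note on the latch protocol used by __cyc2ns_read() above, which pairs
 * with the write side in __set_cyc2ns_scale(): the writer increments the
 * sequence count before rewriting each copy, so "idx = seq & 1" always
 * selects the copy that is not currently being updated, and the re-read
 * of the sequence count at the end restarts the loop whenever a writer
 * came and went in between.
 */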
__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	preempt_disable_notrace();
	__cyc2ns_read(data);
}

__always_inline void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

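/*
 * Worked example of the above, with illustrative numbers rather than
 * values from any particular CPU: for a 2 GHz TSC, cpu_khz = 2,000,000
 * and the exact conversion is ns = cycles * 10^6 / (2 * 10^6), i.e.
 * cycles / 2. With SC = 2^10, the precomputed multiplier becomes
 * cyc2ns_scale = 10^6 * 1024 / 2,000,000 = 512, so at run time
 * ns = (cycles * 512) >> 10 - again cycles / 2, computed with just a
 * multiply and a shift.
 */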
static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	__cyc2ns_read(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	return ns;
}

static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	preempt_disable_notrace();
	ns = __cycles_2_ns(cyc);
	preempt_enable_notrace();
	return ns;
}

static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
}
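
/*
 * The two raw_write_seqcount_latch() calls above form the write side of
 * the latch read in __cyc2ns_read(): each call bumps the sequence count,
 * steering concurrent readers to the copy that is not being rewritten.
 * The offset term makes sched_clock() continuous across the rescale:
 * offset = ns_now - mul_u64_u32_shr(tsc_now, mul, shift) guarantees that
 * converting tsc_now with the new (mul, shift) pair still yields exactly
 * ns_now.
 */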

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_latch_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}

/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_latch_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
noinstr u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return __cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock() to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
noinstr u64 sched_clock_noinstr(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

notrace u64 sched_clock(void)
{
	u64 now;
	preempt_disable_notrace();
	now = sched_clock_noinstr();
	preempt_enable_notrace();
	return now;
}

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;
static int no_tsc_watchdog;
static int tsc_as_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog")) {
		no_tsc_watchdog = 1;
		if (tsc_as_watchdog)
			pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
				 __func__);
		tsc_as_watchdog = 0;
	}
	if (!strcmp(str, "recalibrate"))
		tsc_force_recalibrate = 1;
	if (!strcmp(str, "watchdog")) {
		if (no_tsc_watchdog)
			pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
				 __func__);
		else
			tsc_as_watchdog = 1;
	}
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000

/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
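
/*
 * Unit check for calc_hpet_ref() above: the caller passes deltatsc as
 * (tsc2 - tsc1) * 10^6, and HPET_PERIOD is the tick period in
 * femtoseconds, so hpet2 * HPET_PERIOD / 10^6 is the elapsed time in
 * nanoseconds. The final division thus yields
 * tsc_ticks * 10^6 / elapsed_ns = ticks-per-second / 1000, i.e. the
 * TSC frequency in kHz.
 */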

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
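
/*
 * For reference: with PIT_TICK_RATE = 1193182 Hz these latches work out
 * to CAL_LATCH = 1193182 / 100 = 11931 PIT ticks for the normal 10ms
 * run, and CAL2_LATCH = 1193182 / 20 = 59659 ticks for the slower 50ms
 * retry used when the first attempts were disturbed.
 */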


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us semi
		 * usable udelay(), wait for the same 50ms we would have with
		 * the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Set the latch register
	 * (LSB then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then considers it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
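
/*
 * With PIT_TICK_RATE = 1193182 Hz this works out to
 * 50 * 1193182 / 1000 / 256 = 233 iterations, each covering one MSB
 * step of 256 PIT ticks (~214.6us). The 500ppm acceptance test below
 * compares the read jitter (d1 + d2) against delta >> 11, i.e. against
 * 1/2048 = ~488ppm of the elapsed TSC interval.
 */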

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - determine TSC frequency
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	/*
	 * Denverton SoCs don't report crystal clock, and also don't support
	 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
	 * clock.
	 */
	if (crystal_khz == 0 &&
	    boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
		crystal_khz = 25000;

	/*
	 * The TSC frequency reported directly by CPUID is a "hardware
	 * reported" frequency and is the most accurate one we have so far.
	 * This is considered a known frequency.
	 */
	if (crystal_khz != 0)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
	 * clock, but we can easily calculate it to a high degree of accuracy
	 * by considering the crystal ratio and the CPU speed.
	 */
	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
		unsigned int eax_base_mhz, ebx, ecx, edx;

		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
		crystal_khz = eax_base_mhz * 1000 *
			eax_denominator / ebx_numerator;
	}

	if (crystal_khz == 0)
		return 0;

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * The local APIC appears to be fed by the core crystal clock
	 * (which sounds entirely sensible). We can set the global
	 * lapic_timer_period here to avoid having to calibrate the APIC
	 * timer later.
	 */
	lapic_timer_period = crystal_khz * 1000 / HZ;
#endif

	return crystal_khz * ebx_numerator / eax_denominator;
}
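
/*
 * Worked example for the CPUID.15H path above, with illustrative values
 * rather than any specific part: a CPU reporting a 38.4 MHz crystal
 * (ecx_hz = 38400000) and a TSC/crystal ratio of ebx:eax = 143:2 gives
 * crystal_khz * ebx / eax = 38400 * 143 / 2 = 2745600 kHz, i.e. a
 * ~2.75 GHz TSC, and X86_FEATURE_TSC_KNOWN_FREQ is set so the refined
 * calibration is skipped.
 */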

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/*
 * Calibrate the CPU using the PIT, HPET and PM timer methods, which only
 * become available later in boot, after ACPI has been initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;

	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when
	 * any disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

/**
 * native_calibrate_cpu_early - can calibrate the cpu early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}


/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}

void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}
EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of
 * the ACPI system sleep states. And in some systems the BIOS seems to
 * reinit the TSC to an arbitrary value (still sync'd across CPUs) during
 * resume from such sleep states. To cope with this, recompute the
 * cyc2ns_offset for each cpu so that sched_clock() continues from the
 * point where it was left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
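
/*
 * Offset algebra for the restore path above: once both per-cpu offsets
 * are zeroed, sched_clock() returns just the multiplier term for the
 * current (post-resume) TSC value. Setting
 * offset = cyc2ns_suspend - sched_clock() therefore makes the new
 * reading, raw_ns + offset, equal cyc2ns_suspend at this instant, so
 * sched_clock() resumes exactly where it left off at suspend.
 */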

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present, detect the numerator:denominator to convert to TSC
 */
static void __init detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/*
	 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
	 * and the TSC counter resets must not occur asynchronously.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
	    tsc_async_resets)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}

static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}

static int tsc_cs_enable(struct clocksource *cs)
{
	vclocks_set_used(VDSO_CLOCKMODE_TSC);
	return 0;
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc_early = {
	.name			= "tsc-early",
	.rating			= 299,
	.uncertainty_margin	= 32 * NSEC_PER_MSEC,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.id			= CSID_X86_TSC_EARLY,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
};

/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We will only register if TSC has
 * been found good.
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_VALID_FOR_HRES |
				  CLOCK_SOURCE_MUST_VERIFY |
				  CLOCK_SOURCE_VERIFY_PERCPU,
	.id			= CSID_X86_TSC,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
};

void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init tsc_disable_clocksource_watchdog(void)
{
	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}

bool tsc_clocksource_watchdog_disabled(void)
{
	return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
	       tsc_as_watchdog && !no_tsc_watchdog;
}

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;

	/*
	 * Disable the clocksource watchdog when the system has:
	 *  - TSC running at constant frequency
	 *  - TSC which does not stop in C-States
	 *  - the TSC_ADJUST register which allows to detect even minimal
	 *    modifications
	 *  - not more than four sockets (matching the check below). As the
	 *    number of sockets cannot be evaluated at the early boot stage
	 *    where this has to be invoked, check the number of online memory
	 *    nodes as a fallback solution, which is a reasonable estimate.
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
	    nr_online_nodes <= 4)
		tsc_disable_clocksource_watchdog();
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {
		.cs_id	= have_art ? CSID_X86_TSC : CSID_GENERIC,
		.cycles	= res,
	};
}
EXPORT_SYMBOL(convert_art_to_tsc);
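
/*
 * The quotient/remainder split in convert_art_to_tsc() avoids
 * overflowing the 64-bit intermediates: with art = q * den + rem,
 * q * num + (rem * num) / den + offset equals art * num / den + offset
 * exactly, but neither partial product can grow nearly as large as the
 * naive art * num.
 */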

/**
 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
 *
 * PTM requires all timestamps to be in units of nanoseconds. When user
 * software requests a cross-timestamp, this function converts system timestamp
 * to TSC.
 *
 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
 * that this flag is set before conversion to TSC is attempted.
 *
 * Return:
 * struct system_counterval_t - system counter value with the ID of the
 *	corresponding clocksource:
 *	cycles:		System counter value
 *	cs_id:		The clocksource ID for validating comparability
 */
struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
{
	u64 tmp, res, rem;

	rem = do_div(art_ns, USEC_PER_SEC);

	res = art_ns * tsc_khz;
	tmp = rem * tsc_khz;

	do_div(tmp, USEC_PER_SEC);
	res += tmp;

	return (struct system_counterval_t) {
		.cs_id	= have_art ? CSID_X86_TSC : CSID_GENERIC,
		.cycles	= res,
	};
}
EXPORT_SYMBOL(convert_art_ns_to_tsc);
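
/*
 * convert_art_ns_to_tsc() uses the same quotient/remainder split as
 * convert_art_to_tsc() above, with tsc_khz : USEC_PER_SEC as the ratio:
 * tsc_khz is ticks per millisecond and USEC_PER_SEC (10^6) is
 * nanoseconds per millisecond, so ticks = ns * tsc_khz / 10^6.
 */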
1358 | ||
1359 | ||
08ec0c58 JS |
1360 | static void tsc_refine_calibration_work(struct work_struct *work); |
1361 | static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); | |
1362 | /** | |
1363 | * tsc_refine_calibration_work - Further refine tsc freq calibration | |
c55cbfce | 1364 | * @work: ignored. |
08ec0c58 JS |
1365 | * |
1366 | * This functions uses delayed work over a period of a | |
1367 | * second to further refine the TSC freq value. Since this is | |
1368 | * timer based, instead of loop based, we don't block the boot | |
1369 | * process while this longer calibration is done. | |
1370 | * | |
0d2eb44f | 1371 | * If there are any calibration anomalies (too many SMIs, etc), |
08ec0c58 JS |
1372 | * or the refined calibration is off by 1% of the fast early |
1373 | * calibration, we throw out the new calibration and use the | |
1374 | * early calibration. | |
1375 | */ | |
1376 | static void tsc_refine_calibration_work(struct work_struct *work) | |
1377 | { | |
a786ef15 | 1378 | static u64 tsc_start = ULLONG_MAX, ref_start; |
08ec0c58 JS |
1379 | static int hpet; |
1380 | u64 tsc_stop, ref_stop, delta; | |
1381 | unsigned long freq; | |
aa7b630e | 1382 | int cpu; |
08ec0c58 JS |
1383 | |
1384 | /* Don't bother refining TSC on unstable systems */ | |
aa83c457 | 1385 | if (tsc_unstable) |
e9088add | 1386 | goto unreg; |
08ec0c58 JS |
1387 | |
1388 | /* | |
1389 | * Since the work is started early in boot, we may be | |
1390 | * delayed the first time we expire. So set the workqueue | |
1391 | * again once we know timers are working. | |
1392 | */ | |
a786ef15 DV |
1393 | if (tsc_start == ULLONG_MAX) { |
1394 | restart: | |
08ec0c58 JS |
1395 | /* |
1396 | * Only set hpet once, to avoid mixing hardware | |
1397 | * if the hpet becomes enabled later. | |
1398 | */ | |
1399 | hpet = is_hpet_enabled(); | |
08ec0c58 | 1400 | tsc_start = tsc_read_refs(&ref_start, hpet); |
a786ef15 | 1401 | schedule_delayed_work(&tsc_irqwork, HZ); |
08ec0c58 JS |
1402 | return; |
1403 | } | |
1404 | ||
1405 | tsc_stop = tsc_read_refs(&ref_stop, hpet); | |
1406 | ||
1407 | /* hpet or pmtimer available ? */ | |
62627bec | 1408 | if (ref_start == ref_stop) |
08ec0c58 JS |
1409 | goto out; |
1410 | ||
a786ef15 DV |
1411 | /* Check, whether the sampling was disturbed */ |
1412 | if (tsc_stop == ULLONG_MAX) | |
1413 | goto restart; | |
08ec0c58 JS |
1414 | |
1415 | delta = tsc_stop - tsc_start; | |
1416 | delta *= 1000000LL; | |
1417 | if (hpet) | |
1418 | freq = calc_hpet_ref(delta, ref_start, ref_stop); | |
1419 | else | |
1420 | freq = calc_pmtimer_ref(delta, ref_start, ref_stop); | |
1421 | ||
a7ec817d FT |
1422 | /* Will hit this only if tsc_force_recalibrate has been set */ |
1423 | if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { | |
1424 | ||
1425 | /* Warn if the deviation exceeds 500 ppm */ | |
1426 | if (abs(tsc_khz - freq) > (tsc_khz >> 11)) { | |
1427 | pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n"); | |
1428 | pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n", | |
1429 | (unsigned long)tsc_khz / 1000, | |
1430 | (unsigned long)tsc_khz % 1000); | |
1431 | } | |
1432 | ||
1433 | pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n", | |
1434 | hpet ? "HPET" : "PM_TIMER", | |
1435 | (unsigned long)freq / 1000, | |
1436 | (unsigned long)freq % 1000); | |
1437 | ||
1438 | return; | |
1439 | } | |
1440 | ||
08ec0c58 JS |
1441 | /* Make sure we're within 1% */ |
1442 | if (abs(tsc_khz - freq) > tsc_khz/100) | |
1443 | goto out; | |
1444 | ||
1445 | tsc_khz = freq; | |
c767a54b JP |
1446 | pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n", |
1447 | (unsigned long)tsc_khz / 1000, | |
1448 | (unsigned long)tsc_khz % 1000); | |
08ec0c58 | 1449 | |
6731b0d6 NS |
1450 | /* Inform the TSC deadline clockevent devices about the recalibration */ |
1451 | lapic_update_tsc_freq(); | |
1452 | ||
aa7b630e PZ |
1453 | /* Update the sched_clock() rate to match the clocksource one */ |
1454 | for_each_possible_cpu(cpu) | |
5c3c2ea6 | 1455 | set_cyc2ns_scale(tsc_khz, cpu, tsc_stop); |
aa7b630e | 1456 | |
08ec0c58 | 1457 | out: |
aa83c457 | 1458 | if (tsc_unstable) |
e9088add | 1459 | goto unreg; |
aa83c457 | 1460 | |
b152688c | 1461 | if (boot_cpu_has(X86_FEATURE_ART)) |
a2c1fe72 | 1462 | have_art = true; |
08ec0c58 | 1463 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
e9088add | 1464 | unreg: |
aa83c457 | 1465 | clocksource_unregister(&clocksource_tsc_early); |
08ec0c58 JS |
1466 | } |
1467 | ||
1468 | ||
1469 | static int __init init_tsc_clocksource(void) | |
8fbbc4b4 | 1470 | { |
fe9af81e | 1471 | if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz) |
a8760eca TG |
1472 | return 0; |
1473 | ||
a7ec817d FT |
1474 | if (tsc_unstable) { |
1475 | clocksource_unregister(&clocksource_tsc_early); | |
1476 | return 0; | |
1477 | } | |
aa83c457 | 1478 | |
82f9c080 FT |
1479 | if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) |
1480 | clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; | |
1481 | ||
57779dc2 | 1482 | /* |
47c95a46 BG |
1483 | * When TSC frequency is known (retrieved via MSR or CPUID), we skip |
1484 | * the refined calibration and directly register it as a clocksource. | |
57779dc2 | 1485 | */ |
984feceb | 1486 | if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { |
b152688c | 1487 | if (boot_cpu_has(X86_FEATURE_ART)) |
a2c1fe72 | 1488 | have_art = true; |
57779dc2 | 1489 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
aa83c457 | 1490 | clocksource_unregister(&clocksource_tsc_early); |
a7ec817d FT |
1491 | |
1492 | if (!tsc_force_recalibrate) | |
1493 | return 0; | |
57779dc2 AK |
1494 | } |
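	/*
	 * Falling through here means tsc_force_recalibrate was set: the
	 * delayed work below redoes the HW timer calibration purely as
	 * a cross-check against the CPUID/MSR-provided frequency (see
	 * the X86_FEATURE_TSC_KNOWN_FREQ branch in
	 * tsc_refine_calibration_work() above).
	 */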
1495 | ||
08ec0c58 JS |
1496 | schedule_delayed_work(&tsc_irqwork, 0); |
1497 | return 0; | |
8fbbc4b4 | 1498 | } |
08ec0c58 JS |
1499 | /* |
1500 | * We use device_initcall here to ensure we run after the hpet | |
1501 | * is fully initialized, which may occur at fs_initcall time. | |
1502 | */ | |
1503 | device_initcall(init_tsc_clocksource); | |
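/*
 * (Initcall ordering: fs_initcall is level 5 and device_initcall is
 * level 6, so this is guaranteed to run after any fs_initcall-time
 * HPET initialization has completed.)
 */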
8fbbc4b4 | 1504 | |
8dbe4385 | 1505 | static bool __init determine_cpu_tsc_frequencies(bool early) |
8fbbc4b4 | 1506 | { |
cf7a63ef PT |
1507 | /* Make sure that cpu and tsc are not already calibrated */ |
1508 | WARN_ON(cpu_khz || tsc_khz); | |
8fbbc4b4 | 1509 | |
8dbe4385 PT |
1510 | if (early) { |
1511 | cpu_khz = x86_platform.calibrate_cpu(); | |
bd35c77e KP |
1512 | if (tsc_early_khz) |
1513 | tsc_khz = tsc_early_khz; | |
1514 | else | |
1515 | tsc_khz = x86_platform.calibrate_tsc(); | |
8dbe4385 PT |
1516 | } else { |
1517 | /* We should not be here with non-native cpu calibration */ | |
1518 | WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu); | |
1519 | cpu_khz = pit_hpet_ptimer_calibrate_cpu(); | |
1520 | } | |
ff4c8663 LB |
1521 | |
1522 | /* | |
608008a4 | 1523 | * Trust non-zero tsc_khz as authoritative, |
ff4c8663 LB |
1524 | * and use it to sanity check cpu_khz, |
1525 | * which will be wrong if the reference timer itself is inaccurate. | |
1526 | */ | |
aa297292 LB |
1527 | if (tsc_khz == 0) |
1528 | tsc_khz = cpu_khz; | |
ff4c8663 LB |
1529 | else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) |
1530 | cpu_khz = tsc_khz; | |
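	/*
	 * The condition above is |cpu_khz - tsc_khz| > tsc_khz / 10
	 * rearranged to avoid a division: a PIT/HPET/PM-timer derived
	 * cpu_khz more than 10% away from an authoritative tsc_khz
	 * indicates a broken system timer.
	 */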
8fbbc4b4 | 1531 | |
cf7a63ef PT |
1532 | if (tsc_khz == 0) |
1533 | return false; | |
8fbbc4b4 | 1534 | |
c767a54b | 1535 | pr_info("Detected %lu.%03lu MHz processor\n", |
cf7a63ef PT |
1536 | (unsigned long)cpu_khz / KHZ, |
1537 | (unsigned long)cpu_khz % KHZ); | |
8fbbc4b4 | 1538 | |
4b5b2127 LB |
1539 | if (cpu_khz != tsc_khz) { |
1540 | pr_info("Detected %lu.%03lu MHz TSC\n", | |
cf7a63ef PT |
1541 | (unsigned long)tsc_khz / KHZ, |
1542 | (unsigned long)tsc_khz % KHZ); | |
1543 | } | |
1544 | return true; | |
1545 | } | |
1546 | ||
1547 | static unsigned long __init get_loops_per_jiffy(void) | |
1548 | { | |
17f6bac2 | 1549 | u64 lpj = (u64)tsc_khz * KHZ; |
cf7a63ef PT |
1550 | |
1551 | do_div(lpj, HZ); | |
1552 | return lpj; | |
1553 | } | |
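/*
 * Worked example with made-up numbers: tsc_khz = 2400000 (2.4 GHz)
 * and HZ = 250 give lpj = 2400000 * 1000 / 250 = 9600000, i.e. the
 * number of TSC cycles per jiffy.
 */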
1554 | ||
608008a4 DL |
1555 | static void __init tsc_enable_sched_clock(void) |
1556 | { | |
69f8aeab PZ |
1557 | loops_per_jiffy = get_loops_per_jiffy(); |
1558 | use_tsc_delay(); | |
1559 | ||
608008a4 DL |
1560 | /* Sanitize TSC ADJUST before cyc2ns gets initialized */ |
1561 | tsc_store_and_check_tsc_adjust(true); | |
1562 | cyc2ns_init_boot_cpu(); | |
1563 | static_branch_enable(&__use_tsc); | |
1564 | } | |
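/*
 * Once __use_tsc is enabled above, sched_clock() can take the TSC
 * fast path. A simplified sketch of the conversion (the real kernel
 * goes through static_call/paravirt plumbing and mul_u64_u32_shr()):
 *
 *	u64 sched_clock_sketch(void)
 *	{
 *		struct cyc2ns_data data;
 *
 *		__cyc2ns_read(&data);
 *		return data.cyc2ns_offset +
 *		       ((rdtsc() * data.cyc2ns_mul) >> data.cyc2ns_shift);
 *	}
 */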
1565 | ||
cf7a63ef PT |
1566 | void __init tsc_early_init(void) |
1567 | { | |
1568 | if (!boot_cpu_has(X86_FEATURE_TSC)) | |
1569 | return; | |
2647c43c MT |
1570 | /* Don't change UV TSC multi-chassis synchronization */ |
1571 | if (is_early_uv_system()) | |
1572 | return; | |
8dbe4385 | 1573 | if (!determine_cpu_tsc_frequencies(true)) |
cf7a63ef | 1574 | return; |
608008a4 | 1575 | tsc_enable_sched_clock(); |
cf7a63ef PT |
1576 | } |
1577 | ||
1578 | void __init tsc_init(void) | |
1579 | { | |
6b8d5dde BPA |
1580 | if (!cpu_feature_enabled(X86_FEATURE_TSC)) { |
1581 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); | |
1582 | return; | |
1583 | } | |
1584 | ||
8dbe4385 PT |
1585 | /* |
1586 | * native_calibrate_cpu_early can only calibrate using methods that are | |
1587 | * available early in boot. | |
1588 | */ | |
1589 | if (x86_platform.calibrate_cpu == native_calibrate_cpu_early) | |
1590 | x86_platform.calibrate_cpu = native_calibrate_cpu; | |
1591 | ||
cf7a63ef PT |
1592 | if (!tsc_khz) { |
1593 | /* We failed to determine frequencies earlier; try again */ | |
8dbe4385 | 1594 | if (!determine_cpu_tsc_frequencies(false)) { |
cf7a63ef PT |
1595 | mark_tsc_unstable("could not calculate TSC khz"); |
1596 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); | |
1597 | return; | |
1598 | } | |
608008a4 | 1599 | tsc_enable_sched_clock(); |
4b5b2127 LB |
1600 | } |
1601 | ||
e2a9ca29 | 1602 | cyc2ns_init_secondary_cpus(); |
8fbbc4b4 | 1603 | |
e82b8e4e VP |
1604 | if (!no_sched_irq_time) |
1605 | enable_sched_clock_irqtime(); | |
1606 | ||
cf7a63ef | 1607 | lpj_fine = get_loops_per_jiffy(); |
8fbbc4b4 | 1608 | |
a1272dd5 ZD |
1609 | check_system_tsc_reliable(); |
1610 | ||
aa83c457 | 1611 | if (unsynchronized_tsc()) { |
8fbbc4b4 | 1612 | mark_tsc_unstable("TSCs unsynchronized"); |
aa83c457 PZ |
1613 | return; |
1614 | } | |
8fbbc4b4 | 1615 | |
63ec58b4 | 1616 | if (tsc_clocksource_reliable || no_tsc_watchdog) |
b50db709 | 1617 | tsc_disable_clocksource_watchdog(); |
63ec58b4 | 1618 | |
aa83c457 | 1619 | clocksource_register_khz(&clocksource_tsc_early, tsc_khz); |
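	/*
	 * ART note (sketch based on convert_art_to_tsc() elsewhere in
	 * this file): a successful detect_art() below allows deriving
	 * TSC values from ART timestamps as roughly
	 *
	 *	tsc = art * art_to_tsc_numerator / art_to_tsc_denominator
	 *	      + art_to_tsc_offset;
	 *
	 * which is what enables cross-device timestamp correlation.
	 */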
f9677e0f | 1620 | detect_art(); |
8fbbc4b4 AK |
1621 | } |
1622 | ||
b565201c JS |
1623 | #ifdef CONFIG_SMP |
1624 | /* | |
134a1282 | 1625 | * Check whether existing calibration data can be reused. |
b565201c | 1626 | */ |
148f9bb8 | 1627 | unsigned long calibrate_delay_is_known(void) |
b565201c | 1628 | { |
c25323c0 | 1629 | int sibling, cpu = smp_processor_id(); |
76ce7cfe PT |
1630 | int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC); |
1631 | const struct cpumask *mask = topology_core_cpumask(cpu); | |
b565201c | 1632 | |
134a1282 TG |
1633 | /* |
1634 | * If TSC has constant frequency and TSC is synchronized across | |
1635 | * sockets then reuse CPU0 calibration. | |
1636 | */ | |
1637 | if (constant_tsc && !tsc_unstable) | |
1638 | return cpu_data(0).loops_per_jiffy; | |
1639 | ||
1640 | /* | |
1641 | * If TSC has constant frequency and TSC is not synchronized across | |
1642 | * sockets and this is not the first CPU in the socket, then reuse | |
1643 | * the calibration value of an already online CPU on that socket. | |
1644 | * | |
1645 | * This assumes that CONSTANT_TSC is consistent for all CPUs in a | |
1646 | * socket. | |
1647 | */ | |
fe9af81e | 1648 | if (!constant_tsc || !mask) |
f508a5ba TG |
1649 | return 0; |
1650 | ||
1651 | sibling = cpumask_any_but(mask, cpu); | |
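	/* cpumask_any_but() returns >= nr_cpu_ids when @cpu is the only
	 * CPU in @mask; in that case fall through to full calibration. */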
c25323c0 TG |
1652 | if (sibling < nr_cpu_ids) |
1653 | return cpu_data(sibling).loops_per_jiffy; | |
b565201c JS |
1654 | return 0; |
1655 | } | |
1656 | #endif |