/*
 * sched_clock() for unstable CPU clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i)
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi-stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
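/*
 * A minimal usage sketch of the guarantees above (illustrative only): a
 * caller may rely on per-CPU monotonicity but must never compare
 * timestamps taken on different CPUs:
 *
 *	u64 t0 = cpu_clock(cpu);
 *	...
 *	u64 t1 = cpu_clock(cpu);
 *
 * t1 - t0 is a valid nanosecond duration because both samples came from
 * the same @cpu; cpu_clock(j) - cpu_clock(i) for i != j may be negative,
 * per the BIG FAT WARNING above.
 */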
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>

/*
 * Scheduler clock - returns current time in nanosecond units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
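/*
 * Resolution of the fallback above, worked out (illustrative, assuming a
 * typical HZ=250 build): NSEC_PER_SEC / HZ = 1000000000 / 250 = 4000000,
 * so the default sched_clock() only advances in 4 ms steps, once per tick.
 * Architectures with a real high resolution counter are expected to
 * override it with their own sched_clock().
 */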

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;
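/*
 * Rearranging the invariant above (a sketch of the algebra, nothing new):
 * at the moment the clock is marked stable we hold a snapshot where
 * tick_gtod ~= ktime_get_ns() and tick_raw ~= sched_clock(), so solving
 * for the raw-clock offset gives
 *
 *	__sched_clock_offset = (tick_gtod + __gtod_offset) - tick_raw;
 *
 * which is exactly what __set_sched_clock_stable() below computes, making
 * the switch from the GTOD-based clock to raw sched_clock() continuous.
 */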

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __scd_stamp(struct sched_clock_data *scd)
{
	scd->tick_gtod = ktime_get_ns();
	scd->tick_raw = sched_clock();
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

/*
 * If we ever get here, we're screwed, because we found out -- typically after
 * the fact -- that TSC wasn't good. This means all our clocksources (including
 * ktime) could have reported wrong values.
 *
 * What we do here is an attempt to fix up and continue sort of where we left
 * off in a coherent manner.
 *
 * The only way to fully avoid random clock jumps is to boot with:
 * "tsc=unstable".
 */
static void __sched_clock_work(struct work_struct *work)
{
	struct sched_clock_data *scd;
	int cpu;

	/* take a current timestamp and set 'now' */
	preempt_disable();
	scd = this_scd();
	__scd_stamp(scd);
	scd->clock = scd->tick_gtod + __gtod_offset;
	preempt_enable();

	/* clone to all CPUs */
	for_each_possible_cpu(cpu)
		per_cpu(sched_clock_data, cpu) = *scd;

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		return;

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
	schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		__clear_sched_clock_stable();
}

void sched_clock_init_late(void)
{
	sched_clock_running = 2;
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
}
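/*
 * A sketch of the barrier pairing above (restating the existing comment,
 * nothing new): the smp_mb()s guarantee we can never have both
 * clear_sched_clock_stable() miss sched_clock_running == 2 *and*
 * sched_clock_init_late() miss __sched_clock_stable_early == 0. So either
 * the late-init path sees the early clear and never enables the static key,
 * or the clearing path sees that init already ran and disables the key
 * itself (possibly both), but never neither.
 */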

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
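/*
 * Worked example of the wrapping comparison (illustrative numbers): with
 * x = 5 and y = ULLONG_MAX - 2 (i.e. y is just about to wrap), x - y = 8,
 * which as an s64 is positive, so wrap_max() correctly treats x as the
 * later of the two even though x < y as plain u64 values. This only holds
 * while the two values are less than 2^63 ns (~292 years) apart.
 */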

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	gtod = scd->tick_gtod + __gtod_offset;
	clock = gtod + delta;
	min_clock = wrap_max(gtod, old_clock);
	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
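/*
 * A worked pass through the clamp above (illustrative numbers, assuming
 * TICK_NSEC = 1000000, i.e. HZ=1000): say the last tick left
 * gtod = 5000000 and old_clock = 5200000. If the raw delta since the tick
 * is 400000 ns, clock = 5400000 lies inside
 * [max(5000000, 5200000), 5000000 + 1000000] = [5200000, 6000000] and is
 * used as-is. If the TSC went crazy and delta were 90000000, clock would
 * be clamped down to 6000000; if delta were 0, clock would be lifted to
 * old_clock = 5200000, preserving per-CPU monotonicity.
 */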

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values in hand to
	 * couple the two clocks: we take the larger time as the latest
	 * time for both runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
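/*
 * Net effect of the coupling above (a restatement, not new behaviour): a
 * remote read never returns a value behind the reader's own CPU clock. If
 * the remote scd lags, it is pulled forward to this_clock; if it is ahead,
 * the local scd is pulled forward instead, and the cmpxchg64() retry loop
 * handles the case where another CPU updated either value in the meantime.
 */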

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + __sched_clock_offset;

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);
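/*
 * Fast path versus slow path, summarized (illustrative, nothing new): once
 * __sched_clock_stable is enabled the function above degenerates to a
 * single sched_clock() read plus a constant offset, touching no per-CPU
 * data at all; only on unstable-TSC systems does it fall back to the
 * filtered per-CPU sched_clock_data machinery above.
 */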

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	__scd_stamp(scd);

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}