/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)  -- can be used from any context, including NMI.
 * local_clock() -- is cpu_clock() on the current cpu.
 *
 * sched_clock_cpu(i)
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
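/*
 * Illustrative usage (a hedged sketch, not part of the original file; the
 * callee do_something() is a hypothetical name):
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_something();
 *	t1 = local_clock();
 *	pr_debug("took %llu ns\n", (unsigned long long)(t1 - t0));
 *
 * Both reads happen on whichever CPU the code runs on at the time; per the
 * warning above, t1 - t0 is only meaningful when both timestamps came from
 * the same CPU.
 */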
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

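/*
 * Illustrative arithmetic (not from the original source), assuming HZ=1000:
 * NSEC_PER_SEC / HZ = 1000000, so this fallback clock only advances in
 * whole-tick (1 ms) steps; subtracting INITIAL_JIFFIES makes the value
 * start near 0 at boot.
 */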
__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
static int __sched_clock_stable_early;

int sched_clock_stable(void)
{
	return static_key_false(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);
}

void set_sched_clock_stable(void)
{
	__sched_clock_stable_early = 1;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	__set_sched_clock_stable();
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	/* XXX worry about clock continuity */
	if (sched_clock_stable())
		static_key_slow_dec(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	schedule_work(&sched_clock_work);
}

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;

	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
	else
		__clear_sched_clock_stable(NULL);
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

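/*
 * A brief worked example (illustrative, not from the original source),
 * using 8-bit values for readability: if y = 250 and the counter has since
 * wrapped so that x = 2, then x - y = 8 when reinterpreted as signed, so
 * wrap_max() picks x as the later time even though x < y numerically; a
 * plain max() would get this wrong. The real helpers do the same with
 * u64/s64.
 */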
/*
 * update the percpu scd from the raw @now value
 *
 * - filter out backward motion
 * - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
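	/*
	 * Illustrative numbers (not from the original source): with HZ=1000,
	 * TICK_NSEC is about 1000000 ns. Suppose the last tick stored
	 * tick_gtod = 1000000 and old_clock <= 2000000; if sched_clock()
	 * then reports a wild delta of 2500000, the raw candidate 3500000
	 * gets clamped down to tick_gtod + TICK_NSEC = 2000000. Conversely,
	 * a candidate below max(tick_gtod, old_clock) is pulled up to it,
	 * so the clock never moves backwards.
	 */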

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable())
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

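/*
 * Illustrative call sequence (a hedged sketch, not taken from this file):
 * an idle driver whose deep sleep states stop the TSC would typically
 * bracket the idle period with these hooks, e.g.
 *
 *	sched_clock_idle_sleep_event();
 *	... enter deep idle, TSC stops ...
 *	sched_clock_idle_wakeup_event(idle_duration_ns);
 *
 * so the otherwise-invisible idle time is folded back into the per-cpu
 * clock via sched_clock_tick(). idle_duration_ns is a hypothetical
 * variable name.
 */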
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(cpu);

	return sched_clock();
}
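/*
 * Illustrative example of the warning above (hypothetical caller code):
 *
 *	u64 a = cpu_clock(0);
 *	u64 b = cpu_clock(1);
 *
 * (s64)(b - a) may legitimately come out negative: the two CPUs' clocks
 * only stay within a bounded drift of each other, they are not ordered.
 * Only compare timestamps obtained with the same cpu argument.
 */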

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(raw_smp_processor_id());

	return sched_clock();
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock();
}

u64 local_clock(void)
{
	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}