/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>


/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is adjusted after resume from suspend so that the
 * monotonic time does not jump. total_sleep_time must be added to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

EXPORT_SYMBOL(xtime);
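
/*
 * Illustrative example (editorial addition, numbers invented): how the
 * three variables above relate. Suppose the box booted at a wall time
 * of 1000s, has been up 60s, and spent 10s of that suspended. Then,
 * roughly:
 *
 *	xtime			~ 1060s  (current wall time)
 *	wall_to_monotonic	~ -1010s (so monotonic time is
 *				  xtime + wall_to_monotonic = 50s,
 *				  i.e. time actually spent running)
 *	total_sleep_time	= 10s
 *
 * The boot-based clock is monotonic + total_sleep_time = 60s, and
 * getboottime() yields -(wall_to_monotonic + total_sleep_time) = 1000s.
 */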


static struct clocksource *clock; /* pointer to current clocksource */


#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
        cycle_t cycle_now, cycle_delta;
        s64 ns_offset;

        /* read clocksource: */
        cycle_now = clocksource_read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert to nanoseconds: */
        ns_offset = cyc2ns(clock, cycle_delta);

        return ns_offset;
}
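
/*
 * Illustrative arithmetic (editorial sketch with invented numbers, not
 * original code): cyc2ns() is a fixed-point multiply,
 *
 *	ns = (cycle_delta * clock->mult) >> clock->shift;
 *
 * e.g. for a hypothetical 1 MHz clocksource with shift = 10, mult would
 * be chosen near 1000 << 10 = 1024000, so a delta of 5 cycles yields
 * (5 * 1024000) >> 10 = 5000 ns.
 */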

/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and getnstimeofday().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;
                nsecs = __get_nsec_offset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
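
/*
 * Example reader (hypothetical helper, shown only to illustrate the
 * seqlock retry pattern used above): sample xtime and wall_to_monotonic
 * consistently, then combine them into a monotonic timespec. This
 * mirrors what ktime_get_ts() does elsewhere in the kernel.
 */
static inline void example_get_monotonic_ts(struct timespec *ts)
{
        struct timespec wtm;
        unsigned long seq;
        s64 nsecs;

        do {
                /* retry if a writer updated xtime while we were reading */
                seq = read_seqbegin(&xtime_lock);
                *ts = xtime;
                wtm = wall_to_monotonic;
                nsecs = __get_nsec_offset();
        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + wtm.tv_sec,
                                ts->tv_nsec + wtm.tv_nsec);
        timespec_add_ns(ts, nsecs);
}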

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        __get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        __get_realtime_clock_ts(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        unsigned long flags;
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        nsec -= __get_nsec_offset();

        wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        clock->error = 0;
        ntp_clear();

        update_vsyscall(&xtime, clock);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}

EXPORT_SYMBOL(do_settimeofday);
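
/*
 * Usage sketch (illustrative only; the timespec value is made up): a
 * caller such as the settimeofday() syscall path validates the new time
 * and hands it to do_settimeofday() without holding any lock; the
 * function takes xtime_lock itself.
 *
 *	struct timespec new_time = { .tv_sec = 1000000000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday(&new_time))
 *		printk(KERN_WARNING "settimeofday rejected the value\n");
 */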

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
        struct clocksource *new;
        cycle_t now;
        u64 nsec;

        new = clocksource_get_next();

        if (clock == new)
                return;

        now = clocksource_read(new);
        nsec = __get_nsec_offset();
        timespec_add_ns(&xtime, nsec);

        clock = new;
        clock->cycle_last = now;

        clock->error = 0;
        clock->xtime_nsec = 0;
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

        tick_clock_notify();

        printk(KERN_INFO "Time: %s clocksource has been installed.\n",
               clock->name);
}
#else
static inline void change_clocksource(void) { }
#endif
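
/*
 * Note on the switch above (editorial comment, not original code): the
 * nanoseconds accumulated on the old clocksource since the last tick
 * are folded into xtime *before* the pointer is swapped, and cycle_last
 * is re-read from the new hardware, so no interval is counted twice and
 * none is lost across the handoff.
 */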

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - be sure to remove this once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
        return 0;
}
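
/*
 * Example override (hypothetical; the helper name is invented for
 * illustration): an architecture supplies a strong definition that
 * shadows the weak stub above, typically by reading a battery-backed
 * RTC.
 *
 *	unsigned long read_persistent_clock(void)
 *	{
 *		return example_rtc_read_seconds();	// arch RTC driver
 *	}
 */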

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        unsigned long flags;
        unsigned long sec = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_clear();

        clock = clocksource_get_next();
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
        clock->cycle_last = clocksource_read(clock);

        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        set_normalized_timespec(&wall_to_monotonic,
                -xtime.tv_sec, -xtime.tv_nsec);
        total_sleep_time = 0;

        write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long now = read_persistent_clock();

        clocksource_resume();

        write_seqlock_irqsave(&xtime_lock, flags);

        if (now && (now > timekeeping_suspend_time)) {
                unsigned long sleep_length = now - timekeeping_suspend_time;

                xtime.tv_sec += sleep_length;
                wall_to_monotonic.tv_sec -= sleep_length;
                total_sleep_time += sleep_length;
        }
        /* re-base the last cycle value */
        clock->cycle_last = clocksource_read(clock);
        clock->error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hres_timers_resume();

        return 0;
}
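
/*
 * Worked example for the resume path above (editorial addition with
 * invented numbers): suppose the persistent clock read 5000s at suspend
 * and reads 5030s on resume, so sleep_length = 30. Wall time catches up
 * (xtime.tv_sec += 30), monotonic time stands still across the sleep
 * (wall_to_monotonic.tv_sec -= 30 cancels the xtime advance), and the
 * boot-based clock remembers the nap (total_sleep_time += 30).
 */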

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_suspended = 1;
        timekeeping_suspend_time = read_persistent_clock();
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .resume		= timekeeping_resume,
        .suspend	= timekeeping_suspend,
        set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
        .id	= 0,
        .cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = current_tick_length() >>
                     (TICK_LENGTH_SHIFT - clock->shift + 1);
        tick_error -= clock->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}
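
/*
 * Illustrative walk-through of the final loop (editorial addition with
 * made-up magnitudes): if the looked-ahead error is ~8x the tick
 * interval, the loop halves it three times before error <= i, so
 * adj = 3 and the caller gets a multiplier correction of
 * mult << 3 = +/-8, with *interval and *offset scaled by the same 2^3
 * so the error accounting stays consistent.
 */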

/*
 * Adjust the multiplier to reduce the error value.
 * This is optimized for the most common adjustments of -1, 0 and 1;
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
        s64 error, interval = clock->cycle_interval;
        int adj;

        error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else
                return;

        clock->mult += adj;
        clock->xtime_interval += interval;
        clock->xtime_nsec -= offset;
        clock->error -= (interval - offset) <<
                        (TICK_LENGTH_SHIFT - clock->shift);
}
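
/*
 * Effect of the adjustment above (editorial sketch, not original code):
 * bumping clock->mult by +1 makes every future cycle-to-nanosecond
 * conversion report 2^-shift ns more per cycle. For a hypothetical
 * clocksource with shift = 22 running at 1 GHz that is roughly 238
 * parts per billion, which is why small mult steps are enough to steer
 * out typical NTP error.
 */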

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold xtime_lock for writing.
 */
void update_wall_time(void)
{
        cycle_t offset;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

#ifdef CONFIG_GENERIC_TIME
        offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = clock->cycle_interval;
#endif
        clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= clock->cycle_interval) {
                /* accumulate one interval */
                clock->xtime_nsec += clock->xtime_interval;
                clock->cycle_last += clock->cycle_interval;
                offset -= clock->cycle_interval;

                if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
                        clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
                        xtime.tv_sec++;
                        second_overflow();
                }

                /* accumulate error between NTP and clock interval */
                clock->error += current_tick_length();
                clock->error -= clock->xtime_interval <<
                                (TICK_LENGTH_SHIFT - clock->shift);
        }

        /* correct the clock when NTP error is too big */
        clocksource_adjust(offset);

        /* store full nanoseconds into xtime */
        xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

        /* check to see if there is a new clocksource to use */
        change_clocksource();
        update_vsyscall(&xtime, clock);
}
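
/*
 * Fixed-point bookkeeping example for the function above (editorial
 * addition, numbers invented): xtime_nsec holds nanoseconds shifted
 * left by clock->shift, so with shift = 10 a stored value of 1536000
 * means 1500 ns. Each loop pass adds xtime_interval in the same scaled
 * units, and only the final "store full nanoseconds" step shifts back
 * down, keeping the sub-nanosecond remainder in clock->xtime_nsec for
 * the next tick.
 */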

/**
 * getboottime - Return the real time of system boot.
 * @ts: pointer to the timespec to be set
 *
 * Returns the wall time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        set_normalized_timespec(ts,
                - (wall_to_monotonic.tv_sec + total_sleep_time),
                - wall_to_monotonic.tv_nsec);
}
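
/*
 * Worked example (editorial addition, numbers invented): continuing the
 * scenario sketched near the top of the file, wall_to_monotonic.tv_sec
 * = -1010 and total_sleep_time = 10 give
 *
 *	boottime = -(-1010 + 10) = 1000s,
 *
 * i.e. exactly the wall time at which the system booted.
 */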

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts: pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        ts->tv_sec += total_sleep_time;
}