/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, kernel timekeeping, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
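
/*
 * Worked example (editor's sketch, not from the original file): with
 * CONFIG_BASE_SMALL=0 we have TVR_BITS=8 and TVN_BITS=6.  Suppose
 * base->timer_jiffies == 1000 and timer->expires == 1300, so idx == 300.
 * That is >= TVR_SIZE (256) but < 1 << 14, so the timer lands in tv2 at
 * slot (1300 >> 8) & 63 == 5.  It stays there until timer_jiffies
 * reaches 1280, when cascade() re-hashes it into tv1 slot
 * 1300 & 255 == 20 for its final 20 jiffies.
 */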

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);

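/*
 * Illustrative usage sketch (editor's addition, compiled out; the
 * "my_dev" structure and functions are hypothetical, everything else is
 * the API defined in this file):
 */
#if 0
struct my_dev {
	struct timer_list timeout;
	int timed_out;
};

static void my_dev_timeout(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	dev->timed_out = 1;	/* softirq context: must not sleep */
}

static void my_dev_start(struct my_dev *dev)
{
	init_timer(&dev->timeout);
	dev->timeout.function = my_dev_timeout;
	dev->timeout.data = (unsigned long)dev;
	mod_timer(&dev->timeout, jiffies + HZ);	/* fire in ~1 second */
}

static void my_dev_stop(struct my_dev *dev)
{
	/* waits for a concurrently running handler as well */
	del_timer_sync(&dev->timeout);
}
#endif
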
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
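
/*
 * Editor's note on the retry loop above: __mod_timer() may briefly set
 * timer->base to NULL while moving a timer between CPU bases.  A
 * concurrent lock_timer_base() then spins until the new base is
 * published, and the base == timer->base recheck catches the case where
 * the timer migrated between the load and the lock acquisition.
 */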

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(timer->base != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

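/*
 * Editor's example: INDEX(N) picks the current slot of wheel level N+2.
 * With TVR_BITS=8, TVN_BITS=6 and base->timer_jiffies == 1280,
 * INDEX(0) == (1280 >> 8) & 63 == 5, so when tv1 wraps (index == 0 in
 * __run_timers() below) the timers in tv2 slot 5 are cascaded down.
 */
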
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);

	/*
	 * It can happen that other CPUs service timer IRQs and increment
	 * jiffies, but we have not yet got a local timer tick to process
	 * the timer wheels.  In that case, the expiry time can be before
	 * jiffies, but since the high-resolution timer here is relative to
	 * jiffies, the default expression when high-resolution timers are
	 * not active,
	 *
	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
	 *
	 * would falsely evaluate to true.  If that is the case, just
	 * return jiffies so that we can immediately fire the local timer
	 */
	if (time_before(expires, jiffies))
		return jiffies;

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif

/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);


/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
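
/*
 * Editor's note: cyc2ns() is the clocksource layer's fixed-point
 * conversion, essentially (cycle_delta * clock->mult) >> clock->shift.
 * For example, a 1 MHz counter with shift = 10 would use
 * mult = (NSEC_PER_SEC << 10) / 1000000 = 1024000, so a delta of one
 * cycle yields (1 * 1024000) >> 10 = 1000 ns, as expected.
 */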

/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and get_realtime_clock_ts().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	__get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using get_realtime_clock_ts()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	__get_realtime_clock_ts(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	clock->error = 0;
	ntp_clear();

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;
	new = clocksource_get_next();
	if (clock != new) {
		now = clocksource_read(new);
		nsec = __get_nsec_offset();
		timespec_add_ns(&xtime, nsec);

		clock = new;
		clock->cycle_last = now;
		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
		       clock->name);
		return 1;
	} else if (clock->update_callback) {
		return clock->update_callback();
	}
	return 0;
}
#else
#define change_clocksource() (0)
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->is_continuous;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, tick_nsec);
	clock->cycle_last = clocksource_read(clock);
	ntp_clear();
	write_sequnlock_irqrestore(&xtime_lock, flags);
}


static int timekeeping_suspended;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* restart the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
	set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
}
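
/*
 * Editor's note: since time advances as (cycles * clock->mult) >>
 * clock->shift, bumping clock->mult by one changes the rate by
 * 2^-shift ns per cycle - a very fine correction.  The common +1/0/-1
 * cases are handled inline above; larger errors fall through to
 * clocksource_bigadjust() to find a power-of-two sized step.
 */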

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* interpolator bits */
		time_interpolator_update(clock->xtime_interval
						>> clock->shift);
		/* increment the NTP state machine */
		update_ntp_one_tick();

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(clock, offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	/* check to see if there is a new clocksource to use */
	if (change_clocksource()) {
		clock->error = 0;
		clock->xtime_nsec = 0;
		clocksource_calculate_interval(clock, tick_nsec);
	}
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	active_tasks = count_active_tasks();
	for (count -= ticks; count < 0; count += LOAD_FREQ) {
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
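
/*
 * Editor's note: CALC_LOAD() is an exponentially-weighted moving average
 * in FIXED_1 (1<<11) fixed point; each LOAD_FREQ (5 sec) interval
 * computes load = load*exp/FIXED_1 + n*(FIXED_1-exp)/FIXED_1.
 * E.g. EXP_1 == 1884 because 1884/2048 ~= 0.92 ~= e^(-5/60), which
 * yields the classic 1-minute load average.
 */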

/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	wall_jiffies += ticks;
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

/**
 * sys_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

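/*
 * Editor's example for the mem_unit folding above: with 1 GB of RAM and
 * a 4096-byte mem_unit, the loop runs bitcount up to 12, then all fields
 * are shifted left by 12 and mem_unit becomes 1, i.e. sizes are reported
 * in bytes for 2.2.x binary compatibility.  If the doubling of mem_total
 * would overflow the 32-bit range, the values are left in mem_unit units
 * instead.
 */
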
/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;
	static char __devinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;
			memset(base, 0, sizeof(*base));
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	lockdep_set_class(&base->lock, base_lock_keys + cpu);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = new_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
		case TIME_SOURCE_FUNCTION:
			x = time_interpolator->addr;
			return x();

		case TIME_SOURCE_MMIO64:
			return readq_relaxed((void __iomem *)time_interpolator->addr);

		case TIME_SOURCE_MMIO32:
			return readl_relaxed((void __iomem *)time_interpolator->addr);

		default: return get_cycles();
	}
}

static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned. The use of cmpxchg here
			 * will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

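/*
 * Editor's note: register_time_interpolator() below sets
 * nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency, so GET_TI_NSECS()
 * is the matching fixed-point cycles-to-nanoseconds conversion: mask
 * the counter delta, multiply by nsec_per_cyc, then shift right again.
 */
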
unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset. A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward. Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */

	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}

static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	BUG_ON(ti->frequency == 0 || ti->mask == 0);

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);